diff --git a/.golangci.yml b/.golangci.yml
index e566cfa72df..3f730986a07 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -54,6 +54,10 @@ linters:
- common-false-positives
- legacy
- std-error-handling
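+ # labels.MetricName is deprecated upstream; exclude staticcheck's finding until the usages are migrated.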
+ rules:
+ - linters:
+ - staticcheck
+ text: 'labels.MetricName is deprecated: .*'
formatters:
enable:
- gofmt
diff --git a/go.mod b/go.mod
index c6a43f8f0eb..f0f92c77b2d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/cortexproject/cortex
-go 1.25.5
+go 1.25.3
require (
github.com/Masterminds/squirrel v1.5.4
@@ -22,7 +22,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v1.0.0
github.com/gorilla/mux v1.8.1
- github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
+ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/hashicorp/consul/api v1.32.0
github.com/hashicorp/go-cleanhttp v0.5.2
@@ -42,15 +42,15 @@ require (
github.com/prometheus/alertmanager v0.29.0
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_model v0.6.2
- github.com/prometheus/common v0.67.1
+ github.com/prometheus/common v0.67.4
// Prometheus maps version 3.x.y to tags v0.30x.y.
- github.com/prometheus/prometheus v0.306.0
+ github.com/prometheus/prometheus v0.308.0
github.com/segmentio/fasthash v1.0.3
github.com/sony/gobreaker v1.0.0
github.com/spf13/afero v1.11.0
github.com/stretchr/testify v1.11.1
github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488
- github.com/thanos-io/promql-engine v0.0.0-20251224085502-3988aa4704b5
+ github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674
github.com/thanos-io/thanos v0.40.1-0.20260112164636-49dde505913b
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5
@@ -60,14 +60,14 @@ require (
go.opentelemetry.io/contrib/propagators/aws v1.36.0
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel/bridge/opentracing v1.36.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/trace v1.38.0
go.uber.org/atomic v1.11.0
golang.org/x/net v0.48.0
golang.org/x/sync v0.19.0
- golang.org/x/time v0.12.0
+ golang.org/x/time v0.13.0
google.golang.org/grpc v1.76.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -75,8 +75,8 @@ require (
require (
github.com/VictoriaMetrics/fastcache v1.12.2
- github.com/aws/aws-sdk-go-v2 v1.39.2
- github.com/aws/aws-sdk-go-v2/config v1.31.12
+ github.com/aws/aws-sdk-go-v2 v1.39.6
+ github.com/aws/aws-sdk-go-v2/config v1.31.17
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1
github.com/axiomhq/hyperloglog v0.2.6
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
@@ -93,7 +93,7 @@ require (
github.com/prometheus/procfs v0.16.1
github.com/sercand/kuberesolver/v5 v5.1.1
github.com/tjhop/slog-gokit v0.1.4
- go.opentelemetry.io/collector/pdata v1.35.0
+ go.opentelemetry.io/collector/pdata v1.45.0
go.uber.org/automaxprocs v1.6.0
google.golang.org/protobuf v1.36.10
)
@@ -101,17 +101,17 @@ require (
require (
cel.dev/expr v0.24.0 // indirect
cloud.google.com/go v0.120.0 // indirect
- cloud.google.com/go/auth v0.16.2 // indirect
+ cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
cloud.google.com/go/storage v1.50.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect
@@ -120,24 +120,24 @@ require (
github.com/andybalholm/brotli v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.18.16 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 // indirect
- github.com/aws/smithy-go v1.23.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
+ github.com/aws/smithy-go v1.23.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/caio/go-tdigest v3.1.0+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
- github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
github.com/coder/quartz v0.2.1 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
@@ -182,15 +182,15 @@ require (
github.com/gobwas/glob v0.2.3 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/gogo/googleapis v1.4.1 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/btree v1.1.3 // indirect
- github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
+ github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
- github.com/googleapis/gax-go/v2 v2.14.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -208,7 +208,7 @@ require (
github.com/klauspost/crc32 v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
- github.com/knadh/koanf/v2 v2.2.1 // indirect
+ github.com/knadh/koanf/v2 v2.3.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
@@ -218,7 +218,7 @@ require (
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect
- github.com/miekg/dns v1.1.66 // indirect
+ github.com/miekg/dns v1.1.68 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
@@ -227,13 +227,13 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/ncw/swift v1.0.53 // indirect
github.com/oklog/run v1.2.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 // indirect
github.com/parquet-go/bitpack v1.0.0 // indirect
github.com/parquet-go/jsonlite v1.0.0 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
@@ -242,9 +242,9 @@ require (
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus-community/prom-label-proxy v0.11.1 // indirect
- github.com/prometheus/exporter-toolkit v0.14.1 // indirect
- github.com/prometheus/otlptranslator v0.0.2 // indirect
- github.com/prometheus/sigv4 v0.2.1 // indirect
+ github.com/prometheus/exporter-toolkit v0.15.0 // indirect
+ github.com/prometheus/otlptranslator v1.0.0 // indirect
+ github.com/prometheus/sigv4 v0.3.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/rantav/go-grpc-channelz v0.0.4 // indirect
github.com/redis/rueidis v1.0.61 // indirect
@@ -267,28 +267,25 @@ require (
go.mongodb.org/mongo-driver v1.17.4 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
- go.opentelemetry.io/collector/component v1.35.0 // indirect
- go.opentelemetry.io/collector/confmap v1.35.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
- go.opentelemetry.io/collector/consumer v1.35.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
- go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
- go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
- go.opentelemetry.io/collector/processor v1.35.0 // indirect
+ go.opentelemetry.io/collector/component v1.45.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.45.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.45.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.45.0 // indirect
+ go.opentelemetry.io/collector/pipeline v1.45.0 // indirect
+ go.opentelemetry.io/collector/processor v1.45.0 // indirect
go.opentelemetry.io/collector/semconv v0.128.0 // indirect
- go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
go.opentelemetry.io/contrib/propagators/autoprop v0.61.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.36.0 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 // indirect
go.opentelemetry.io/contrib/propagators/ot v1.36.0 // indirect
- go.opentelemetry.io/otel/log v0.12.2 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
@@ -297,23 +294,22 @@ require (
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
golang.org/x/crypto v0.46.0 // indirect
- golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+ golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/tools v0.39.0 // indirect
gonum.org/v1/gonum v0.16.0 // indirect
- google.golang.org/api v0.239.0 // indirect
- google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
+ google.golang.org/api v0.252.0 // indirect
+ google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
gopkg.in/telebot.v3 v3.3.8 // indirect
- k8s.io/apimachinery v0.33.1 // indirect
- k8s.io/client-go v0.33.1 // indirect
+ k8s.io/apimachinery v0.34.1 // indirect
+ k8s.io/client-go v0.34.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
)
// Using cortex fork of weaveworks/common
@@ -349,5 +345,7 @@ replace google.golang.org/grpc => google.golang.org/grpc v1.71.2
// See https://github.com/envoyproxy/go-control-plane/issues/1083 as this version introduces a checksum mismatch.
exclude github.com/envoyproxy/go-control-plane/envoy v1.32.3
-// TODO: update it in next PR
-replace github.com/prometheus/otlptranslator => github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
+// Required by the Prometheus v0.308+ config package for OTLP translation strategy types.
+replace github.com/prometheus/otlptranslator => github.com/prometheus/otlptranslator v1.0.0
+
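+// Temporary pin to a Thanos fork; assumed to carry changes needed for the Prometheus v0.308 bump that have not yet landed in thanos-io/thanos.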
+replace github.com/thanos-io/thanos => github.com/yeya24/thanos v0.2.2-0.20260203193035-ba37115033af
diff --git a/go.sum b/go.sum
index acf2ffb76d4..74c84f20f69 100644
--- a/go.sum
+++ b/go.sum
@@ -122,8 +122,8 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar
cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
-cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
-cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
+cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
+cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
@@ -780,14 +780,14 @@ cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvo
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -800,8 +800,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@@ -865,40 +865,44 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
-github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I=
-github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
-github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8=
-github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
+github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
+github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1 h1:MXUnj1TKjwQvotPPHFMfynlUljcpl5UccMrkiauKdWI=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1/go.mod h1:fe3UQAYwylCQRlGnihsqU/tTQkrc2nrW/IhWYwlW9vg=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0 h1:5qBb1XV/D18qtCHd3bmmxoVglI+fZ4QWuS/EB8kIXYQ=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0/go.mod h1:NDdDLLW5PtLLXN661gKcvJvqAH5OBXsfhMlmKVu1/pY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2 h1:oeICOX/+D0XXV1aMYJPXVe3CO37zYr7fB6HFgxchleU=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2/go.mod h1:rrhqfkXfa2DSNq0RyFhnnFEAyI+yJB4+2QlZKeJvMjs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 h1:34ojKW9OV123FZ6Q8Nua3Uwy6yVTcshZ+gLE4gpMDEs=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6/go.mod h1:sXXWh1G9LKKkNbuR0f0ZPd/IvDXlMGiag40opt4XEgY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4 h1:/1o2AYwHJojUDeMvQNyJiKZwcWCc3e4kQuTXqRLuThc=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4/go.mod h1:Nn2xx6HojGuNMtUFxxz/nyNLSS+tHMRsMhe3+W3wB5k=
github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 h1:c0hINjMfDQvQLJJxfNNcIaLYVLC7E0W2zOQOVVKLnnU=
github.com/aws/aws-sdk-go-v2/service/sns v1.38.5/go.mod h1:E427ZzdOMWh/4KtD48AGfbWLX14iyw9URVOdIwtv80o=
-github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s=
-github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8=
-github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
-github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
+github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/axiomhq/hyperloglog v0.2.6 h1:sRhvvF3RIXWQgAXaTphLp4yJiX4S0IN3MWTaAgZoRJw=
github.com/axiomhq/hyperloglog v0.2.6/go.mod h1:YjX/dQqCR/7QYX0g8mu8UZAjpIenz1FKM71UEsjFoTo=
github.com/baidubce/bce-sdk-go v0.9.230 h1:HzELBKiD7QAgYqZ1qHZexoI2A3Lo/6zYGQFvcUbS5cA=
@@ -923,8 +927,8 @@ github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6L
github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
-github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
@@ -989,12 +993,12 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0=
github.com/dhui/dktest v0.4.3/go.mod h1:zNK8IwktWzQRm6I/l2Wjp7MakiyaFWv4G1hjmodmMTs=
-github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
-github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
+github.com/digitalocean/godo v1.168.0 h1:mlORtUcPD91LQeJoznrH3XvfvgK3t8Wvrpph9giUT/Q=
+github.com/digitalocean/godo v1.168.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
-github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
+github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -1015,8 +1019,8 @@ github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 h1:oP4q0fw+fOSWn3
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6/go.mod h1:iL2twTeMvZnrg54ZoPDNfJaJaqy0xIQFuBdrLsmspwQ=
github.com/emersion/go-smtp v0.24.0 h1:g6AfoF140mvW0vLNPD/LuCBLEAdlxOjIXqbIkJIS6Wk=
github.com/emersion/go-smtp v0.24.0/go.mod h1:ZtRRkbTyp2XTHCA+BmyTFTrj8xY4I+b4McvHxCU2gsQ=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
@@ -1056,8 +1060,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
@@ -1173,8 +1177,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg=
github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU=
-github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y=
github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
@@ -1225,8 +1229,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -1272,8 +1276,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
-github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8 h1:ZI8gCoCjGzPsum4L21jHdQs8shFBIQih1TM9Rd/c+EQ=
+github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
@@ -1305,20 +1309,20 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38
github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
-github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
-github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
-github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo=
+github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
@@ -1330,16 +1334,16 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
-github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
-github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
+github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -1388,14 +1392,14 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
-github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af h1:ScAYf8O+9xTqTJPZH8MIlUfO+ak8cb31rW1aYJgS+jE=
+github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM=
-github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA=
+github.com/hetznercloud/hcloud-go/v2 v2.29.0 h1:LzNFw5XLBfftyu3WM1sdSLjOZBlWORtz2hgGydHaYV8=
+github.com/hetznercloud/hcloud-go/v2 v2.29.0/go.mod h1:XBU4+EDH2KVqu2KU7Ws0+ciZcX4ygukQl/J0L5GS8P8=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible h1:yNjwdvn9fwuN6Ouxr0xHM0cVu03YMUWUyFmu2van/Yc=
@@ -1409,8 +1413,6 @@ github.com/ionos-cloud/sdk-go/v6 v6.0.4 h1:4LoWeM7WtcDqYDjlntqQ3fD6XaENlCw2YqiVW
github.com/ionos-cloud/sdk-go/v6 v6.0.4/go.mod h1:UE3V/2DjnqD5doOqtjYqzJRMpI1RiwrvuuSEPX1pdnk=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -1448,8 +1450,8 @@ github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpb
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
-github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE=
-github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
+github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM=
+github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1480,8 +1482,8 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
-github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
+github.com/linode/linodego v1.60.0 h1:SgsebJFRCi+lSmYy+C40wmKZeJllGGm+W12Qw4+yVdI=
+github.com/linode/linodego v1.60.0/go.mod h1:1+Bt0oTz5rBnDOJbGhccxn7LYVytXTIIfAy7QYmijDs=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
@@ -1523,8 +1525,8 @@ github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQo
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
-github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
+github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
@@ -1559,8 +1561,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozillazg/go-httpheader v0.4.0 h1:aBn6aRXtFzyDLZ4VIRLsZbbJloagQfMnCiYgOq6hK4w=
@@ -1585,14 +1588,12 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 h1:D5aGQCErSCb4sKIHoZhgR4El6AzgviTRYlHUpbSFqDo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0/go.mod h1:ZjeRsA5oaVk89fg5D+iXStx2QncmhAvtGbdSumT07H4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 h1:6/j0Ta8ZJnmAFVEoC3aZ1Hs19RB4fHzlN6kOZhsBJqM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0/go.mod h1:VfA8xHz4xg7Fyj5bBsCDbOO3iVYzDn9wP/QFsjcAE5c=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 h1:iRNX/ueuad1psOVgnNkxuQmXxvF3ze5ZZCP66xKFk/w=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0/go.mod h1:bW09lo3WgHsPsZ1mgsJvby9wCefT5o13patM5phdfIU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -1676,12 +1677,12 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
-github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
-github.com/prometheus/exporter-toolkit v0.14.1 h1:uKPE4ewweVRWFainwvAcHs3uw15pjw2dk3I7b+aNo9o=
-github.com/prometheus/exporter-toolkit v0.14.1/go.mod h1:di7yaAJiaMkcjcz48f/u4yRPwtyuxTU5Jr4EnM2mhtQ=
-github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE=
-github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=
+github.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk=
+github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
+github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -1689,16 +1690,14 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/prometheus/prometheus v0.306.0 h1:Q0Pvz/ZKS6vVWCa1VSgNyNJlEe8hxdRlKklFg7SRhNw=
-github.com/prometheus/prometheus v0.306.0/go.mod h1:7hMSGyZHt0dcmZ5r4kFPJ/vxPQU99N5/BGwSPDxeZrQ=
-github.com/prometheus/sigv4 v0.2.1 h1:hl8D3+QEzU9rRmbKIRwMKRwaFGyLkbPdH5ZerglRHY0=
-github.com/prometheus/sigv4 v0.2.1/go.mod h1:ySk6TahIlsR2sxADuHy4IBFhwEjRGGsfbbLGhFYFj6Q=
+github.com/prometheus/prometheus v0.308.0 h1:kVh/5m1n6m4cSK9HYTDEbMxzuzCWyEdPdKSxFRxXj04=
+github.com/prometheus/prometheus v0.308.0/go.mod h1:xXYKzScyqyFHihpS0UsXpC2F3RA/CygOs7wb4mpdusE=
+github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
+github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rantav/go-grpc-channelz v0.0.4 h1:8GvqhA6siQVBsZYzal3yHhyJ9YiHEJx7RtSH2Jvm9Co=
github.com/rantav/go-grpc-channelz v0.0.4/go.mod h1:HodrRmnnH1zXcEEfK7EJrI23YMPMT7uvyAYkq2JUIcI=
-github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
-github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/redis/rueidis v1.0.61 h1:AkbCMeTyjFSQraGaNYncg3unMCTYGr6Y8WOqGhDOQu4=
github.com/redis/rueidis v1.0.61/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
@@ -1720,8 +1719,8 @@ github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfF
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35 h1:8xfn1RzeI9yoCUuEwDy08F+No6PcKZGEDOQ6hrRyLts=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35/go.mod h1:47B1d/YXmSAxlJxUJxClzHR6b3T4M1WyCvwENPQNBWc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM=
@@ -1752,11 +1751,12 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.3 h1:GsZGmRRc/3GJLmCUnsZswirr5wfLRrwavbnL/renOqg=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.3/go.mod h1:HBCXJGPgdRulplDzhrmwC+Dak9B/x0nzNtmOpu+1Ahg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -1787,10 +1787,8 @@ github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1
github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM=
github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 h1:khBsQLLRoF1KzXgTlwFZa6mC32bwYUUAu/AeP49V7UM=
github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0=
-github.com/thanos-io/promql-engine v0.0.0-20251224085502-3988aa4704b5 h1:hIg9M9TRha/qaLDdtwsTWsTDkewGHleVZaV2JsLY1vA=
-github.com/thanos-io/promql-engine v0.0.0-20251224085502-3988aa4704b5/go.mod h1:MOFN0M1nDMcWZg1t4iF39sOard/K4SWgO/HHSODeDIc=
-github.com/thanos-io/thanos v0.40.1-0.20260112164636-49dde505913b h1:KIQzAcxtdxi3PhrOpGP5t/TP7NBZqYvvcUvlu0q8fEQ=
-github.com/thanos-io/thanos v0.40.1-0.20260112164636-49dde505913b/go.mod h1:B9TgiYdhZdVxB1jXi4hRV+XDhiMmhHFykb8cxsZyWG8=
+github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674 h1:C5yBEuIZCaeLh90lcUGfnGepmwDfGGYLu6+w7RxR7og=
+github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674/go.mod h1:uzn40oZHPXvfdP498h+MiRL2fN7RF519gNaV3LyhChc=
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM=
@@ -1812,6 +1810,8 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
+github.com/yeya24/thanos v0.2.2-0.20260203193035-ba37115033af h1:7BchkWxg+wfa/21myRZ5IxGvfrC9aLX4LWKTmzbLp+I=
+github.com/yeya24/thanos v0.2.2-0.20260203193035-ba37115033af/go.mod h1:2EZLdOCRR9WgDsUWCLfwCphjMKRmDdnSR47x4CwUgOQ=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1849,53 +1849,49 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ=
-go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc=
-go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw=
-go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI=
-go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI=
-go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM=
-go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ=
-go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0=
-go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk=
-go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw=
-go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg=
-go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs=
-go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU=
-go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A=
-go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY=
-go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4=
-go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
-go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA=
-go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw=
-go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E=
-go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw=
-go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ=
-go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE=
-go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY=
-go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY=
-go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU=
-go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
-go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs=
-go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM=
-go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY=
-go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc=
-go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc=
-go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00=
+go.opentelemetry.io/collector/component v1.45.0 h1:gGFfVdbQ+1YuyUkJjWo85I7euu3H/CiupuzCHv8OgHA=
+go.opentelemetry.io/collector/component v1.45.0/go.mod h1:xoNFnRKE8Iv6gmlqAKgjayWraRnDcYLLgrPt9VgyO2g=
+go.opentelemetry.io/collector/component/componentstatus v0.139.0 h1:bQmkv1t7xW7uIDireE0a2Am4IMOprXm6zQr/qDtGCIA=
+go.opentelemetry.io/collector/component/componentstatus v0.139.0/go.mod h1:ibZOohpG0u081/NaT/jMCTsKwRbbwwxWrjZml+owpyM=
+go.opentelemetry.io/collector/component/componenttest v0.139.0 h1:x9Yu2eYhrHxdZ7sFXWtAWVjQ3UIraje557LgNurDC2I=
+go.opentelemetry.io/collector/component/componenttest v0.139.0/go.mod h1:S9cj+qkf9FgHMzjvlYsLwQKd9BiS7B7oLZvxvlENM/c=
+go.opentelemetry.io/collector/confmap v1.45.0 h1:7M7TTlpzX4r+mIzP/ARdxZBAvI4N+1V96phDane+akU=
+go.opentelemetry.io/collector/confmap v1.45.0/go.mod h1:AE1dnkjv0T9gptsh5+mTX0XFGdXx0n7JS4b7CcPfJ6Q=
+go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 h1:uQGpFuWnTCXqdMbI3gDSvkwU66/kF/aoC0kVMrit1EM=
+go.opentelemetry.io/collector/confmap/xconfmap v0.139.0/go.mod h1:d0ucaeNq2rojFRSQsCHF/gkT3cgBx5H2bVkPQMj57ck=
+go.opentelemetry.io/collector/consumer v1.45.0 h1:TtqXxgW+1GSCwdoohq0fzqnfqrZBKbfo++1XRj8mrEA=
+go.opentelemetry.io/collector/consumer v1.45.0/go.mod h1:pJzqTWBubwLt8mVou+G4/Hs23b3m425rVmld3LqOYpY=
+go.opentelemetry.io/collector/consumer/consumertest v0.139.0 h1:06mu43mMO7l49ASJ/GEbKgTWcV3py5zE/pKhNBZ1b3k=
+go.opentelemetry.io/collector/consumer/consumertest v0.139.0/go.mod h1:gaeCpRQGbCFYTeLzi+Z2cTDt40GiIa3hgIEgLEmiC78=
+go.opentelemetry.io/collector/consumer/xconsumer v0.139.0 h1:FhzDv+idglnrfjqPvnUw3YAEOkXSNv/FuNsuMiXQwcY=
+go.opentelemetry.io/collector/consumer/xconsumer v0.139.0/go.mod h1:yWrg/6FE/A4Q7eo/Mg++CzkBoSILHdeMnTlxV3serI0=
+go.opentelemetry.io/collector/featuregate v1.45.0 h1:D06hpf1F2KzKC+qXLmVv5e8IZpgCyZVeVVC8iOQxVmw=
+go.opentelemetry.io/collector/featuregate v1.45.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
+go.opentelemetry.io/collector/pdata v1.45.0 h1:q4XaISpeX640BcwXwb2mKOVw/gb67r22HjGWl8sbWsk=
+go.opentelemetry.io/collector/pdata v1.45.0/go.mod h1:5q2f001YhwMQO8QvpFhCOa4Cq/vtwX9W4HRMsXkU/nE=
+go.opentelemetry.io/collector/pdata/pprofile v0.139.0 h1:UA5TgFzYmRuJN3Wz0GR1efLUfjbs5rH0HTaxfASpTR8=
+go.opentelemetry.io/collector/pdata/pprofile v0.139.0/go.mod h1:sI5qHt+zzE2fhOWFdJIaiDBR0yGGjD4A4ZvDFU0tiHk=
+go.opentelemetry.io/collector/pdata/testdata v0.139.0 h1:n7O5bmLLhc3T6PePV4447fFcI/6QWcMhBsLtfCaD0do=
+go.opentelemetry.io/collector/pdata/testdata v0.139.0/go.mod h1:fxZ2VrhYLYBLHYBHC1XQRKZ6IJXwy0I2rPaaRlebYaY=
+go.opentelemetry.io/collector/pipeline v1.45.0 h1:sn9JJAEBe3XABTkWechMk0eH60QMBjjNe5V+ccBl+Uo=
+go.opentelemetry.io/collector/pipeline v1.45.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
+go.opentelemetry.io/collector/processor v1.45.0 h1:GH5km9BkDQOoz7MR0jzTnzB1Kb5vtKzPwa/wDmRg2dQ=
+go.opentelemetry.io/collector/processor v1.45.0/go.mod h1:wdlaTTC3wqlZIJP9R9/SLc2q7h+MFGARsxfjgPtwbes=
+go.opentelemetry.io/collector/processor/processortest v0.139.0 h1:30akUdruFNG7EDpayuBhXoX2lV+hcfxW9Gl3Z6MYHb0=
+go.opentelemetry.io/collector/processor/processortest v0.139.0/go.mod h1:RTll3UKHrqj/VS6RGjTHtuGIJzyLEwFhbw8KuCL3pjo=
+go.opentelemetry.io/collector/processor/xprocessor v0.139.0 h1:O9x9RF/OG8gZ+HrOcB4f6F1fjniby484xf2D8GBxgqU=
+go.opentelemetry.io/collector/processor/xprocessor v0.139.0/go.mod h1:hqGhEZ1/PftD/QHaYna0o1xAqZUsb7GhqpOiaTTDJnQ=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
-go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
-go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 h1:lREC4C0ilyP4WibDhQ7Gg2ygAQFP8oR07Fst/5cafwI=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0/go.mod h1:HfvuU0kW9HewH14VCOLImqKvUgONodURG7Alj/IrnGI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/contrib/propagators/autoprop v0.61.0 h1:cxOVDJ30qfzV27G5p9WMtJUB/3cXC0iL+u9EV1fSOws=
go.opentelemetry.io/contrib/propagators/autoprop v0.61.0/go.mod h1:Y+xiUbWetg65vAroDZcIzJ5wyPNWRH32EoIV9rIaa0g=
go.opentelemetry.io/contrib/propagators/aws v1.36.0 h1:Txhy/1LZIbbnutftc5pdU8Y9vOQuAkuIOFXuLsdDejs=
@@ -1912,16 +1908,12 @@ go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/bridge/opentracing v1.36.0 h1:GWGmcYhMCu6+K/Yz5KWSETU/esd/mkVGx+77uKtLjpk=
go.opentelemetry.io/otel/bridge/opentracing v1.36.0/go.mod h1:bW7xTHgtWSNqY8QjhqXzloXBkw3iQIa8uBqCF/0EUbc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
-go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
-go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
-go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg=
-go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4=
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
@@ -1940,8 +1932,14 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
-go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
+go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
+go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ=
+go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -2009,8 +2007,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
-golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
-golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -2355,8 +2353,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
-golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -2511,8 +2509,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ
google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
-google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=
-google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
+google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
+google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@@ -2663,8 +2661,8 @@ google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mR
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
-google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
-google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
@@ -2770,16 +2768,16 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
-k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
-k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
-k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
-k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4=
-k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA=
+k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
+k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
+k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
@@ -2837,12 +2835,12 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/integration/e2e/util.go b/integration/e2e/util.go
index b46339fb462..49795257ce9 100644
--- a/integration/e2e/util.go
+++ b/integration/e2e/util.go
@@ -26,7 +26,6 @@ import (
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/runutil"
- "go.uber.org/atomic"
cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
)
@@ -271,7 +270,6 @@ func CreateNHBlock(
seriesSize int64,
) (id ulid.ULID, err error) {
headOpts := tsdb.DefaultHeadOptions()
- headOpts.EnableNativeHistograms = *atomic.NewBool(true)
headOpts.ChunkDirRoot = filepath.Join(dir, "chunks")
headOpts.ChunkRange = 10000000000
random := rand.New(rand.NewSource(time.Now().UnixNano()))
diff --git a/integration/parse_query_api_test.go b/integration/parse_query_api_test.go
index 833fecb6db3..ccbcde84b66 100644
--- a/integration/parse_query_api_test.go
+++ b/integration/parse_query_api_test.go
@@ -67,8 +67,8 @@ func TestParseQueryAPIQuerier(t *testing.T) {
// check for AST contents.
require.Contains(t, string(parsed.Data), "\"op\":\"/\"")
- require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`)
- require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`)
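+ // The upgraded Prometheus parser emits an "anchored" field on vector selector AST nodes.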
+ require.Contains(t, string(parsed.Data), `"lhs":{"anchored":false,"matchers":[{"name":"__name__","type":"=","value":"foo"}]`)
+ require.Contains(t, string(parsed.Data), `"rhs":{"anchored":false,"matchers":[{"name":"__name__","type":"=","value":"bar"}]`)
}
func TestParseQueryAPIQueryFrontend(t *testing.T) {
@@ -129,6 +129,6 @@ func TestParseQueryAPIQueryFrontend(t *testing.T) {
// check for AST contents.
require.Contains(t, string(parsed.Data), "\"op\":\"/\"")
- require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`)
- require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`)
+ require.Contains(t, string(parsed.Data), `"lhs":{"anchored":false,"matchers":[{"name":"__name__","type":"=","value":"foo"}]`)
+ require.Contains(t, string(parsed.Data), `"rhs":{"anchored":false,"matchers":[{"name":"__name__","type":"=","value":"bar"}]`)
}
diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go
index 2b30e8aa587..79f7323998b 100644
--- a/pkg/api/handlers.go
+++ b/pkg/api/handlers.go
@@ -242,6 +242,8 @@ func NewQuerierHandler(
false,
querierCfg.LookbackDelta,
false,
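+ // Two extra arguments required by the upgraded Prometheus API constructor; left disabled/unset here.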
+ false,
+ nil,
)
// Let's clear all codecs to create the instrumented ones
api.ClearCodecs()
diff --git a/pkg/configs/api/api.go b/pkg/configs/api/api.go
index 029528ab784..bc06b9db505 100644
--- a/pkg/configs/api/api.go
+++ b/pkg/configs/api/api.go
@@ -19,6 +19,7 @@ import (
"github.com/gorilla/mux"
amconfig "github.com/prometheus/alertmanager/config"
amtemplate "github.com/prometheus/alertmanager/template"
+ "github.com/prometheus/common/model"
"github.com/cortexproject/cortex/pkg/configs/db"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
@@ -34,7 +35,8 @@ var (
// Config configures Configs API
type Config struct {
- Notifications NotificationsConfig `yaml:"notifications"`
+ Notifications NotificationsConfig `yaml:"notifications"`
+ NameValidationScheme model.ValidationScheme `yaml:"-"`
}
// NotificationsConfig configures Alertmanager notifications method.
@@ -182,7 +184,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, fmt.Sprintf("Invalid Alertmanager config: %v", err), http.StatusBadRequest)
return
}
- if err := validateRulesFiles(cfg); err != nil {
+ if err := validateRulesFiles(cfg, a.cfg.NameValidationScheme); err != nil {
level.Error(logger).Log("msg", "invalid rules", "err", err)
http.Error(w, fmt.Sprintf("Invalid rules: %v", err), http.StatusBadRequest)
return
@@ -242,8 +244,11 @@ func validateAlertmanagerConfig(cfg string, noCfg NotificationsConfig) error {
return nil
}
-func validateRulesFiles(c userconfig.Config) error {
- _, err := c.RulesConfig.Parse()
+func validateRulesFiles(c userconfig.Config, nameValidationScheme model.ValidationScheme) error {
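+ // Fall back to UTF-8 name validation when no scheme was configured.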
+ if nameValidationScheme == model.UnsetValidation {
+ nameValidationScheme = model.UTF8Validation
+ }
+ _, err := c.RulesConfig.Parse(nameValidationScheme)
return err
}
diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go
index d218c9788eb..562816d1dc5 100644
--- a/pkg/configs/userconfig/config.go
+++ b/pkg/configs/userconfig/config.go
@@ -7,6 +7,7 @@ import (
"github.com/go-kit/log"
"github.com/pkg/errors"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/rules"
@@ -229,13 +230,13 @@ func (c RulesConfig) Equal(o RulesConfig) bool {
}
// Parse parses and validates the content of the rule files in a RulesConfig
-// according to the passed rule format version.
-func (c RulesConfig) Parse() (map[string][]rules.Rule, error) {
+// according to the passed rule format version, using the given name validation scheme.
+func (c RulesConfig) Parse(nameValidationScheme model.ValidationScheme) (map[string][]rules.Rule, error) {
switch c.FormatVersion {
case RuleFormatV1:
return nil, fmt.Errorf("unsupported rule format version %v", c.FormatVersion)
case RuleFormatV2:
- return c.parseV2()
+ return c.parseV2(nameValidationScheme)
default:
return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
}
@@ -243,24 +244,25 @@ func (c RulesConfig) Parse() (map[string][]rules.Rule, error) {
// ParseFormatted returns the rulefmt map of a users rules configs. It allows
// for rules to be mapped to disk and read by the prometheus rules manager.
-func (c RulesConfig) ParseFormatted() (map[string]rulefmt.RuleGroups, error) {
+// It uses the given name validation scheme for validation.
+func (c RulesConfig) ParseFormatted(nameValidationScheme model.ValidationScheme) (map[string]rulefmt.RuleGroups, error) {
switch c.FormatVersion {
case RuleFormatV1:
return nil, fmt.Errorf("unsupported rule format version %v", c.FormatVersion)
case RuleFormatV2:
- return c.parseV2Formatted()
+ return c.parseV2Formatted(nameValidationScheme)
default:
return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
}
}
-// parseV2 parses and validates the content of the rule files in a RulesConfig
+// parseV2Formatted parses and validates the content of the rule files in a RulesConfig
// according to the Prometheus 2.x rule format.
-func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) {
+func (c RulesConfig) parseV2Formatted(nameValidationScheme model.ValidationScheme) (map[string]rulefmt.RuleGroups, error) {
ruleMap := map[string]rulefmt.RuleGroups{}
for fn, content := range c.Files {
- rgs, errs := rulefmt.Parse([]byte(content), false)
+ rgs, errs := rulefmt.Parse([]byte(content), false, nameValidationScheme)
for _, err := range errs { // return just the first error, if any
return nil, err
}
@@ -283,11 +285,11 @@ func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) {
// would otherwise have to ensure to convert the rulefmt.RuleGroup only exactly
// once, not for every evaluation (or risk losing alert pending states). So
// it's probably better to just return a set of rules.Rule here.
-func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) {
+func (c RulesConfig) parseV2(nameValidationScheme model.ValidationScheme) (map[string][]rules.Rule, error) {
groups := map[string][]rules.Rule{}
for fn, content := range c.Files {
- rgs, errs := rulefmt.Parse([]byte(content), false)
+ rgs, errs := rulefmt.Parse([]byte(content), false, nameValidationScheme)
if len(errs) > 0 {
return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0])
}
diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go
index d17dae574d0..b09038a6c61 100644
--- a/pkg/configs/userconfig/config_test.go
+++ b/pkg/configs/userconfig/config_test.go
@@ -129,7 +129,7 @@ groups:
},
} {
t.Run(strconv.Itoa(i), func(t *testing.T) {
- rules, err := tc.cfg.Parse()
+ rules, err := tc.cfg.Parse(model.UTF8Validation)
if tc.err != nil {
require.Equal(t, err, tc.err)
} else {
@@ -193,7 +193,7 @@ func TestParseFormatted(t *testing.T) {
},
} {
t.Run(strconv.Itoa(i), func(t *testing.T) {
- rules, err := tc.cfg.ParseFormatted()
+ rules, err := tc.cfg.ParseFormatted(model.UTF8Validation)
if tc.err != nil {
require.Equal(t, err, tc.err)
} else {
diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go
index 3b56b6a8fb8..819b430badb 100644
--- a/pkg/cortex/modules.go
+++ b/pkg/cortex/modules.go
@@ -626,7 +626,7 @@ func (t *Cortex) initRulerStorage() (serv services.Service, err error) {
return
}
- t.RulerStorage, err = ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, rules.FileLoader{}, util_log.Logger, prometheus.DefaultRegisterer)
+ t.RulerStorage, err = ruler.NewRuleStore(context.Background(), t.Cfg.RulerStorage, t.Overrides, rules.FileLoader{}, util_log.Logger, prometheus.DefaultRegisterer, t.Cfg.NameValidationScheme)
return
}
@@ -651,6 +651,7 @@ func (t *Cortex) initRuler() (serv services.Service, err error) {
t.Cfg.Ruler.FrontendTimeout = t.Cfg.Querier.Timeout
t.Cfg.Ruler.PrometheusHTTPPrefix = t.Cfg.API.PrometheusHTTPPrefix
t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort
+ t.Cfg.Ruler.NameValidationScheme = t.Cfg.NameValidationScheme
metrics := ruler.NewRuleEvalMetrics(t.Cfg.Ruler, prometheus.DefaultRegisterer)
rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer)
@@ -724,6 +725,7 @@ func (t *Cortex) initConfig() (serv services.Service, err error) {
return
}
+ t.Cfg.Configs.API.NameValidationScheme = t.Cfg.NameValidationScheme
t.ConfigAPI = configAPI.New(t.ConfigDB, t.Cfg.Configs.API)
t.ConfigAPI.RegisterRoutes(t.Server.HTTP)
return services.NewIdleService(nil, func(_ error) error {
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index cd2d742b5ab..e7a00099e05 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -4188,11 +4188,12 @@ func TestDistributor_Push_Relabel(t *testing.T) {
expectedSeries: labels.FromStrings("__name__", "foo", "cluster", "two"),
metricRelabelConfigs: []*relabel.Config{
{
- SourceLabels: []model.LabelName{"cluster"},
- Action: relabel.DefaultRelabelConfig.Action,
- Regex: relabel.DefaultRelabelConfig.Regex,
- TargetLabel: "cluster",
- Replacement: "two",
+ SourceLabels: []model.LabelName{"cluster"},
+ Action: relabel.DefaultRelabelConfig.Action,
+ Regex: relabel.DefaultRelabelConfig.Regex,
+ TargetLabel: "cluster",
+ Replacement: "two",
+ NameValidationScheme: model.LegacyValidation,
},
},
},
@@ -4205,9 +4206,10 @@ func TestDistributor_Push_Relabel(t *testing.T) {
expectedSeries: labels.FromStrings("__name__", "bar", "cluster", "two"),
metricRelabelConfigs: []*relabel.Config{
{
- SourceLabels: []model.LabelName{"__name__"},
- Action: relabel.Drop,
- Regex: relabel.MustNewRegexp("(foo)"),
+ SourceLabels: []model.LabelName{"__name__"},
+ Action: relabel.Drop,
+ Regex: relabel.MustNewRegexp("(foo)"),
+ NameValidationScheme: model.LegacyValidation,
},
},
},
@@ -4375,9 +4377,10 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing
t.Parallel()
metricRelabelConfigs := []*relabel.Config{
{
- SourceLabels: []model.LabelName{"__name__"},
- Action: relabel.Drop,
- Regex: relabel.MustNewRegexp("(foo)"),
+ SourceLabels: []model.LabelName{"__name__"},
+ Action: relabel.Drop,
+ Regex: relabel.MustNewRegexp("(foo)"),
+ NameValidationScheme: model.LegacyValidation,
},
}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index a93801d523f..badf51146ad 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -2896,7 +2896,6 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
OutOfOrderTimeWindow: time.Duration(oooTimeWindow).Milliseconds(),
OutOfOrderCapMax: i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax,
EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks.
- EnableNativeHistograms: true, // Always enable Native Histograms. Gate keeping is done though a per-tenant limit at ingestion.
BlockChunkQuerierFunc: i.blockChunkQuerierFunc(userID),
}, nil)
if err != nil {
diff --git a/pkg/querier/dummy.go b/pkg/querier/dummy.go
index 16dc7388763..11dbefaa637 100644
--- a/pkg/querier/dummy.go
+++ b/pkg/querier/dummy.go
@@ -3,6 +3,7 @@ package querier
import (
"net/url"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
)
@@ -25,6 +26,11 @@ func (DummyTargetRetriever) TargetsDroppedCounts() map[string]int {
return map[string]int{}
}
+// ScrapePoolConfig implements TargetRetriever.
+func (DummyTargetRetriever) ScrapePoolConfig(string) (*config.ScrapeConfig, error) {
+ return nil, nil
+}
+
// DummyAlertmanagerRetriever implements AlertmanagerRetriever.
type DummyAlertmanagerRetriever struct{}
diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go
index daafd709a8d..03b11368d30 100644
--- a/pkg/querier/error_translate_queryable_test.go
+++ b/pkg/querier/error_translate_queryable_test.go
@@ -191,6 +191,8 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine promql.QueryE
false,
5*time.Minute,
false,
+ false,
+ nil,
)
promRouter := route.New().WithPrefix("/api/v1")
diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go
index 0b8d591c2a4..9021faebb75 100644
--- a/pkg/querier/stats_renderer_test.go
+++ b/pkg/querier/stats_renderer_test.go
@@ -93,6 +93,8 @@ func Test_StatsRenderer(t *testing.T) {
false,
5*time.Minute,
false,
+ false,
+ nil,
)
promRouter := route.New().WithPrefix("/api/v1")
diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go
index 86dafc528ed..177782e607b 100644
--- a/pkg/ruler/api.go
+++ b/pkg/ruler/api.go
@@ -142,7 +142,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
}
state := strings.ToLower(req.URL.Query().Get("state"))
- if state != "" && state != firingStateFilter && state != pendingStateFilter && state != inactiveStateFilter {
+ if state != "" && state != firingStateFilter && state != pendingStateFilter && state != inactiveStateFilter && state != unknownStateFilter {
util_api.RespondError(logger, w, v1.ErrBadData, fmt.Sprintf("unsupported state value %q", state), http.StatusBadRequest)
return
}
diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go
index 2485f5b927d..d37084a3e47 100644
--- a/pkg/ruler/api_test.go
+++ b/pkg/ruler/api_test.go
@@ -141,7 +141,7 @@ func Test_stripEvaluationFields(t *testing.T) {
"lastEvaluation": "2025-01-24T12:04:26.441418-08:00",
"name": "UP_ALERT",
"query": "up < 1",
- "state": "inactive",
+ "state": "unknown",
"type": "alerting"
}
]
@@ -191,7 +191,7 @@ func Test_stripEvaluationFields(t *testing.T) {
"lastEvaluation": "0001-01-01T00:00:00Z",
"name": "UP_ALERT",
"query": "up < 1",
- "state": "inactive",
+ "state": "unknown",
"type": "alerting"
}
]
@@ -291,7 +291,7 @@ func TestRuler_rules(t *testing.T) {
&alertingRule{
Name: "UP_ALERT",
Query: "up < 1",
- State: "inactive",
+ State: "unknown",
Health: "unknown",
Type: "alerting",
Alerts: []*Alert{},
@@ -350,7 +350,7 @@ func TestRuler_rules_special_characters(t *testing.T) {
&alertingRule{
Name: "UP_ALERT",
Query: "up < 1",
- State: "inactive",
+ State: "unknown",
Health: "unknown",
Type: "alerting",
Alerts: []*Alert{},
@@ -408,7 +408,7 @@ func TestRuler_rules_limit(t *testing.T) {
&alertingRule{
Name: "UP_ALERT",
Query: "up < 1",
- State: "inactive",
+ State: "unknown",
Health: "unknown",
Type: "alerting",
Alerts: []*Alert{},
diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go
index 49d259956ac..d44a0d95829 100644
--- a/pkg/ruler/manager.go
+++ b/pkg/ruler/manager.go
@@ -362,7 +362,7 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string, userManag
}
return resp, err
},
- }, logger, userManagerRegistry, r.notifiersDiscoveryMetrics)
+ }, r.cfg.NameValidationScheme, logger, userManagerRegistry, r.notifiersDiscoveryMetrics)
n.run()
@@ -445,7 +445,7 @@ func (r *DefaultMultiTenantManager) Stop() {
r.userExternalLabels.cleanup()
}
-func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error {
+func (m *DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error {
var errs []error
if g.Name == "" {
@@ -478,7 +478,7 @@ func (*DefaultMultiTenantManager) ValidateRuleGroup(g rulefmt.RuleGroup) []error
}
// Validate other rule fields using Prometheus validation
- for _, err := range r.Validate(rulefmt.RuleNode{}) {
+ for _, err := range r.Validate(rulefmt.RuleNode{}, m.cfg.NameValidationScheme) {
// Skip expression validation errors since we handle them above
if !strings.Contains(err.Error(), "could not parse expression") {
var ruleName string
diff --git a/pkg/ruler/notifier.go b/pkg/ruler/notifier.go
index e1c238f1295..a904946dc0b 100644
--- a/pkg/ruler/notifier.go
+++ b/pkg/ruler/notifier.go
@@ -45,11 +45,11 @@ type rulerNotifier struct {
logger gklog.Logger
}
-func newRulerNotifier(o *notifier.Options, l gklog.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics) *rulerNotifier {
+func newRulerNotifier(o *notifier.Options, nameValidationScheme model.ValidationScheme, l gklog.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics) *rulerNotifier {
sdCtx, sdCancel := context.WithCancel(context.Background())
slogger := util_log.GoKitLogToSlog(l)
return &rulerNotifier{
- notifier: notifier.NewManager(o, slogger),
+ notifier: notifier.NewManager(o, nameValidationScheme, slogger),
sdCancel: sdCancel,
sdManager: discovery.NewManager(sdCtx, slogger, registerer, sdMetrics),
logger: l,
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index a965b33f39d..6135ddb083e 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -20,6 +20,7 @@ import (
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
@@ -84,6 +85,7 @@ const (
firingStateFilter string = "firing"
pendingStateFilter string = "pending"
inactiveStateFilter string = "inactive"
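+ // unknownStateFilter matches alerting rules that have not been evaluated yet.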
+ unknownStateFilter string = "unknown"
unknownHealthFilter string = "unknown"
okHealthFilter string = "ok"
@@ -177,6 +179,9 @@ type Config struct {
LivenessCheckTimeout time.Duration `yaml:"liveness_check_timeout"`
ThanosEngine engine.ThanosEngineConfig `yaml:"thanos_engine"`
+
+ // NameValidationScheme is the scheme for validating metric and label names (set from root config).
+ NameValidationScheme model.ValidationScheme `yaml:"-"`
}
// Validate config and returns error on failure
diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go
index c2464f3a297..e5738945cb4 100644
--- a/pkg/ruler/ruler_test.go
+++ b/pkg/ruler/ruler_test.go
@@ -26,6 +26,7 @@ import (
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
@@ -82,6 +83,7 @@ func defaultRulerConfig(t testing.TB) Config {
cfg.Ring.FinalSleep = 0
cfg.Ring.ReplicationFactor = 1
cfg.EnableQueryStats = false
+ cfg.NameValidationScheme = model.UTF8Validation
return cfg
}
@@ -902,7 +904,7 @@ func TestGetRules(t *testing.T) {
"No Sharding with Alert state filter for inactive alerts": {
sharding: false,
rulesRequest: RulesRequest{
- State: inactiveStateFilter,
+ State: unknownStateFilter, // Newer Prometheus reports "unknown" for alerts that have not yet been evaluated
MaxRuleGroups: -1,
},
rulerStateMap: rulerStateMapAllActive,
@@ -1613,7 +1615,8 @@ func TestGetRulesFromBackup(t *testing.T) {
require.Equal(t, aRule.Rule.Alert, bRule.Rule.Alert)
require.Equal(t, aRule.Rule.For, bRule.Rule.For)
require.Equal(t, aRule.Rule.KeepFiringFor, bRule.Rule.KeepFiringFor)
- require.Equal(t, aRule.State, bRule.State)
+ // Live rules may report "unknown", backup rules use "inactive"; both are valid for unevaluated alerts
+ require.True(t, (aRule.State == bRule.State) || ((aRule.State == "unknown" || aRule.State == "inactive") && (bRule.State == "unknown" || bRule.State == "inactive")), "State: %s vs %s", aRule.State, bRule.State)
require.Equal(t, aRule.Alerts, bRule.Alerts)
} else {
require.Equal(t, aRule.Rule.Record, bRule.Rule.Record)
@@ -1840,7 +1843,8 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) {
require.Equal(t, aRule.Rule.Alert, bRule.Rule.Alert)
require.Equal(t, aRule.Rule.For, bRule.Rule.For)
require.Equal(t, aRule.Rule.KeepFiringFor, bRule.Rule.KeepFiringFor)
- require.Equal(t, aRule.State, bRule.State)
+ // Live rules may report "unknown", backup rules use "inactive"; both are valid for unevaluated alerts
+ require.True(t, (aRule.State == bRule.State) || ((aRule.State == "unknown" || aRule.State == "inactive") && (bRule.State == "unknown" || bRule.State == "inactive")), "State: %s vs %s", aRule.State, bRule.State)
require.Equal(t, aRule.Alerts, bRule.Alerts)
} else {
require.Equal(t, aRule.Rule.Record, bRule.Rule.Record)
diff --git a/pkg/ruler/rulestore/configdb/store.go b/pkg/ruler/rulestore/configdb/store.go
index 50f931f9279..3ef63478e0f 100644
--- a/pkg/ruler/rulestore/configdb/store.go
+++ b/pkg/ruler/rulestore/configdb/store.go
@@ -4,6 +4,8 @@ import (
"context"
"errors"
+ "github.com/prometheus/common/model"
+
"github.com/cortexproject/cortex/pkg/configs/client"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
"github.com/cortexproject/cortex/pkg/ruler/rulespb"
@@ -16,9 +18,10 @@ const (
// ConfigRuleStore is a concrete implementation of RuleStore that sources rules from the config service
type ConfigRuleStore struct {
- configClient client.Client
- since userconfig.ID
- ruleGroupList map[string]rulespb.RuleGroupList
+ configClient client.Client
+ nameValidationScheme model.ValidationScheme
+ since userconfig.ID
+ ruleGroupList map[string]rulespb.RuleGroupList
}
func (c *ConfigRuleStore) SupportsModifications() bool {
@@ -26,11 +29,12 @@ func (c *ConfigRuleStore) SupportsModifications() bool {
}
// NewConfigRuleStore constructs a ConfigRuleStore
-func NewConfigRuleStore(c client.Client) *ConfigRuleStore {
+func NewConfigRuleStore(c client.Client, nameValidationScheme model.ValidationScheme) *ConfigRuleStore {
return &ConfigRuleStore{
- configClient: c,
- since: 0,
- ruleGroupList: make(map[string]rulespb.RuleGroupList),
+ configClient: c,
+ nameValidationScheme: nameValidationScheme,
+ since: 0,
+ ruleGroupList: make(map[string]rulespb.RuleGroupList),
}
}
@@ -60,7 +64,7 @@ func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rul
delete(c.ruleGroupList, user)
continue
}
- rMap, err := cfg.Config.ParseFormatted()
+ rMap, err := cfg.Config.ParseFormatted(c.nameValidationScheme)
if err != nil {
return nil, err
}
diff --git a/pkg/ruler/rulestore/configdb/store_test.go b/pkg/ruler/rulestore/configdb/store_test.go
index 9cec9d1928d..a98a1cfa38b 100644
--- a/pkg/ruler/rulestore/configdb/store_test.go
+++ b/pkg/ruler/rulestore/configdb/store_test.go
@@ -6,11 +6,11 @@ import (
"testing"
time "time"
- "github.com/cortexproject/cortex/pkg/configs/userconfig"
-
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/cortexproject/cortex/pkg/configs/client"
+ "github.com/cortexproject/cortex/pkg/configs/userconfig"
)
var zeroTime time.Time
@@ -34,7 +34,7 @@ func Test_ConfigRuleStoreError(t *testing.T) {
err: fmt.Errorf("Error"),
}
- store := NewConfigRuleStore(mock)
+ store := NewConfigRuleStore(mock, model.UTF8Validation)
_, err := store.ListAllRuleGroups(context.Background())
assert.Equal(t, mock.err, err, "Unexpected error returned")
@@ -53,7 +53,7 @@ func Test_ConfigRuleStoreReturn(t *testing.T) {
err: nil,
}
- store := NewConfigRuleStore(mock)
+ store := NewConfigRuleStore(mock, model.UTF8Validation)
rules, _ := store.ListAllRuleGroups(context.Background())
assert.Equal(t, 1, len(rules["user"]))
@@ -72,7 +72,7 @@ func Test_ConfigRuleStoreDelete(t *testing.T) {
err: nil,
}
- store := NewConfigRuleStore(mock)
+ store := NewConfigRuleStore(mock, model.UTF8Validation)
_, _ = store.ListAllRuleGroups(context.Background())
mock.cfgs["user"] = userconfig.VersionedRulesConfig{
@@ -98,7 +98,7 @@ func Test_ConfigRuleStoreAppend(t *testing.T) {
err: nil,
}
- store := NewConfigRuleStore(mock)
+ store := NewConfigRuleStore(mock, model.UTF8Validation)
_, _ = store.ListAllRuleGroups(context.Background())
delete(mock.cfgs, "user")
@@ -135,7 +135,7 @@ func Test_ConfigRuleStoreSinceSet(t *testing.T) {
err: nil,
}
- store := NewConfigRuleStore(mock)
+ store := NewConfigRuleStore(mock, model.UTF8Validation)
_, _ = store.ListAllRuleGroups(context.Background())
assert.Equal(t, userconfig.ID(100), store.since)
diff --git a/pkg/ruler/rulestore/local/local.go b/pkg/ruler/rulestore/local/local.go
index 88835fcb3ef..b196ad6b3ea 100644
--- a/pkg/ruler/rulestore/local/local.go
+++ b/pkg/ruler/rulestore/local/local.go
@@ -7,6 +7,7 @@ import (
"path/filepath"
"github.com/pkg/errors"
+ "github.com/prometheus/common/model"
promRules "github.com/prometheus/prometheus/rules"
"github.com/cortexproject/cortex/pkg/ruler/rulespb"
@@ -30,18 +31,20 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
//
// cfg.Directory / userID / namespace
type Client struct {
- cfg Config
- loader promRules.GroupLoader
+ cfg Config
+ loader promRules.GroupLoader
+ nameValidationScheme model.ValidationScheme
}
-func NewLocalRulesClient(cfg Config, loader promRules.GroupLoader) (*Client, error) {
+func NewLocalRulesClient(cfg Config, loader promRules.GroupLoader, nameValidationScheme model.ValidationScheme) (*Client, error) {
if cfg.Directory == "" {
return nil, errors.New("directory required for local rules config")
}
return &Client{
- cfg: cfg,
- loader: loader,
+ cfg: cfg,
+ loader: loader,
+ nameValidationScheme: nameValidationScheme,
}, nil
}
@@ -175,7 +178,7 @@ func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (
func (l *Client) loadAllRulesGroupsForUserAndNamespace(_ context.Context, userID string, namespace string) (rulespb.RuleGroupList, error) {
filename := filepath.Join(l.cfg.Directory, userID, namespace)
- rulegroups, allErrors := l.loader.Load(filename, false)
+ rulegroups, allErrors := l.loader.Load(filename, false, l.nameValidationScheme)
if len(allErrors) > 0 {
return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename)
}
diff --git a/pkg/ruler/rulestore/local/local_test.go b/pkg/ruler/rulestore/local/local_test.go
index 19a6b44b6ec..8cd5345d9b6 100644
--- a/pkg/ruler/rulestore/local/local_test.go
+++ b/pkg/ruler/rulestore/local/local_test.go
@@ -66,7 +66,7 @@ func TestClient_LoadAllRuleGroups(t *testing.T) {
client, err := NewLocalRulesClient(Config{
Directory: dir,
- }, promRules.FileLoader{})
+ }, promRules.FileLoader{}, model.UTF8Validation)
require.NoError(t, err)
ctx := context.Background()
diff --git a/pkg/ruler/storage.go b/pkg/ruler/storage.go
index 498b32d71da..1106f38e8d7 100644
--- a/pkg/ruler/storage.go
+++ b/pkg/ruler/storage.go
@@ -5,6 +5,7 @@ import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
promRules "github.com/prometheus/prometheus/rules"
"github.com/cortexproject/cortex/pkg/configs/client"
@@ -16,7 +17,7 @@ import (
)
// NewRuleStore returns a rule store backend client based on the provided cfg.
-func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, loader promRules.GroupLoader, logger log.Logger, reg prometheus.Registerer) (rulestore.RuleStore, error) {
+func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.TenantConfigProvider, loader promRules.GroupLoader, logger log.Logger, reg prometheus.Registerer, nameValidationScheme model.ValidationScheme) (rulestore.RuleStore, error) {
if cfg.Backend == configdb.Name {
c, err := client.New(cfg.ConfigDB)
@@ -24,11 +25,11 @@ func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.
return nil, err
}
- return configdb.NewConfigRuleStore(c), nil
+ return configdb.NewConfigRuleStore(c, nameValidationScheme), nil
}
if cfg.Backend == local.Name {
- return local.NewLocalRulesClient(cfg.Local, loader)
+ return local.NewLocalRulesClient(cfg.Local, loader, nameValidationScheme)
}
bucketClient, err := bucket.NewClient(ctx, cfg.Config, nil, "ruler-storage", logger, reg)
diff --git a/pkg/util/push/appender.go b/pkg/util/push/appender.go
new file mode 100644
index 00000000000..11d710aba90
--- /dev/null
+++ b/pkg/util/push/appender.go
@@ -0,0 +1,115 @@
+package push
+
+import (
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
+)
+
+// collectingAppender implements prometheusremotewrite.CombinedAppender by
+// accumulating samples, histograms, exemplars and metadata into buffers that
+// can be converted to prompb TimeSeries and MetricMetadata.
+type collectingAppender struct {
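+ // series is keyed by the canonical labels string so repeated appends to the same series accumulate.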
+ series map[string]*collectedSeries
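+ // metadata is keyed by metric family name.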
+ metadata map[string]prompb.MetricMetadata
+}
+
+type collectedSeries struct {
+ labels labels.Labels
+ samples []prompb.Sample
+ exemplars []prompb.Exemplar
+ histograms []prompb.Histogram
+}
+
+// newCollectingAppender returns a new collectingAppender that implements
+// prometheusremotewrite.CombinedAppender and accumulates all appended data
+// for later retrieval via TimeSeries() and Metadata().
+func newCollectingAppender() *collectingAppender {
+ return &collectingAppender{
+ series: make(map[string]*collectedSeries),
+ metadata: make(map[string]prompb.MetricMetadata),
+ }
+}
+
+func (c *collectingAppender) AppendSample(ls labels.Labels, meta prometheusremotewrite.Metadata, ct, t int64, v float64, es []exemplar.Exemplar) error {
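+ // The created timestamp (ct) is not propagated; only the sample timestamp is kept.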
+ c.recordMetadata(meta)
+ s := c.getOrCreateSeries(ls)
+ s.samples = append(s.samples, prompb.Sample{Value: v, Timestamp: t})
+ for _, e := range es {
+ s.exemplars = append(s.exemplars, exemplarToProm(e))
+ }
+ return nil
+}
+
+func (c *collectingAppender) AppendHistogram(ls labels.Labels, meta prometheusremotewrite.Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error {
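+ // A nil histogram carries no data; skip it rather than emit an empty entry.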
+ if h == nil {
+ return nil
+ }
+ c.recordMetadata(meta)
+ s := c.getOrCreateSeries(ls)
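+ // Only integer histograms are converted here; float histograms would need prompb.FromFloatHistogram.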
+ s.histograms = append(s.histograms, prompb.FromIntHistogram(t, h))
+ for _, e := range es {
+ s.exemplars = append(s.exemplars, exemplarToProm(e))
+ }
+ return nil
+}
+
+func (c *collectingAppender) recordMetadata(meta prometheusremotewrite.Metadata) {
+ key := meta.MetricFamilyName
+ if key == "" {
+ return
+ }
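+ // First write wins: later metadata for the same family is ignored.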
+ if _, ok := c.metadata[key]; ok {
+ return
+ }
+ c.metadata[key] = prompb.MetricMetadata{
+ Type: prompb.FromMetadataType(meta.Type),
+ MetricFamilyName: meta.MetricFamilyName,
+ Help: meta.Help,
+ Unit: meta.Unit,
+ }
+}
+
+func (c *collectingAppender) getOrCreateSeries(ls labels.Labels) *collectedSeries {
+ key := ls.String()
+ if s, ok := c.series[key]; ok {
+ return s
+ }
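+ // Rebuild the label set so the stored series does not alias the caller's labels.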
+ s := &collectedSeries{labels: labels.NewBuilder(ls).Labels()}
+ c.series[key] = s
+ return s
+}
+
+func exemplarToProm(e exemplar.Exemplar) prompb.Exemplar {
+ return prompb.Exemplar{
+ Labels: prompb.FromLabels(e.Labels, nil),
+ Value: e.Value,
+ Timestamp: e.Ts,
+ }
+}
+
+// TimeSeries returns the accumulated time series in prompb form.
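+// The order of the returned series is unspecified (map iteration order).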
+func (c *collectingAppender) TimeSeries() []prompb.TimeSeries {
+ out := make([]prompb.TimeSeries, 0, len(c.series))
+ for _, s := range c.series {
+ ts := prompb.TimeSeries{
+ Labels: prompb.FromLabels(s.labels, nil),
+ Samples: s.samples,
+ Exemplars: s.exemplars,
+ Histograms: s.histograms,
+ }
+ out = append(out, ts)
+ }
+ return out
+}
+
+// Metadata returns the accumulated metric metadata in prompb form.
+func (c *collectingAppender) Metadata() []prompb.MetricMetadata {
+ out := make([]prompb.MetricMetadata, 0, len(c.metadata))
+ for _, m := range c.metadata {
+ out = append(out, m)
+ }
+ return out
+}
diff --git a/pkg/util/push/appender_test.go b/pkg/util/push/appender_test.go
new file mode 100644
index 00000000000..e47ee7eebbe
--- /dev/null
+++ b/pkg/util/push/appender_test.go
@@ -0,0 +1,231 @@
+package push
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCollectingAppender_AppendSample_SingleSeries(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "cpu_usage", "job", "test")
+
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeGauge, Help: "CPU usage", Unit: "percent"},
+ MetricFamilyName: "cpu_usage",
+ }
+
+ err := c.AppendSample(ls, meta, 0, 1000, 42.5, nil)
+ require.NoError(t, err)
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 1)
+ assert.Equal(t, []prompb.Label{{Name: "__name__", Value: "cpu_usage"}, {Name: "job", Value: "test"}}, ts[0].Labels)
+ require.Len(t, ts[0].Samples, 1)
+ assert.Equal(t, 42.5, ts[0].Samples[0].Value)
+ assert.Equal(t, int64(1000), ts[0].Samples[0].Timestamp)
+ assert.Nil(t, ts[0].Exemplars)
+ assert.Nil(t, ts[0].Histograms)
+
+ md := c.Metadata()
+ require.Len(t, md, 1)
+ assert.Equal(t, prompb.MetricMetadata_GAUGE, md[0].Type)
+ assert.Equal(t, "cpu_usage", md[0].MetricFamilyName)
+ assert.Equal(t, "CPU usage", md[0].Help)
+ assert.Equal(t, "percent", md[0].Unit)
+}
+
+func TestCollectingAppender_AppendSample_MultipleSamplesSameSeries(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "requests_total")
+
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeCounter},
+ MetricFamilyName: "requests_total",
+ }
+
+ require.NoError(t, c.AppendSample(ls, meta, 0, 1000, 1, nil))
+ require.NoError(t, c.AppendSample(ls, meta, 0, 2000, 2, nil))
+ require.NoError(t, c.AppendSample(ls, meta, 0, 3000, 3, nil))
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 1)
+ require.Len(t, ts[0].Samples, 3)
+ assert.Equal(t, []prompb.Sample{
+ {Value: 1, Timestamp: 1000},
+ {Value: 2, Timestamp: 2000},
+ {Value: 3, Timestamp: 3000},
+ }, ts[0].Samples)
+}
+
+func TestCollectingAppender_AppendSample_MultipleSeries(t *testing.T) {
+ c := newCollectingAppender()
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeGauge},
+ MetricFamilyName: "metric",
+ }
+
+ require.NoError(t, c.AppendSample(labels.FromStrings("__name__", "metric", "a", "1"), meta, 0, 1000, 10, nil))
+ require.NoError(t, c.AppendSample(labels.FromStrings("__name__", "metric", "a", "2"), meta, 0, 1000, 20, nil))
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 2)
+ // TimeSeries() iterates a map, so ordering is nondeterministic; collect both
+ // label sets and values and assert on the result (a deterministic sorting
+ // alternative is sketched after this file).
+ names := make(map[string]float64)
+ for _, s := range ts {
+ var a string
+ for _, l := range s.Labels {
+ if l.Name == "a" {
+ a = l.Value
+ break
+ }
+ }
+ require.Len(t, s.Samples, 1)
+ names[a] = s.Samples[0].Value
+ }
+ assert.Equal(t, 10.0, names["1"])
+ assert.Equal(t, 20.0, names["2"])
+}
+
+func TestCollectingAppender_AppendSample_WithExemplars(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "latency")
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeHistogram},
+ MetricFamilyName: "latency",
+ }
+ exemplars := []exemplar.Exemplar{
+ {Labels: labels.FromStrings("trace_id", "abc"), Value: 0.5, Ts: 1001, HasTs: true},
+ }
+
+ err := c.AppendSample(ls, meta, 0, 1000, 0.42, exemplars)
+ require.NoError(t, err)
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 1)
+ require.Len(t, ts[0].Exemplars, 1)
+ assert.Equal(t, []prompb.Label{{Name: "trace_id", Value: "abc"}}, ts[0].Exemplars[0].Labels)
+ assert.Equal(t, 0.5, ts[0].Exemplars[0].Value)
+ assert.Equal(t, int64(1001), ts[0].Exemplars[0].Timestamp)
+}
+
+func TestCollectingAppender_AppendHistogram(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "request_duration_seconds")
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeHistogram, Help: "Request latency", Unit: "seconds"},
+ MetricFamilyName: "request_duration_seconds",
+ }
+
+ h := &histogram.Histogram{
+ CounterResetHint: histogram.UnknownCounterReset,
+ Schema: 0,
+ ZeroThreshold: 0.001,
+ ZeroCount: 0,
+ Count: 10,
+ Sum: 1.5,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
+ PositiveBuckets: []int64{5, 3},
+ NegativeSpans: nil,
+ NegativeBuckets: nil,
+ }
+
+ err := c.AppendHistogram(ls, meta, 0, 2000, h, nil)
+ require.NoError(t, err)
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 1)
+ require.Len(t, ts[0].Histograms, 1)
+ assert.Equal(t, int64(2000), ts[0].Histograms[0].Timestamp)
+ assert.Equal(t, 1.5, ts[0].Histograms[0].Sum)
+ assert.Equal(t, int32(0), ts[0].Histograms[0].Schema)
+ assert.Nil(t, ts[0].Samples)
+
+ md := c.Metadata()
+ require.Len(t, md, 1)
+ assert.Equal(t, prompb.MetricMetadata_HISTOGRAM, md[0].Type)
+ assert.Equal(t, "request_duration_seconds", md[0].MetricFamilyName)
+}
+
+func TestCollectingAppender_AppendHistogram_NilHistogramIgnored(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "x")
+ meta := prometheusremotewrite.Metadata{MetricFamilyName: "x"}
+
+ err := c.AppendHistogram(ls, meta, 0, 1000, nil, nil)
+ require.NoError(t, err)
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 0)
+}
+
+func TestCollectingAppender_Metadata_EmptyMetricFamilyNameIgnored(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "y")
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeGauge},
+ MetricFamilyName: "",
+ }
+
+ err := c.AppendSample(ls, meta, 0, 1000, 1, nil)
+ require.NoError(t, err)
+
+ md := c.Metadata()
+ assert.Len(t, md, 0)
+}
+
+func TestCollectingAppender_Metadata_DeduplicatedByFamilyName(t *testing.T) {
+ c := newCollectingAppender()
+ meta := prometheusremotewrite.Metadata{
+ Metadata: metadata.Metadata{Type: model.MetricTypeCounter, Help: "help", Unit: "bytes"},
+ MetricFamilyName: "http_requests_total",
+ }
+
+ require.NoError(t, c.AppendSample(labels.FromStrings("__name__", "http_requests_total", "method", "GET"), meta, 0, 1000, 1, nil))
+ require.NoError(t, c.AppendSample(labels.FromStrings("__name__", "http_requests_total", "method", "POST"), meta, 0, 1000, 1, nil))
+
+ md := c.Metadata()
+ require.Len(t, md, 1)
+ assert.Equal(t, "http_requests_total", md[0].MetricFamilyName)
+}
+
+func TestCollectingAppender_TimeSeries_EmptyInitially(t *testing.T) {
+ c := newCollectingAppender()
+ assert.Empty(t, c.TimeSeries())
+ assert.Empty(t, c.Metadata())
+}
+
+func TestCollectingAppender_MixedSamplesAndHistograms(t *testing.T) {
+ c := newCollectingAppender()
+ ls := labels.FromStrings("__name__", "mixed")
+ meta := prometheusremotewrite.Metadata{MetricFamilyName: "mixed", Metadata: metadata.Metadata{Type: model.MetricTypeGauge}}
+
+ require.NoError(t, c.AppendSample(ls, meta, 0, 1000, 1.0, nil))
+
+ h := &histogram.Histogram{
+ CounterResetHint: histogram.GaugeType,
+ Schema: -1,
+ ZeroThreshold: 0,
+ ZeroCount: 0,
+ Count: 1,
+ Sum: 2.0,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []int64{1},
+ }
+ require.NoError(t, c.AppendHistogram(ls, meta, 0, 2000, h, nil))
+
+ ts := c.TimeSeries()
+ require.Len(t, ts, 1)
+ require.Len(t, ts[0].Samples, 1)
+ require.Len(t, ts[0].Histograms, 1)
+ assert.Equal(t, 1.0, ts[0].Samples[0].Value)
+ assert.Equal(t, 2.0, ts[0].Histograms[0].Sum)
+}
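Several of these tests work around the nondeterministic ordering of TimeSeries(). Where a direct slice comparison is wanted, a small sort helper suffices; this is a hypothetical sketch (assuming fmt and sort imports) of the role otlp_test.go's existing sortTimeSeries helper plays:

```go
// Hypothetical helper, not part of this diff: order series by their label
// sets so slices can be compared directly despite map iteration order.
func sortByLabels(ts []prompb.TimeSeries) []prompb.TimeSeries {
	sort.Slice(ts, func(i, j int) bool {
		return fmt.Sprint(ts[i].Labels) < fmt.Sprint(ts[j].Labels)
	})
	return ts
}
```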
diff --git a/pkg/util/push/otlp.go b/pkg/util/push/otlp.go
index e81b18515d1..664c904f756 100644
--- a/pkg/util/push/otlp.go
+++ b/pkg/util/push/otlp.go
@@ -176,7 +176,8 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) (
}
func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distributor.OTLPConfig, overrides *validation.Overrides, userID string, logger log.Logger) ([]prompb.TimeSeries, []prompb.MetricMetadata, error) {
- promConverter := prometheusremotewrite.NewPrometheusConverter()
+ collector := newCollectingAppender()
+ promConverter := prometheusremotewrite.NewPrometheusConverter(collector)
settings := prometheusremotewrite.Settings{
AddMetricSuffixes: true,
DisableTargetInfo: cfg.DisableTargetInfo,
@@ -205,7 +206,7 @@ func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distribu
level.Warn(logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
}
- return promConverter.TimeSeries(), promConverter.Metadata(), err
+ return collector.TimeSeries(), collector.Metadata(), err
}
func makeLabels(in []prompb.Label) []cortexpb.LabelAdapter {
diff --git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go
index 67dc70f8b0e..ea4887ce33b 100644
--- a/pkg/util/push/otlp_test.go
+++ b/pkg/util/push/otlp_test.go
@@ -248,7 +248,7 @@ func TestOTLP_AllowDeltaTemporality(t *testing.T) {
overrides := validation.NewOverrides(limits, nil)
promSeries, metadata, err := convertToPromTS(ctx, metrics, cfg, overrides, "user-1", logger)
require.Equal(t, sortTimeSeries(test.expectedSeries), sortTimeSeries(promSeries))
- require.Equal(t, test.expectedMetadata, metadata)
+ require.ElementsMatch(t, test.expectedMetadata, metadata)
if test.expectedErr != "" {
require.Equal(t, test.expectedErr, err.Error())
} else {
@@ -340,6 +340,8 @@ func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHin
Schema: 0,
ZeroThreshold: 1e-128,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
+ NegativeSpans: []prompb.BucketSpan{},
+ PositiveSpans: []prompb.BucketSpan{},
Timestamp: ts.UnixMilli(),
ResetHint: hint,
},
@@ -519,11 +521,23 @@ func TestOTLPConvertToPromTS(t *testing.T) {
tsList, metadata, err := convertToPromTS(ctx, d, test.cfg, overrides, "user-1", logger)
require.NoError(t, err)
- // test metadata conversion
- require.Equal(t, 1, len(metadata))
- require.Equal(t, prompb.MetricMetadata_MetricType(1), metadata[0].Type)
- require.Equal(t, "test_counter_total", metadata[0].MetricFamilyName)
- require.Equal(t, "test-counter-description", metadata[0].Help)
+ // test metadata conversion (counter + optionally target_info)
+ expectedMetadataLen := 1
+ if !test.cfg.DisableTargetInfo {
+ expectedMetadataLen = 2 // test_counter_total + target_info
+ }
+ require.Equal(t, expectedMetadataLen, len(metadata))
+ var counterMetadata *prompb.MetricMetadata
+ for i := range metadata {
+ if metadata[i].MetricFamilyName == "test_counter_total" {
+ counterMetadata = &metadata[i]
+ break
+ }
+ }
+ require.NotNil(t, counterMetadata)
+ require.Equal(t, prompb.MetricMetadata_MetricType(1), counterMetadata.Type)
+ require.Equal(t, "test_counter_total", counterMetadata.MetricFamilyName)
+ require.Equal(t, "test-counter-description", counterMetadata.Help)
if test.cfg.DisableTargetInfo {
require.Equal(t, 1, len(tsList)) // test_counter_total
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go
index b1314c882af..2f14b5cab8c 100644
--- a/pkg/util/validation/limits.go
+++ b/pkg/util/validation/limits.go
@@ -32,6 +32,7 @@ var errCompilingQueryPriorityRegex = errors.New("error compiling query priority
var errDuplicatePerLabelSetLimit = errors.New("duplicate per labelSet limits found. Make sure they are all unique")
var errInvalidLabelName = errors.New("invalid label name")
var errInvalidLabelValue = errors.New("invalid label value")
+var errInvalidMetricRelabelConfigs = errors.New("invalid metric_relabel_configs")
// Supported values for enum limits
const (
@@ -400,6 +401,17 @@ func (l *Limits) Validate(nameValidationScheme model.ValidationScheme, shardByAl
return err
}
+ for i, rc := range l.MetricRelabelConfigs {
+ if rc == nil {
+ level.Error(util_log.Logger).Log("msg", "invalid metric_relabel_configs", "index", i, "err", "nil config")
+ return errInvalidMetricRelabelConfigs
+ }
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ level.Error(util_log.Logger).Log("msg", "invalid metric_relabel_configs", "index", i, "err", err)
+ return errInvalidMetricRelabelConfigs
+ }
+ }
+
return nil
}
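For reference, the failure mode this loop catches can be reproduced directly with the relabel package; a minimal sketch, reusing the invalid target_label from the tests below (the exact error text comes from Prometheus, not this patch):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	cfg := &relabel.Config{
		SourceLabels:         []model.LabelName{"cluster"},
		Action:               relabel.Replace,
		Regex:                relabel.DefaultRelabelConfig.Regex,
		TargetLabel:          "invalid-label-with-dash", // dashes are invalid under the legacy scheme
		Replacement:          "x",
		NameValidationScheme: model.LegacyValidation,
	}
	if err := cfg.Validate(model.LegacyValidation); err != nil {
		fmt.Println("rejected:", err) // Limits.Validate logs this and returns errInvalidMetricRelabelConfigs
	}
}
```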
diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go
index 3e938c0ea00..807b3d8e2b4 100644
--- a/pkg/util/validation/limits_test.go
+++ b/pkg/util/validation/limits_test.go
@@ -139,6 +139,53 @@ func TestLimits_Validate(t *testing.T) {
expected: errInvalidLabelValue,
nameValidationScheme: model.UTF8Validation,
},
+ "metric_relabel_configs nil entry": {
+ limits: Limits{
+ MetricRelabelConfigs: []*relabel.Config{nil},
+ },
+ expected: errInvalidMetricRelabelConfigs,
+ },
+ "metric_relabel_configs valid config": {
+ limits: Limits{
+ MetricRelabelConfigs: []*relabel.Config{
+ {
+ SourceLabels: []model.LabelName{"__name__"},
+ Action: relabel.Drop,
+ Regex: relabel.MustNewRegexp("(foo)"),
+ NameValidationScheme: model.LegacyValidation,
+ },
+ },
+ },
+ expected: nil,
+ },
+ "metric_relabel_configs invalid config empty action": {
+ limits: Limits{
+ MetricRelabelConfigs: []*relabel.Config{
+ {
+ SourceLabels: []model.LabelName{"__name__"},
+ Action: "",
+ Regex: relabel.DefaultRelabelConfig.Regex,
+ NameValidationScheme: model.LegacyValidation,
+ },
+ },
+ },
+ expected: errInvalidMetricRelabelConfigs,
+ },
+ "metric_relabel_configs invalid target_label for legacy": {
+ limits: Limits{
+ MetricRelabelConfigs: []*relabel.Config{
+ {
+ SourceLabels: []model.LabelName{"cluster"},
+ Action: relabel.Replace,
+ Regex: relabel.DefaultRelabelConfig.Regex,
+ TargetLabel: "invalid-label-with-dash",
+ Replacement: "x",
+ NameValidationScheme: model.LegacyValidation,
+ },
+ },
+ },
+ expected: errInvalidMetricRelabelConfigs,
+ },
}
for testName, testData := range tests {
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 66131916eb7..4deca443534 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,34 @@
+## [0.17.0](https://github.com/googleapis/google-cloud-go/releases/tag/auth%2Fv0.17.0) (2025-10-02)
+
+### Features
+
+* Add trust boundary support for service accounts and impersonation (HTTP/gRPC) (#11870) ([5c2b665](https://github.com/googleapis/google-cloud-go/commit/5c2b665f392e6dd90192f107188720aa1357e7da))
+* add trust boundary support for external accounts (#12864) ([a67a146](https://github.com/googleapis/google-cloud-go/commit/a67a146a6a88a6f1ba10c409dfce8015ecd60a64))
+
# Changelog
+## [0.16.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.4...auth/v0.16.5) (2025-08-14)
+
+
+### Bug Fixes
+
+* **auth:** Improve error message for unknown credentials type ([#12673](https://github.com/googleapis/google-cloud-go/issues/12673)) ([558b164](https://github.com/googleapis/google-cloud-go/commit/558b16429f621276694405fa5f2091199f2d4c4d))
+* **auth:** Set Content-Type in userTokenProvider.exchangeToken ([#12634](https://github.com/googleapis/google-cloud-go/issues/12634)) ([1197ebc](https://github.com/googleapis/google-cloud-go/commit/1197ebcbca491f8c610da732c7361c90bc6f46d0))
+
+## [0.16.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.3...auth/v0.16.4) (2025-08-06)
+
+
+### Bug Fixes
+
+* **auth:** Add UseDefaultClient: true to metadata.Options ([#12666](https://github.com/googleapis/google-cloud-go/issues/12666)) ([1482191](https://github.com/googleapis/google-cloud-go/commit/1482191e88236693efef68769752638281566766)), refs [#11078](https://github.com/googleapis/google-cloud-go/issues/11078) [#12657](https://github.com/googleapis/google-cloud-go/issues/12657)
+
+## [0.16.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.2...auth/v0.16.3) (2025-07-17)
+
+
+### Bug Fixes
+
+* **auth:** Fix race condition in cachedTokenProvider.tokenAsync ([#12586](https://github.com/googleapis/google-cloud-go/issues/12586)) ([73867cc](https://github.com/googleapis/google-cloud-go/commit/73867ccc1e9808d65361bcfc0776bd95fe34dbb3))
+
## [0.16.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.1...auth/v0.16.2) (2025-06-04)
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index cd5e9886848..c6d80158331 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -362,9 +362,6 @@ func (c *cachedTokenProvider) tokenState() tokenState {
// blocking call to Token should likely return the same error on the main goroutine.
func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
fn := func() {
- c.mu.Lock()
- c.isRefreshRunning = true
- c.mu.Unlock()
t, err := c.tp.Token(ctx)
c.mu.Lock()
defer c.mu.Unlock()
@@ -380,6 +377,7 @@ func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
c.mu.Lock()
defer c.mu.Unlock()
if !c.isRefreshRunning && !c.isRefreshErr {
+ c.isRefreshRunning = true
go fn()
}
}
@@ -485,6 +483,8 @@ type Options2LO struct {
Audience string
// PrivateClaims allows specifying any custom claims for the JWT. Optional.
PrivateClaims map[string]interface{}
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ UniverseDomain string
// Client is the client to be used to make the underlying token requests.
// Optional.
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
index e4a8078f8b4..a2d5c310a41 100644
--- a/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -92,11 +92,11 @@ func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
if res.ExpiresInSec == 0 || res.AccessToken == "" {
return nil, errors.New("credentials: incomplete token received from metadata")
}
- return &auth.Token{
+ token := &auth.Token{
Value: res.AccessToken,
Type: res.TokenType,
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
Metadata: computeTokenMetadata,
- }, nil
-
+ }
+ return token, nil
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
index d8f7d961466..6700e33e148 100644
--- a/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -27,6 +27,7 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/trustboundary"
"cloud.google.com/go/compute/metadata"
"github.com/googleapis/gax-go/v2/internallog"
)
@@ -95,6 +96,10 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if err := opts.validate(); err != nil {
return nil, err
}
+ trustBoundaryEnabled, err := trustboundary.IsEnabled()
+ if err != nil {
+ return nil, err
+ }
if len(opts.CredentialsJSON) > 0 {
return readCredentialsFileJSON(opts.CredentialsJSON, opts)
}
@@ -116,16 +121,29 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if OnGCE() {
metadataClient := metadata.NewWithOptions(&metadata.Options{
- Logger: opts.logger(),
+ Logger: opts.logger(),
+ UseDefaultClient: true,
})
+ gceUniverseDomainProvider := &internal.ComputeUniverseDomainProvider{
+ MetadataClient: metadataClient,
+ }
+
+ tp := computeTokenProvider(opts, metadataClient)
+ if trustBoundaryEnabled {
+ gceConfigProvider := trustboundary.NewGCEConfigProvider(gceUniverseDomainProvider)
+ var err error
+ tp, err = trustboundary.NewProvider(opts.client(), gceConfigProvider, opts.logger(), tp)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to initialize GCE trust boundary provider: %w", err)
+ }
+
+ }
return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: computeTokenProvider(opts, metadataClient),
+ TokenProvider: tp,
ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return metadataClient.ProjectIDWithContext(ctx)
}),
- UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
- MetadataClient: metadataClient,
- },
+ UniverseDomainProvider: gceUniverseDomainProvider,
}), nil
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index e5243e6cfbe..d2a04247023 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -25,6 +25,7 @@ import (
"cloud.google.com/go/auth/credentials/internal/impersonate"
internalauth "cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/trustboundary"
)
func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
@@ -36,6 +37,8 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
var projectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
+ case credsfile.UnknownCredType:
+ return nil, errors.New("credentials: unsupported unidentified file type")
case credsfile.ServiceAccountKey:
f, err := credsfile.ParseServiceAccount(b)
if err != nil {
@@ -134,19 +137,34 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
- Email: f.ClientEmail,
- PrivateKey: []byte(f.PrivateKey),
- PrivateKeyID: f.PrivateKeyID,
- Scopes: opts.scopes(),
- TokenURL: f.TokenURL,
- Subject: opts.Subject,
- Client: opts.client(),
- Logger: opts.logger(),
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: opts.scopes(),
+ TokenURL: f.TokenURL,
+ Subject: opts.Subject,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ UniverseDomain: ud,
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
}
- return auth.New2LOTokenProvider(opts2LO)
+
+ tp, err := auth.New2LOTokenProvider(opts2LO)
+ if err != nil {
+ return nil, err
+ }
+
+ trustBoundaryEnabled, err := trustboundary.IsEnabled()
+ if err != nil {
+ return nil, err
+ }
+ if !trustBoundaryEnabled {
+ return tp, nil
+ }
+ saConfig := trustboundary.NewServiceAccountConfigProvider(opts2LO.Email, opts2LO.UniverseDomain)
+ return trustboundary.NewProvider(opts.client(), saConfig, opts.logger(), tp)
}
func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
@@ -185,7 +203,39 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions
if f.ServiceAccountImpersonation != nil {
externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
}
- return externalaccount.NewTokenProvider(externalOpts)
+ tp, err := externalaccount.NewTokenProvider(externalOpts)
+ if err != nil {
+ return nil, err
+ }
+ trustBoundaryEnabled, err := trustboundary.IsEnabled()
+ if err != nil {
+ return nil, err
+ }
+ if !trustBoundaryEnabled {
+ return tp, nil
+ }
+
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ var configProvider trustboundary.ConfigProvider
+
+ if f.ServiceAccountImpersonationURL == "" {
+ // No impersonation, this is a direct external account credential.
+ // The trust boundary is based on the workload/workforce pool.
+ var err error
+ configProvider, err = trustboundary.NewExternalAccountConfigProvider(f.Audience, ud)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // Impersonation is used. The trust boundary is based on the target service account.
+ targetSAEmail, err := impersonate.ExtractServiceAccountEmail(f.ServiceAccountImpersonationURL)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not extract target service account email for trust boundary: %w", err)
+ }
+ configProvider = trustboundary.NewServiceAccountConfigProvider(targetSAEmail, ud)
+ }
+
+ return trustboundary.NewProvider(opts.client(), configProvider, opts.logger(), tp)
}
func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
@@ -200,7 +250,24 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU
Client: opts.client(),
Logger: opts.logger(),
}
- return externalaccountuser.NewTokenProvider(externalOpts)
+ tp, err := externalaccountuser.NewTokenProvider(externalOpts)
+ if err != nil {
+ return nil, err
+ }
+ trustBoundaryEnabled, err := trustboundary.IsEnabled()
+ if err != nil {
+ return nil, err
+ }
+ if !trustBoundaryEnabled {
+ return tp, nil
+ }
+
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ configProvider, err := trustboundary.NewExternalAccountConfigProvider(f.Audience, ud)
+ if err != nil {
+ return nil, err
+ }
+ return trustboundary.NewProvider(opts.client(), configProvider, opts.logger(), tp)
}
func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
@@ -208,20 +275,38 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
}
- tp, err := fileCredentials(f.CredSource, opts)
+ sourceTP, err := fileCredentials(f.CredSource, opts)
if err != nil {
return nil, err
}
- return impersonate.NewTokenProvider(&impersonate.Options{
- URL: f.ServiceAccountImpersonationURL,
- Scopes: opts.scopes(),
- Tp: tp,
- Delegates: f.Delegates,
- Client: opts.client(),
- Logger: opts.logger(),
- })
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ impOpts := &impersonate.Options{
+ URL: f.ServiceAccountImpersonationURL,
+ Scopes: opts.scopes(),
+ Tp: sourceTP,
+ Delegates: f.Delegates,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ UniverseDomain: ud,
+ }
+ tp, err := impersonate.NewTokenProvider(impOpts)
+ if err != nil {
+ return nil, err
+ }
+ trustBoundaryEnabled, err := trustboundary.IsEnabled()
+ if err != nil {
+ return nil, err
+ }
+ if !trustBoundaryEnabled {
+ return tp, nil
+ }
+ targetSAEmail, err := impersonate.ExtractServiceAccountEmail(f.ServiceAccountImpersonationURL)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not extract target service account email for trust boundary: %w", err)
+ }
+ targetSAConfig := trustboundary.NewServiceAccountConfigProvider(targetSAEmail, ud)
+ return trustboundary.NewProvider(opts.client(), targetSAConfig, opts.logger(), tp)
}
-
func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
return gdch.NewTokenProvider(f, &gdch.Options{
STSAudience: opts.STSAudience,
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
index b3a99261fa9..8253376ef8d 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -22,10 +22,12 @@ import (
"fmt"
"log/slog"
"net/http"
+ "regexp"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport/headers"
"github.com/googleapis/gax-go/v2/internallog"
)
@@ -34,6 +36,8 @@ const (
authHeaderKey = "Authorization"
)
+var serviceAccountEmailRegex = regexp.MustCompile(`serviceAccounts/(.+?):generateAccessToken`)
+
// generateAccesstokenReq is used for service account impersonation
type generateAccessTokenReq struct {
Delegates []string `json:"delegates,omitempty"`
@@ -81,6 +85,8 @@ type Options struct {
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ UniverseDomain string
}
func (o *Options) validate() error {
@@ -114,9 +120,11 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
- if err := setAuthHeader(ctx, o.Tp, req); err != nil {
+ sourceToken, err := o.Tp.Token(ctx)
+ if err != nil {
return nil, err
}
+ headers.SetAuthHeader(sourceToken, req)
logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
resp, body, err := internal.DoRequest(o.Client, req)
if err != nil {
@@ -135,22 +143,26 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
if err != nil {
return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
}
- return &auth.Token{
+ token := &auth.Token{
Value: accessTokenResp.AccessToken,
Expiry: expiry,
Type: internal.TokenTypeBearer,
- }, nil
+ }
+ return token, nil
}
-func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
- t, err := tp.Token(ctx)
- if err != nil {
- return err
- }
- typ := t.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
+// ExtractServiceAccountEmail extracts the service account email from the impersonation URL.
+// The impersonation URL is expected to be in the format:
+// https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}:generateAccessToken
+// or
+// https://iamcredentials.googleapis.com/v1/projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}:generateAccessToken
+// Returns an error if the email cannot be extracted.
+func ExtractServiceAccountEmail(impersonationURL string) (string, error) {
+ matches := serviceAccountEmailRegex.FindStringSubmatch(impersonationURL)
+
+ if len(matches) < 2 {
+ return "", fmt.Errorf("credentials: invalid impersonation URL format: %s", impersonationURL)
}
- r.Header.Set(authHeaderKey, typ+" "+t.Value)
- return nil
+
+ return matches[1], nil
}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 834aef41c87..6bcd3ef5459 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -30,6 +30,7 @@ import (
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
+ "cloud.google.com/go/auth/internal/transport/headers"
"github.com/googleapis/gax-go/v2/internallog"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
@@ -428,23 +429,13 @@ func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ..
}
}
metadata := make(map[string]string, len(c.metadata)+1)
- setAuthMetadata(token, metadata)
+ headers.SetAuthMetadata(token, metadata)
for k, v := range c.metadata {
metadata[k] = v
}
return metadata, nil
}
-// setAuthMetadata uses the provided token to set the Authorization metadata.
-// If the token.Type is empty, the type is assumed to be Bearer.
-func setAuthMetadata(token *auth.Token, m map[string]string) {
- typ := token.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- m["authorization"] = typ + " " + token.Value
-}
-
func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
return c.secure
}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 5758e85b5db..c9126535d72 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -25,8 +25,8 @@ import (
"cloud.google.com/go/auth"
detect "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
+ "cloud.google.com/go/auth/internal/transport/headers"
"github.com/googleapis/gax-go/v2/internallog"
)
@@ -236,12 +236,10 @@ func NewClient(opts *Options) (*http.Client, error) {
}, nil
}
-// SetAuthHeader uses the provided token to set the Authorization header on a
-// request. If the token.Type is empty, the type is assumed to be Bearer.
+// SetAuthHeader uses the provided token to set the Authorization and trust
+// boundary headers on an http.Request. If the token.Type is empty, the type is
+// assumed to be Bearer. This is the recommended way to set authorization
+// headers on a custom http.Request.
func SetAuthHeader(token *auth.Token, req *http.Request) {
- typ := token.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- req.Header.Set("Authorization", typ+" "+token.Value)
+ headers.SetAuthHeader(token, req)
}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index ee215b6dc6c..3feb997c76d 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -27,6 +27,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
+ "cloud.google.com/go/auth/internal/transport/headers"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
@@ -228,7 +229,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
}
}
req2 := req.Clone(req.Context())
- SetAuthHeader(token, req2)
+ headers.SetAuthHeader(token, req2)
reqBodyClosed = true
return t.base.RoundTrip(req2)
}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 6a8eab6eb99..72a8a6b7a53 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -47,6 +47,12 @@ const (
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
DefaultUniverseDomain = "googleapis.com"
+
+ // TrustBoundaryNoOp is a constant indicating no trust boundary is enforced.
+ TrustBoundaryNoOp = "0x0"
+
+ // TrustBoundaryDataKey is the key used to store trust boundary data in a token's metadata.
+ TrustBoundaryDataKey = "google.auth.trust_boundary_data"
)
type clonableTransport interface {
@@ -223,3 +229,56 @@ func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (st
func FormatIAMServiceAccountResource(name string) string {
return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
}
+
+// TrustBoundaryData represents the trust boundary data associated with a token.
+// It contains information about the regions or environments where the token is valid.
+type TrustBoundaryData struct {
+ // Locations is the list of locations that the token is allowed to be used in.
+ Locations []string
+ // EncodedLocations represents the locations in an encoded format.
+ EncodedLocations string
+}
+
+// NewTrustBoundaryData returns a new TrustBoundaryData with the specified locations and encoded locations.
+func NewTrustBoundaryData(locations []string, encodedLocations string) *TrustBoundaryData {
+ // Ensure consistency by treating a nil slice as an empty slice.
+ if locations == nil {
+ locations = []string{}
+ }
+ locationsCopy := make([]string, len(locations))
+ copy(locationsCopy, locations)
+ return &TrustBoundaryData{
+ Locations: locationsCopy,
+ EncodedLocations: encodedLocations,
+ }
+}
+
+// NewNoOpTrustBoundaryData returns a new TrustBoundaryData with no restrictions.
+func NewNoOpTrustBoundaryData() *TrustBoundaryData {
+ return &TrustBoundaryData{
+ Locations: []string{},
+ EncodedLocations: TrustBoundaryNoOp,
+ }
+}
+
+// TrustBoundaryHeader returns the value for the x-allowed-locations header and a bool
+// indicating if the header should be set. The return values are structured to
+// handle three distinct states required by the backend:
+// 1. Header not set: (value="", present=false) -> data is empty.
+// 2. Header set to an empty string: (value="", present=true) -> data is a no-op.
+// 3. Header set to a value: (value="...", present=true) -> data has locations.
+func (t TrustBoundaryData) TrustBoundaryHeader() (value string, present bool) {
+ if t.EncodedLocations == "" {
+ // If the data is empty, the header should not be present.
+ return "", false
+ }
+
+ // If data is not empty, the header should always be present.
+ present = true
+ value = ""
+ if t.EncodedLocations != TrustBoundaryNoOp {
+ value = t.EncodedLocations
+ }
+ // For a no-op, the backend requires an empty string.
+ return value, present
+}
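The three header states are easy to lose track of; a standalone sketch of the same mapping (the real type lives in an internal package, so the logic is replicated here for illustration):

```go
package main

import "fmt"

const trustBoundaryNoOp = "0x0" // mirrors internal.TrustBoundaryNoOp

// header replicates TrustBoundaryData.TrustBoundaryHeader for illustration.
func header(encodedLocations string) (value string, present bool) {
	if encodedLocations == "" {
		return "", false // empty data: header not set
	}
	if encodedLocations == trustBoundaryNoOp {
		return "", true // no-op: header set to the empty string
	}
	return encodedLocations, true // restricted: header carries the encoding
}

func main() {
	for _, e := range []string{"", trustBoundaryNoOp, "0xABC"} {
		v, ok := header(e)
		fmt.Printf("encoded=%q -> value=%q present=%v\n", e, v, ok)
	}
}
```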
diff --git a/vendor/cloud.google.com/go/auth/internal/retry/retry.go b/vendor/cloud.google.com/go/auth/internal/retry/retry.go
new file mode 100644
index 00000000000..276cc4a3e26
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/retry/retry.go
@@ -0,0 +1,117 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retry
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "net/http"
+ "time"
+)
+
+const (
+ maxRetryAttempts = 5
+)
+
+var (
+ syscallRetryable = func(error) bool { return false }
+)
+
+// defaultBackoff is basically equivalent to gax.Backoff without the need for
+// the dependency.
+type defaultBackoff struct {
+ max time.Duration
+ mul float64
+ cur time.Duration
+}
+
+func (b *defaultBackoff) Pause() time.Duration {
+ d := time.Duration(1 + rand.Int63n(int64(b.cur)))
+ b.cur = time.Duration(float64(b.cur) * b.mul)
+ if b.cur > b.max {
+ b.cur = b.max
+ }
+ return d
+}
+
+// Sleep is the equivalent of gax.Sleep without the need for the dependency.
+func Sleep(ctx context.Context, d time.Duration) error {
+ t := time.NewTimer(d)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ case <-t.C:
+ return nil
+ }
+}
+
+// New returns a new Retryer with the default backoff strategy.
+func New() *Retryer {
+ return &Retryer{bo: &defaultBackoff{
+ cur: 100 * time.Millisecond,
+ max: 30 * time.Second,
+ mul: 2,
+ }}
+}
+
+type backoff interface {
+ Pause() time.Duration
+}
+
+// Retryer is a retryer for HTTP requests.
+type Retryer struct {
+ bo backoff
+ attempts int
+}
+
+// Retry determines if a request should be retried.
+func (r *Retryer) Retry(status int, err error) (time.Duration, bool) {
+ if status == http.StatusOK {
+ return 0, false
+ }
+ retryOk := shouldRetry(status, err)
+ if !retryOk {
+ return 0, false
+ }
+ if r.attempts == maxRetryAttempts {
+ return 0, false
+ }
+ r.attempts++
+ return r.bo.Pause(), true
+}
+
+func shouldRetry(status int, err error) bool {
+ if 500 <= status && status <= 599 {
+ return true
+ }
+ if err == io.ErrUnexpectedEOF {
+ return true
+ }
+ // Transient network errors should be retried.
+ if syscallRetryable(err) {
+ return true
+ }
+ if err, ok := err.(interface{ Temporary() bool }); ok {
+ if err.Temporary() {
+ return true
+ }
+ }
+ if err, ok := err.(interface{ Unwrap() error }); ok {
+ return shouldRetry(status, err.Unwrap())
+ }
+ return false
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go b/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go
new file mode 100644
index 00000000000..5483a763c42
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/headers/headers.go
@@ -0,0 +1,61 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package headers
+
+import (
+ "net/http"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+)
+
+// SetAuthHeader uses the provided token to set the Authorization and trust
+// boundary headers on a request. If the token.Type is empty, the type is
+// assumed to be Bearer.
+func SetAuthHeader(token *auth.Token, req *http.Request) {
+ typ := token.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ req.Header.Set("Authorization", typ+" "+token.Value)
+
+ if headerVal, setHeader := getTrustBoundaryHeader(token); setHeader {
+ req.Header.Set("x-allowed-locations", headerVal)
+ }
+}
+
+// SetAuthMetadata uses the provided token to set the Authorization and trust
+// boundary metadata. If the token.Type is empty, the type is assumed to be
+// Bearer.
+func SetAuthMetadata(token *auth.Token, m map[string]string) {
+ typ := token.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ m["authorization"] = typ + " " + token.Value
+
+ if headerVal, setHeader := getTrustBoundaryHeader(token); setHeader {
+ m["x-allowed-locations"] = headerVal
+ }
+}
+
+func getTrustBoundaryHeader(token *auth.Token) (val string, present bool) {
+ if data, ok := token.Metadata[internal.TrustBoundaryDataKey]; ok {
+ if tbd, ok := data.(internal.TrustBoundaryData); ok {
+ return tbd.TrustBoundaryHeader()
+ }
+ }
+ return "", false
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go b/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go
new file mode 100644
index 00000000000..8fa5600bdcb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/trustboundary/external_accounts_config_providers.go
@@ -0,0 +1,100 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trustboundary
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+)
+
+const (
+ workloadAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/projects/%s/locations/global/workloadIdentityPools/%s/allowedLocations"
+ workforceAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/locations/global/workforcePools/%s/allowedLocations"
+)
+
+var (
+ workforceAudiencePattern = regexp.MustCompile(`//iam\.([^/]+)/locations/global/workforcePools/([^/]+)`)
+ workloadAudiencePattern = regexp.MustCompile(`//iam\.([^/]+)/projects/([^/]+)/locations/global/workloadIdentityPools/([^/]+)`)
+)
+
+// NewExternalAccountConfigProvider creates a new ConfigProvider for external accounts.
+func NewExternalAccountConfigProvider(audience, inputUniverseDomain string) (ConfigProvider, error) {
+ var audienceDomain, projectNumber, poolID string
+ var isWorkload bool
+
+ matches := workloadAudiencePattern.FindStringSubmatch(audience)
+ if len(matches) == 4 { // Expecting full match, domain, projectNumber, poolID
+ audienceDomain = matches[1]
+ projectNumber = matches[2]
+ poolID = matches[3]
+ isWorkload = true
+ } else {
+ matches = workforceAudiencePattern.FindStringSubmatch(audience)
+ if len(matches) == 3 { // Expecting full match, domain, poolID
+ audienceDomain = matches[1]
+ poolID = matches[2]
+ isWorkload = false
+ } else {
+ return nil, fmt.Errorf("trustboundary: unknown audience format: %q", audience)
+ }
+ }
+
+ effectiveUniverseDomain := inputUniverseDomain
+ if effectiveUniverseDomain == "" {
+ effectiveUniverseDomain = audienceDomain
+ } else if audienceDomain != "" && effectiveUniverseDomain != audienceDomain {
+ return nil, fmt.Errorf("trustboundary: provided universe domain (%q) does not match domain in audience (%q)", inputUniverseDomain, audienceDomain)
+ }
+
+ if isWorkload {
+ return &workloadIdentityPoolConfigProvider{
+ projectNumber: projectNumber,
+ poolID: poolID,
+ universeDomain: effectiveUniverseDomain,
+ }, nil
+ }
+ return &workforcePoolConfigProvider{
+ poolID: poolID,
+ universeDomain: effectiveUniverseDomain,
+ }, nil
+}
+
+type workforcePoolConfigProvider struct {
+ poolID string
+ universeDomain string
+}
+
+func (p *workforcePoolConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) {
+ return fmt.Sprintf(workforceAllowedLocationsEndpoint, p.universeDomain, p.poolID), nil
+}
+
+func (p *workforcePoolConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) {
+ return p.universeDomain, nil
+}
+
+type workloadIdentityPoolConfigProvider struct {
+ projectNumber string
+ poolID string
+ universeDomain string
+}
+
+func (p *workloadIdentityPoolConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) {
+ return fmt.Sprintf(workloadAllowedLocationsEndpoint, p.universeDomain, p.projectNumber, p.poolID), nil
+}
+
+func (p *workloadIdentityPoolConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) {
+ return p.universeDomain, nil
+}
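The two audience shapes these patterns accept are worth seeing concretely; a standalone sketch with illustrative pool names (the trustboundary package is internal, so the regexes are replicated):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same patterns as workloadAudiencePattern and workforceAudiencePattern above.
var (
	workload  = regexp.MustCompile(`//iam\.([^/]+)/projects/([^/]+)/locations/global/workloadIdentityPools/([^/]+)`)
	workforce = regexp.MustCompile(`//iam\.([^/]+)/locations/global/workforcePools/([^/]+)`)
)

func main() {
	wl := "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider"
	wf := "//iam.googleapis.com/locations/global/workforcePools/my-pool/providers/my-provider"
	fmt.Println(workload.FindStringSubmatch(wl)[1:])  // [googleapis.com 123456 my-pool]
	fmt.Println(workforce.FindStringSubmatch(wf)[1:]) // [googleapis.com my-pool]
}
```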
diff --git a/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go b/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go
new file mode 100644
index 00000000000..bf898fffd69
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/trustboundary/trust_boundary.go
@@ -0,0 +1,392 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trustboundary
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/retry"
+ "cloud.google.com/go/auth/internal/transport/headers"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // serviceAccountAllowedLocationsEndpoint is the URL for fetching allowed locations for a given service account email.
+ serviceAccountAllowedLocationsEndpoint = "https://iamcredentials.%s/v1/projects/-/serviceAccounts/%s/allowedLocations"
+)
+
+// isEnabled wraps isTrustBoundaryEnabled with sync.OnceValues to ensure it's
+// called only once.
+var isEnabled = sync.OnceValues(isTrustBoundaryEnabled)
+
+// IsEnabled returns if the trust boundary feature is enabled and an error if
+// the configuration is invalid. The underlying check is performed only once.
+func IsEnabled() (bool, error) {
+ return isEnabled()
+}
+
+// isTrustBoundaryEnabled checks if the trust boundary feature is enabled via
+// GOOGLE_AUTH_TRUST_BOUNDARY_ENABLED environment variable.
+//
+// If the environment variable is not set, it is considered false.
+//
+// The environment variable is interpreted as a boolean with the following
+// (case-insensitive) rules:
+// - "true", "1" are considered true.
+// - "false", "0" are considered false.
+//
+// Any other values will return an error.
+func isTrustBoundaryEnabled() (bool, error) {
+ const envVar = "GOOGLE_AUTH_TRUST_BOUNDARY_ENABLED"
+ val, ok := os.LookupEnv(envVar)
+ if !ok {
+ return false, nil
+ }
+ val = strings.ToLower(val)
+ switch val {
+ case "true", "1":
+ return true, nil
+ case "false", "0":
+ return false, nil
+ default:
+ return false, fmt.Errorf(`invalid value for %s: %q. Must be one of "true", "false", "1", or "0"`, envVar, val)
+ }
+}
+
+// ConfigProvider provides specific configuration for trust boundary lookups.
+type ConfigProvider interface {
+ // GetTrustBoundaryEndpoint returns the endpoint URL for the trust boundary lookup.
+ GetTrustBoundaryEndpoint(ctx context.Context) (url string, err error)
+ // GetUniverseDomain returns the universe domain associated with the credential.
+ // It may return an error if the universe domain cannot be determined.
+ GetUniverseDomain(ctx context.Context) (string, error)
+}
+
+// AllowedLocationsResponse is the structure of the response from the Trust Boundary API.
+type AllowedLocationsResponse struct {
+ // Locations is the list of allowed locations.
+ Locations []string `json:"locations"`
+ // EncodedLocations is the encoded representation of the allowed locations.
+ EncodedLocations string `json:"encodedLocations"`
+}
+
+// fetchTrustBoundaryData fetches the trust boundary data from the API.
+func fetchTrustBoundaryData(ctx context.Context, client *http.Client, url string, token *auth.Token, logger *slog.Logger) (*internal.TrustBoundaryData, error) {
+ if logger == nil {
+ logger = slog.New(slog.NewTextHandler(io.Discard, nil))
+ }
+ if client == nil {
+ return nil, errors.New("trustboundary: HTTP client is required")
+ }
+
+ if url == "" {
+ return nil, errors.New("trustboundary: URL cannot be empty")
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: failed to create trust boundary request: %w", err)
+ }
+
+ if token == nil || token.Value == "" {
+ return nil, errors.New("trustboundary: access token required for lookup API authentication")
+ }
+ headers.SetAuthHeader(token, req)
+ logger.DebugContext(ctx, "trust boundary request", "request", internallog.HTTPRequest(req, nil))
+
+ retryer := retry.New()
+ var response *http.Response
+ for {
+ response, err = client.Do(req)
+
+ var statusCode int
+ if response != nil {
+ statusCode = response.StatusCode
+ }
+ pause, shouldRetry := retryer.Retry(statusCode, err)
+
+ if !shouldRetry {
+ break
+ }
+
+ if response != nil {
+ // Drain and close the body to reuse the connection
+ io.Copy(io.Discard, response.Body)
+ response.Body.Close()
+ }
+
+ if err := retry.Sleep(ctx, pause); err != nil {
+ return nil, err
+ }
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: failed to fetch trust boundary: %w", err)
+ }
+ defer response.Body.Close()
+
+ body, err := io.ReadAll(response.Body)
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: failed to read trust boundary response: %w", err)
+ }
+
+ logger.DebugContext(ctx, "trust boundary response", "response", internallog.HTTPResponse(response, body))
+
+ if response.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("trustboundary: trust boundary request failed with status: %s, body: %s", response.Status, string(body))
+ }
+
+ apiResponse := AllowedLocationsResponse{}
+ if err := json.Unmarshal(body, &apiResponse); err != nil {
+ return nil, fmt.Errorf("trustboundary: failed to unmarshal trust boundary response: %w", err)
+ }
+
+ if apiResponse.EncodedLocations == "" {
+ return nil, errors.New("trustboundary: invalid API response: encodedLocations is empty")
+ }
+
+ return internal.NewTrustBoundaryData(apiResponse.Locations, apiResponse.EncodedLocations), nil
+}
+
+// serviceAccountConfig holds configuration for SA trust boundary lookups.
+// It implements the ConfigProvider interface.
+type serviceAccountConfig struct {
+ ServiceAccountEmail string
+ UniverseDomain string
+}
+
+// NewServiceAccountConfigProvider creates a new config for service accounts.
+func NewServiceAccountConfigProvider(saEmail, universeDomain string) ConfigProvider {
+ return &serviceAccountConfig{
+ ServiceAccountEmail: saEmail,
+ UniverseDomain: universeDomain,
+ }
+}
+
+// GetTrustBoundaryEndpoint returns the formatted URL for fetching allowed locations
+// for the configured service account and universe domain.
+func (sac *serviceAccountConfig) GetTrustBoundaryEndpoint(ctx context.Context) (url string, err error) {
+ if sac.ServiceAccountEmail == "" {
+ return "", errors.New("trustboundary: service account email cannot be empty for config")
+ }
+ ud := sac.UniverseDomain
+ if ud == "" {
+ ud = internal.DefaultUniverseDomain
+ }
+ return fmt.Sprintf(serviceAccountAllowedLocationsEndpoint, ud, sac.ServiceAccountEmail), nil
+}
+
+// GetUniverseDomain returns the configured universe domain, defaulting to
+// [internal.DefaultUniverseDomain] if not explicitly set.
+func (sac *serviceAccountConfig) GetUniverseDomain(ctx context.Context) (string, error) {
+ if sac.UniverseDomain == "" {
+ return internal.DefaultUniverseDomain, nil
+ }
+ return sac.UniverseDomain, nil
+}
+
+// DataProvider fetches and caches trust boundary Data.
+// It implements the DataProvider interface and uses a ConfigProvider
+// to get type-specific details for the lookup.
+type DataProvider struct {
+ client *http.Client
+ configProvider ConfigProvider
+ data *internal.TrustBoundaryData
+ logger *slog.Logger
+ base auth.TokenProvider
+}
+
+// NewProvider wraps the provided base [auth.TokenProvider] to create a new
+// provider that injects tokens with trust boundary data. It uses the provided
+// HTTP client and configProvider to fetch the data and attach it to the token's
+// metadata.
+func NewProvider(client *http.Client, configProvider ConfigProvider, logger *slog.Logger, base auth.TokenProvider) (*DataProvider, error) {
+ if client == nil {
+ return nil, errors.New("trustboundary: HTTP client cannot be nil for DataProvider")
+ }
+ if configProvider == nil {
+ return nil, errors.New("trustboundary: ConfigProvider cannot be nil for DataProvider")
+ }
+ p := &DataProvider{
+ client: client,
+ configProvider: configProvider,
+ logger: internallog.New(logger),
+ base: base,
+ }
+ return p, nil
+}
+
+// Token retrieves a token from the base provider and injects it with trust
+// boundary data.
+func (p *DataProvider) Token(ctx context.Context) (*auth.Token, error) {
+ // Get the original token.
+ token, err := p.base.Token(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ tbData, err := p.GetTrustBoundaryData(ctx, token)
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: error fetching the trust boundary data: %w", err)
+ }
+ if tbData != nil {
+ if token.Metadata == nil {
+ token.Metadata = make(map[string]interface{})
+ }
+ token.Metadata[internal.TrustBoundaryDataKey] = *tbData
+ }
+ return token, nil
+}
+
+// GetTrustBoundaryData retrieves the trust boundary data.
+// It first checks the universe domain: if it's non-default, a NoOp is returned.
+// Otherwise, it checks a local cache. If the data is not cached as NoOp,
+// it fetches new data from the endpoint provided by its ConfigProvider,
+// using the given accessToken for authentication. Results are cached.
+// If fetching fails, it returns previously cached data if available, otherwise the fetch error.
+func (p *DataProvider) GetTrustBoundaryData(ctx context.Context, token *auth.Token) (*internal.TrustBoundaryData, error) {
+ // Check the universe domain.
+ uniDomain, err := p.configProvider.GetUniverseDomain(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: error getting universe domain: %w", err)
+ }
+ if uniDomain != "" && uniDomain != internal.DefaultUniverseDomain {
+ if p.data == nil || p.data.EncodedLocations != internal.TrustBoundaryNoOp {
+ p.data = internal.NewNoOpTrustBoundaryData()
+ }
+ return p.data, nil
+ }
+
+ // Check cache for a no-op result from a previous API call.
+ cachedData := p.data
+ if cachedData != nil && cachedData.EncodedLocations == internal.TrustBoundaryNoOp {
+ return cachedData, nil
+ }
+
+ // Get the endpoint
+ url, err := p.configProvider.GetTrustBoundaryEndpoint(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("trustboundary: error getting the lookup endpoint: %w", err)
+ }
+
+ // Proceed to fetch new data.
+ newData, fetchErr := fetchTrustBoundaryData(ctx, p.client, url, token, p.logger)
+
+ if fetchErr != nil {
+ // Fetch failed. Fallback to cachedData if available.
+ if cachedData != nil {
+ return cachedData, nil // Successful fallback
+ }
+ // No cache to fallback to.
+ return nil, fmt.Errorf("trustboundary: failed to fetch trust boundary data for endpoint %s and no cache available: %w", url, fetchErr)
+ }
+
+ // Fetch successful. Update cache.
+ p.data = newData
+ return newData, nil
+}
+
+// GCEConfigProvider implements ConfigProvider for GCE environments.
+// It lazily fetches and caches the necessary metadata (service account email, universe domain)
+// from the GCE metadata server.
+type GCEConfigProvider struct {
+ // universeDomainProvider provides the universe domain and underlying metadata client.
+ universeDomainProvider *internal.ComputeUniverseDomainProvider
+
+ // Caching for service account email
+ saOnce sync.Once
+ saEmail string
+ saEmailErr error
+
+ // Caching for universe domain
+ udOnce sync.Once
+ ud string
+ udErr error
+}
+
+// NewGCEConfigProvider creates a new GCEConfigProvider, which uses the
+// provided gceUDP to interact with the GCE metadata server.
+func NewGCEConfigProvider(gceUDP *internal.ComputeUniverseDomainProvider) *GCEConfigProvider {
+ // The validity of gceUDP and its internal MetadataClient will be checked
+ // within the GetTrustBoundaryEndpoint and GetUniverseDomain methods.
+ return &GCEConfigProvider{
+ universeDomainProvider: gceUDP,
+ }
+}
+
+func (g *GCEConfigProvider) fetchSA(ctx context.Context) {
+ if g.universeDomainProvider == nil || g.universeDomainProvider.MetadataClient == nil {
+ g.saEmailErr = errors.New("trustboundary: GCEConfigProvider not properly initialized (missing ComputeUniverseDomainProvider or MetadataClient)")
+ return
+ }
+ mdClient := g.universeDomainProvider.MetadataClient
+ saEmail, err := mdClient.EmailWithContext(ctx, "default")
+ if err != nil {
+ g.saEmailErr = fmt.Errorf("trustboundary: GCE config: failed to get service account email: %w", err)
+ return
+ }
+ g.saEmail = saEmail
+}
+
+func (g *GCEConfigProvider) fetchUD(ctx context.Context) {
+ if g.universeDomainProvider == nil || g.universeDomainProvider.MetadataClient == nil {
+ g.udErr = errors.New("trustboundary: GCEConfigProvider not properly initialized (missing ComputeUniverseDomainProvider or MetadataClient)")
+ return
+ }
+ ud, err := g.universeDomainProvider.GetProperty(ctx)
+ if err != nil {
+ g.udErr = fmt.Errorf("trustboundary: GCE config: failed to get universe domain: %w", err)
+ return
+ }
+ if ud == "" {
+ ud = internal.DefaultUniverseDomain
+ }
+ g.ud = ud
+}
+
+// GetTrustBoundaryEndpoint constructs the trust boundary lookup URL for a GCE environment.
+// It uses cached metadata (service account email, universe domain) after the first call.
+func (g *GCEConfigProvider) GetTrustBoundaryEndpoint(ctx context.Context) (string, error) {
+ g.saOnce.Do(func() { g.fetchSA(ctx) })
+ if g.saEmailErr != nil {
+ return "", g.saEmailErr
+ }
+ g.udOnce.Do(func() { g.fetchUD(ctx) })
+ if g.udErr != nil {
+ return "", g.udErr
+ }
+ return fmt.Sprintf(serviceAccountAllowedLocationsEndpoint, g.ud, g.saEmail), nil
+}
+
+// GetUniverseDomain retrieves the universe domain from the GCE metadata server.
+// It uses a cached value after the first call.
+func (g *GCEConfigProvider) GetUniverseDomain(ctx context.Context) (string, error) {
+ g.udOnce.Do(func() { g.fetchUD(ctx) })
+ if g.udErr != nil {
+ return "", g.udErr
+ }
+ return g.ud, nil
+}
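To show how the pieces in this file fit together, here is a minimal, hedged sketch of wiring GCEConfigProvider into DataProvider. The import paths and the metadata client constructor are assumptions based on this vendor tree (internal packages are importable only from within the module), and base stands in for any existing token provider.

package trustboundaryexample

import (
	"context"
	"net/http"

	"cloud.google.com/go/auth"
	"cloud.google.com/go/auth/internal"               // importable only inside this module
	"cloud.google.com/go/auth/internal/trustboundary" // assumed path of the file above
	"cloud.google.com/go/compute/metadata"
)

// tokenWithTrustBoundary is a sketch, not a confirmed public API: it wires the
// GCE config provider into a DataProvider so returned tokens carry trust
// boundary data in their Metadata map under internal.TrustBoundaryDataKey.
func tokenWithTrustBoundary(ctx context.Context, base auth.TokenProvider) (*auth.Token, error) {
	cfg := trustboundary.NewGCEConfigProvider(&internal.ComputeUniverseDomainProvider{
		MetadataClient: metadata.NewClient(nil), // nil selects the default HTTP client
	})
	p, err := trustboundary.NewProvider(http.DefaultClient, cfg, nil, base)
	if err != nil {
		return nil, err
	}
	// On a fetch failure, GetTrustBoundaryData falls back to previously
	// cached data when available, so Token degrades gracefully.
	return p.Token(ctx)
}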
diff --git a/vendor/cloud.google.com/go/auth/internal/version.go b/vendor/cloud.google.com/go/auth/internal/version.go
new file mode 100644
index 00000000000..e2f56cf4d81
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/version.go
@@ -0,0 +1,20 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "0.17.0"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index d99d530934b..1799c6ef223 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,28 @@
# Release History
+## 1.19.1 (2025-09-11)
+
+### Bugs Fixed
+
+* Fixed resource identifier parsing for provider-specific resource hierarchies containing "resourceGroups" segments.
+
+### Other Changes
+
+* Improved error fallback for improperly authored long-running operations.
+* Upgraded dependencies.
+
+## 1.19.0 (2025-08-21)
+
+### Features Added
+
+* Added `runtime.APIVersionLocationPath` to be set by clients that set the API version in the path.
+
+## 1.18.2 (2025-07-31)
+
+### Bugs Fixed
+
+* Fixed a case in which `BearerTokenPolicy` didn't ensure an authentication error is non-retriable.
+
## 1.18.1 (2025-07-10)
### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
index a08d3d0ffa6..b8348b7d82e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
@@ -123,9 +123,9 @@ func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTy
}
func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType {
- if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) {
+ if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) && isSubscriptionResource(parent) {
return ResourceGroupResourceType
- } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() {
+ } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && isTenantResource(parent) {
return SubscriptionResourceType
}
@@ -182,12 +182,12 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err
if len(parts) == 1 {
// subscriptions and resourceGroups are not valid ids without their names
- if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) {
+ if strings.EqualFold(parts[0], subscriptionsKey) && isTenantResource(parent) || strings.EqualFold(parts[0], resourceGroupsLowerKey) && isSubscriptionResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
// resourceGroup must contain either child or provider resource type
- if parent.ResourceType.String() == ResourceGroupResourceType.String() {
+ if isResourceGroupResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
@@ -196,7 +196,7 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err
if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) {
// provider resource can only be on a tenant or a subscription parent
- if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() {
+ if !isSubscriptionResource(parent) && !isTenantResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
@@ -225,3 +225,18 @@ func splitStringAndOmitEmpty(v, sep string) []string {
return r
}
+
+// isTenantResource returns true if the resourceID represents a tenant resource, i.e. its resource type matches TenantResourceType and it has no parent.
+func isTenantResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), TenantResourceType.String()) && resourceID.Parent == nil
+}
+
+// isSubscriptionResource returns true if the resourceID represents a subscription resource, i.e. its resource type matches SubscriptionResourceType and its parent is a tenant resource.
+func isSubscriptionResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), SubscriptionResourceType.String()) && isTenantResource(resourceID.Parent)
+}
+
+// isResourceGroupResource returns true if the resourceID represents a resource group resource, i.e. its resource type matches ResourceGroupResourceType and its parent is a subscription resource.
+func isResourceGroupResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), ResourceGroupResourceType.String()) && isSubscriptionResource(resourceID.Parent)
+}
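To make the stricter predicates concrete, a hedged sketch using the public arm.ParseResourceID entry point; the resource ID below is illustrative only:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	// A provider-specific hierarchy that itself contains a "resourceGroups"
	// segment. Before this fix, chooseResourceType could classify the nested
	// segment as the top-level ResourceGroupResourceType; the new
	// isSubscriptionResource guard reserves that type for direct children of
	// a subscription.
	id := "/subscriptions/0000/resourceGroups/rg/providers/Microsoft.Example/widgets/w1/resourceGroups/nested"
	rid, err := arm.ParseResourceID(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(rid.ResourceType) // a child type under Microsoft.Example, not ResourceGroupResourceType
}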
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 23788b14d92..8aebe5ce53b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.18.1"
+ Version = "v1.19.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
index e5309aa6c15..c3646feb55b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
@@ -16,9 +16,10 @@ import (
// APIVersionOptions contains options for API versions
type APIVersionOptions struct {
- // Location indicates where to set the version on a request, for example in a header or query param
+ // Location indicates where to set the version on a request, for example in a header or query param.
Location APIVersionLocation
- // Name is the name of the header or query parameter, for example "api-version"
+ // Name is the name of the header or query parameter, for example "api-version".
+ // For [APIVersionLocationPath] the value is not used.
Name string
}
@@ -30,6 +31,8 @@ const (
APIVersionLocationQueryParam = 0
// APIVersionLocationHeader indicates a header
APIVersionLocationHeader = 1
+ // APIVersionLocationPath indicates a path segment
+ APIVersionLocationPath = 2
)
// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version
@@ -55,7 +58,10 @@ type apiVersionPolicy struct {
// Do sets the request's API version, if the policy is configured to do so, replacing any prior value.
func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) {
- if a.version != "" {
+ // For API versions in the path, the client is responsible for
+ // setting the correct path segment with the version, so if the
+ // location is path, the policy is effectively a no-op.
+ if a.location != APIVersionLocationPath && a.version != "" {
if a.name == "" {
// user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions
return nil, errors.New("this client doesn't support overriding its API version")
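A short sketch of the client-side opt-in for the new location; the module name and version passed to the pipeline are placeholders:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newPipeline sketches a client whose API version lives in the URL path. With
// APIVersionLocationPath the policy above is a no-op, so the client must embed
// the version in the request path itself.
func newPipeline(opts *policy.ClientOptions) runtime.Pipeline {
	return runtime.NewPipeline("example", "v0.1.0", runtime.PipelineOptions{
		APIVersion: runtime.APIVersionOptions{
			Location: runtime.APIVersionLocationPath, // Name is unused for the path location
		},
	}, opts)
}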
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index 1950a2e5b3f..547e5a327ff 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -97,7 +97,9 @@ func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(p
as := acquiringResourceState{p: b, req: req, tro: tro}
tk, err := b.mainResource.Get(as)
if err != nil {
- return err
+ // consider this error non-retriable because if it could be resolved by
+ // retrying authentication, the credential would have done so already
+ return errorinfo.NonRetriableError(err)
}
req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
return nil
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index 4f90e447432..a89ae9b7b9d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -91,7 +91,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !poller.StatusCodeValid(resp) {
- return nil, errors.New("the operation failed or was cancelled")
+ return nil, exported.NewResponseError(resp)
}
// determine the polling method
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index 84e7941e4f3..ab63f9c031b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,35 @@
# Release History
+## 1.12.0 (2025-09-16)
+
+### Features Added
+- Added `DefaultAzureCredentialOptions.RequireAzureTokenCredentials`. `NewDefaultAzureCredential` returns an
+ error when this option is true and the environment variable `AZURE_TOKEN_CREDENTIALS` has no value.
+
+### Other Changes
+- `AzureDeveloperCLICredential` no longer hangs when `AZD_DEBUG` is set.
+- `GetToken` methods of `AzureCLICredential` and `AzureDeveloperCLICredential` return an error when
+ `TokenRequestOptions.Claims` has a value because these credentials can't acquire a token in that
+ case. The error messages describe the action required to get a token.
+
+## 1.11.0 (2025-08-05)
+
+### Other Changes
+- `DefaultAzureCredential` tries its next credential when a dev tool credential such as
+ `AzureCLICredential` returns an error.
+
+## 1.11.0-beta.1 (2025-07-15)
+
+### Features Added
+- `DefaultAzureCredential` allows selecting one of its credential types by name via environment variable
+ `AZURE_TOKEN_CREDENTIALS`. It will use only the selected type at runtime. For example, set
+ `AZURE_TOKEN_CREDENTIALS=WorkloadIdentityCredential` to have `DefaultAzureCredential` use only
+ `WorkloadIdentityCredential`.
+
+### Other Changes
+- By default, `ManagedIdentityCredential` retries IMDS requests for a maximum of ~70 seconds as recommended
+ in IMDS documentation. In previous versions, it would stop retrying after ~54 seconds by default.
+
## 1.10.1 (2025-06-10)
### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 91f4f05cc0c..838601d69c8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -18,7 +18,6 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues)
- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues)
- [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
- - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
- [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues)
- [Get additional help](#get-additional-help)
@@ -86,6 +85,7 @@ azlog.SetEvents(azidentity.EventAuthentication)
|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
[Enable logging](#enable-and-configure-logging) to get further diagnostic information. Consult the troubleshooting guide for underlying credential types for more information. [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues) [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues) [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues) |
|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|[Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token. If an unexpected credential is returning a token, check application configuration such as environment variables. Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role. |
|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.|
+|invalid AZURE_TOKEN_CREDENTIALS value "..."|AZURE_TOKEN_CREDENTIALS has an unexpected value|Specify a valid value as described in [DefaultAzureCredential documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|
## Troubleshoot EnvironmentCredential authentication issues
@@ -119,7 +119,6 @@ azlog.SetEvents(azidentity.EventAuthentication)
|---|---|---|
|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
-|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
@@ -158,14 +157,6 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
-### Azure Kubernetes Service managed identity
-
-#### Pod Identity
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).
-
## Troubleshoot AzureCLICredential authentication issues
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 4118f99ef2c..1646ff91167 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_191110b0dd"
+ "Tag": "go/azidentity_530ea4279b"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index 0fd03f45634..6944152c96e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -7,14 +7,11 @@
package azidentity
import (
- "bytes"
"context"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
- "os"
- "os/exec"
- "runtime"
"strings"
"sync"
"time"
@@ -26,8 +23,6 @@ import (
const credNameAzureCLI = "AzureCLICredential"
-type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error)
-
// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
type AzureCLICredentialOptions struct {
// AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
@@ -45,15 +40,8 @@ type AzureCLICredentialOptions struct {
// inDefaultChain is true when the credential is part of DefaultAzureCredential
inDefaultChain bool
- // tokenProvider is used by tests to fake invoking az
- tokenProvider azTokenProvider
-}
-
-// init returns an instance of AzureCLICredentialOptions initialized with default values.
-func (o *AzureCLICredentialOptions) init() {
- if o.tokenProvider == nil {
- o.tokenProvider = defaultAzTokenProvider
- }
+ // exec is used by tests to fake invoking az
+ exec executor
}
// AzureCLICredential authenticates as the identity logged in to the Azure CLI.
@@ -80,7 +68,9 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
if cp.TenantID != "" && !validTenantID(cp.TenantID) {
return nil, errInvalidTenantID
}
- cp.init()
+ if cp.exec == nil {
+ cp.exec = shellExec
+ }
cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants)
return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
}
@@ -99,14 +89,37 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ
if err != nil {
return at, err
}
+ // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
+ resource := strings.TrimSuffix(opts.Scopes[0], defaultSuffix)
+ command := "az account get-access-token -o json --resource " + resource
+ tenantArg := ""
+ if tenant != "" {
+ tenantArg = " --tenant " + tenant
+ command += tenantArg
+ }
+ if c.opts.Subscription != "" {
+ // subscription needs quotes because it may contain spaces
+ command += ` --subscription "` + c.opts.Subscription + `"`
+ }
+ if opts.Claims != "" {
+ encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims))
+ return at, fmt.Errorf(
+ "%s.GetToken(): Azure CLI requires multifactor authentication or additional claims. Run this command then retry the operation: az login%s --claims-challenge %s",
+ credNameAzureCLI,
+ tenantArg,
+ encoded,
+ )
+ }
+
c.mu.Lock()
defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription)
+
+ b, err := c.opts.exec(ctx, credNameAzureCLI, command)
if err == nil {
at, err = c.createAccessToken(b)
}
if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ err = unavailableIfInDAC(err, c.opts.inDefaultChain)
return at, err
}
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", "))
@@ -114,63 +127,6 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ
return at, nil
}
-// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) {
- // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
- resource := strings.TrimSuffix(scopes[0], defaultSuffix)
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "az account get-access-token -o json --resource " + resource
- if tenantID != "" {
- commandLine += " --tenant " + tenantID
- }
- if subscription != "" {
- // subscription needs quotes because it may contain spaces
- commandLine += ` --subscription "` + subscription + `"`
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
- cliCmd.WaitDelay = 100 * time.Millisecond
-
- stdout, err := cliCmd.Output()
- if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
- // The child process wrote to stdout and exited without closing it.
- // Swallow this error and return stdout because it may contain a token.
- return stdout, nil
- }
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") {
- msg = "Azure CLI not found on path"
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureCLI, msg)
- }
-
- return stdout, nil
-}
-
func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
t := struct {
AccessToken string `json:"accessToken"`
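For context, a minimal caller of the rewritten credential; the ARM scope is the conventional example and the comments restate behavior from the code above:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		panic(err)
	}
	// GetToken now builds a command of the form
	// "az account get-access-token -o json --resource <v1 resource>" and runs
	// it via the shared shellExec executor. If TokenRequestOptions.Claims is
	// set, it instead returns an error telling the user to rerun
	// "az login --claims-challenge <base64>".
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.ExpiresOn)
}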
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
index 1bd3720b649..f97bf95df9b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
@@ -7,14 +7,11 @@
package azidentity
import (
- "bytes"
"context"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
- "os"
- "os/exec"
- "runtime"
"strings"
"sync"
"time"
@@ -24,9 +21,10 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
-
-type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error)
+const (
+ credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
+ mfaRequired = "Azure Developer CLI requires multifactor authentication or additional claims"
+)
// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
type AzureDeveloperCLICredentialOptions struct {
@@ -41,8 +39,8 @@ type AzureDeveloperCLICredentialOptions struct {
// inDefaultChain is true when the credential is part of DefaultAzureCredential
inDefaultChain bool
- // tokenProvider is used by tests to fake invoking azd
- tokenProvider azdTokenProvider
+ // exec is used by tests to fake invoking azd
+ exec executor
}
// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI].
@@ -62,8 +60,8 @@ func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions)
if cp.TenantID != "" && !validTenantID(cp.TenantID) {
return nil, errInvalidTenantID
}
- if cp.tokenProvider == nil {
- cp.tokenProvider = defaultAzdTokenProvider
+ if cp.exec == nil {
+ cp.exec = shellExec
}
return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
}
@@ -75,23 +73,52 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.
if len(opts.Scopes) == 0 {
return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope")
}
+ command := "azd auth token -o json --no-prompt"
for _, scope := range opts.Scopes {
if !validScope(scope) {
return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope)
}
+ command += " --scope " + scope
}
tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants)
if err != nil {
return at, err
}
+ if tenant != "" {
+ command += " --tenant-id " + tenant
+ }
+ commandNoClaims := command
+ if opts.Claims != "" {
+ encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims))
+ command += " --claims " + encoded
+ }
+
c.mu.Lock()
defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant)
+
+ b, err := c.opts.exec(ctx, credNameAzureDeveloperCLI, command)
if err == nil {
at, err = c.createAccessToken(b)
}
if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ msg := err.Error()
+ switch {
+ case strings.Contains(msg, "unknown flag: --claims"):
+ err = newAuthenticationFailedError(
+ credNameAzureDeveloperCLI,
+ mfaRequired+", however the installed version doesn't support this. Upgrade to version 1.18.1 or later",
+ nil,
+ )
+ case opts.Claims != "":
+ err = newAuthenticationFailedError(
+ credNameAzureDeveloperCLI,
+ mfaRequired+". Run this command then retry the operation: "+commandNoClaims,
+ nil,
+ )
+ case strings.Contains(msg, "azd auth login"):
+ err = newCredentialUnavailableError(credNameAzureDeveloperCLI, `please run "azd auth login" from a command prompt to authenticate before using this credential`)
+ }
+ err = unavailableIfInDAC(err, c.opts.inDefaultChain)
return at, err
}
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", "))
@@ -99,61 +126,6 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.
return at, nil
}
-// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) {
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "azd auth token -o json"
- if tenant != "" {
- commandLine += " --tenant-id " + tenant
- }
- for _, scope := range scopes {
- commandLine += " --scope " + scope
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
- cliCmd.WaitDelay = 100 * time.Millisecond
-
- stdout, err := cliCmd.Output()
- if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
- // The child process wrote to stdout and exited without closing it.
- // Swallow this error and return stdout because it may contain a token.
- return stdout, nil
- }
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") {
- msg = "Azure Developer CLI not found on path"
- } else if strings.Contains(msg, "azd auth login") {
- msg = `please run "azd auth login" from a command prompt to authenticate before using this credential`
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
- }
- return stdout, nil
-}
-
func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
t := struct {
AccessToken string `json:"token"`
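A hedged sketch of the claims-challenge flow this change enables: callers retry GetToken with the challenge in TokenRequestOptions.Claims, the credential base64-encodes it onto "azd auth token --claims ...", and an "unknown flag: --claims" failure from an older azd maps to the upgrade error above.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// retryWithClaims is a sketch only; claims would come from a service's claims
// challenge (for example a CAE challenge parsed from a 401 response).
func retryWithClaims(ctx context.Context, cred *azidentity.AzureDeveloperCLICredential, claims string) (azcore.AccessToken, error) {
	return cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
		Claims: claims,
	})
}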
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
index c3af0cdc2d6..51dd9793908 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -41,6 +41,3 @@ extends:
GenerateVMJobs: true
Path: sdk/azidentity/managed-identity-matrix.json
Selection: sparse
- MatrixReplace:
- - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi
- - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index f2a31ee6ac2..c041a52dbbe 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -19,6 +19,15 @@ import (
const azureTokenCredentials = "AZURE_TOKEN_CREDENTIALS"
+// bit flags NewDefaultAzureCredential uses to parse AZURE_TOKEN_CREDENTIALS
+const (
+ env = uint8(1) << iota
+ workloadIdentity
+ managedIdentity
+ az
+ azd
+)
+
// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential.
// These options may not apply to all credentials in the chain.
type DefaultAzureCredentialOptions struct {
@@ -39,6 +48,10 @@ type DefaultAzureCredentialOptions struct {
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+ // RequireAzureTokenCredentials determines whether NewDefaultAzureCredential returns an error when the environment
+ // variable AZURE_TOKEN_CREDENTIALS has no value.
+ RequireAzureTokenCredentials bool
+
// TenantID sets the default tenant for authentication via the Azure CLI, Azure Developer CLI, and workload identity.
TenantID string
}
@@ -63,6 +76,20 @@ type DefaultAzureCredentialOptions struct {
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
// every subsequent authentication.
//
+// # Selecting credentials
+//
+// Set environment variable AZURE_TOKEN_CREDENTIALS to select a subset of the credential chain described above.
+// DefaultAzureCredential will try only the specified credential(s), but its other behavior remains the same.
+// Valid values for AZURE_TOKEN_CREDENTIALS are the name of any single type in the above chain, for example
+// "EnvironmentCredential" or "AzureCLICredential", and these special values:
+//
+// - "dev": try [AzureCLICredential] and [AzureDeveloperCLICredential], in that order
+// - "prod": try [EnvironmentCredential], [WorkloadIdentityCredential], and [ManagedIdentityCredential], in that order
+//
+// [DefaultAzureCredentialOptions].RequireAzureTokenCredentials controls whether AZURE_TOKEN_CREDENTIALS must be set.
+// NewDefaultAzureCredential returns an error when RequireAzureTokenCredentials is true and AZURE_TOKEN_CREDENTIALS
+// has no value.
+//
// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
type DefaultAzureCredential struct {
chain *ChainedTokenCredential
@@ -70,34 +97,46 @@ type DefaultAzureCredential struct {
// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults.
func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) {
+ if options == nil {
+ options = &DefaultAzureCredentialOptions{}
+ }
+
var (
- creds []azcore.TokenCredential
- errorMessages []string
- includeDev, includeProd = true, true
+ creds []azcore.TokenCredential
+ errorMessages []string
+ selected = env | workloadIdentity | managedIdentity | az | azd
)
- if c, ok := os.LookupEnv(azureTokenCredentials); ok {
- switch c {
- case "dev":
- includeProd = false
- case "prod":
- includeDev = false
+ if atc, ok := os.LookupEnv(azureTokenCredentials); ok {
+ switch {
+ case atc == "dev":
+ selected = az | azd
+ case atc == "prod":
+ selected = env | workloadIdentity | managedIdentity
+ case strings.EqualFold(atc, credNameEnvironment):
+ selected = env
+ case strings.EqualFold(atc, credNameWorkloadIdentity):
+ selected = workloadIdentity
+ case strings.EqualFold(atc, credNameManagedIdentity):
+ selected = managedIdentity
+ case strings.EqualFold(atc, credNameAzureCLI):
+ selected = az
+ case strings.EqualFold(atc, credNameAzureDeveloperCLI):
+ selected = azd
default:
- return nil, fmt.Errorf(`invalid %s value %q. Valid values are "dev" and "prod"`, azureTokenCredentials, c)
+ return nil, fmt.Errorf(`invalid %s value %q. Valid values are "dev", "prod", or the name of any credential type in the default chain. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information`, azureTokenCredentials, atc)
}
+ } else if options.RequireAzureTokenCredentials {
+ return nil, fmt.Errorf("%s must be set when RequireAzureTokenCredentials is true. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information", azureTokenCredentials)
}
- if options == nil {
- options = &DefaultAzureCredentialOptions{}
- }
additionalTenants := options.AdditionallyAllowedTenants
if len(additionalTenants) == 0 {
if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" {
additionalTenants = strings.Split(tenants, ";")
}
}
-
- if includeProd {
+ if selected&env != 0 {
envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
@@ -107,9 +146,10 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
creds = append(creds, envCred)
} else {
errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameEnvironment, err: err})
}
-
+ }
+ if selected&workloadIdentity != 0 {
wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
AdditionallyAllowedTenants: additionalTenants,
ClientOptions: options.ClientOptions,
@@ -122,7 +162,8 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
}
-
+ }
+ if selected&managedIdentity != 0 {
o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
if ID, ok := os.LookupEnv(azureClientID); ok {
o.ID = ClientID(ID)
@@ -135,18 +176,24 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
}
}
- if includeDev {
- azCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID})
+ if selected&az != 0 {
+ azCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ inDefaultChain: true,
+ })
if err == nil {
creds = append(creds, azCred)
} else {
errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
}
-
+ }
+ if selected&azd != 0 {
azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
AdditionallyAllowedTenants: additionalTenants,
TenantID: options.TenantID,
+ inDefaultChain: true,
})
if err == nil {
creds = append(creds, azdCred)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
index be963d3a2af..14f8a031265 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
@@ -7,22 +7,73 @@
package azidentity
import (
+ "bytes"
+ "context"
"errors"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
"time"
)
// cliTimeout is the default timeout for authentication attempts via CLI tools
const cliTimeout = 10 * time.Second
-// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a
+// executor runs a command and returns its output or an error
+type executor func(ctx context.Context, credName, command string) ([]byte, error)
+
+var shellExec = func(ctx context.Context, credName, command string) ([]byte, error) {
+ // set a default timeout for this authentication iff the caller hasn't done so already
+ var cancel context.CancelFunc
+ if _, hasDeadline := ctx.Deadline(); !hasDeadline {
+ ctx, cancel = context.WithTimeout(ctx, cliTimeout)
+ defer cancel()
+ }
+ var cmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ dir := os.Getenv("SYSTEMROOT")
+ if dir == "" {
+ return nil, newCredentialUnavailableError(credName, `environment variable "SYSTEMROOT" has no value`)
+ }
+ cmd = exec.CommandContext(ctx, "cmd.exe", "/c", command)
+ cmd.Dir = dir
+ } else {
+ cmd = exec.CommandContext(ctx, "/bin/sh", "-c", command)
+ cmd.Dir = "/bin"
+ }
+ cmd.Env = os.Environ()
+ stderr := bytes.Buffer{}
+ cmd.Stderr = &stderr
+ cmd.WaitDelay = 100 * time.Millisecond
+
+ stdout, err := cmd.Output()
+ if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
+ // The child process wrote to stdout and exited without closing it.
+ // Swallow this error and return stdout because it may contain a token.
+ return stdout, nil
+ }
+ if err != nil {
+ msg := stderr.String()
+ var exErr *exec.ExitError
+ if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.Contains(msg, "' is not recognized") {
+ return nil, newCredentialUnavailableError(credName, "CLI executable not found on path")
+ }
+ if msg == "" {
+ msg = err.Error()
+ }
+ return nil, newAuthenticationFailedError(credName, msg, nil)
+ }
+
+ return stdout, nil
+}
+
+// unavailableIfInDAC returns err or, if the credential was invoked by DefaultAzureCredential, a
// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try
// the next credential in its chain (another developer credential).
-func unavailableIfInChain(err error, inDefaultChain bool) error {
- if err != nil && inDefaultChain {
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
- }
+func unavailableIfInDAC(err error, inDefaultChain bool) error {
+ if err != nil && inDefaultChain && !errors.As(err, new(credentialUnavailable)) {
+ err = NewCredentialUnavailableError(err.Error())
}
return err
}
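The deadline handling at the top of shellExec is a reusable idiom; a minimal restatement of it, under no additional assumptions:

package example

import (
	"context"
	"time"
)

// withDefaultTimeout applies a default deadline only when the caller hasn't
// already set one, so an application-provided deadline always wins. This is
// the same check shellExec performs before invoking the CLI.
func withDefaultTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if _, hasDeadline := ctx.Deadline(); !hasDeadline {
		return context.WithTimeout(ctx, d)
	}
	return ctx, func() {}
}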
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index 9b5e17dcde8..f04d40ea4ec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -18,7 +18,10 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
+const (
+ credNameEnvironment = "EnvironmentCredential"
+ envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
+)
// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential
type EnvironmentCredentialOptions struct {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
index edd56f9d571..063325c69d6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
@@ -4,14 +4,13 @@
"Agent": {
"msi_image": {
"ArmTemplateParameters": "@{deployResources = $true}",
- "OSVmImage": "env:LINUXNEXTVMIMAGE",
+ "OSVmImage": "env:LINUXVMIMAGE",
"Pool": "env:LINUXPOOL"
}
},
"GoVersion": [
"env:GO_VERSION_PREVIOUS"
- ],
- "IDENTITY_IMDS_AVAILABLE": "1"
+ ]
}
]
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index b3a0f85883f..0735d1fcbef 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -54,10 +54,10 @@ type managedIdentityClient struct {
// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
func setIMDSRetryOptionDefaults(o *policy.RetryOptions) {
if o.MaxRetries == 0 {
- o.MaxRetries = 5
+ o.MaxRetries = 6
}
if o.MaxRetryDelay == 0 {
- o.MaxRetryDelay = 1 * time.Minute
+ o.MaxRetryDelay = 25 * time.Second
}
if o.RetryDelay == 0 {
o.RetryDelay = 2 * time.Second
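A back-of-envelope check of the new defaults against the ~70 second figure in the azidentity changelog; this ignores per-request time and azcore's jitter, so treat it as approximate:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Exponential backoff starting at 2s, capped at 25s, over 6 retries:
	// 2 + 4 + 8 + 16 + 25 + 25 = 80s of pure delay, in the same ballpark as
	// the ~70s of retrying that IMDS documentation recommends.
	total, delay := time.Duration(0), 2*time.Second
	for i := 0; i < 6; i++ {
		if delay > 25*time.Second {
			delay = 25 * time.Second
		}
		total += delay
		delay *= 2
	}
	fmt.Println(total) // 1m20s
}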
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
index 67f97fbb2b0..c5634cd21d0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
@@ -41,7 +41,7 @@ if ($CI) {
az account set --subscription $SubscriptionId
}
-Write-Host "Building container"
+Write-Host "##[group]Building container"
$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test"
Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @"
FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder
@@ -62,11 +62,34 @@ CMD ["./managed-id-test"]
docker build -t $image "$PSScriptRoot"
az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME']
docker push $image
+Write-Host "##[endgroup]"
$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP']
+Write-Host "##[group]Deploying to VM"
+# az will return 0 when the script fails on the VM, so the script prints a UUID to indicate all commands succeeded
+$uuid = [guid]::NewGuid().ToString()
+$vmScript = @"
+az acr login -n $($DeploymentOutputs['AZIDENTITY_ACR_NAME']) && \
+sudo docker run \
+-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) \
+-e AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) \
+-p 80:8080 -d \
+$image && \
+/usr/bin/echo $uuid
+"@
+$output = az vm run-command invoke -g $rg -n $DeploymentOutputs['AZIDENTITY_VM_NAME'] --command-id RunShellScript --scripts "$vmScript" | Out-String
+Write-Host $output
+if (-not $output.Contains($uuid)) {
+ throw "couldn't start container on VM"
+}
+Write-Host "##[endgroup]"
+
# ACI is easier to provision here than in the bicep file because the image isn't available until this point
-Write-Host "Deploying Azure Container Instance"
+Write-Host "##[group]Deploying Azure Container Instance"
$aciName = "azidentity-test"
az container create -g $rg -n $aciName --image $image `
--acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
@@ -85,23 +108,27 @@ az container create -g $rg -n $aciName --image $image `
FUNCTIONS_CUSTOMHANDLER_PORT=80
$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv
Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP"
+Write-Host "##[endgroup]"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
-Write-Host "Deploying to Azure Functions"
+Write-Host "##[group]Deploying to Azure Functions"
$container = docker create $image
docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/"
docker rm -v $container
Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force
az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip
+Write-Host "##[endgroup]"
-Write-Host "Creating federated identity"
+Write-Host "##[group]Creating federated identity"
$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME']
$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME']
$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv
$podName = "azidentity-test"
$serviceAccountName = "workload-identity-sa"
-az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName
-Write-Host "Deploying to AKS"
+az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName --audiences api://AzureADTokenExchange
+Write-Host "##[endgroup]"
+
+Write-Host "##[group]Deploying to AKS"
az aks get-credentials -g $rg -n $aksName
az aks update --attach-acr $DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName
Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @"
@@ -138,3 +165,4 @@ spec:
"@
kubectl apply -f "$PSScriptRoot/k8s.yaml"
Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName"
+Write-Host "##[endgroup]"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
index 135feb0178e..cb3b5f4df42 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
@@ -19,7 +19,10 @@ param location string = resourceGroup().location
// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles
var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d')
-var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1')
+var blobReader = subscriptionResourceId(
+ 'Microsoft.Authorization/roleDefinitions',
+ '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'
+)
resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) {
kind: 'StorageV2'
@@ -60,6 +63,16 @@ resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-
scope: containerRegistry
}
+resource acrPullVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
+ name: guid(resourceGroup().id, acrPull, 'vm')
+ properties: {
+ principalId: deployResources ? vm.identity.principalId : ''
+ principalType: 'ServicePrincipal'
+ roleDefinitionId: acrPull
+ }
+ scope: containerRegistry
+}
+
resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
scope: saUserAssigned
name: guid(resourceGroup().id, blobReader, usermgdid.id)
@@ -80,6 +93,16 @@ resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if
scope: sa
}
+resource blobRoleVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
+ scope: sa
+ name: guid(resourceGroup().id, blobReader, 'vm')
+ properties: {
+ principalId: deployResources ? vm.identity.principalId : ''
+ roleDefinitionId: blobReader
+ principalType: 'ServicePrincipal'
+ }
+}
+
resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) {
location: location
name: uniqueString(resourceGroup().id)
@@ -215,6 +238,143 @@ resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deplo
}
}
+resource publicIP 'Microsoft.Network/publicIPAddresses@2023-05-01' = if (deployResources) {
+ name: '${baseName}PublicIP'
+ location: location
+ sku: {
+ name: 'Standard'
+ }
+ properties: {
+ publicIPAllocationMethod: 'Static'
+ }
+}
+
+resource nsg 'Microsoft.Network/networkSecurityGroups@2024-07-01' = if (deployResources) {
+ name: '${baseName}NSG'
+ location: location
+ properties: {
+ securityRules: [
+ {
+ name: 'AllowHTTP'
+ properties: {
+ description: 'Allow HTTP traffic on port 80'
+ protocol: 'Tcp'
+ sourcePortRange: '*'
+ destinationPortRange: '80'
+ sourceAddressPrefix: '*'
+ destinationAddressPrefix: '*'
+ access: 'Allow'
+ priority: 1000
+ direction: 'Inbound'
+ }
+ }
+ ]
+ }
+}
+
+resource vnet 'Microsoft.Network/virtualNetworks@2024-07-01' = if (deployResources) {
+ name: '${baseName}vnet'
+ location: location
+ properties: {
+ addressSpace: {
+ addressPrefixes: [
+ '10.0.0.0/16'
+ ]
+ }
+ subnets: [
+ {
+ name: '${baseName}subnet'
+ properties: {
+ addressPrefix: '10.0.0.0/24'
+ defaultOutboundAccess: false
+ networkSecurityGroup: {
+ id: deployResources ? nsg.id : ''
+ }
+ }
+ }
+ ]
+ }
+}
+
+resource nic 'Microsoft.Network/networkInterfaces@2024-07-01' = if (deployResources) {
+ name: '${baseName}NIC'
+ location: location
+ properties: {
+ ipConfigurations: [
+ {
+ name: 'myIPConfig'
+ properties: {
+ privateIPAllocationMethod: 'Dynamic'
+ publicIPAddress: {
+ id: deployResources ? publicIP.id : ''
+ }
+ subnet: {
+ id: deployResources ? vnet.properties.subnets[0].id : ''
+ }
+ }
+ }
+ ]
+ }
+}
+
+resource vm 'Microsoft.Compute/virtualMachines@2024-07-01' = if (deployResources) {
+ name: '${baseName}vm'
+ location: location
+ identity: {
+ type: 'SystemAssigned, UserAssigned'
+ userAssignedIdentities: {
+ '${deployResources ? usermgdid.id: ''}': {}
+ }
+ }
+ properties: {
+ hardwareProfile: {
+ vmSize: 'Standard_DS1_v2'
+ }
+ osProfile: {
+ adminUsername: adminUser
+ computerName: '${baseName}vm'
+ customData: base64('''
+#cloud-config
+package_update: true
+packages:
+ - docker.io
+runcmd:
+ - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ - az login --identity --allow-no-subscriptions
+''')
+ linuxConfiguration: {
+ disablePasswordAuthentication: true
+ ssh: {
+ publicKeys: [
+ {
+ path: '/home/${adminUser}/.ssh/authorized_keys'
+ keyData: sshPubKey
+ }
+ ]
+ }
+ }
+ }
+ networkProfile: {
+ networkInterfaces: [
+ {
+ id: deployResources ? nic.id : ''
+ }
+ ]
+ }
+ storageProfile: {
+ imageReference: {
+ publisher: 'Canonical'
+ offer: 'ubuntu-24_04-lts'
+ sku: 'server'
+ version: 'latest'
+ }
+ osDisk: {
+ createOption: 'FromImage'
+ }
+ }
+ }
+}
+
output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : ''
output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : ''
output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : ''
@@ -226,3 +386,5 @@ output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id
output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : ''
+output AZIDENTITY_VM_NAME string = deployResources ? vm.name : ''
+output AZIDENTITY_VM_IP string = deployResources ? publicIP.properties.ipAddress : ''
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 2b767762fa8..4c88605366d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
- version = "v1.10.1"
+ version = "v1.12.0"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
index 8ee66b52676..779657b23b8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
@@ -6,6 +6,8 @@
package errorinfo
+import "errors"
+
// NonRetriable represents a non-transient error. This works in
// conjunction with the retry policy, indicating that the error condition
// is idempotent, so no retries will be attempted.
@@ -15,10 +17,14 @@ type NonRetriable interface {
NonRetriable()
}
-// NonRetriableError marks the specified error as non-retriable.
-// This function takes an error as input and returns a new error that is marked as non-retriable.
+// NonRetriableError ensures the specified error is [NonRetriable]. If
+// the error is already [NonRetriable], it returns that error unchanged.
+// Otherwise, it returns a new, [NonRetriable] error.
func NonRetriableError(err error) error {
- return &nonRetriableError{err}
+ if !errors.As(err, new(NonRetriable)) {
+ err = &nonRetriableError{err}
+ }
+ return err
}
// nonRetriableError is a struct that embeds the error interface.
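
The change makes `NonRetriableError` idempotent: it only wraps when nothing in the chain already satisfies `NonRetriable`, so repeated calls no longer stack wrapper upon wrapper. A standalone sketch of the pattern (the real package lives under `sdk/internal` and cannot be imported externally, so the types here are local stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// NonRetriable mirrors the interface consulted by the SDK's retry policy.
type NonRetriable interface {
	error
	NonRetriable()
}

type nonRetriableError struct{ error }

func (nonRetriableError) NonRetriable() {}

// markNonRetriable mirrors the patched behavior: wrap err only when no error
// in its chain is already NonRetriable.
func markNonRetriable(err error) error {
	if !errors.As(err, new(NonRetriable)) {
		err = &nonRetriableError{err}
	}
	return err
}

func main() {
	err := markNonRetriable(errors.New("boom"))
	same := markNonRetriable(err) // second call returns err unchanged
	fmt.Println(err == same)      // true
}
```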
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
index cda678e3342..c6baf209477 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -143,9 +143,10 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
headerErr := q.Get("error")
if headerErr != "" {
desc := html.EscapeString(q.Get("error_description"))
+ escapedHeaderErr := html.EscapeString(headerErr)
// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
// change this to s.error() and make s.error() write the failPage instead of an error code.
- _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
+ _, _ = w.Write([]byte(fmt.Sprintf(failPage, escapedHeaderErr, desc)))
s.putResult(Result{Err: fmt.Errorf("%s", desc)})
return
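
The fix escapes the `error` query parameter with `html.EscapeString` before interpolating it into the fail page, matching the treatment `error_description` already received and closing a reflected-XSS vector on the local redirect listener. For illustration:

```go
package main

import (
	"fmt"
	"html"
)

func main() {
	raw := `<script>alert(1)</script>`
	// Escaped output is inert when embedded in the HTML fail page.
	fmt.Println(html.EscapeString(raw))
	// Output: &lt;script&gt;alert(1)&lt;/script&gt;
}
```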
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
index c3c4a96fc30..3f403746404 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -46,9 +46,11 @@ type jsonCaller interface {
JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error
}
+// For backward compatibility, accept both old and new China endpoints for a transition period.
var aadTrustedHostList = map[string]bool{
"login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
- "login.partner.microsoftonline.cn": true, // Microsoft Azure China
+ "login.partner.microsoftonline.cn": true, // Microsoft Azure China (new)
+ "login.chinacloudapi.cn": true, // Microsoft Azure China (legacy, backward compatibility)
"login.microsoftonline.de": true, // Microsoft Azure Blackforest
"login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
"login.microsoftonline.us": true, // Microsoft Azure US Government
@@ -98,6 +100,41 @@ func (r *TenantDiscoveryResponse) Validate() error {
return nil
}
+// ValidateIssuerMatchesAuthority validates that the issuer in the TenantDiscoveryResponse matches the authority.
+// This is used to identify security or configuration issues in authorities and the OIDC endpoint.
+func (r *TenantDiscoveryResponse) ValidateIssuerMatchesAuthority(authorityURI string, aliases map[string]bool) error {
+
+ if authorityURI == "" {
+ return errors.New("TenantDiscoveryResponse: empty authorityURI provided for validation")
+ }
+
+ // Parse the issuer URL
+ issuerURL, err := url.Parse(r.Issuer)
+ if err != nil {
+ return fmt.Errorf("TenantDiscoveryResponse: failed to parse issuer URL: %w", err)
+ }
+
+ // Even if it doesn't match the authority, issuers from known and trusted hosts are valid
+ if aliases != nil && aliases[issuerURL.Host] {
+ return nil
+ }
+
+ // Parse the authority URL for comparison
+ authorityURL, err := url.Parse(authorityURI)
+ if err != nil {
+ return fmt.Errorf("TenantDiscoveryResponse: failed to parse authority URL: %w", err)
+ }
+
+ // Check if the scheme and host match (paths can be ignored when validating the issuer)
+ if issuerURL.Scheme == authorityURL.Scheme && issuerURL.Host == authorityURL.Host {
+ return nil
+ }
+
+ // If we get here, validation failed
+ return fmt.Errorf("TenantDiscoveryResponse: issuer from OIDC discovery '%s' does not match authority '%s' or a known pattern",
+ r.Issuer, authorityURI)
+}
+
type InstanceDiscoveryMetadata struct {
PreferredNetwork string `json:"preferred_network"`
PreferredCache string `json:"preferred_cache"`
@@ -354,6 +391,8 @@ type Info struct {
Tenant string
Region string
InstanceDiscoveryDisabled bool
+ // InstanceDiscoveryMetadata stores the metadata from AAD instance discovery
+ InstanceDiscoveryMetadata []InstanceDiscoveryMetadata
}
// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
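
The new validation accepts an issuer when its host is a known alias from instance discovery, or when its scheme and host match the configured authority; paths (such as the tenant segment) are deliberately ignored. A standalone sketch of that comparison, with illustrative inputs:

```go
package main

import (
	"fmt"
	"net/url"
)

// issuerMatches mirrors the check above: accept when the issuer host is a
// trusted alias, or when scheme and host equal the authority's.
func issuerMatches(issuer, authority string, aliases map[string]bool) (bool, error) {
	iss, err := url.Parse(issuer)
	if err != nil {
		return false, err
	}
	if aliases[iss.Host] {
		return true, nil
	}
	auth, err := url.Parse(authority)
	if err != nil {
		return false, err
	}
	return iss.Scheme == auth.Scheme && iss.Host == auth.Host, nil
}

func main() {
	ok, _ := issuerMatches(
		"https://login.microsoftonline.com/common/v2.0",
		"https://login.microsoftonline.com/organizations",
		nil,
	)
	fmt.Println(ok) // true: same scheme and host, paths ignored
}
```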
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
index 4030ec8d8f1..d220a99466c 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -21,10 +21,12 @@ import (
type cacheEntry struct {
Endpoints authority.Endpoints
ValidForDomainsInList map[string]bool
+ // Aliases stores host aliases from instance discovery for quick lookup
+ Aliases map[string]bool
}
func createcacheEntry(endpoints authority.Endpoints) cacheEntry {
- return cacheEntry{endpoints, map[string]bool{}}
+ return cacheEntry{endpoints, map[string]bool{}, map[string]bool{}}
}
// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition.
@@ -71,10 +73,15 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints)
+ if err := resp.ValidateIssuerMatchesAuthority(authorityInfo.CanonicalAuthorityURI,
+ m.cache[authorityInfo.CanonicalAuthorityURI].Aliases); err != nil {
+ return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err)
+ }
+
return endpoints, nil
}
-// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false.
+// cachedEndpoints returns the cached endpoints if they exist. If not, we return false.
func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) {
m.mu.Lock()
defer m.mu.Unlock()
@@ -113,6 +120,13 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
}
}
+ // Extract aliases from instance discovery metadata and add to cache
+ for _, metadata := range authorityInfo.InstanceDiscoveryMetadata {
+ for _, alias := range metadata.Aliases {
+ updatedCacheEntry.Aliases[alias] = true
+ }
+ }
+
m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
}
@@ -127,12 +141,14 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut
if err != nil {
return "", err
}
+ authorityInfo.InstanceDiscoveryMetadata = resp.Metadata
return resp.TenantDiscoveryEndpoint, nil
} else if authorityInfo.Region != "" {
resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
if err != nil {
return "", err
}
+ authorityInfo.InstanceDiscoveryMetadata = resp.Metadata
return resp.TenantDiscoveryEndpoint, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 1820ff0fba3..372d0f9839a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.39.2"
+const goModuleVersion = "1.39.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
index 993929bd9b7..4881ae1445b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
@@ -64,6 +64,11 @@ func (r *timeoutReadCloser) Close() error {
// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
// response body so that a read that takes too long will return an error.
+//
+// Deprecated: This API was previously exposed to customize behavior of the
+// Kinesis service. That customization has been removed and this middleware's
+// implementation can cause panics within the standard library networking loop.
+// See #2752.
func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
}
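
With the middleware deprecated, the conventional way to bound slow response-body reads is a context deadline on the operation, which covers the whole request including body consumption. A sketch under assumptions (DynamoDB client, 30-second budget); this is an illustration, not the SDK's prescribed replacement:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Bound the whole operation, body reads included, with a context
	// deadline instead of the deprecated response-read middleware.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := client.ListTables(ctx, &dynamodb.ListTablesInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tables:", out.TableNames)
}
```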
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 526537b8bb7..b6057249bdf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,25 @@
+# v1.31.17 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.31.16 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.15 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.14 (2025-10-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.13 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.31.12 (2025-09-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 8274236780f..e27cb6f4d8f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.31.12"
+const goModuleVersion = "1.31.17"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 015f24d3be7..463dce685cb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,25 @@
+# v1.18.21 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.18.20 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.19 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.18 (2025-10-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.17 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.18.16 (2025-09-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index 03357b7603e..89e632cc24b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.16"
+const goModuleVersion = "1.18.21"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 6b8c454739d..cfb9d77fe05 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v1.18.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.18.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.18.9 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index ce89f5829de..1af019aec27 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.9"
+const goModuleVersion = "1.18.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index b34f47c9151..0981931aa78 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v1.4.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.4.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.4.9 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index ebc2f6a7651..970e61deed8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.4.9"
+const goModuleVersion = "1.4.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
index 6ad5df64691..6ab4d9669fb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
@@ -386,6 +386,13 @@ var partitions = []Partition{
SupportsFIPS: nil,
SupportsDualStack: nil,
},
+ "us-isob-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
},
},
{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
index b346b0be9b9..c789264d2b0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -194,6 +194,9 @@
},
"us-isob-east-1" : {
"description" : "US ISOB East (Ohio)"
+ },
+ "us-isob-west-1" : {
+ "description" : "US ISOB West"
}
}
}, {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 8de3bfec8c7..9c3aafe1da1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v2.7.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v2.7.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.7.9 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index c5168da33a3..9675feb419b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.7.9"
+const goModuleVersion = "2.7.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
index f729db535b7..4791d328c04 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.8.4 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+
# v1.8.3 (2025-02-18)
* **Bug Fix**: Bump go version to 1.22
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
index 00df0e3cb9b..f94970e7742 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.8.3"
+const goModuleVersion = "1.8.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
index 607fc092204..c05f82ea411 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.13.3 (2025-11-04)
+
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.13.2 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+
# v1.13.1 (2025-08-27)
* **Dependency Update**: Update to smithy-go v1.23.0.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
index 7a0b6aae29a..6a4c336055a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.1"
+const goModuleVersion = "1.13.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 6f143784e12..2021865dd0e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v1.13.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.13.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.13.9 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index bc347369d8b..9d29218c313 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.9"
+const goModuleVersion = "1.13.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index 4c5e39d8739..4cb0c9100b3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,22 @@
+# v1.30.1 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.30.0 (2025-10-30)
+
+* **Feature**: Update endpoint ruleset parameters casing
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.8 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.7 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.29.6 (2025-09-29)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
index 1a88fe4df8e..1499c0a9591 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -30,7 +30,7 @@
"types/types.go",
"validators.go"
],
- "go": "1.22",
+ "go": "1.23",
"module": "github.com/aws/aws-sdk-go-v2/service/sso",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 3628768ce40..c84f88075c5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.29.6"
+const goModuleVersion = "1.30.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index dc5e399a88b..ac8f4c476e5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v1.35.5 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.35.4 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.3 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.2 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.35.1 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
index f3b0b242acc..ee79b48eaa5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -31,7 +31,7 @@
"types/types.go",
"validators.go"
],
- "go": "1.22",
+ "go": "1.23",
"module": "github.com/aws/aws-sdk-go-v2/service/ssooidc",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 765f6371dae..d882f41ddcc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.35.1"
+const goModuleVersion = "1.35.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 77183922d3c..9ae35f44991 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,26 @@
+# v1.39.1 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.39.0 (2025-10-30)
+
+* **Feature**: Update endpoint ruleset parameters casing
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.9 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.8 (2025-10-22)
+
+* No change notes available for this release.
+
+# v1.38.7 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.38.6 (2025-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
index 86bb3b79be4..935307771e2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -37,7 +37,7 @@
"types/types.go",
"validators.go"
],
- "go": "1.22",
+ "go": "1.23",
"module": "github.com/aws/aws-sdk-go-v2/service/sts",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index dd0eacf56c2..a4dbe82e4e2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.38.6"
+const goModuleVersion = "1.39.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
index 1dc87dd6bf1..1ec1ecf6525 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -430,6 +430,9 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "us-isob-east-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isob-west-1",
+ }: endpoints.Endpoint{},
},
},
{
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
index 8b6ab295004..8193f4b3964 100644
--- a/vendor/github.com/aws/smithy-go/CHANGELOG.md
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -1,3 +1,25 @@
+# Release (2025-11-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.23.2
+ * **Bug Fix**: Adjust the initial sizes of each middleware phase to avoid some unnecessary reallocation.
+ * **Bug Fix**: Avoid unnecessary allocation overhead from the metrics system when not in use.
+
+# Release (2025-10-15)
+
+## General Highlights
+* **Dependency Update**: Bump minimum go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# Release (2025-09-18)
+
+## Module Highlights
+* `github.com/aws/smithy-go/aws-http-auth`: [v1.1.0](aws-http-auth/CHANGELOG.md#v110-2025-09-18)
+ * **Feature**: Added support for SIG4/SIGV4A querystring authentication.
+
# Release (2025-08-27)
## General Highlights
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile
index 34b17ab2fe0..a12b124d505 100644
--- a/vendor/github.com/aws/smithy-go/Makefile
+++ b/vendor/github.com/aws/smithy-go/Makefile
@@ -13,6 +13,7 @@ REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${R
REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_EACHMODULE = ${REPOTOOLS_MODULE}/cmd/eachmodule@${REPOTOOLS_VERSION}
UNIT_TEST_TAGS=
BUILD_TAGS=
@@ -55,8 +56,11 @@ ensure-gradle-up:
verify: vet
-vet:
- go vet ${BUILD_TAGS} --all ./...
+vet: vet-modules-.
+
+vet-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst vet-modules-,,$@) \
+ "go vet ${BUILD_TAGS} --all ./..."
cover:
go test ${BUILD_TAGS} -coverprofile c.out ./...
@@ -66,21 +70,22 @@ cover:
################
# Unit Testing #
################
-.PHONY: unit unit-race unit-test unit-race-test
+.PHONY: test unit unit-race
+
+test: unit-race
+
+unit: verify unit-modules-.
-unit: verify
- go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
- go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+unit-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-modules-,,$@) \
+ "go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
-unit-race: verify
- go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
- go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
+unit-race: verify unit-race-modules-.
-unit-test: verify
- go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+unit-race-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-race-modules-,,$@) \
+ "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
-unit-race-test: verify
- go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
#####################
# Release Process #
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
index 77a74ae0c2c..ddce37b99ef 100644
--- a/vendor/github.com/aws/smithy-go/README.md
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -4,7 +4,7 @@
[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime.
-The smithy-go runtime requires a minimum version of Go 1.22.
+The smithy-go runtime requires a minimum version of Go 1.23.
**WARNING: All interfaces are subject to change.**
@@ -80,7 +80,7 @@ example created from `smithy init`:
"service": "example.weather#Weather",
"module": "github.com/example/weather",
"generateGoMod": true,
- "goDirective": "1.22"
+ "goDirective": "1.23"
}
}
}
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index 945db0af309..263059014b8 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
package smithy
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.23.0"
+const goModuleVersion = "1.23.2"
diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go
index fb374e1fb85..444126df5a0 100644
--- a/vendor/github.com/aws/smithy-go/metrics/nop.go
+++ b/vendor/github.com/aws/smithy-go/metrics/nop.go
@@ -9,54 +9,82 @@ var _ MeterProvider = (*NopMeterProvider)(nil)
// Meter returns a meter which creates no-op instruments.
func (NopMeterProvider) Meter(string, ...MeterOption) Meter {
- return nopMeter{}
+ return NopMeter{}
}
-type nopMeter struct{}
+// NopMeter creates no-op instruments.
+type NopMeter struct{}
-var _ Meter = (*nopMeter)(nil)
+var _ Meter = (*NopMeter)(nil)
-func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) {
- return nopInstrument[int64]{}, nil
+// Int64Counter creates a no-op instrument.
+func (NopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64UpDownCounter creates a no-op instrument.
+func (NopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64Gauge creates a no-op instrument.
+func (NopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64Histogram creates a no-op instrument.
+func (NopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64AsyncCounter creates a no-op instrument.
+func (NopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64AsyncUpDownCounter creates a no-op instrument.
+func (NopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[int64]{}, nil
+
+// Int64AsyncGauge creates a no-op instrument.
+func (NopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
}
-func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64Counter creates a no-op instrument.
+func (NopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64UpDownCounter creates a no-op instrument.
+func (NopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64Gauge creates a no-op instrument.
+func (NopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64Histogram creates a no-op instrument.
+func (NopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64AsyncCounter creates a no-op instrument.
+func (NopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64AsyncUpDownCounter creates a no-op instrument.
+func (NopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
}
-func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
- return nopInstrument[float64]{}, nil
+
+// Float64AsyncGauge creates a no-op instrument.
+func (NopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
}
type nopInstrument[N any] struct{}
@@ -65,3 +93,6 @@ func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {}
func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {}
func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {}
func (nopInstrument[_]) Stop() {}
+
+var nopInstrumentInt64 = nopInstrument[int64]{}
+var nopInstrumentFloat64 = nopInstrument[float64]{}
diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
index 4b195308c59..daf90136e96 100644
--- a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
+++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
@@ -23,12 +23,14 @@ type orderedIDs struct {
items map[string]ider
}
-const baseOrderedItems = 5
+// Chosen based on the general upper bound on the number of middlewares in
+// each step in the downstream aws-sdk-go-v2.
+const baseOrderedItems = 8
-func newOrderedIDs() *orderedIDs {
+func newOrderedIDs(cap int) *orderedIDs {
return &orderedIDs{
- order: newRelativeOrder(),
- items: make(map[string]ider, baseOrderedItems),
+ order: newRelativeOrder(cap),
+ items: make(map[string]ider, cap),
}
}
@@ -141,9 +143,9 @@ type relativeOrder struct {
order []string
}
-func newRelativeOrder() *relativeOrder {
+func newRelativeOrder(cap int) *relativeOrder {
return &relativeOrder{
- order: make([]string, 0, baseOrderedItems),
+ order: make([]string, 0, cap),
}
}
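
The capacity now flows from each step constructor instead of a single package constant, so hot steps can size their slice and map up front. The payoff is the standard preallocation effect, illustrated below with an arbitrary bound of 16:

```go
package main

import "fmt"

func main() {
	// Preallocating to the expected middleware count means appends up to
	// that bound never trigger a grow-and-copy of the backing array.
	order := make([]string, 0, 16)
	before := cap(order)
	for i := 0; i < 16; i++ {
		order = append(order, fmt.Sprintf("mw-%d", i))
	}
	fmt.Println(before == cap(order)) // true: no reallocation occurred
}
```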
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
index 7e1d94caeef..cc7fe89c94a 100644
--- a/vendor/github.com/aws/smithy-go/middleware/step_build.go
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -79,7 +79,7 @@ type BuildStep struct {
// initialization added to it.
func NewBuildStep() *BuildStep {
return &BuildStep{
- ids: newOrderedIDs(),
+ ids: newOrderedIDs(baseOrderedItems),
}
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
index 44860721571..9a6679a59b3 100644
--- a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -85,7 +85,8 @@ type DeserializeStep struct {
// initialization added to it.
func NewDeserializeStep() *DeserializeStep {
return &DeserializeStep{
- ids: newOrderedIDs(),
+ // the downstream SDK typically has a larger Deserialize step
+ ids: newOrderedIDs(baseOrderedItems * 2),
}
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
index 065e3885de9..76eab249093 100644
--- a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -79,7 +79,8 @@ type FinalizeStep struct {
// initialization added to it.
func NewFinalizeStep() *FinalizeStep {
return &FinalizeStep{
- ids: newOrderedIDs(),
+ // the downstream SDK typically has a larger Finalize step
+ ids: newOrderedIDs(baseOrderedItems * 2),
}
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
index fe359144d24..312be3a331e 100644
--- a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
+++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
@@ -79,7 +79,7 @@ type InitializeStep struct {
// initialization added to it.
func NewInitializeStep() *InitializeStep {
return &InitializeStep{
- ids: newOrderedIDs(),
+ ids: newOrderedIDs(baseOrderedItems),
}
}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
index 114bafcedea..a4ce4bee3b7 100644
--- a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
+++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
@@ -85,7 +85,7 @@ type SerializeStep struct {
// serialize the input parameters into.
func NewSerializeStep(newRequest func() interface{}) *SerializeStep {
return &SerializeStep{
- ids: newOrderedIDs(),
+ ids: newOrderedIDs(baseOrderedItems),
newRequest: newRequest,
}
}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
index d1beaa595d9..b4cd4a47e36 100644
--- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go
+++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
@@ -17,6 +17,12 @@ var now = time.Now
func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) (
context.Context, ClientDo, error,
) {
+ // WithClientTrace is an expensive operation - avoid calling it if we're
+ // not actually using a metrics sink.
+ if _, ok := meter.(metrics.NopMeter); ok {
+ return parent, client, nil
+ }
+
hm, err := newHTTPMetrics(meter)
if err != nil {
return nil, nil, err
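
This early return is why the no-op meter type was exported: the HTTP client can cheaply detect it by type and skip the `httptrace.WithClientTrace` wiring entirely when metrics are off. A minimal sketch of the detection:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/metrics"
)

func main() {
	var provider metrics.MeterProvider = metrics.NopMeterProvider{}
	meter := provider.Meter("example")

	// Same check as withMetrics: a NopMeter means no metrics sink is
	// configured, so instrumentation setup can be skipped.
	if _, ok := meter.(metrics.NopMeter); ok {
		fmt.Println("metrics disabled; skipping instrumentation")
	}
}
```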
diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go
index c1f3e442d36..79d425e8746 100644
--- a/vendor/github.com/cenkalti/backoff/v5/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go
@@ -1,7 +1,7 @@
package backoff
import (
- "math/rand"
+ "math/rand/v2"
"time"
)
@@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
+Example: Given the following default arguments, for 9 tries the sequence will be:
Request # RetryInterval (seconds) Randomized Interval (seconds)
@@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try:
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go
index e43f47fb8a5..32a7f988347 100644
--- a/vendor/github.com/cenkalti/backoff/v5/retry.go
+++ b/vendor/github.com/cenkalti/backoff/v5/retry.go
@@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption {
}
}
-// WithMaxTries limits the number of retry attempts.
+// WithMaxTries limits the total number of attempts, not just retries.
func WithMaxTries(n uint) RetryOption {
return func(args *retryOptions) {
args.MaxTries = n
@@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti
// Handle permanent errors without retrying.
var permanent *PermanentError
if errors.As(err, &permanent) {
- return res, err
+ return res, permanent.Unwrap()
}
// Stop retrying if context is cancelled.
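
With this change, `Retry` returns the error wrapped by `Permanent` rather than the `*PermanentError` wrapper itself, so callers can match their own sentinels directly. A sketch of the v5 API; the sentinel and attempt counts are illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	backoff "github.com/cenkalti/backoff/v5"
)

var errFatal = errors.New("bad credentials")

func main() {
	attempts := 0
	op := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("transient")
		}
		// A permanent error stops retrying; Retry now surfaces the
		// original error, so errors.Is works on the caller's sentinel.
		return "", backoff.Permanent(errFatal)
	}

	_, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(5))
	fmt.Println(errors.Is(err, errFatal), attempts) // true 3
}
```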
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
index ff9c57e1d84..b3178e7519b 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -155,7 +155,7 @@ stored in base64 encoded form, which was redundant with the information in the
type Token struct {
Raw string // Raw contains the raw token
Method SigningMethod // Method is the signing method used or to be used
- Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Header map[string]any // Header is the first segment of the token in decoded form
Claims Claims // Claims is the second segment of the token in decoded form
Signature []byte // Signature is the third segment of the token in decoded form
Valid bool // Valid specifies if the token is valid
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
index c929e4a02fc..06cd94d2329 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -55,7 +55,7 @@ func (m *SigningMethodECDSA) Alg() string {
// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an ecdsa.PublicKey struct
-func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key any) error {
// Get the key
var ecdsaKey *ecdsa.PublicKey
switch k := key.(type) {
@@ -89,7 +89,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf
// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an ecdsa.PrivateKey struct
-func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *SigningMethodECDSA) Sign(signingString string, key any) ([]byte, error) {
// Get the key
var ecdsaKey *ecdsa.PrivateKey
switch k := key.(type) {
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
index 5700636d35b..44a3b7a1c23 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -23,7 +23,7 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
}
// Parse the key
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
@@ -50,7 +50,7 @@ func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
}
// Parse the key
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
index c2138119e51..4159e57bfef 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -33,7 +33,7 @@ func (m *SigningMethodEd25519) Alg() string {
// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an ed25519.PublicKey
-func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key any) error {
var ed25519Key ed25519.PublicKey
var ok bool
@@ -55,7 +55,7 @@ func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key inte
// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an ed25519.PrivateKey
-func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *SigningMethodEd25519) Sign(signingString string, key any) ([]byte, error) {
var ed25519Key crypto.Signer
var ok bool
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
index cdb5e68e876..6f46e88605f 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -24,7 +24,7 @@ func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
}
// Parse the key
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
}
@@ -49,7 +49,7 @@ func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
}
// Parse the key
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
return nil, err
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
index 23bb616ddde..14e007516a0 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/errors.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -2,6 +2,7 @@ package jwt
import (
"errors"
+ "fmt"
"strings"
)
@@ -47,3 +48,42 @@ func joinErrors(errs ...error) error {
errs: errs,
}
}
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error message with a detailed error message. The
+// message will be prefixed with the contents of the supplied error type.
+// Additionally, more errors, that provide more context can be supplied which
+// will be appended to the message. This makes use of Go 1.20's possibility to
+// include more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
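
Dropping the pre-1.20 build-tagged variants (below) leaves only this multi-`%w` implementation. A standalone sketch of the language feature it relies on; the sentinels here are local stand-ins, not the library's exported errors:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errUnverifiable = errors.New("token is unverifiable")
	errExpired      = errors.New("token is expired")
)

func main() {
	// Go 1.20+ fmt.Errorf accepts multiple %w verbs, so one error can
	// wrap several causes and still satisfy errors.Is for each of them.
	err := fmt.Errorf("%w: %s: %w", errUnverifiable, "no keyfunc was provided", errExpired)

	fmt.Println(errors.Is(err, errUnverifiable)) // true
	fmt.Println(errors.Is(err, errExpired))      // true
}
```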
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
deleted file mode 100644
index a893d355e1a..00000000000
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
+++ /dev/null
@@ -1,47 +0,0 @@
-//go:build go1.20
-// +build go1.20
-
-package jwt
-
-import (
- "fmt"
-)
-
-// Unwrap implements the multiple error unwrapping for this error type, which is
-// possible in Go 1.20.
-func (je joinedError) Unwrap() []error {
- return je.errs
-}
-
-// newError creates a new error message with a detailed error message. The
-// message will be prefixed with the contents of the supplied error type.
-// Additionally, more errors, that provide more context can be supplied which
-// will be appended to the message. This makes use of Go 1.20's possibility to
-// include more than one %w formatting directive in [fmt.Errorf].
-//
-// For example,
-//
-// newError("no keyfunc was provided", ErrTokenUnverifiable)
-//
-// will produce the error string
-//
-// "token is unverifiable: no keyfunc was provided"
-func newError(message string, err error, more ...error) error {
- var format string
- var args []any
- if message != "" {
- format = "%w: %s"
- args = []any{err, message}
- } else {
- format = "%w"
- args = []any{err}
- }
-
- for _, e := range more {
- format += ": %w"
- args = append(args, e)
- }
-
- err = fmt.Errorf(format, args...)
- return err
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
deleted file mode 100644
index 2ad542f00ca..00000000000
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
+++ /dev/null
@@ -1,78 +0,0 @@
-//go:build !go1.20
-// +build !go1.20
-
-package jwt
-
-import (
- "errors"
- "fmt"
-)
-
-// Is implements checking for multiple errors using [errors.Is], since multiple
-// error unwrapping is not possible in versions less than Go 1.20.
-func (je joinedError) Is(err error) bool {
- for _, e := range je.errs {
- if errors.Is(e, err) {
- return true
- }
- }
-
- return false
-}
-
-// wrappedErrors is a workaround for wrapping multiple errors in environments
-// where Go 1.20 is not available. It basically uses the already implemented
-// functionality of joinedError to handle multiple errors with supplies a
-// custom error message that is identical to the one we produce in Go 1.20 using
-// multiple %w directives.
-type wrappedErrors struct {
- msg string
- joinedError
-}
-
-// Error returns the stored error string
-func (we wrappedErrors) Error() string {
- return we.msg
-}
-
-// newError creates a new error message with a detailed error message. The
-// message will be prefixed with the contents of the supplied error type.
-// Additionally, more errors, that provide more context can be supplied which
-// will be appended to the message. Since we cannot use of Go 1.20's possibility
-// to include more than one %w formatting directive in [fmt.Errorf], we have to
-// emulate that.
-//
-// For example,
-//
-// newError("no keyfunc was provided", ErrTokenUnverifiable)
-//
-// will produce the error string
-//
-// "token is unverifiable: no keyfunc was provided"
-func newError(message string, err error, more ...error) error {
- // We cannot wrap multiple errors here with %w, so we have to be a little
- // bit creative. Basically, we are using %s instead of %w to produce the
- // same error message and then throw the result into a custom error struct.
- var format string
- var args []any
- if message != "" {
- format = "%s: %s"
- args = []any{err, message}
- } else {
- format = "%s"
- args = []any{err}
- }
- errs := []error{err}
-
- for _, e := range more {
- format += ": %s"
- args = append(args, e)
- errs = append(errs, e)
- }
-
- err = &wrappedErrors{
- msg: fmt.Sprintf(format, args...),
- joinedError: joinedError{errs: errs},
- }
- return err
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
index aca600ce1b0..1bef138c38e 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -55,7 +55,7 @@ func (m *SigningMethodHMAC) Alg() string {
// about this, and why we intentionally are not supporting string as a key can
// be found on our usage guide
// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
-func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key any) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
if !ok {
@@ -88,7 +88,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa
// cryptographically random source, e.g. crypto/rand. Additional information
// about this, and why we intentionally are not supporting string as a key can
// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
-func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *SigningMethodHMAC) Sign(signingString string, key any) ([]byte, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return nil, ErrHashUnavailable
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
index b2b51a1f806..3b9205272f0 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -5,9 +5,9 @@ import (
"fmt"
)
-// MapClaims is a claims type that uses the map[string]interface{} for JSON
+// MapClaims is a claims type that uses the map[string]any for JSON
// decoding. This is the default claims type if you don't supply one
-type MapClaims map[string]interface{}
+type MapClaims map[string]any
// GetExpirationTime implements the Claims interface.
func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
@@ -73,7 +73,7 @@ func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
cs = append(cs, v)
case []string:
cs = v
- case []interface{}:
+ case []any:
for _, a := range v {
vs, ok := a.(string)
if !ok {
@@ -92,7 +92,7 @@ func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
func (m MapClaims) parseString(key string) (string, error) {
var (
ok bool
- raw interface{}
+ raw any
iss string
)
raw, ok = m[key]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
index 685c2ea3065..624ad55e878 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/none.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -25,7 +25,7 @@ func (m *signingMethodNone) Alg() string {
}
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) {
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key any) (err error) {
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
// accepting 'none' signing method
if _, ok := key.(unsafeNoneMagicConstant); !ok {
@@ -41,7 +41,7 @@ func (m *signingMethodNone) Verify(signingString string, sig []byte, key interfa
}
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *signingMethodNone) Sign(signingString string, key any) ([]byte, error) {
if _, ok := key.(unsafeNoneMagicConstant); ok {
return []byte{}, nil
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
index 88a780fbd4a..431573557b1 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -66,20 +66,37 @@ func WithExpirationRequired() ParserOption {
}
}
-// WithAudience configures the validator to require the specified audience in
-// the `aud` claim. Validation will fail if the audience is not listed in the
-// token or the `aud` claim is missing.
+// WithAudience configures the validator to require any of the specified
+// audiences in the `aud` claim. Validation will fail if the audience is not
+// listed in the token or the `aud` claim is missing.
//
// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
// application-specific. Since this validation API is helping developers in
// writing secure applications, we decided to REQUIRE the existence of the claim,
// if an audience is expected.
-func WithAudience(aud string) ParserOption {
+func WithAudience(aud ...string) ParserOption {
return func(p *Parser) {
p.validator.expectedAud = aud
}
}
+// WithAllAudiences configures the validator to require all the specified
+// audiences in the `aud` claim. Validation will fail if any of the specified
+// audiences is not listed in the token or the `aud` claim is missing.
+// Duplicates within the expected list are harmless, since each expected
+// audience is checked for membership in the claim independently.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure applications, we decided to REQUIRE the existence of the claim,
+// if an audience is expected.
+func WithAllAudiences(aud ...string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ p.validator.expectAllAud = true
+ }
+}
+
// WithIssuer configures the validator to require the specified issuer in the
// `iss` claim. Validation will fail if a different issuer is specified in the
// token or the `iss` claim is missing.
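A quick sketch of how the two options differ in practice, assuming this vendored golang-jwt/v5; the secret and audience values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	key := []byte("placeholder-secret") // illustrative key

	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"aud": []string{"svc-a", "svc-b"},
	})
	signed, err := tok.SignedString(key)
	if err != nil {
		panic(err)
	}
	keyfunc := func(*jwt.Token) (any, error) { return key, nil }

	// Any-of semantics: succeeds because "svc-a" is in the token's aud claim.
	_, err = jwt.NewParser(jwt.WithAudience("svc-a", "svc-x")).Parse(signed, keyfunc)
	fmt.Println("WithAudience:", err) // <nil>

	// All-of semantics: fails because "svc-x" is not in the aud claim.
	_, err = jwt.NewParser(jwt.WithAllAudiences("svc-a", "svc-x")).Parse(signed, keyfunc)
	fmt.Println("WithAllAudiences:", err) // non-nil: invalid audience
}
```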
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
index 83cbee6ae2b..98b960a783b 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -46,7 +46,7 @@ func (m *SigningMethodRSA) Alg() string {
// Verify implements token verification for the SigningMethod
// For this signing method, must be an *rsa.PublicKey structure.
-func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key any) error {
var rsaKey *rsa.PublicKey
var ok bool
@@ -67,7 +67,7 @@ func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interfac
// Sign implements token signing for the SigningMethod
// For this signing method, must be an *rsa.PrivateKey structure.
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *SigningMethodRSA) Sign(signingString string, key any) ([]byte, error) {
var rsaKey *rsa.PrivateKey
var ok bool
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
index 28c386ec43a..f17590cc4a1 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -1,6 +1,3 @@
-//go:build go1.4
-// +build go1.4
-
package jwt
import (
@@ -82,7 +79,7 @@ func init() {
// Verify implements token verification for the SigningMethod.
// For this verify method, key must be an rsa.PublicKey struct
-func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key any) error {
var rsaKey *rsa.PublicKey
switch k := key.(type) {
case *rsa.PublicKey:
@@ -108,7 +105,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key inter
// Sign implements token signing for the SigningMethod.
// For this signing method, key must be an rsa.PrivateKey struct
-func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
+func (m *SigningMethodRSAPSS) Sign(signingString string, key any) ([]byte, error) {
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
index b3aeebbe110..f22c3d0685f 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -23,7 +23,7 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
return nil, ErrKeyMustBePEMEncoded
}
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
@@ -53,7 +53,7 @@ func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.Pr
return nil, ErrKeyMustBePEMEncoded
}
- var parsedKey interface{}
+ var parsedKey any
var blockDecrypted []byte
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
@@ -86,7 +86,7 @@ func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
}
// Parse the key
- var parsedKey interface{}
+ var parsedKey any
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
index 0d73631c1bf..096d0ed4ca6 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -12,9 +12,9 @@ var signingMethodLock = new(sync.RWMutex)
// signature in Sign. The signature is then usually base64 encoded as part of a
// JWT.
type SigningMethod interface {
- Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid
- Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error
- Alg() string // returns the alg identifier for this method (example: 'HS256')
+ Verify(signingString string, sig []byte, key any) error // Returns nil if signature is valid
+ Sign(signingString string, key any) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
}
// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
index 9c7f4ab010c..3f71558888a 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/token.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -11,9 +11,9 @@ import (
// Token. This allows you to use properties in the Header of the token (such as
// `kid`) to identify which key to use.
//
-// The returned interface{} may be a single key or a VerificationKeySet containing
+// The returned any may be a single key or a VerificationKeySet containing
// multiple keys.
-type Keyfunc func(*Token) (interface{}, error)
+type Keyfunc func(*Token) (any, error)
// VerificationKey represents a public or secret key for verifying a token's signature.
type VerificationKey interface {
@@ -28,12 +28,12 @@ type VerificationKeySet struct {
// Token represents a JWT Token. Different fields will be used depending on
// whether you're creating or parsing/verifying a token.
type Token struct {
- Raw string // Raw contains the raw token. Populated when you [Parse] a token
- Method SigningMethod // Method is the signing method used or to be used
- Header map[string]interface{} // Header is the first segment of the token in decoded form
- Claims Claims // Claims is the second segment of the token in decoded form
- Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
- Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]any // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
}
// New creates a new [Token] with the specified signing method and an empty map
@@ -46,7 +46,7 @@ func New(method SigningMethod, opts ...TokenOption) *Token {
// claims. Additional options can be specified, but are currently unused.
func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
return &Token{
- Header: map[string]interface{}{
+ Header: map[string]any{
"typ": "JWT",
"alg": method.Alg(),
},
@@ -60,7 +60,7 @@ func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *To
// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
// for an overview of the different signing methods and their respective key
// types.
-func (t *Token) SignedString(key interface{}) (string, error) {
+func (t *Token) SignedString(key any) (string, error) {
sstr, err := t.SigningString()
if err != nil {
return "", err
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
index b2655a9e6d2..a3e0ef12122 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/types.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -103,7 +103,7 @@ func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
type ClaimStrings []string
func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
- var value interface{}
+ var value any
if err = json.Unmarshal(data, &value); err != nil {
return err
@@ -116,7 +116,7 @@ func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
aud = append(aud, v)
case []string:
aud = ClaimStrings(v)
- case []interface{}:
+ case []any:
for _, vv := range v {
vs, ok := vv.(string)
if !ok {
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
index 008ecd8712e..92b5c057cd2 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/validator.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -1,8 +1,8 @@
package jwt
import (
- "crypto/subtle"
"fmt"
+ "slices"
"time"
)
@@ -52,8 +52,12 @@ type Validator struct {
verifyIat bool
// expectedAud contains the audience this token expects. Supplying an empty
- // string will disable aud checking.
- expectedAud string
+ // slice will disable aud checking.
+ expectedAud []string
+
+ // expectAllAud specifies whether all expected audiences must be present in
+ // the token. If false, only one of the expected audiences must be present.
+ expectAllAud bool
// expectedIss contains the issuer this token expects. Supplying an empty
// string will disable iss checking.
@@ -88,7 +92,7 @@ func NewValidator(opts ...ParserOption) *Validator {
func (v *Validator) Validate(claims Claims) error {
var (
now time.Time
- errs []error = make([]error, 0, 6)
+ errs = make([]error, 0, 6)
err error
)
@@ -120,8 +124,8 @@ func (v *Validator) Validate(claims Claims) error {
}
// If we have an expected audience, we also require the audience claim
- if v.expectedAud != "" {
- if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
+ if len(v.expectedAud) > 0 {
+ if err = v.verifyAudience(claims, v.expectedAud, v.expectAllAud); err != nil {
errs = append(errs, err)
}
}
@@ -226,33 +230,39 @@ func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool)
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error {
+func (v *Validator) verifyAudience(claims Claims, cmp []string, expectAllAud bool) error {
aud, err := claims.GetAudience()
if err != nil {
return err
}
- if len(aud) == 0 {
+ // Check that aud exists and is not empty. We only require the aud claim
+ // if we expect at least one audience to be present.
+ if len(aud) == 0 || len(aud) == 1 && aud[0] == "" {
+ required := len(v.expectedAud) > 0
return errorIfRequired(required, "aud")
}
- // use a var here to keep constant time compare when looping over a number of claims
- result := false
-
- var stringClaims string
- for _, a := range aud {
- if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
- result = true
+ if !expectAllAud {
+ for _, a := range aud {
+ // If we only expect one match, we can stop early if we find a match
+ if slices.Contains(cmp, a) {
+ return nil
+ }
}
- stringClaims = stringClaims + a
+
+ return ErrTokenInvalidAudience
}
- // case where "" is sent in one or many aud claims
- if stringClaims == "" {
- return errorIfRequired(required, "aud")
+ // Note that we are looping over cmp here to ensure that all expected audiences
+ // are present in the aud claim.
+ for _, a := range cmp {
+ if !slices.Contains(aud, a) {
+ return ErrTokenInvalidAudience
+ }
}
- return errorIfFalse(result, ErrTokenInvalidAudience)
+ return nil
}
// verifyIssuer compares the iss claim in claims against cmp.
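Stripped of the claims plumbing, the any/all rule above reduces to two slices lookups. A standalone sketch of the same matching logic (not the library's exported API):

```go
package main

import (
	"fmt"
	"slices"
)

// audienceOK mirrors verifyAudience: with expectAll unset, one shared value
// suffices; with it set, every expected value must appear in aud.
func audienceOK(aud, expected []string, expectAll bool) bool {
	if !expectAll {
		return slices.ContainsFunc(aud, func(a string) bool {
			return slices.Contains(expected, a)
		})
	}
	for _, want := range expected {
		if !slices.Contains(aud, want) {
			return false
		}
	}
	return true
}

func main() {
	aud := []string{"svc-a", "svc-b"}
	fmt.Println(audienceOK(aud, []string{"svc-a", "svc-x"}, false)) // true
	fmt.Println(audienceOK(aud, []string{"svc-a", "svc-x"}, true))  // false
}
```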
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
index ba4d746407c..8a51690be44 100644
--- a/vendor/github.com/google/pprof/profile/merge.go
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -17,6 +17,7 @@ package profile
import (
"encoding/binary"
"fmt"
+ "slices"
"sort"
"strconv"
"strings"
@@ -78,12 +79,10 @@ func Merge(srcs []*Profile) (*Profile, error) {
}
}
- for _, s := range p.Sample {
- if isZeroSample(s) {
- // If there are any zero samples, re-merge the profile to GC
- // them.
- return Merge([]*Profile{p})
- }
+ if slices.ContainsFunc(p.Sample, isZeroSample) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
}
return p, nil
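The slices.ContainsFunc rewrite is the now-idiomatic replacement for a hand-rolled search loop. A reduced sketch with a stand-in for isZeroSample:

```go
package main

import (
	"fmt"
	"slices"
)

type sample struct{ values []int64 }

// isZero stands in for the package's isZeroSample predicate: a sample is
// zero when none of its values is nonzero.
func isZero(s sample) bool {
	return !slices.ContainsFunc(s.values, func(v int64) bool { return v != 0 })
}

func main() {
	samples := []sample{{values: []int64{1, 2}}, {values: []int64{0, 0}}}

	// Equivalent to looping over samples and returning on the first match.
	fmt.Println(slices.ContainsFunc(samples, isZero)) // true
}
```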
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
index f47a243903e..43f561d4455 100644
--- a/vendor/github.com/google/pprof/profile/profile.go
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -24,6 +24,7 @@ import (
"math"
"path/filepath"
"regexp"
+ "slices"
"sort"
"strings"
"sync"
@@ -734,12 +735,7 @@ func (p *Profile) RemoveLabel(key string) {
// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
- for _, v := range s.Label[key] {
- if v == value {
- return true
- }
- }
- return false
+ return slices.Contains(s.Label[key], value)
}
// SetNumLabel sets the specified key to the specified value for all samples in the
@@ -852,7 +848,17 @@ func (p *Profile) HasFileLines() bool {
// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File)
- return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
+ switch {
+ case strings.HasPrefix(name, "["):
+ case strings.HasPrefix(name, "linux-vdso"):
+ case strings.HasPrefix(m.File, "/dev/dri/"):
+ case m.File == "//anon":
+ case m.File == "":
+ case strings.HasPrefix(m.File, "/memfd:"):
+ default:
+ return false
+ }
+ return true
}
// Copy makes a fully independent copy of a profile.
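The rewritten Unsymbolizable leans on a small Go idiom: a switch whose matched cases have empty bodies, so control exits the switch and reaches a shared `return true`, while only `default` returns false. Each new rule then becomes a single added case line. A reduced sketch with a subset of the rules:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// unsymbolizable mirrors the shape of the vendored check (subset of rules).
func unsymbolizable(file string) bool {
	name := filepath.Base(file)
	switch {
	case strings.HasPrefix(name, "["):
	case strings.HasPrefix(name, "linux-vdso"):
	case file == "", file == "//anon":
	default:
		return false
	}
	return true // any matched case exits the switch and lands here
}

func main() {
	fmt.Println(unsymbolizable("[vdso]"))        // true
	fmt.Println(unsymbolizable("/usr/bin/prog")) // false
}
```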
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
index a15696ba16f..31bf6bca63e 100644
--- a/vendor/github.com/google/pprof/profile/proto.go
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -36,6 +36,7 @@ package profile
import (
"errors"
"fmt"
+ "slices"
)
type buffer struct {
@@ -187,6 +188,16 @@ func le32(p []byte) uint32 {
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}
+func peekNumVarints(data []byte) (numVarints int) {
+ for ; len(data) > 0; numVarints++ {
+ var err error
+ if _, data, err = decodeVarint(data); err != nil {
+ break
+ }
+ }
+ return numVarints
+}
+
func decodeVarint(data []byte) (uint64, []byte, error) {
var u uint64
for i := 0; ; i++ {
@@ -286,6 +297,9 @@ func decodeInt64(b *buffer, x *int64) error {
func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
+ dataLen := peekNumVarints(b.data)
+ *x = slices.Grow(*x, dataLen)
+
data := b.data
for len(data) > 0 {
var u uint64
@@ -316,8 +330,11 @@ func decodeUint64(b *buffer, x *uint64) error {
func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
- data := b.data
// Packed encoding
+ dataLen := peekNumVarints(b.data)
+ *x = slices.Grow(*x, dataLen)
+
+ data := b.data
for len(data) > 0 {
var u uint64
var err error
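The extra peekNumVarints pass exists only to size the destination before decoding, so the appends below never reallocate. A sketch of the same two-pass pattern, using the stdlib binary.Uvarint as a stand-in for the package's decodeVarint:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"slices"
)

func main() {
	// Three protobuf varints in packed encoding: 1, 150, 127.
	data := []byte{0x01, 0x96, 0x01, 0x7f}

	// First pass: count the varints (what peekNumVarints does).
	n := 0
	for rest := data; len(rest) > 0; n++ {
		_, size := binary.Uvarint(rest)
		if size <= 0 {
			break
		}
		rest = rest[size:]
	}

	// Grow once, then decode; the appends reuse the reserved capacity.
	out := slices.Grow([]uint64(nil), n)
	for rest := data; len(rest) > 0; {
		v, size := binary.Uvarint(rest)
		if size <= 0 {
			break
		}
		out = append(out, v)
		rest = rest[size:]
	}
	fmt.Println(out, cap(out) >= n) // [1 150 127] true
}
```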
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
index b2f9fd54660..7bba31e8cea 100644
--- a/vendor/github.com/google/pprof/profile/prune.go
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -19,6 +19,7 @@ package profile
import (
"fmt"
"regexp"
+ "slices"
"strings"
)
@@ -40,13 +41,7 @@ func simplifyFunc(f string) string {
// Account for unsimplified names -- try to remove the argument list by trimming
// starting from the first '(', but skipping reserved names that have '('.
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
- foundReserved := false
- for _, res := range reservedNames {
- if funcName[ind[0]:ind[1]] == res {
- foundReserved = true
- break
- }
- }
+ foundReserved := slices.Contains(reservedNames, funcName[ind[0]:ind[1]])
if !foundReserved {
funcName = funcName[:ind[0]]
break
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
index 846e3ece818..2fcff6e2731 100644
--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
+++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- "v2": "2.14.2"
+ "v2": "2.15.0"
}
diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
index a7fe145a433..fec6b1da9ed 100644
--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
+++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
@@ -1,5 +1,12 @@
# Changelog
+## [2.15.0](https://github.com/googleapis/gax-go/compare/v2.14.2...v2.15.0) (2025-07-09)
+
+
+### Features
+
+* **apierror:** improve gRPC status code mapping for HTTP errors ([#431](https://github.com/googleapis/gax-go/issues/431)) ([c207f2a](https://github.com/googleapis/gax-go/commit/c207f2a19ab91d3baee458b57d4aa992519025c7))
+
## [2.14.2](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.14.2) (2025-05-12)
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
index 7de60773d63..90a40d29c15 100644
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
+++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
@@ -38,6 +38,7 @@ package apierror
import (
"errors"
"fmt"
+ "net/http"
"strings"
jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
@@ -49,6 +50,39 @@ import (
"google.golang.org/protobuf/proto"
)
+// canonicalMap maps HTTP codes to gRPC status code equivalents.
+var canonicalMap = map[int]codes.Code{
+ http.StatusOK: codes.OK,
+ http.StatusBadRequest: codes.InvalidArgument,
+ http.StatusForbidden: codes.PermissionDenied,
+ http.StatusNotFound: codes.NotFound,
+ http.StatusConflict: codes.Aborted,
+ http.StatusRequestedRangeNotSatisfiable: codes.OutOfRange,
+ http.StatusTooManyRequests: codes.ResourceExhausted,
+ http.StatusGatewayTimeout: codes.DeadlineExceeded,
+ http.StatusNotImplemented: codes.Unimplemented,
+ http.StatusServiceUnavailable: codes.Unavailable,
+ http.StatusUnauthorized: codes.Unauthenticated,
+}
+
+// toCode maps an HTTP status code to its closest gRPC equivalent.
+func toCode(httpCode int) codes.Code {
+ if sCode, ok := canonicalMap[httpCode]; ok {
+ return sCode
+ }
+ switch {
+ case httpCode >= 200 && httpCode < 300:
+ return codes.OK
+
+ case httpCode >= 400 && httpCode < 500:
+ return codes.FailedPrecondition
+
+ case httpCode >= 500 && httpCode < 600:
+ return codes.Internal
+ }
+ return codes.Unknown
+}
+
// ErrDetails holds the google/rpc/error_details.proto messages.
type ErrDetails struct {
ErrorInfo *errdetails.ErrorInfo
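The visible effect of this mapping: wrapping an HTTP-flavored googleapi.Error now yields a meaningful gRPC code rather than codes.Unknown. A minimal sketch, assuming gax-go v2.15.0 and the google.golang.org/api/googleapi error type:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gax-go/v2/apierror"
	"google.golang.org/api/googleapi"
)

func main() {
	// An HTTP-flavored error as surfaced by Google REST clients.
	httpErr := &googleapi.Error{Code: 404, Message: "bucket not found"}

	if ae, ok := apierror.FromError(httpErr); ok {
		// Previously codes.Unknown; with the canonical map this is NotFound.
		fmt.Println(ae.GRPCStatus().Code()) // NotFound
	}
}
```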
@@ -217,6 +251,11 @@ func (a *APIError) Error() string {
// GRPCStatus extracts the underlying gRPC Status error.
// This method is necessary to fulfill the interface
// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError.
+//
+// For errors that originated as an HTTP-based googleapi.Error, GRPCStatus()
+// returns a status that attempts to map from the original HTTP code to an
+// equivalent gRPC status code. For use cases where you want to avoid this
+// behavior, error unwrapping can be used.
func (a *APIError) GRPCStatus() *status.Status {
return a.status
}
@@ -243,9 +282,9 @@ func (a *APIError) Metadata() map[string]string {
// setDetailsFromError parses a Status error or a googleapi.Error
// and sets status and details or httpErr and details, respectively.
// It returns false if neither Status nor googleapi.Error can be parsed.
-// When err is a googleapi.Error, the status of the returned error will
-// be set to an Unknown error, rather than nil, since a nil code is
-// interpreted as OK in the gRPC status package.
+//
+// When err is a googleapi.Error, the status of the returned error will be
+// mapped to the closest equivalent gRPC status code.
func (a *APIError) setDetailsFromError(err error) bool {
st, isStatus := status.FromError(err)
var herr *googleapi.Error
@@ -258,7 +297,7 @@ func (a *APIError) setDetailsFromError(err error) bool {
case isHTTPErr:
a.httpErr = herr
a.details = parseHTTPDetails(herr)
- a.status = status.New(codes.Unknown, herr.Message)
+ a.status = status.New(toCode(a.httpErr.Code), herr.Message)
default:
return false
}
diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
index e272d4d720c..0ab1bce59cc 100644
--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go
+++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
@@ -30,4 +30,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "2.14.2"
+const Version = "2.15.0"
diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go
index 53cbd958394..96e360661b9 100644
--- a/vendor/github.com/grafana/regexp/onepass.go
+++ b/vendor/github.com/grafana/regexp/onepass.go
@@ -465,12 +465,20 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
return nil
}
- // every instruction leading to InstMatch must be EmptyEndText
+ hasAlt := false
+ for _, inst := range prog.Inst {
+ if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch {
+ hasAlt = true
+ break
+ }
+ }
+ // If we have alternates, every instruction leading to InstMatch must be EmptyEndText.
+ // Also, any match on empty text must be $.
for _, inst := range prog.Inst {
opOut := prog.Inst[inst.Out].Op
switch inst.Op {
default:
- if opOut == syntax.InstMatch {
+ if opOut == syntax.InstMatch && hasAlt {
return nil
}
case syntax.InstAlt, syntax.InstAltMatch:
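The new precondition is cheap to observe from outside: the hasAlt scan is just a walk over the compiled program's instructions. A sketch using the stdlib regexp/syntax package, which mirrors the vendored fork's compiler:

```go
package main

import (
	"fmt"
	"regexp/syntax"
)

// hasAlt reports whether a compiled program contains any alternation,
// the same scan compileOnePass now performs before requiring that every
// instruction leading to InstMatch be EmptyEndText.
func hasAlt(pattern string) bool {
	re, err := syntax.Parse(pattern, syntax.Perl)
	if err != nil {
		panic(err)
	}
	prog, err := syntax.Compile(re.Simplify())
	if err != nil {
		panic(err)
	}
	for _, inst := range prog.Inst {
		if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasAlt(`^abc`))    // false: eligible for the relaxed one-pass rule
	fmt.Println(hasAlt(`^(a|b)c`)) // true: the stricter EmptyEndText rule applies
}
```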
diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go
index d1218ad0e87..253415fb6a4 100644
--- a/vendor/github.com/grafana/regexp/regexp.go
+++ b/vendor/github.com/grafana/regexp/regexp.go
@@ -14,10 +14,7 @@
// guaranteed to run in time linear in the size of the input.
// (This is a property not guaranteed by most open source
// implementations of regular expressions.) For more information
-// about this property, see
-//
-// https://swtch.com/~rsc/regexp/regexp1.html
-//
+// about this property, see https://swtch.com/~rsc/regexp/regexp1.html
// or any book about automata theory.
//
// All characters are UTF-8-encoded code points.
@@ -54,14 +51,13 @@
// subexpression did not match any string in the input. For 'String' versions
// an empty string means either no match or an empty match.
//
-// There is also a subset of the methods that can be applied to text read
-// from a RuneReader:
-//
-// MatchReader, FindReaderIndex, FindReaderSubmatchIndex
+// There is also a subset of the methods that can be applied to text read from
+// an [io.RuneReader]: [Regexp.MatchReader], [Regexp.FindReaderIndex],
+// [Regexp.FindReaderSubmatchIndex].
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
-// match text from a RuneReader may read arbitrarily far into the input
+// match text from an [io.RuneReader] may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
@@ -537,7 +533,7 @@ func (re *Regexp) Match(b []byte) bool {
return re.doMatch(nil, b, "")
}
-// MatchReader reports whether the text returned by the RuneReader
+// MatchReader reports whether the text returned by the [io.RuneReader]
// contains any match of the regular expression pattern.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
@@ -1244,10 +1240,9 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
// // s: ["", "b", "b", "c", "cadaaae"]
//
// The count determines the number of substrings to return:
-//
-// n > 0: at most n substrings; the last substring will be the unsplit remainder.
-// n == 0: the result is nil (zero substrings)
-// n < 0: all substrings
+// - n > 0: at most n substrings; the last substring will be the unsplit remainder;
+// - n == 0: the result is nil (zero substrings);
+// - n < 0: all substrings.
func (re *Regexp) Split(s string, n int) []string {
if n == 0 {
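The reformatted doc list is easiest to confirm with a three-line experiment; the behavior is identical in the stdlib and this fork, so the stdlib import is used here for a self-contained sketch:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`,`)
	s := "a,b,c,d"

	fmt.Println(re.Split(s, 2))  // [a b,c,d]  n > 0: last entry is the unsplit remainder
	fmt.Println(re.Split(s, 0))  // []         n == 0: nil result
	fmt.Println(re.Split(s, -1)) // [a b c d]  n < 0: all substrings
}
```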
@@ -1282,14 +1277,22 @@ func (re *Regexp) Split(s string, n int) []string {
return strings
}
-// MarshalText implements [encoding.TextMarshaler]. The output
+// AppendText implements [encoding.TextAppender]. The output
// matches that of calling the [Regexp.String] method.
//
// Note that the output is lossy in some cases: This method does not indicate
// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or
// those for which the [Regexp.Longest] method has been called.
+func (re *Regexp) AppendText(b []byte) ([]byte, error) {
+ return append(b, re.String()...), nil
+}
+
+// MarshalText implements [encoding.TextMarshaler]. The output
+// matches that of calling the [Regexp.AppendText] method.
+//
+// See [Regexp.AppendText] for more information.
func (re *Regexp) MarshalText() ([]byte, error) {
- return []byte(re.String()), nil
+ return re.AppendText(nil)
}
// UnmarshalText implements [encoding.TextUnmarshaler] by calling
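AppendText lets callers write the pattern into a buffer they already own, with MarshalText reduced to AppendText(nil). A short sketch, assuming the vendored github.com/grafana/regexp import path (the fork is API-compatible with the stdlib):

```go
package main

import (
	"fmt"

	"github.com/grafana/regexp"
)

func main() {
	re := regexp.MustCompile(`[0-9]+`)

	// Append into an existing buffer: no intermediate []byte(re.String()).
	buf := []byte("pattern=")
	buf, _ = re.AppendText(buf)
	fmt.Println(string(buf)) // pattern=[0-9]+

	// MarshalText is now a thin wrapper over AppendText.
	b, _ := re.MarshalText()
	fmt.Println(string(b)) // [0-9]+
}
```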
diff --git a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
index 80a2c9ae6b9..fafa41cf2cf 100644
--- a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
+++ b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
@@ -11,7 +11,10 @@
# Perl about each letter from 0-128 and write down
# its answer.
-@posixclasses = (
+use strict;
+use warnings;
+
+my @posixclasses = (
"[:alnum:]",
"[:alpha:]",
"[:ascii:]",
@@ -28,13 +31,13 @@
"[:xdigit:]",
);
-@perlclasses = (
+my @perlclasses = (
"\\d",
"\\s",
"\\w",
);
-%overrides = (
+my %overrides = (
# Prior to Perl 5.18, \s did not match vertical tab.
# RE2 preserves that original behaviour.
"\\s:11" => 0,
@@ -70,7 +73,7 @@ ($$@)
}
print "}\n\n";
my $n = @ranges;
- $negname = $name;
+ my $negname = $name;
if ($negname =~ /:/) {
$negname =~ s/:/:^/;
} else {
@@ -97,13 +100,25 @@ ($@)
my $count = @entries;
}
+# Prepare gofmt command
+my $gofmt;
+
+if (@ARGV > 0 && $ARGV[0] =~ /\.go$/) {
+ # Send the output of gofmt to the given file
+ open($gofmt, '|-', 'gofmt >'.$ARGV[0]) or die;
+} else {
+ open($gofmt, '|-', 'gofmt') or die;
+}
+
+# Redirect STDOUT to gofmt input
+select $gofmt;
+
 print <<EOF;
-// GENERATED BY make_perl_groups.pl; DO NOT EDIT.
-// make_perl_groups.pl >perl_groups.go
+// Code generated by make_perl_groups.pl; DO NOT EDIT.
package syntax
diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go
index 6ed6491c807..ed239dafdf3 100644
--- a/vendor/github.com/grafana/regexp/syntax/parse.go
+++ b/vendor/github.com/grafana/regexp/syntax/parse.go
@@ -621,7 +621,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
}
// Found end of a run with common leading literal string:
- // sub[start:i] all begin with str[0:len(str)], but sub[i]
+ // sub[start:i] all begin with str[:len(str)], but sub[i]
// does not even begin with str[0].
//
// Factor out common string and append factored expression to out.
@@ -941,9 +941,7 @@ func parse(s string, flags Flags) (_ *Regexp, err error) {
p.op(opLeftParen).Cap = p.numCap
t = t[1:]
case '|':
- if err = p.parseVerticalBar(); err != nil {
- return nil, err
- }
+ p.parseVerticalBar()
t = t[1:]
case ')':
if err = p.parseRightParen(); err != nil {
@@ -1328,7 +1326,7 @@ func matchRune(re *Regexp, r rune) bool {
}
// parseVerticalBar handles a | in the input.
-func (p *parser) parseVerticalBar() error {
+func (p *parser) parseVerticalBar() {
p.concat()
// The concatenation we just parsed is on top of the stack.
@@ -1338,8 +1336,6 @@ func (p *parser) parseVerticalBar() error {
if !p.swapVerticalBar() {
p.op(opVerticalBar)
}
-
- return nil
}
// mergeCharClass makes dst = dst|src.
@@ -1580,6 +1576,8 @@ type charGroup struct {
class []rune
}
+//go:generate perl make_perl_groups.pl perl_groups.go
+
// parsePerlClassEscape parses a leading Perl character class escape like \d
// from the beginning of s. If one is present, it appends the characters to r
// and returns the new slice r and the remainder of the string.
diff --git a/vendor/github.com/grafana/regexp/syntax/perl_groups.go b/vendor/github.com/grafana/regexp/syntax/perl_groups.go
index effe4e68627..675466e5a0c 100644
--- a/vendor/github.com/grafana/regexp/syntax/perl_groups.go
+++ b/vendor/github.com/grafana/regexp/syntax/perl_groups.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// GENERATED BY make_perl_groups.pl; DO NOT EDIT.
-// make_perl_groups.pl >perl_groups.go
+// Code generated by make_perl_groups.pl; DO NOT EDIT.
package syntax
diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go
index 8ad3653abba..f15d2051230 100644
--- a/vendor/github.com/grafana/regexp/syntax/regexp.go
+++ b/vendor/github.com/grafana/regexp/syntax/regexp.go
@@ -79,7 +79,7 @@ func (x *Regexp) Equal(y *Regexp) bool {
return slices.Equal(x.Rune, y.Rune)
case OpAlternate, OpConcat:
- return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) })
+ return slices.EqualFunc(x.Sub, y.Sub, (*Regexp).Equal)
case OpStar, OpPlus, OpQuest:
if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) {
diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md
index 0f5864dc3a8..3130a2870ed 100644
--- a/vendor/github.com/knadh/koanf/v2/README.md
+++ b/vendor/github.com/knadh/koanf/v2/README.md
@@ -4,7 +4,7 @@
**koanf** is a library for reading configuration from different sources in different formats in Go applications. It is a cleaner, lighter [alternative to spf13/viper](#alternative-to-viper) with better abstractions and extensibility and far fewer dependencies.
-koanf v2 has modules (Providers) for reading configuration from a variety of sources such as files, command line flags, environment variables, Vault, and S3 and for parsing (Parsers) formats such as JSON, YAML, TOML, Hashicorp HCL. It is easy to plug in custom parsers and providers.
+koanf v2 has modules (Providers) for reading configuration from a variety of sources such as files, command line flags, environment variables, Vault, and S3 and for parsing (Parsers) formats such as JSON, YAML, TOML, HUML, Hashicorp HCL. It is easy to plug in custom parsers and providers.
All external dependencies in providers and parsers are detached from the core and can be installed separately as necessary.
@@ -17,7 +17,7 @@ All external dependencies in providers and parsers are detached from the core an
go get -u github.com/knadh/koanf/v2
# Install the necessary Provider(s).
-# Available: file, env, posflag, basicflag, confmap, rawbytes,
+# Available: file, env/v2, posflag, basicflag, confmap, rawbytes,
# structs, fs, s3, appconfig/v2, consul/v2, etcd/v2, vault/v2, parameterstore/v2
# eg: go get -u github.com/knadh/koanf/providers/s3
# eg: go get -u github.com/knadh/koanf/providers/consul/v2
@@ -26,7 +26,7 @@ go get -u github.com/knadh/koanf/providers/file
# Install the necessary Parser(s).
-# Available: toml, toml/v2, json, yaml, dotenv, hcl, hjson, nestedtext
+# Available: toml, toml/v2, json, yaml, huml, dotenv, hcl, hjson, nestedtext
# go get -u github.com/knadh/koanf/parsers/$parser
go get -u github.com/knadh/koanf/parsers/toml
@@ -41,6 +41,7 @@ go get -u github.com/knadh/koanf/parsers/toml
- [Watching file for changes](#watching-file-for-changes)
- [Reading from command line](#reading-from-command-line)
- [Reading environment variables](#reading-environment-variables)
+- [Reading from an S3 bucket](#reading-from-an-s3-bucket)
- [Reading raw bytes](#reading-raw-bytes)
- [Reading from maps and structs](#reading-from-nested-maps)
- [Unmarshalling and marshalling](#unmarshalling-and-marshalling)
@@ -229,7 +230,7 @@ import (
"github.com/knadh/koanf/v2"
"github.com/knadh/koanf/parsers/json"
- "github.com/knadh/koanf/providers/env"
+ "github.com/knadh/koanf/providers/env/v2"
"github.com/knadh/koanf/providers/file"
)
@@ -242,46 +243,36 @@ func main() {
log.Fatalf("error loading config: %v", err)
}
- // Load environment variables and merge into the loaded config.
- // "MYVAR" is the prefix to filter the env vars by.
- // "." is the delimiter used to represent the key hierarchy in env vars.
- // The (optional, or can be nil) function can be used to transform
- // the env var names, for instance, to lowercase them.
- //
- // For example, env vars: MYVAR_TYPE and MYVAR_PARENT1_CHILD1_NAME
- // will be merged into the "type" and the nested "parent1.child1.name"
- // keys in the config file here as we lowercase the key,
- // replace `_` with `.` and strip the MYVAR_ prefix so that
- // only "parent1.child1.name" remains.
- k.Load(env.Provider("MYVAR_", ".", func(s string) string {
- return strings.Replace(strings.ToLower(
- strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1)
+ // Load only environment variables with prefix "MYVAR_" and merge into config.
+ // Transform var names by:
+ // 1. Converting to lowercase
+ // 2. Removing "MYVAR_" prefix
+ // 3. Replacing "_" with "." to represent nesting using the . delimiter.
+ // Example: MYVAR_PARENT1_CHILD1_NAME becomes "parent1.child1.name"
+ k.Load(env.Provider(".", env.Opt{
+ Prefix: "MYVAR_",
+ TransformFunc: func(k, v string) (string, any) {
+ // Transform the key.
+ k = strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(k, "MYVAR_")), "_", ".")
+
+ // Transform the value into slices, if they contain spaces.
+ // Eg: MYVAR_TAGS="foo bar baz" -> tags: ["foo", "bar", "baz"]
+ // This is to demonstrate that string values can be transformed to any type
+ // where necessary.
+ if strings.Contains(v, " ") {
+ return k, strings.Split(v, " ")
+ }
+
+ return k, v
+ },
}), nil)
- fmt.Println("name is = ", k.String("parent1.child1.name"))
+ fmt.Println("name is =", k.String("parent1.child1.name"))
+ fmt.Println("time is =", k.Time("time", time.DateOnly))
+ fmt.Println("ids are =", k.Strings("parent1.child1.grandchild1.ids"))
}
```
-You can also use the `env.ProviderWithValue` with a callback that supports mutating both the key and value
-to return types other than a string. For example, here, env values separated by spaces are
-returned as string slices or arrays. eg: `MYVAR_slice=a b c` becomes `slice: [a, b, c]`.
-
-```go
- k.Load(env.ProviderWithValue("MYVAR_", ".", func(s string, v string) (string, interface{}) {
- // Strip out the MYVAR_ prefix and lowercase and get the key while also replacing
- // the _ character with . in the key (koanf delimiter).
- key := strings.Replace(strings.ToLower(strings.TrimPrefix(s, "MYVAR_")), "_", ".", -1)
-
- // If there is a space in the value, split the value into a slice by the space.
- if strings.Contains(v, " ") {
- return key, strings.Split(v, " ")
- }
-
- // Otherwise, return the plain string.
- return key, v
- }), nil)
-```
-
### Reading from an S3 bucket
```go
@@ -662,7 +653,7 @@ Install with `go get -u github.com/knadh/koanf/providers/$provider`
| fs | `fs.Provider(f fs.FS, filepath string)` | (**Experimental**) Reads a file from fs.FS and returns the raw bytes to be parsed. The provider requires `go v1.16` or higher. |
| basicflag | `basicflag.Provider(f *flag.FlagSet, delim string)` | Takes a stdlib `flag.FlagSet` |
| posflag | `posflag.Provider(f *pflag.FlagSet, delim string)` | Takes an `spf13/pflag.FlagSet` (advanced POSIX compatible flags with multiple types) and provides a nested config map based on delim. |
-| env | `env.Provider(prefix, delim string, f func(s string) string)` | Takes an optional prefix to filter env variables by, an optional function that takes and returns a string to transform env variables, and returns a nested config map based on delim. |
+| env/v2 | `env.Provider(delim string, opt env.Opt)` | Takes a delimiter and an `env.Opt` with an optional `Prefix` to filter env variables by and an optional `TransformFunc` to transform keys and values, and returns a nested config map based on delim. |
| confmap | `confmap.Provider(mp map[string]interface{}, delim string)` | Takes a premade `map[string]interface{}` conf map. If delim is provided, the keys are assumed to be flattened, thus unflattened using delim. |
| structs | `structs.Provider(s interface{}, tag string)` | Takes a struct and struct tag. |
| s3 | `s3.Provider(s3.S3Config{})` | Takes a s3 config struct. |
@@ -674,6 +665,7 @@ Install with `go get -u github.com/knadh/koanf/providers/$provider`
| parameterstore/v2 | `parameterstore.Provider(parameterstore.Config{})` | AWS Systems Manager Parameter Store provider |
| cliflagv2 | `cliflagv2.Provider(ctx *cli.Context, delimiter string)` | Reads commands and flags from urfave/cli/v2 context including global flags and nested command flags and provides a nested config map based on delim. |
| cliflagv3 | `cliflagv3.Provider(ctx *cli.Context, delimiter string)` | Reads commands and flags from urfave/cli/v3 and provides a nested config map based on delim. |
+| kiln | `kiln.Provider(configPath, keyPath, file string)` | Takes paths to a kiln config and key plus the name of a kiln-managed file, loads that file, and returns a nested config map. |
### Bundled Parsers
@@ -688,8 +680,9 @@ Install with `go get -u github.com/knadh/koanf/parsers/$parser`
| toml/v2 | `toml.Parser()` | Parses TOML bytes into a nested map (using go-toml v2) |
| dotenv | `dotenv.Parser()` | Parses DotEnv bytes into a flat map |
| hcl | `hcl.Parser(flattenSlices bool)` | Parses Hashicorp HCL bytes into a nested map. `flattenSlices` is recommended to be set to true. [Read more](https://github.com/hashicorp/hcl/issues/162). |
+| hjson | `hjson.Parser()` | Parses HJSON bytes into a nested map |
+| huml | `huml.Parser()` | Parses HUML (Human-Oriented Markup Language) bytes into a nested map |
| nestedtext | `nestedtext.Parser()` | Parses NestedText bytes into a flat map |
-| hjson | `hjson.Parser()` | Parses HJSON bytes into a nested map
-|
diff --git a/vendor/github.com/knadh/koanf/v2/go.work b/vendor/github.com/knadh/koanf/v2/go.work
index 28f4fad6c8c..e5aa5dee7d5 100644
--- a/vendor/github.com/knadh/koanf/v2/go.work
+++ b/vendor/github.com/knadh/koanf/v2/go.work
@@ -1,6 +1,6 @@
-go 1.24
+go 1.24.4
-toolchain go1.24.2
+toolchain go1.24.5
use (
.
@@ -9,6 +9,7 @@ use (
./parsers/dotenv
./parsers/hcl
./parsers/hjson
+ ./parsers/huml
./parsers/json
./parsers/kdl
./parsers/nestedtext
diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum
index 03d00a03618..e19681eab3b 100644
--- a/vendor/github.com/knadh/koanf/v2/go.work.sum
+++ b/vendor/github.com/knadh/koanf/v2/go.work.sum
@@ -1,360 +1,595 @@
+cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accessapproval v1.7.1 h1:/5YjNhR6lzCvmJZAnByYkfEgWjfAKwYP6nkuTk6nKFE=
cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/accesscontextmanager v1.8.1 h1:WIAt9lW9AXtqw/bnvrEUaE8VG/7bAAeMzRCBGMkc4+w=
cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo=
cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/aiplatform v1.48.0 h1:M5davZWCTzE043rJCn+ZLW6hSxfG1KAx4vJTtas2/ec=
cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/analytics v0.21.3 h1:TFBC1ZAqX9/jL56GEXdLrVe5vT3I22bDVWyDwZX4IEg=
cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigateway v1.6.1 h1:aBSwCQPcp9rZ0zVEUeJbR623palnqtvxJlUyvzsKGQc=
cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA=
cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeconnect v1.6.1 h1:6u/jj0P2c3Mcm+H9qLsXI7gYcTiG9ueyQL3n6vCmFJM=
cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs=
cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apigeeregistry v0.7.1 h1:hgq0ANLDx7t2FDZDJQrCMtCtddR/pjCqVuvQWGrQbXw=
cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw=
+cloud.google.com/go/apikeys v0.6.0 h1:B9CdHFZTFjVti89tmyXXrO+7vSNo2jvZuHG8zD5trdQ=
cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/appengine v1.8.1 h1:J+aaUZ6IbTpBegXbmEsh8qZZy864ZVnOoWyfa1XSNbI=
cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY=
cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/area120 v0.8.1 h1:wiOq3KDpdqXmaHzvZwKdpoM+3lDcqsI2Lwhyac7stss=
cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg=
cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/artifactregistry v1.14.1 h1:k6hNqab2CubhWlGcSzunJ7kfxC7UzpAfQ1UPb9PDCKI=
cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E=
cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/asset v1.14.1 h1:vlHdznX70eYW4V1y1PxocvF6tEwxJTTarwIGwOhFF3U=
cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ=
cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/assuredworkloads v1.11.1 h1:yaO0kwS+SnhVSTF7BqTyVGt3DTocI6Jqo+S3hHmCwNk=
cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/automl v1.13.1 h1:iP9iQurb0qbz+YOOMfKSEjhONA/WcoOIjt6/m+6pIgo=
cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE=
cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/baremetalsolution v1.1.1 h1:0Ge9PQAy6cZ1tRrkc44UVgYV15nw2TVnzJzYsMHXF+E=
cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA=
cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/batch v1.3.1 h1:uE0Q//W7FOGPjf7nuPiP0zoE8wOT3ngoIO2HIet0ilY=
cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A=
cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/beyondcorp v1.0.0 h1:VPg+fZXULQjs8LiMeWdLaB5oe8G9sEoZ0I0j6IMiG1Q=
cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/bigquery v1.53.0 h1:K3wLbjbnSlxhuG5q4pntHv5AEbQM1QqHKGYgwFIqOTg=
cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/billing v1.16.0 h1:1iktEAIZ2uA6KpebC235zi/rCXDdDYQ0bTXTNetSL80=
cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA=
cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/binaryauthorization v1.6.1 h1:cAkOhf1ic92zEN4U1zRoSupTmwmxHfklcp1X7CCBKvE=
cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U=
cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/certificatemanager v1.7.1 h1:uKsohpE0hiobx1Eak9jNcPCznwfB6gvyQCcS28Ah9E8=
cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI=
cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/channel v1.16.0 h1:dqRkK2k7Ll/HHeYGxv18RrfhozNxuTJRkspW0iaFZoY=
cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc=
cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/cloudbuild v1.13.0 h1:YBbAWcvE4x6xPWTyS+OU4eiUpz5rCS3VCM/aqmfddPA=
cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/clouddms v1.6.1 h1:rjR1nV6oVf2aNNB7B5uz1PDIlBjlOiBgR+q5n7bbB7M=
cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI=
cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/cloudtasks v1.12.1 h1:cMh9Q6dkvh+Ry5LAPbD/U2aw6KAqdiU6FttwhbTo69w=
cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/contactcenterinsights v1.10.0 h1:YR2aPedGVQPpFBZXJnPkqRj8M//8veIZZH5ZvICoXnI=
cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/container v1.24.0 h1:N51t/cgQJFqDD/W7Mb+IvmAPHrf8AbPx7Bb7aF4lROE=
cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/containeranalysis v0.10.1 h1:SM/ibWHWp4TYyJMwrILtcBtYKObyupwOVeceI9pNblw=
cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0=
cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/datacatalog v1.16.0 h1:qVeQcw1Cz93/cGu2E7TYUPh8Lz5dn5Ws2siIuQ17Vng=
cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataflow v0.9.1 h1:VzG2tqsk/HbmOtq/XSfdF4cBvUWRK+S+oL9k4eWkENQ=
cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw=
cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/dataform v0.8.1 h1:xcWso0hKOoxeW72AjBSIp/UfkvpqHNzzS0/oygHlcqY=
cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M=
cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datafusion v1.7.1 h1:eX9CZoyhKQW6g1Xj7+RONeDj1mV8KQDKEB9KLELX9/8=
cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI=
cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/datalabeling v0.8.1 h1:zxsCD/BLKXhNuRssen8lVXChUj8VxF3ofN06JfdWOXw=
cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY=
cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataplex v1.9.0 h1:yoBWuuUZklYp7nx26evIhzq8+i/nvKYuZr1jka9EqLs=
cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataproc/v2 v2.0.1 h1:4OpSiPMMGV3XmtPqskBU/RwYpj3yMFjtMLj/exi425Q=
cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4=
cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/dataqna v0.8.1 h1:ITpUJep04hC9V7C+gcK390HO++xesQFSUJ7S4nSnF3U=
cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8=
cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastore v1.13.0 h1:ktbC66bOQB3HJPQe8qNI1/aiQ77PMu7hD4mzE6uxe3w=
cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/datastream v1.10.0 h1:ra/+jMv36zTAGPfi8TRne1hXme+UsKtdcK4j6bnqQiw=
cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/deploy v1.13.0 h1:A+w/xpWgz99EYzB6e31gMGAI/P5jTZ2UO7veQK5jQ8o=
cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dialogflow v1.40.0 h1:sCJbaXt6ogSbxWQnERKAzos57f02PP6WkGbOZvXUdwc=
cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/dlp v1.10.1 h1:tF3wsJ2QulRhRLWPzWVkeDz3FkOGVoMl6cmDUHtfYxw=
cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI=
cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/documentai v1.22.0 h1:dW8ex9yb3oT9s1yD2+yLcU8Zq15AquRZ+wd0U+TkxFw=
cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/domains v0.9.1 h1:rqz6KY7mEg7Zs/69U6m6LMbB7PxFDWmT3QWNXIqhHm0=
cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE=
cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/edgecontainer v1.1.1 h1:zhHWnLzg6AqzE+I3gzJqiIwHfjEBhWctNQEzqb+FaRo=
cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk=
+cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/essentialcontacts v1.6.2 h1:OEJ0MLXXCW/tX1fkxzEZOsv/wRfyFsvDVNaHWBAvoV0=
cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4=
cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/eventarc v1.13.0 h1:xIP3XZi0Xawx8DEfh++mE2lrIi5kQmCr/KcWhJ1q0J4=
cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/filestore v1.7.1 h1:Eiz8xZzMJc5ppBWkuaod/PUdUZGCFR8ku0uS+Ah2fRw=
cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4=
cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/firestore v1.12.0 h1:aeEA/N7DW7+l2u5jtkO8I0qv0D95YwjggD8kUHrTHO4=
cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/functions v1.15.1 h1:LtAyqvO1TFmNLcROzHZhV0agEJfBi+zfMZsF4RT/a7U=
cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE=
+cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkebackup v1.3.0 h1:lgyrpdhtJKV7l1GM15YFt+OCyHMxsQZuSydyNmS0Pxo=
cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU=
cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkeconnect v0.8.1 h1:a1ckRvVznnuvDWESM2zZDzSVFvggeBaVY5+BVB8tbT0=
cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw=
cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkehub v0.14.1 h1:2BLSb8i+Co1P05IYCKATXy5yaaIw/ZqGvVSBTLdzCQo=
cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY=
cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gkemulticloud v1.0.0 h1:MluqhtPVZReoriP5+adGIw+ij/RIeRik8KApCW2WMTw=
cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/grafeas v0.3.0 h1:oyTL/KjiUeBs9eYLw/40cpSZglUC+0F7X4iu/8t7NWs=
cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/gsuiteaddons v1.6.1 h1:mi9jxZpzVjLQibTS/XfPZvl+Jr6D5Bs8pGqUjllRb00=
cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY=
cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/iap v1.8.1 h1:X1tcp+EoJ/LGX6cUPt3W2D4H2Kbqq0pLAsldnsCjLlE=
cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ=
cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/ids v1.4.1 h1:khXYmSoDDhWGEVxHl4c4IgbwSRR+qE/L4hzP3vaU9Hc=
cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw=
cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/iot v1.7.1 h1:yrH0OSmicD5bqGBoMlWG8UltzdLkYzNUwNVUVz7OT54=
cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk=
cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs=
cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/language v1.10.1 h1:3MXeGEv8AlX+O2LyV4pO4NGpodanc26AmXwOuipEym0=
cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0=
cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/lifesciences v0.9.1 h1:axkANGx1wiBXHiPcJZAE+TDjjYoJRIDzbHC/WYllCBU=
cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc=
+cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I=
cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/managedidentities v1.6.1 h1:2/qZuOeLgUHorSdxSQGtnOu9xQkBn37+j+oZQv/KHJY=
cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/maps v1.4.0 h1:PdfgpBLhAoSzZrQXP+/zBc78fIPLZSJp5y8+qSMn2UU=
cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1 h1:50cF7c1l3BanfKrpnTCaTvhf+Fo6kdF21DG0byG7gYU=
cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1 h1:7lkLsF0QF+Mre0O/NvkD9Q5utUNwtzvIYjrOLOs0HO0=
cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.12.0 h1:+9DsxUOHvsqvC0ylrRc/JwzbXJaaBpfIK3tX0Lx8Tcc=
cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58=
cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1 h1:LnrYM6lBEeTq+9f2lR4DjBhv31EROSAQi/P5W4Q0AEc=
cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0 h1:/3xP37eMxnyvkfLrsm1nv1b2FbMMSAEAOlECTvoeCq4=
cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1 h1:TBLEkMp3AE+6IV/wbIGRNTxnqLXHCTEQWoxRVC18TzY=
cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1 h1:CUqMNEtv4EHFnbogV+yGHQH5iAQLmijOx191innpOcs=
cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1 h1:pEwOAmO00mxdbesCRSsfj8Sd4rKY9kBrYW7Vd3Pq7cA=
cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1 h1:KmN18kE/xa1n91cM5jhCh7s1/UfIguSCisw7nTMUzgE=
cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.1 h1:I/7dHICQkNwym9erHqmlb50LRU588NPCvkfIY0Bx9jI=
cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.1 h1:dgyEHdfqML6cUW6/MkihNdTVc0INQst0qSE8Ou1ub9c=
cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1 h1:LdSuG3xBYu2Sgr3jTUULL1XCl5QBx6xwzGqzoDUw1j0=
cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1 h1:aK/lNmSd1vtbft/vLe2g7edXK72sIQbqr2QyrZN/iME=
cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.8.0 h1:XTMHy31yFmXgQg57CB3w9YQX8US7irxDX0Fl0VwlZyY=
cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU=
cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1 h1:B/18xGo+E0EMS9LOEQ0zXz7F2asMgmVgTYGSI89MHOA=
cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g=
cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1 h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY=
cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2 h1:IGkbudobsTXAwmkEYOzPCQPApUCsN4Gbq3ndGVhHQpI=
cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1 h1:nMr1OEVHuDambRn+/y4RmNAmnR/pXCuHtH0Y4tCgGRQ=
cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1 h1:UKp94UH5/Lv2WXSQe9+FttqV07x/2p1hFTMMYVFtilg=
cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1 h1:YrjQnCC7ydk+k30op7DSjSHw1yAYhqYXFcOq1bSXRYA=
cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1 h1:QIAMfndPOHR6yTmMUB0ZN+HSeRmPjR/21Smq5/xwghI=
cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1 h1:Fdyq418U69LhvNPFdlEO29w+DRRjwDA4/pFamm4ksAg=
cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1 h1:gYBrb9u/Hc5s5lUTFXX1Vsbc/9BEvgtioY6ZKaK0DK8=
cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/run v1.2.0 h1:kHeIG8q+N6Zv0nDkBjSOYfK2eWqa5FnaiDPH/7/HirE=
cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo=
cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1 h1:yoZbZR8880KgPGLmACOMCiY2tPk+iX4V/dkxqTirlz8=
cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1 h1:cLTCwAjFh9fKvU6F13Y4L9vPcx9yiWPyWXE4+zkuEQs=
cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1 h1:jR3itwycg/TgGA0uIgTItcVhA55hKWiNJxaNNpQJaZE=
cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0 h1:XOGJ9OpnDtqg8izd7gYk/XUhj8ytjIalyjjsR6oyG0M=
cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
+cloud.google.com/go/servicecontrol v1.11.1 h1:d0uV7Qegtfaa7Z2ClDzr9HJmnbJW7jn0WhZ7wOX6hLE=
cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.11.0 h1:pBWpjCFVGWkzVTkqN3TBBIqNSoSHY86/6RL0soSQ4z8=
cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicemanagement v1.8.0 h1:fopAQI/IAzlxnVeiKn/8WiV6zKndjFkvi+gzu+NjywY=
cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.6.0 h1:rXyq+0+RSIm3HFypctp7WoXxIA563rn206CfMWdqXX4=
cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1 h1:aHbwH9LSqs4r2rbay9f6fKEls61TAjT63jSyglsw7sI=
cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0 h1:aqiMP8dhsEXgn9K5EZBWxPG7dxIiyM2VaikqeU4iteg=
cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.19.0 h1:MCagaq8ObV2tr1kZJcJYgXYbIn8Ai5rp42tyGYw9rls=
cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0 h1:+ZLkeXx0K0Pk5XdDmG0MnUVqIR18lllsihU/yq39I8Q=
cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2 h1:j46ZgD6N2YdpFPux9mc7OAf4YK3tiBCsbLKc8rQx+bU=
cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1 h1:S/pR/GZT9p15R7Y2dk2OXD/3AufTct/NSxT4a7nxByw=
cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1 h1:kQf1jgPY04UJBYYjNUO+3GrZtIb57MfGAW2bwgLbR3A=
cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg=
cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.2 h1:PQHamiOzlehqLBJMnM72lXk/OsMQewZB12BKJ8zXrU0=
cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.19.0 h1:BRyyS+wU+Do6VOXnb8WfPr42ZXti9hzmLKLUCkggeK4=
cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1 h1:MBMWnkQ78GQnRz5lfdTAbBq/8QMCF3wahgtHh3s/J+k=
cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2 h1:ccK6/YgPfGHR/CyESz1mvIbsht5Y2xRsWCPqmTNydEw=
cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1 h1:gnjIclgqbEMc+cF5IJuPxp53wjBIlqZ8h9hE8Rkwp7A=
cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v1.0.0 h1:qsJ0CPlOQu/3MFBGklu752v3AkD+Pdu091UmXJ+EjTA=
cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1 h1:ram0GzjNWElmbxXMIzeOZUkQ9J8ZAahD6V8ilPGqX0Y=
cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1 h1:Ssy3MkOMOnyRV5H2bkMQ13Umv7CwB/kugo3qkAX83Fk=
cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1 h1:CfEF/vZ+xXyAR3zC9iaC/QRdf1MEgS20r5UR17Q4gOg=
cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1 h1:2akeQ/PgtRhrNuD/n1WvJd5zb7YyuDZrlOanBj2ihPg=
cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.0/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
+github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/sprig/v3 v3.2.1 h1:n6EPaDyLSvCEa3frruQvAiHuNp2dhBlMSmkEr+HuzGc=
github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
+github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6 h1:5kUcJJAKWWI82Xnp/CaU0eu5hLlHkmm9acjowSkwCd0=
github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
+github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc=
github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
+github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
+github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o=
github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
+github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA=
github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc=
+github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034 h1:BuCyszxPxUjBrYW2HNVrimC0rBUs2U27jCJGVh0IKTM=
github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034/go.mod h1:zz4KxBkcXUWKjIcrc+uphJ1gPh/t18ymGm3PmQ+VGTk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
+github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
+github.com/hashicorp/mdns v1.0.5 h1:1M5hW1cunYeoXOqHwEb/GBDDHAFo0Yqb/uz/beC6LbE=
github.com/hashicorp/mdns v1.0.5/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4 h1:sIXJOMrYnQZJu7OB7ANSF4MYri2fTEGIsRLz6LwI4xE=
github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
+github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20=
github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
+github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
+github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
+github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk=
github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
@@ -365,10 +600,13 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -378,6 +616,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -396,6 +636,7 @@ golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -405,11 +646,15 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -421,19 +666,25 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -446,9 +697,14 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
@@ -473,10 +729,15 @@ google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/vmihailenco/msgpack.v2 v2.9.2 h1:gjPqo9orRVlSAH/065qw3MsFCDpH7fa1KpiizXyllY4=
gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
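The koanf.go diff below (part of the vendored knadh/koanf v2 update) guards the library's internal maps with a sync.RWMutex: merge and Delete take the write lock, while the read accessors (Keys, KeyMap, All, Raw, Get, Exists, Sprint) take the read lock. A minimal sketch of the concurrent pattern this is meant to make safe — the key name and values are illustrative, assuming koanf's confmap provider:

package main

import (
	"fmt"
	"sync"

	"github.com/knadh/koanf/providers/confmap"
	"github.com/knadh/koanf/v2"
)

func main() {
	k := koanf.New(".")

	var wg sync.WaitGroup
	wg.Add(2)

	// Writer: Load funnels into ko.merge, which now takes the write lock.
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			_ = k.Load(confmap.Provider(map[string]interface{}{
				"server.port": 8080 + i, // hypothetical key
			}, "."), nil)
		}
	}()

	// Reader: Exists and Get now take the read lock, so this no longer
	// races with the concurrent Load above.
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			if k.Exists("server.port") {
				fmt.Println(k.Get("server.port"))
			}
		}
	}()

	wg.Wait()
}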
diff --git a/vendor/github.com/knadh/koanf/v2/koanf.go b/vendor/github.com/knadh/koanf/v2/koanf.go
index 0b89e55c7cb..66fae73b539 100644
--- a/vendor/github.com/knadh/koanf/v2/koanf.go
+++ b/vendor/github.com/knadh/koanf/v2/koanf.go
@@ -7,6 +7,7 @@ import (
"reflect"
"sort"
"strconv"
+ "sync"
"github.com/go-viper/mapstructure/v2"
"github.com/knadh/koanf/maps"
@@ -19,6 +20,7 @@ type Koanf struct {
confMapFlat map[string]interface{}
keyMap KeyMap
conf Conf
+ mu sync.RWMutex
}
// Conf is the Koanf configuration.
@@ -123,10 +125,12 @@ func (ko *Koanf) Load(p Provider, pa Parser, opts ...Option) error {
// Keys returns the slice of all flattened keys in the loaded configuration
// sorted alphabetically.
func (ko *Koanf) Keys() []string {
+ ko.mu.RLock()
out := make([]string, 0, len(ko.confMapFlat))
for k := range ko.confMapFlat {
out = append(out, k)
}
+ ko.mu.RUnlock()
sort.Strings(out)
return out
}
@@ -134,11 +138,13 @@ func (ko *Koanf) Keys() []string {
// KeyMap returns a map of flattened keys and the individual parts of the
// key as slices. eg: "parent.child.key" => ["parent", "child", "key"].
func (ko *Koanf) KeyMap() KeyMap {
+ ko.mu.RLock()
out := make(KeyMap, len(ko.keyMap))
for key, parts := range ko.keyMap {
out[key] = make([]string, len(parts))
copy(out[key], parts)
}
+ ko.mu.RUnlock()
return out
}
@@ -146,6 +152,8 @@ func (ko *Koanf) KeyMap() KeyMap {
// Note that it uses maps.Copy to create a copy that uses
// json.Marshal which changes the numeric types to float64.
func (ko *Koanf) All() map[string]interface{} {
+ ko.mu.RLock()
+ defer ko.mu.RUnlock()
return maps.Copy(ko.confMapFlat)
}
@@ -153,6 +161,8 @@ func (ko *Koanf) All() map[string]interface{} {
// Note that it uses maps.Copy to create a copy that uses
// json.Marshal which changes the numeric types to float64.
func (ko *Koanf) Raw() map[string]interface{} {
+ ko.mu.RLock()
+ defer ko.mu.RUnlock()
return maps.Copy(ko.confMap)
}
@@ -160,9 +170,13 @@ func (ko *Koanf) Raw() map[string]interface{} {
// of the config map with keys sorted alphabetically.
func (ko *Koanf) Sprint() string {
b := bytes.Buffer{}
- for _, k := range ko.Keys() {
- b.WriteString(fmt.Sprintf("%s -> %v\n", k, ko.confMapFlat[k]))
+ keys := ko.Keys()
+ ko.mu.RLock()
+ for _, k := range keys {
+ v := ko.confMapFlat[k]
+ b.WriteString(fmt.Sprintf("%s -> %v\n", k, v))
}
+ ko.mu.RUnlock()
return b.String()
}
@@ -287,6 +301,9 @@ func (ko *Koanf) UnmarshalWithConf(path string, o interface{}, c UnmarshalConf)
// Clears all keys/values if no path is specified.
// Every empty key on the path is recursively deleted.
func (ko *Koanf) Delete(path string) {
+ ko.mu.Lock()
+ defer ko.mu.Unlock()
+
// No path. Erase the entire map.
if path == "" {
ko.confMap = make(map[string]interface{})
@@ -316,11 +333,14 @@ func (ko *Koanf) Get(path string) interface{} {
}
// Does the path exist?
+ ko.mu.RLock()
p, ok := ko.keyMap[path]
if !ok {
+ ko.mu.RUnlock()
return nil
}
res := maps.Search(ko.confMap, p)
+ ko.mu.RUnlock()
// Non-reference types are okay to return directly.
// Other types are "copied" with maps.Copy or json.Marshal
@@ -370,7 +390,9 @@ func (ko *Koanf) Slices(path string) []*Koanf {
// Exists returns true if the given key path exists in the conf map.
func (ko *Koanf) Exists(path string) bool {
+ ko.mu.RLock()
_, ok := ko.keyMap[path]
+ ko.mu.RUnlock()
return ok
}
@@ -404,6 +426,9 @@ func (ko *Koanf) Delim() string {
}
func (ko *Koanf) merge(c map[string]interface{}, opts *options) error {
+ ko.mu.Lock()
+ defer ko.mu.Unlock()
+
maps.IntfaceKeysToStrings(c)
if opts.merge != nil {
if err := opts.merge(c, ko.confMap); err != nil {
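Next, the vendored miekg/dns update: the EDNS0 pack/unpack helpers in edns.go stop prefixing their error strings with "dns:", so callers that wrap these errors can add the prefix once without doubling it. A short sketch of reaching one of those error paths through the public API — the address and the deliberately invalid netmask are illustrative:

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)

	opt := &dns.OPT{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeOPT}}
	opt.Option = append(opt.Option, &dns.EDNS0_SUBNET{
		Code:          dns.EDNS0SUBNET,
		Family:        1,  // IPv4
		SourceNetmask: 64, // invalid for IPv4 (> 32); forces the "bad netmask" path
		Address:       net.ParseIP("192.0.2.1"),
	})
	m.Extra = append(m.Extra, opt)

	// Packing the message reaches EDNS0_SUBNET.pack; with this change the
	// error reads "bad netmask" rather than "dns: bad netmask".
	if _, err := m.Pack(); err != nil {
		fmt.Println(err)
	}
}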
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 91793b90696..5c970ca7ed8 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -317,30 +317,30 @@ func (e *EDNS0_SUBNET) pack() ([]byte, error) {
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// We may not need to complain either
if e.SourceNetmask != 0 {
- return nil, errors.New("dns: bad address family")
+ return nil, errors.New("bad address family")
}
case 1:
if e.SourceNetmask > net.IPv4len*8 {
- return nil, errors.New("dns: bad netmask")
+ return nil, errors.New("bad netmask")
}
if len(e.Address.To4()) != net.IPv4len {
- return nil, errors.New("dns: bad address")
+ return nil, errors.New("bad address")
}
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
case 2:
if e.SourceNetmask > net.IPv6len*8 {
- return nil, errors.New("dns: bad netmask")
+ return nil, errors.New("bad netmask")
}
if len(e.Address) != net.IPv6len {
- return nil, errors.New("dns: bad address")
+ return nil, errors.New("bad address")
}
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
default:
- return nil, errors.New("dns: bad address family")
+ return nil, errors.New("bad address family")
}
return b, nil
}
@@ -357,25 +357,25 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// It's okay to accept such a packet
if e.SourceNetmask != 0 {
- return errors.New("dns: bad address family")
+ return errors.New("bad address family")
}
e.Address = net.IPv4(0, 0, 0, 0)
case 1:
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
- return errors.New("dns: bad netmask")
+ return errors.New("bad netmask")
}
addr := make(net.IP, net.IPv4len)
copy(addr, b[4:])
e.Address = addr.To16()
case 2:
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
- return errors.New("dns: bad netmask")
+ return errors.New("bad netmask")
}
addr := make(net.IP, net.IPv6len)
copy(addr, b[4:])
e.Address = addr
default:
- return errors.New("dns: bad address family")
+ return errors.New("bad address family")
}
return nil
}
@@ -720,7 +720,7 @@ func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
case 2:
e.Timeout = binary.BigEndian.Uint16(b)
default:
- return fmt.Errorf("dns: length mismatch, want 0/2 but got %d", len(b))
+ return fmt.Errorf("length mismatch, want 0/2 but got %d", len(b))
}
return nil
}
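
The edns.go changes above strip the hard-coded "dns: " prefix from leaf errors. That reads as groundwork for the `%w` wrapping introduced in msg.go and zmsg.go below: once every layer wraps with its own context, a prefix baked into the leaf would be repeated at each level of the chain. A small illustration (not taken from the library):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	leaf := errors.New("bad address family")
	mid := fmt.Errorf("bad question name: %w", leaf)
	top := fmt.Errorf("dns: %w", mid) // the package prefix is added once, at the boundary

	fmt.Println(top)                  // dns: bad question name: bad address family
	fmt.Println(errors.Is(top, leaf)) // true
}
```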
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index d87b5323be0..edf18596199 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -872,7 +872,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
// TODO(miek) make this an error?
// use PackOpt to let people tell how detailed the error reporting should be?
// if off != len(msg) {
- // // println("dns: extra bytes in dns packet", off, "<", len(msg))
+ // // println("dns: extra bytes in dns packet", off, "<", len(msg))
// }
return err
}
@@ -1123,23 +1123,28 @@ func unpackQuestion(msg []byte, off int) (Question, int, error) {
)
q.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
- return q, off, err
+ return q, off, fmt.Errorf("bad question name: %w", err)
}
if off == len(msg) {
return q, off, nil
}
q.Qtype, off, err = unpackUint16(msg, off)
if err != nil {
- return q, off, err
+ return q, off, fmt.Errorf("bad question qtype: %w", err)
}
if off == len(msg) {
return q, off, nil
}
q.Qclass, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return q, off, fmt.Errorf("bad question qclass: %w", err)
+ }
+
if off == len(msg) {
return q, off, nil
}
- return q, off, err
+
+ return q, off, nil
}
func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
@@ -1177,27 +1182,27 @@ func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
)
dh.Id, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header id: %w", err)
}
dh.Bits, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header bits: %w", err)
}
dh.Qdcount, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header question count: %w", err)
}
dh.Ancount, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header answer count: %w", err)
}
dh.Nscount, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header ns count: %w", err)
}
dh.Arcount, off, err = unpackUint16(msg, off)
if err != nil {
- return dh, off, err
+ return dh, off, fmt.Errorf("bad header extra count: %w", err)
}
return dh, off, nil
}
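
The unpackQuestion hunk is more than stylistic. In the old control flow, the error from the final `unpackUint16` was only returned after the `off == len(msg)` early-out; if that helper reports overflow by returning an offset equal to `len(msg)` (which the rest of this diff suggests), the qclass error was silently swallowed. A minimal reproduction of the shape of the bug, using stand-in helpers rather than the library's own:

```go
package main

import (
	"errors"
	"fmt"
)

var errOverflow = errors.New("overflow unpacking uint16")

// old mimics the previous control flow: the early-out runs before the error check.
func old(off, msgLen int, err error) error {
	if off == msgLen {
		return nil // swallows err when the helper reports overflow as off == msgLen
	}
	return err
}

// fixed mimics the new control flow: the error is checked first and wrapped.
func fixed(off, msgLen int, err error) error {
	if err != nil {
		return fmt.Errorf("bad question qclass: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(old(12, 12, errOverflow))   // <nil> — the error is lost
	fmt.Println(fixed(12, 12, errOverflow)) // bad question qclass: overflow unpacking uint16
}
```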
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index b04d370f682..364149cfcea 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -332,7 +332,7 @@ func (srv *Server) ListenAndServe() error {
return srv.serveTCP(l)
case "tcp-tls", "tcp4-tls", "tcp6-tls":
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
- return errors.New("dns: neither Certificates nor GetCertificate set in Config")
+ return errors.New("neither Certificates nor GetCertificate set in config")
}
network := strings.TrimSuffix(srv.Net, "-tls")
l, err := listenTCP(network, addr, srv.ReusePort, srv.ReuseAddr)
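
The server.go hunk only rewords the message, but the check it guards is the standard `tls.Config` contract: a "tcp-tls" listener needs either `Certificates` or `GetCertificate` set. A hedged sketch of satisfying it (the certificate paths are placeholders):

```go
package main

import (
	"crypto/tls"
	"log"
)

func main() {
	// Placeholder paths; supply a real key pair in practice.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{Certificates: []tls.Certificate{cert}}
	_ = cfg // assign to the server's TLSConfig before serving on a "tcp-tls" network
}
```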
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go
index d1baeea992f..598103c10c5 100644
--- a/vendor/github.com/miekg/dns/svcb.go
+++ b/vendor/github.com/miekg/dns/svcb.go
@@ -298,7 +298,7 @@ func (s *SVCBMandatory) pack() ([]byte, error) {
func (s *SVCBMandatory) unpack(b []byte) error {
if len(b)%2 != 0 {
- return errors.New("dns: svcbmandatory: value length is not a multiple of 2")
+ return errors.New("bad svcbmandatory: value length is not a multiple of 2")
}
codes := make([]SVCBKey, 0, len(b)/2)
for i := 0; i < len(b); i += 2 {
@@ -395,10 +395,10 @@ func (s *SVCBAlpn) pack() ([]byte, error) {
b := make([]byte, 0, 10*len(s.Alpn))
for _, e := range s.Alpn {
if e == "" {
- return nil, errors.New("dns: svcbalpn: empty alpn-id")
+ return nil, errors.New("bad svcbalpn: empty alpn-id")
}
if len(e) > 255 {
- return nil, errors.New("dns: svcbalpn: alpn-id too long")
+ return nil, errors.New("bad svcbalpn: alpn-id too long")
}
b = append(b, byte(len(e)))
b = append(b, e...)
@@ -413,7 +413,7 @@ func (s *SVCBAlpn) unpack(b []byte) error {
length := int(b[i])
i++
if i+length > len(b) {
- return errors.New("dns: svcbalpn: alpn array overflowing")
+ return errors.New("bad svcbalpn: alpn array overflowing")
}
alpn = append(alpn, string(b[i:i+length]))
i += length
@@ -433,13 +433,13 @@ func (s *SVCBAlpn) parse(b string) error {
for p := 0; p < len(b); {
c, q := nextByte(b, p)
if q == 0 {
- return errors.New("dns: svcbalpn: unterminated escape")
+ return errors.New("bad svcbalpn: unterminated escape")
}
p += q
// If we find a comma, we have finished reading an alpn.
if c == ',' {
if len(a) == 0 {
- return errors.New("dns: svcbalpn: empty protocol identifier")
+ return errors.New("bad svcbalpn: empty protocol identifier")
}
alpn = append(alpn, string(a))
a = []byte{}
@@ -449,10 +449,10 @@ func (s *SVCBAlpn) parse(b string) error {
if c == '\\' {
dc, dq := nextByte(b, p)
if dq == 0 {
- return errors.New("dns: svcbalpn: unterminated escape decoding comma-separated list")
+ return errors.New("bad svcbalpn: unterminated escape decoding comma-separated list")
}
if dc != '\\' && dc != ',' {
- return errors.New("dns: svcbalpn: bad escaped character decoding comma-separated list")
+ return errors.New("bad svcbalpn: bad escaped character decoding comma-separated list")
}
p += dq
c = dc
@@ -461,7 +461,7 @@ func (s *SVCBAlpn) parse(b string) error {
}
// Add the final alpn.
if len(a) == 0 {
- return errors.New("dns: svcbalpn: last protocol identifier empty")
+ return errors.New("bad svcbalpn: last protocol identifier empty")
}
s.Alpn = append(alpn, string(a))
return nil
@@ -499,14 +499,14 @@ func (*SVCBNoDefaultAlpn) len() int { return 0 }
func (*SVCBNoDefaultAlpn) unpack(b []byte) error {
if len(b) != 0 {
- return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value")
+ return errors.New("bad svcbnodefaultalpn: no-default-alpn must have no value")
}
return nil
}
func (*SVCBNoDefaultAlpn) parse(b string) error {
if b != "" {
- return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value")
+ return errors.New("bad svcbnodefaultalpn: no-default-alpn must have no value")
}
return nil
}
@@ -529,7 +529,7 @@ func (s *SVCBPort) copy() SVCBKeyValue { return &SVCBPort{s.Port} }
func (s *SVCBPort) unpack(b []byte) error {
if len(b) != 2 {
- return errors.New("dns: svcbport: port length is not exactly 2 octets")
+ return errors.New("bad svcbport: port length is not exactly 2 octets")
}
s.Port = binary.BigEndian.Uint16(b)
return nil
@@ -544,7 +544,7 @@ func (s *SVCBPort) pack() ([]byte, error) {
func (s *SVCBPort) parse(b string) error {
port, err := strconv.ParseUint(b, 10, 16)
if err != nil {
- return errors.New("dns: svcbport: port out of range")
+ return errors.New("bad svcbport: port out of range")
}
s.Port = uint16(port)
return nil
@@ -577,7 +577,7 @@ func (s *SVCBIPv4Hint) pack() ([]byte, error) {
for _, e := range s.Hint {
x := e.To4()
if x == nil {
- return nil, errors.New("dns: svcbipv4hint: expected ipv4, hint is ipv6")
+ return nil, errors.New("bad svcbipv4hint: expected ipv4, hint is ipv6")
}
b = append(b, x...)
}
@@ -586,7 +586,7 @@ func (s *SVCBIPv4Hint) pack() ([]byte, error) {
func (s *SVCBIPv4Hint) unpack(b []byte) error {
if len(b) == 0 || len(b)%4 != 0 {
- return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
+ return errors.New("bad svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
}
b = cloneSlice(b)
x := make([]net.IP, 0, len(b)/4)
@@ -611,10 +611,10 @@ func (s *SVCBIPv4Hint) String() string {
func (s *SVCBIPv4Hint) parse(b string) error {
if b == "" {
- return errors.New("dns: svcbipv4hint: empty hint")
+ return errors.New("bad svcbipv4hint: empty hint")
}
if strings.Contains(b, ":") {
- return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6")
+ return errors.New("bad svcbipv4hint: expected ipv4, got ipv6")
}
hint := make([]net.IP, 0, strings.Count(b, ",")+1)
@@ -623,7 +623,7 @@ func (s *SVCBIPv4Hint) parse(b string) error {
e, b, _ = strings.Cut(b, ",")
ip := net.ParseIP(e).To4()
if ip == nil {
- return errors.New("dns: svcbipv4hint: bad ip")
+ return errors.New("bad svcbipv4hint: bad ip")
}
hint = append(hint, ip)
}
@@ -671,7 +671,7 @@ func (s *SVCBECHConfig) unpack(b []byte) error {
func (s *SVCBECHConfig) parse(b string) error {
x, err := fromBase64([]byte(b))
if err != nil {
- return errors.New("dns: svcbech: bad base64 ech")
+ return errors.New("bad svcbech: bad base64 ech")
}
s.ECH = x
return nil
@@ -699,7 +699,7 @@ func (s *SVCBIPv6Hint) pack() ([]byte, error) {
b := make([]byte, 0, 16*len(s.Hint))
for _, e := range s.Hint {
if len(e) != net.IPv6len || e.To4() != nil {
- return nil, errors.New("dns: svcbipv6hint: expected ipv6, hint is ipv4")
+ return nil, errors.New("bad svcbipv6hint: expected ipv6, hint is ipv4")
}
b = append(b, e...)
}
@@ -708,14 +708,14 @@ func (s *SVCBIPv6Hint) pack() ([]byte, error) {
func (s *SVCBIPv6Hint) unpack(b []byte) error {
if len(b) == 0 || len(b)%16 != 0 {
- return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16")
+ return errors.New("bas svcbipv6hint: ipv6 address byte array length not a multiple of 16")
}
b = cloneSlice(b)
x := make([]net.IP, 0, len(b)/16)
for i := 0; i < len(b); i += 16 {
ip := net.IP(b[i : i+16])
if ip.To4() != nil {
- return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4")
+ return errors.New("bad svcbipv6hint: expected ipv6, got ipv4")
}
x = append(x, ip)
}
@@ -736,7 +736,7 @@ func (s *SVCBIPv6Hint) String() string {
func (s *SVCBIPv6Hint) parse(b string) error {
if b == "" {
- return errors.New("dns: svcbipv6hint: empty hint")
+ return errors.New("bad svcbipv6hint: empty hint")
}
hint := make([]net.IP, 0, strings.Count(b, ",")+1)
@@ -745,10 +745,10 @@ func (s *SVCBIPv6Hint) parse(b string) error {
e, b, _ = strings.Cut(b, ",")
ip := net.ParseIP(e)
if ip == nil {
- return errors.New("dns: svcbipv6hint: bad ip")
+ return errors.New("bad svcbipv6hint: bad ip")
}
if ip.To4() != nil {
- return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6")
+ return errors.New("bad svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6")
}
hint = append(hint, ip)
}
@@ -800,7 +800,7 @@ func (s *SVCBDoHPath) unpack(b []byte) error {
func (s *SVCBDoHPath) parse(b string) error {
template, err := svcbParseParam(b)
if err != nil {
- return fmt.Errorf("dns: svcbdohpath: %w", err)
+ return fmt.Errorf("bad svcbdohpath: %w", err)
}
s.Template = string(template)
return nil
@@ -838,14 +838,14 @@ func (*SVCBOhttp) len() int { return 0 }
func (*SVCBOhttp) unpack(b []byte) error {
if len(b) != 0 {
- return errors.New("dns: svcbotthp: svcbotthp must have no value")
+ return errors.New("bad svcbotthp: svcbotthp must have no value")
}
return nil
}
func (*SVCBOhttp) parse(b string) error {
if b != "" {
- return errors.New("dns: svcbotthp: svcbotthp must have no value")
+ return errors.New("bad svcbotthp: svcbotthp must have no value")
}
return nil
}
@@ -878,7 +878,7 @@ func (s *SVCBLocal) unpack(b []byte) error {
func (s *SVCBLocal) parse(b string) error {
data, err := svcbParseParam(b)
if err != nil {
- return fmt.Errorf("dns: svcblocal: svcb private/experimental key %w", err)
+ return fmt.Errorf("bad svcblocal: svcb private/experimental key %w", err)
}
s.Data = data
return nil
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index 936dc212457..4af7d286c7f 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 66}
+var Version = v{1, 1, 68}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index cc09810fb12..8143ddc1b98 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -2,6 +2,8 @@
package dns
+import "fmt"
+
// pack*() functions
func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
@@ -1222,7 +1224,7 @@ func (rr *A) unpack(msg []byte, off int) (off1 int, err error) {
rr.A, off, err = unpackDataA(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("A: %w", err)
}
return off, nil
}
@@ -1233,7 +1235,7 @@ func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) {
rr.AAAA, off, err = unpackDataAAAA(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AAAA: %w", err)
}
return off, nil
}
@@ -1244,14 +1246,14 @@ func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) {
rr.Subtype, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AFSDB.Subtype: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Hostname, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AFSDB.Hostname: %w", err)
}
return off, nil
}
@@ -1262,14 +1264,14 @@ func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Precedence, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AMTRELAY.Precedence: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.GatewayType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AMTRELAY.GatewayType: %w", err)
}
if off == len(msg) {
return off, nil
@@ -1279,7 +1281,7 @@ func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AMTRELAY.GatewayHost: %w", err)
}
return off, nil
}
@@ -1297,7 +1299,7 @@ func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) {
rr.Prefixes, off, err = unpackDataApl(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("APL.Prefixes: %w", err)
}
return off, nil
}
@@ -1308,7 +1310,7 @@ func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) {
rr.Txt, off, err = unpackStringTxt(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("AVC.Txt: %w", err)
}
return off, nil
}
@@ -1319,21 +1321,21 @@ func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) {
rr.Flag, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CAA.Flag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Tag, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CAA.Tag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Value, off, err = unpackStringOctet(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CAA.Value: %w", err)
}
return off, nil
}
@@ -1344,28 +1346,28 @@ func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDNSKEY.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Protocol, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDNSKEY.Protocol: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDNSKEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDNSKEY.PublicKey: %w", err)
}
return off, nil
}
@@ -1376,28 +1378,28 @@ func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) {
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDS.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDS.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.DigestType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDS.DigestType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("CDS.Digest: %w", err)
}
return off, nil
}
@@ -1408,28 +1410,28 @@ func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) {
rr.Type, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CERT.Type: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CERT.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CERT.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("CERT.Certificate: %w", err)
}
return off, nil
}
@@ -1440,7 +1442,7 @@ func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) {
rr.Target, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CNAME.Target: %w", err)
}
return off, nil
}
@@ -1451,21 +1453,21 @@ func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) {
rr.Serial, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CSYNC.Serial: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CSYNC.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("CSYNC.TypeBitMap: %w", err)
}
return off, nil
}
@@ -1476,7 +1478,7 @@ func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) {
rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("DHCID.Digest: %w", err)
}
return off, nil
}
@@ -1487,28 +1489,28 @@ func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) {
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DLV.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DLV.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.DigestType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DLV.DigestType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("DLV.Digest: %w", err)
}
return off, nil
}
@@ -1519,7 +1521,7 @@ func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) {
rr.Target, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DNAME.Target: %w", err)
}
return off, nil
}
@@ -1530,28 +1532,28 @@ func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DNSKEY.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Protocol, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DNSKEY.Protocol: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DNSKEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("DNSKEY.PublicKey: %w", err)
}
return off, nil
}
@@ -1562,28 +1564,28 @@ func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) {
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DS.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DS.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.DigestType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("DS.DigestType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("DS.Digest: %w", err)
}
return off, nil
}
@@ -1594,7 +1596,7 @@ func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) {
rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("EID.Endpoint: %w", err)
}
return off, nil
}
@@ -1605,7 +1607,7 @@ func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) {
rr.Address, off, err = unpackUint48(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("EUI48.Address: %w", err)
}
return off, nil
}
@@ -1616,7 +1618,7 @@ func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) {
rr.Address, off, err = unpackUint64(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("EUI64.Address: %w", err)
}
return off, nil
}
@@ -1627,7 +1629,7 @@ func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) {
rr.Gid, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("GID.Gid: %w", err)
}
return off, nil
}
@@ -1638,21 +1640,21 @@ func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) {
rr.Longitude, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("GPOS.Longitude: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Latitude, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("GPOS.Latitude: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Altitude, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("GPOS.Altitude: %w", err)
}
return off, nil
}
@@ -1663,14 +1665,14 @@ func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) {
rr.Cpu, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HINFO.Cpu: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Os, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HINFO.Os: %w", err)
}
return off, nil
}
@@ -1681,21 +1683,21 @@ func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) {
rr.HitLength, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HIP.HitLength: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HIP.PublicKeyAlgorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKeyLength, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HIP.PublicKeyLength: %w", err)
}
if off == len(msg) {
return off, nil
@@ -1710,7 +1712,7 @@ func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("HIP.RendezvousServers: %w", err)
}
return off, nil
}
@@ -1721,21 +1723,21 @@ func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) {
rr.Priority, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HTTPS.Priority: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Target, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HTTPS.Target: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Value, off, err = unpackDataSVCB(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("HTTPS.Value: %w", err)
}
return off, nil
}
@@ -1746,21 +1748,21 @@ func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Precedence, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("IPSECKEY.Precedence: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.GatewayType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("IPSECKEY.GatewayType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("IPSECKEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
@@ -1770,14 +1772,14 @@ func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType)
if err != nil {
- return off, err
+ return off, fmt.Errorf("IPSECKEY.GatewayHost: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("IPSECKEY.PublicKey: %w", err)
}
return off, nil
}
@@ -1788,14 +1790,14 @@ func (rr *ISDN) unpack(msg []byte, off int) (off1 int, err error) {
rr.Address, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("ISDN.Address: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.SubAddress, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("ISDN.SubAddress: %w", err)
}
return off, nil
}
@@ -1806,28 +1808,28 @@ func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("KEY.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Protocol, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("KEY.Protocol: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("KEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("KEY.PublicKey: %w", err)
}
return off, nil
}
@@ -1838,14 +1840,14 @@ func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("KX.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Exchanger, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("KX.Exchanger: %w", err)
}
return off, nil
}
@@ -1856,14 +1858,14 @@ func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("L32.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Locator32, off, err = unpackDataA(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("L32.Locator32: %w", err)
}
return off, nil
}
@@ -1874,14 +1876,14 @@ func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("L64.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Locator64, off, err = unpackUint64(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("L64.Locator64: %w", err)
}
return off, nil
}
@@ -1892,49 +1894,49 @@ func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) {
rr.Version, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.Version: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Size, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.Size: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.HorizPre, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.HorizPre: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.VertPre, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.VertPre: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Latitude, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.Latitude: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Longitude, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.Longitude: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Altitude, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LOC.Altitude: %w", err)
}
return off, nil
}
@@ -1945,14 +1947,14 @@ func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LP.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Fqdn, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("LP.Fqdn: %w", err)
}
return off, nil
}
@@ -1963,7 +1965,7 @@ func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) {
rr.Mb, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MB.Mb: %w", err)
}
return off, nil
}
@@ -1974,7 +1976,7 @@ func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) {
rr.Md, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MD.Md: %w", err)
}
return off, nil
}
@@ -1985,7 +1987,7 @@ func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) {
rr.Mf, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MF.Mf: %w", err)
}
return off, nil
}
@@ -1996,7 +1998,7 @@ func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) {
rr.Mg, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MG.Mg: %w", err)
}
return off, nil
}
@@ -2007,14 +2009,14 @@ func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) {
rr.Rmail, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MINFO.Rmail: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Email, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MINFO.Email: %w", err)
}
return off, nil
}
@@ -2025,7 +2027,7 @@ func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) {
rr.Mr, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MR.Mr: %w", err)
}
return off, nil
}
@@ -2036,14 +2038,14 @@ func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MX.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Mx, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("MX.Mx: %w", err)
}
return off, nil
}
@@ -2054,42 +2056,42 @@ func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) {
rr.Order, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Order: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Flags, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Service, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Service: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Regexp, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Regexp: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Replacement, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NAPTR.Replacement: %w", err)
}
return off, nil
}
@@ -2100,14 +2102,14 @@ func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NID.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.NodeID, off, err = unpackUint64(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NID.NodeID: %w", err)
}
return off, nil
}
@@ -2118,7 +2120,7 @@ func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) {
rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("NIMLOC.Locator: %w", err)
}
return off, nil
}
@@ -2129,7 +2131,7 @@ func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) {
rr.ZSData, off, err = unpackStringTxt(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NINFO.ZSData: %w", err)
}
return off, nil
}
@@ -2140,7 +2142,7 @@ func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) {
rr.Ns, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NS.Ns: %w", err)
}
return off, nil
}
@@ -2151,7 +2153,7 @@ func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) {
rr.Ptr, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSAPPTR.Ptr: %w", err)
}
return off, nil
}
@@ -2162,14 +2164,14 @@ func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) {
rr.NextDomain, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC.NextDomain: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC.TypeBitMap: %w", err)
}
return off, nil
}
@@ -2180,28 +2182,28 @@ func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) {
rr.Hash, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.Hash: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Flags, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Iterations, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.Iterations: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.SaltLength, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.SaltLength: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2212,7 +2214,7 @@ func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.HashLength, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.HashLength: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2223,7 +2225,7 @@ func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3.TypeBitMap: %w", err)
}
return off, nil
}
@@ -2234,28 +2236,28 @@ func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) {
rr.Hash, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3PARAM.Hash: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Flags, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3PARAM.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Iterations, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3PARAM.Iterations: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.SaltLength, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NSEC3PARAM.SaltLength: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2273,7 +2275,7 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) {
rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("NULL.Data: %w", err)
}
return off, nil
}
@@ -2291,14 +2293,14 @@ func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) {
rr.NextDomain, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NXT.NextDomain: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("NXT.TypeBitMap: %w", err)
}
return off, nil
}
@@ -2309,7 +2311,7 @@ func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("OPENPGPKEY.PublicKey: %w", err)
}
return off, nil
}
@@ -2320,7 +2322,7 @@ func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) {
rr.Option, off, err = unpackDataOpt(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("OPT.Option: %w", err)
}
return off, nil
}
@@ -2331,7 +2333,7 @@ func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) {
rr.Ptr, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("PTR.Ptr: %w", err)
}
return off, nil
}
@@ -2342,21 +2344,21 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("PX.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Map822, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("PX.Map822: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Mapx400, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("PX.Mapx400: %w", err)
}
return off, nil
}
@@ -2378,7 +2380,7 @@ func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("RFC3597.Rdata: %w", err)
}
return off, nil
}
@@ -2389,28 +2391,28 @@ func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RKEY.Flags: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Protocol, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RKEY.Protocol: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RKEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("RKEY.PublicKey: %w", err)
}
return off, nil
}
@@ -2421,14 +2423,14 @@ func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) {
rr.Mbox, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RP.Mbox: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Txt, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RP.Txt: %w", err)
}
return off, nil
}
@@ -2439,63 +2441,63 @@ func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) {
rr.TypeCovered, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.TypeCovered: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Labels, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.Labels: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.OrigTtl, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.OrigTtl: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Expiration, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.Expiration: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Inception, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.Inception: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.SignerName, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.SignerName: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("RRSIG.Signature: %w", err)
}
return off, nil
}
@@ -2506,14 +2508,14 @@ func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) {
rr.Preference, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RT.Preference: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Host, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("RT.Host: %w", err)
}
return off, nil
}
@@ -2524,63 +2526,63 @@ func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) {
rr.TypeCovered, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.TypeCovered: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Labels, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.Labels: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.OrigTtl, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.OrigTtl: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Expiration, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.Expiration: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Inception, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.Inception: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.SignerName, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.SignerName: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("SIG.Signature: %w", err)
}
return off, nil
}
@@ -2591,28 +2593,28 @@ func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) {
rr.Usage, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SMIMEA.Usage: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Selector, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SMIMEA.Selector: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.MatchingType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SMIMEA.MatchingType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("SMIMEA.Certificate: %w", err)
}
return off, nil
}
@@ -2623,49 +2625,49 @@ func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) {
rr.Ns, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Ns: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Mbox, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Mbox: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Serial, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Serial: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Refresh, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Refresh: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Retry, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Retry: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Expire, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Expire: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Minttl, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SOA.Minttl: %w", err)
}
return off, nil
}
@@ -2676,7 +2678,7 @@ func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) {
rr.Txt, off, err = unpackStringTxt(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SPF.Txt: %w", err)
}
return off, nil
}
@@ -2687,28 +2689,28 @@ func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) {
rr.Priority, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SRV.Priority: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Weight, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SRV.Weight: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Port, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SRV.Port: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Target, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SRV.Target: %w", err)
}
return off, nil
}
@@ -2719,21 +2721,21 @@ func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) {
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SSHFP.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Type, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SSHFP.Type: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("SSHFP.FingerPrint: %w", err)
}
return off, nil
}
@@ -2744,21 +2746,21 @@ func (rr *SVCB) unpack(msg []byte, off int) (off1 int, err error) {
rr.Priority, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SVCB.Priority: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Target, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SVCB.Target: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Value, off, err = unpackDataSVCB(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("SVCB.Value: %w", err)
}
return off, nil
}
@@ -2769,28 +2771,28 @@ func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) {
rr.KeyTag, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TA.KeyTag: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Algorithm, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TA.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.DigestType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TA.DigestType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("TA.Digest: %w", err)
}
return off, nil
}
@@ -2801,14 +2803,14 @@ func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) {
rr.PreviousName, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TALINK.PreviousName: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.NextName, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TALINK.NextName: %w", err)
}
return off, nil
}
@@ -2819,42 +2821,42 @@ func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) {
rr.Algorithm, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Inception, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.Inception: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Expiration, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.Expiration: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Mode, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.Mode: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Error, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.Error: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.KeySize, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.KeySize: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2865,7 +2867,7 @@ func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.OtherLen, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TKEY.OtherLen: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2883,28 +2885,28 @@ func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) {
rr.Usage, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TLSA.Usage: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Selector, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TLSA.Selector: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.MatchingType, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TLSA.MatchingType: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("TLSA.Certificate: %w", err)
}
return off, nil
}
@@ -2915,28 +2917,28 @@ func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) {
rr.Algorithm, off, err = UnpackDomainName(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.Algorithm: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.TimeSigned, off, err = unpackUint48(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.TimeSigned: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Fudge, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.Fudge: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.MACSize, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.MACSize: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2947,21 +2949,21 @@ func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) {
}
rr.OrigId, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.OrigId: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Error, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.Error: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.OtherLen, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TSIG.OtherLen: %w", err)
}
if off == len(msg) {
return off, nil
@@ -2979,7 +2981,7 @@ func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) {
rr.Txt, off, err = unpackStringTxt(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("TXT.Txt: %w", err)
}
return off, nil
}
@@ -2990,7 +2992,7 @@ func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) {
rr.Uid, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("UID.Uid: %w", err)
}
return off, nil
}
@@ -3001,7 +3003,7 @@ func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) {
rr.Uinfo, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("UINFO.Uinfo: %w", err)
}
return off, nil
}
@@ -3012,21 +3014,21 @@ func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) {
rr.Priority, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("URI.Priority: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Weight, off, err = unpackUint16(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("URI.Weight: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Target, off, err = unpackStringOctet(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("URI.Target: %w", err)
}
return off, nil
}
@@ -3037,7 +3039,7 @@ func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) {
rr.PSDNAddress, off, err = unpackString(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("X25.PSDNAddress: %w", err)
}
return off, nil
}
@@ -3048,28 +3050,28 @@ func (rr *ZONEMD) unpack(msg []byte, off int) (off1 int, err error) {
rr.Serial, off, err = unpackUint32(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("ZONEMD.Serial: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Scheme, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("ZONEMD.Scheme: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Hash, off, err = unpackUint8(msg, off)
if err != nil {
- return off, err
+ return off, fmt.Errorf("ZONEMD.Hash: %w", err)
}
if off == len(msg) {
return off, nil
}
rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
if err != nil {
- return off, err
+ return off, fmt.Errorf("ZONEMD.Digest: %w", err)
}
return off, nil
}
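The change applied across these `unpack` methods wraps each field's error with `fmt.Errorf("<RR>.<Field>: %w", err)`, which adds the failing field to the message while keeping the underlying error reachable through `errors.Is`/`errors.As`. A minimal sketch of the idea (the `errTruncated` sentinel and helper are illustrative, not the miekg/dns API):

```go
package main

import (
	"errors"
	"fmt"
)

// errTruncated is an illustrative sentinel for "message too short".
var errTruncated = errors.New("overflow unpacking uint16")

// unpackUint16 is a minimal stand-in for the real helper.
func unpackUint16(msg []byte, off int) (uint16, int, error) {
	if off+2 > len(msg) {
		return 0, len(msg), errTruncated
	}
	return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2, nil
}

func main() {
	_, _, err := unpackUint16([]byte{0x01}, 0)
	wrapped := fmt.Errorf("TSIG.Fudge: %w", err)  // field context, cause preserved
	fmt.Println(wrapped)                          // TSIG.Fudge: overflow unpacking uint16
	fmt.Println(errors.Is(wrapped, errTruncated)) // true: the sentinel still matches
}
```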
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
index ee4e7bb6edf..5646309e09c 100644
--- a/vendor/github.com/modern-go/reflect2/safe_type.go
+++ b/vendor/github.com/modern-go/reflect2/safe_type.go
@@ -6,10 +6,12 @@ import (
)
type safeType struct {
- reflect.Type
- cfg *frozenConfig
+ Type reflect.Type
+ cfg *frozenConfig
}
+var _ Type = &safeType{}
+
func (type2 *safeType) New() interface{} {
return reflect.New(type2.Type).Interface()
}
@@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer {
panic("does not support unsafe operation")
}
+func (type2 *safeType) Kind() reflect.Kind {
+ return type2.Type.Kind()
+}
+
+func (type2 *safeType) Len() int {
+ return type2.Type.Len()
+}
+
+func (type2 *safeType) NumField() int {
+ return type2.Type.NumField()
+}
+
+func (type2 *safeType) String() string {
+ return type2.Type.String()
+}
+
func (type2 *safeType) Elem() Type {
return type2.cfg.Type2(type2.Type.Elem())
}
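Replacing the embedded `reflect.Type` with a named field removes method promotion, so `safeType` now delegates `Kind`, `Len`, `NumField`, and `String` explicitly; the `var _ Type = &safeType{}` line is a compile-time guard that the delegation is complete. A minimal sketch of that guard idiom (the types here are illustrative, not the reflect2 API):

```go
package main

import "fmt"

type Stringer interface{ String() string }

type widget struct{ id int }

// Compile-time assertion: the build breaks if *widget ever stops satisfying
// Stringer, e.g. after replacing an embedded field with explicit delegation.
var _ Stringer = (*widget)(nil)

func (w *widget) String() string { return fmt.Sprintf("widget(%d)", w.id) }

func main() { fmt.Println(&widget{id: 7}) }
```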
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
index 6c0ca81dc86..524bace9da9 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/README.md
@@ -8,7 +8,8 @@
| Warnings | [Statefulness](#warnings) |
| Issues | [](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) [](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fdeltatocumulative) |
| Code coverage | [](https://app.codecov.io/gh/open-telemetry/opentelemetry-collector-contrib/tree/main/?components%5B0%5D=processor_deltatocumulative&displayType=list) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@sh0rez](https://www.github.com/sh0rez), [@RichieSams](https://www.github.com/RichieSams) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@RichieSams](https://www.github.com/RichieSams) |
+| Emeritus | [@tombrk](https://www.github.com/tombrk) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
index f11a329c327..67112dff91a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md
@@ -8,32 +8,32 @@ The following telemetry is emitted by this component.
### otelcol_deltatocumulative_datapoints
-total number of datapoints processed. may have 'error' attribute, if processing failed
+total number of datapoints processed. may have 'error' attribute, if processing failed [Development]
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {datapoint} | Sum | Int | true |
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {datapoint} | Sum | Int | true | Development |
### otelcol_deltatocumulative_streams_limit
-upper limit of tracked streams
+upper limit of tracked streams [Development]
-| Unit | Metric Type | Value Type |
-| ---- | ----------- | ---------- |
-| {stream} | Gauge | Int |
+| Unit | Metric Type | Value Type | Stability |
+| ---- | ----------- | ---------- | --------- |
+| {stream} | Gauge | Int | Development |
### otelcol_deltatocumulative_streams_max_stale
-duration after which streams inactive streams are dropped
+duration after which inactive streams are dropped [Development]
-| Unit | Metric Type | Value Type |
-| ---- | ----------- | ---------- |
-| s | Gauge | Int |
+| Unit | Metric Type | Value Type | Stability |
+| ---- | ----------- | ---------- | --------- |
+| s | Gauge | Int | Development |
### otelcol_deltatocumulative_streams_tracked
-number of streams tracked
+number of streams tracked [Development]
-| Unit | Metric Type | Value Type | Monotonic |
-| ---- | ----------- | ---------- | --------- |
-| {dps} | Sum | Int | false |
+| Unit | Metric Type | Value Type | Monotonic | Stability |
+| ---- | ----------- | ---------- | --------- | --------- |
+| {dps} | Sum | Int | false | Development |
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
index 1da69b6c62f..200533f0c25 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/add.go
@@ -31,7 +31,7 @@ type Adder struct{}
var maxBuckets = 160
-func (add Adder) Numbers(state, dp pmetric.NumberDataPoint) error {
+func (Adder) Numbers(state, dp pmetric.NumberDataPoint) error {
switch dp.ValueType() {
case pmetric.NumberDataPointValueTypeDouble:
v := state.DoubleValue() + dp.DoubleValue()
@@ -43,7 +43,7 @@ func (add Adder) Numbers(state, dp pmetric.NumberDataPoint) error {
return nil
}
-func (add Adder) Histograms(state, dp pmetric.HistogramDataPoint) error {
+func (Adder) Histograms(state, dp pmetric.HistogramDataPoint) error {
// bounds different: no way to merge, so reset observation to new boundaries
if !pslice.Equal(state.ExplicitBounds(), dp.ExplicitBounds()) {
dp.CopyTo(state)
@@ -54,7 +54,7 @@ func (add Adder) Histograms(state, dp pmetric.HistogramDataPoint) error {
// given we have limited error handling at this stage (and already verified boundaries are correct),
// doing a best-effort add of whatever we have appears reasonable.
n := min(state.BucketCounts().Len(), dp.BucketCounts().Len())
- for i := 0; i < n; i++ {
+ for i := range n {
sum := state.BucketCounts().At(i) + dp.BucketCounts().At(i)
state.BucketCounts().SetAt(i, sum)
}
@@ -82,7 +82,7 @@ func (add Adder) Histograms(state, dp pmetric.HistogramDataPoint) error {
return nil
}
-func (add Adder) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error {
+func (Adder) Exponential(state, dp pmetric.ExponentialHistogramDataPoint) error {
type H = pmetric.ExponentialHistogramDataPoint
if state.Scale() != dp.Scale() {
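`for i := range n` is Go 1.22's range-over-int form, equivalent to `for i := 0; i < n; i++`; combined with the built-in `min` (Go 1.21+), the bucket merge only touches indices both slices have. A small self-contained sketch (the slices are illustrative):

```go
package main

import "fmt"

func main() {
	state := []uint64{1, 2, 3}
	dp := []uint64{10, 20, 30, 40} // extra buckets are ignored, as in the processor

	n := min(len(state), len(dp)) // built-in min, Go 1.21+
	for i := range n {            // Go 1.22+: iterates i = 0, 1, ..., n-1
		state[i] += dp[i]
	}
	fmt.Println(state) // [11 22 33]
}
```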
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
index c7134638b67..3898913658f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metadata/generated_telemetry.go
@@ -90,25 +90,25 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...Teleme
var err, errs error
builder.DeltatocumulativeDatapoints, err = builder.meter.Int64Counter(
"otelcol_deltatocumulative_datapoints",
- metric.WithDescription("total number of datapoints processed. may have 'error' attribute, if processing failed"),
+ metric.WithDescription("total number of datapoints processed. may have 'error' attribute, if processing failed [Development]"),
metric.WithUnit("{datapoint}"),
)
errs = errors.Join(errs, err)
builder.DeltatocumulativeStreamsLimit, err = builder.meter.Int64Gauge(
"otelcol_deltatocumulative_streams_limit",
- metric.WithDescription("upper limit of tracked streams"),
+ metric.WithDescription("upper limit of tracked streams [Development]"),
metric.WithUnit("{stream}"),
)
errs = errors.Join(errs, err)
builder.DeltatocumulativeStreamsMaxStale, err = builder.meter.Int64Gauge(
"otelcol_deltatocumulative_streams_max_stale",
- metric.WithDescription("duration after which streams inactive streams are dropped"),
+ metric.WithDescription("duration after which streams inactive streams are dropped [Development]"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
builder.DeltatocumulativeStreamsTracked, err = builder.meter.Int64ObservableUpDownCounter(
"otelcol_deltatocumulative_streams_tracked",
- metric.WithDescription("number of streams tracked"),
+ metric.WithDescription("number of streams tracked [Development]"),
metric.WithUnit("{dps}"),
)
errs = errors.Join(errs, err)
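The builder accumulates instrument-creation failures with `errors.Join`, which treats `nil` operands as no-ops, so one final check covers every metric. A minimal sketch of the pattern (the `newInstrument` helper is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// newInstrument is a hypothetical constructor that can fail.
func newInstrument(name string) (string, error) {
	if name == "" {
		return "", errors.New("empty instrument name")
	}
	return name, nil
}

func main() {
	var err, errs error
	_, err = newInstrument("otelcol_example_counter")
	errs = errors.Join(errs, err) // joining a nil error is a no-op
	_, err = newInstrument("")
	errs = errors.Join(errs, err) // real failures accumulate
	fmt.Println(errs)             // empty instrument name
}
```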
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
index cb9b2c0ef73..dccd5e31fba 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics/data.go
@@ -59,7 +59,7 @@ func (s Gauge) Ident() Ident {
return (*Metric)(&s).Ident()
}
-func (s Gauge) SetAggregationTemporality(pmetric.AggregationTemporality) {}
+func (Gauge) SetAggregationTemporality(pmetric.AggregationTemporality) {}
type Summary Metric
@@ -71,4 +71,4 @@ func (s Summary) Ident() Ident {
return (*Metric)(&s).Ident()
}
-func (s Summary) SetAggregationTemporality(pmetric.AggregationTemporality) {}
+func (Summary) SetAggregationTemporality(pmetric.AggregationTemporality) {}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
index 3269c0b0113..2b742fef914 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml
@@ -7,36 +7,45 @@ status:
distributions: [contrib, k8s]
warnings: [Statefulness]
codeowners:
- active: [sh0rez, RichieSams]
+ active: [RichieSams]
+ emeritus: [tombrk]
telemetry:
metrics:
# streams
- deltatocumulative_streams_tracked:
- description: number of streams tracked
- unit: "{dps}"
+ deltatocumulative_datapoints:
+ description: total number of datapoints processed. may have 'error' attribute, if processing failed
+ stability:
+ level: development
+ unit: "{datapoint}"
sum:
value_type: int
- monotonic: false
- async: true
+ monotonic: true
enabled: true
deltatocumulative_streams_limit:
description: upper limit of tracked streams
+ stability:
+ level: development
unit: "{stream}"
gauge:
value_type: int
enabled: true
deltatocumulative_streams_max_stale:
 description: duration after which inactive streams are dropped
+ stability:
+ level: development
unit: "s"
gauge:
value_type: int
enabled: true
# datapoints
- deltatocumulative_datapoints:
- description: total number of datapoints processed. may have 'error' attribute, if processing failed
- unit: "{datapoint}"
+ deltatocumulative_streams_tracked:
+ description: number of streams tracked
+ stability:
+ level: development
+ unit: "{dps}"
sum:
value_type: int
- monotonic: true
+ monotonic: false
+ async: true
enabled: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
index 5fd061355dc..849873b7095 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go
@@ -22,9 +22,9 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/telemetry"
)
-var _ processor.Metrics = (*Processor)(nil)
+var _ processor.Metrics = (*deltaToCumulativeProcessor)(nil)
-type Processor struct {
+type deltaToCumulativeProcessor struct {
next consumer.Metrics
cfg Config
@@ -38,11 +38,11 @@ type Processor struct {
tel telemetry.Metrics
}
-func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *Processor {
+func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *deltaToCumulativeProcessor {
ctx, cancel := context.WithCancel(context.Background())
limit := maps.Limit(int64(cfg.MaxStreams))
- proc := Processor{
+ proc := deltaToCumulativeProcessor{
next: next,
cfg: *cfg,
last: state{
@@ -71,7 +71,7 @@ type vals struct {
expo *mutex[pmetric.ExponentialHistogramDataPoint]
}
-func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
+func (p *deltaToCumulativeProcessor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
now := time.Now()
const (
@@ -184,7 +184,7 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro
return p.next.ConsumeMetrics(ctx, md)
}
-func (p *Processor) Start(_ context.Context, _ component.Host) error {
+func (p *deltaToCumulativeProcessor) Start(_ context.Context, _ component.Host) error {
if p.cfg.MaxStale != 0 {
// delete stale streams once per minute
go func() {
@@ -213,12 +213,12 @@ func (p *Processor) Start(_ context.Context, _ component.Host) error {
return nil
}
-func (p *Processor) Shutdown(_ context.Context) error {
+func (p *deltaToCumulativeProcessor) Shutdown(_ context.Context) error {
p.cancel()
return nil
}
-func (p *Processor) Capabilities() consumer.Capabilities {
+func (*deltaToCumulativeProcessor) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: true}
}
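Unexporting the processor type narrows the package API to the `newProcessor` factory, while the interface assertion still proves conformance at compile time. A minimal sketch of that shape (the interfaces and names are illustrative stand-ins, not the collector API):

```go
package main

import (
	"context"
	"fmt"
)

// MetricsConsumer is an illustrative stand-in for consumer.Metrics.
type MetricsConsumer interface {
	Consume(ctx context.Context, count int) error
}

// deltaProcessor is unexported: callers only ever see the interface.
type deltaProcessor struct{ next MetricsConsumer }

var _ MetricsConsumer = (*deltaProcessor)(nil)

func (p *deltaProcessor) Consume(ctx context.Context, count int) error {
	return p.next.Consume(ctx, count)
}

type printer struct{}

func (printer) Consume(_ context.Context, count int) error {
	fmt.Println("consumed", count)
	return nil
}

// NewProcessor is the only exported entry point.
func NewProcessor(next MetricsConsumer) MetricsConsumer {
	return &deltaProcessor{next: next}
}

func main() {
	_ = NewProcessor(printer{}).Consume(context.Background(), 42)
}
```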
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index 4e5ff92a29f..55cc5b0770c 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -27,11 +27,13 @@ import (
"net/url"
"os"
"path/filepath"
+ "slices"
"strings"
"sync"
"time"
- conntrack "github.com/mwitkow/go-conntrack"
+ "github.com/golang-jwt/jwt/v5"
+ "github.com/mwitkow/go-conntrack"
"go.yaml.in/yaml/v2"
"golang.org/x/net/http/httpproxy"
"golang.org/x/net/http2"
@@ -39,6 +41,10 @@ import (
"golang.org/x/oauth2/clientcredentials"
)
+const (
+ grantTypeJWTBearer = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+)
+
var (
// DefaultHTTPClientConfig is the default HTTP client configuration.
DefaultHTTPClientConfig = HTTPClientConfig{
@@ -130,7 +136,7 @@ func (tv *TLSVersion) String() string {
// BasicAuth contains basic HTTP authentication credentials.
type BasicAuth struct {
- Username string `yaml:"username" json:"username"`
+ Username string `yaml:"username,omitempty" json:"username,omitempty"`
UsernameFile string `yaml:"username_file,omitempty" json:"username_file,omitempty"`
// UsernameRef is the name of the secret within the secret manager to use as the username.
UsernameRef string `yaml:"username_ref,omitempty" json:"username_ref,omitempty"`
@@ -232,17 +238,43 @@ func (u URL) MarshalJSON() ([]byte, error) {
// OAuth2 is the oauth2 client configuration.
type OAuth2 struct {
- ClientID string `yaml:"client_id" json:"client_id"`
- ClientSecret Secret `yaml:"client_secret" json:"client_secret"`
- ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"`
+ ClientID string `yaml:"client_id,omitempty" json:"client_id,omitempty"`
+ ClientSecret Secret `yaml:"client_secret,omitempty" json:"client_secret,omitempty"`
+ ClientSecretFile string `yaml:"client_secret_file,omitempty" json:"client_secret_file,omitempty"`
// ClientSecretRef is the name of the secret within the secret manager to use as the client
// secret.
- ClientSecretRef string `yaml:"client_secret_ref" json:"client_secret_ref"`
- Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
- TokenURL string `yaml:"token_url" json:"token_url"`
- EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
- TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
- ProxyConfig `yaml:",inline"`
+ ClientSecretRef string `yaml:"client_secret_ref,omitempty" json:"client_secret_ref,omitempty"`
+ ClientCertificateKeyID string `yaml:"client_certificate_key_id,omitempty" json:"client_certificate_key_id,omitempty"`
+ ClientCertificateKey Secret `yaml:"client_certificate_key,omitempty" json:"client_certificate_key,omitempty"`
+ ClientCertificateKeyFile string `yaml:"client_certificate_key_file,omitempty" json:"client_certificate_key_file,omitempty"`
+ // ClientCertificateKeyRef is the name of the secret within the secret manager to use as the client
+ // certificate key.
+ ClientCertificateKeyRef string `yaml:"client_certificate_key_ref,omitempty" json:"client_certificate_key_ref,omitempty"`
+ // GrantType is the OAuth2 grant type to use. It can be one of
+ // "client_credentials" or "urn:ietf:params:oauth:grant-type:jwt-bearer" (RFC 7523).
+ // Default value is "client_credentials"
+ GrantType string `yaml:"grant_type,omitempty" json:"grant_type,omitempty"`
+ // SignatureAlgorithm is the RSA algorithm used to sign the JWT token. Only used if
+ // GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+ // Default value is RS256; valid values are RS256, RS384 and RS512.
+ SignatureAlgorithm string `yaml:"signature_algorithm,omitempty" json:"signature_algorithm,omitempty"`
+ // Iss is the OAuth client identifier used when communicating with
+ // the configured OAuth provider. Default value is client_id. Only used if
+ // GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+ Iss string `yaml:"iss,omitempty" json:"iss,omitempty"`
+ // Audience optionally specifies the intended audience of the
+ // request. If empty, the value of TokenURL is used as the
+ // intended audience. Only used if
+ // GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+ Audience string `yaml:"audience,omitempty" json:"audience,omitempty"`
+ // Claims is a map of claims to be added to the JWT token. Only used if
+ // GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+ Claims map[string]interface{} `yaml:"claims,omitempty" json:"claims,omitempty"`
+ Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
+ TokenURL string `yaml:"token_url,omitempty" json:"token_url,omitempty"`
+ EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
+ TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
+ ProxyConfig `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -408,8 +440,15 @@ func (c *HTTPClientConfig) Validate() error {
if len(c.OAuth2.TokenURL) == 0 {
return errors.New("oauth2 token_url must be configured")
}
- if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 {
- return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured")
+ if c.OAuth2.GrantType == grantTypeJWTBearer {
+ if nonZeroCount(len(c.OAuth2.ClientCertificateKey) > 0, len(c.OAuth2.ClientCertificateKeyFile) > 0, len(c.OAuth2.ClientCertificateKeyRef) > 0) > 1 {
+ return errors.New("at most one of oauth2 client_certificate_key, client_certificate_key_file & client_certificate_key_ref must be configured using grant-type=urn:ietf:params:oauth:grant-type:jwt-bearer")
+ }
+ if c.OAuth2.SignatureAlgorithm != "" && !slices.Contains(validSignatureAlgorithm, c.OAuth2.SignatureAlgorithm) {
+ return errors.New("valid signature algorithms are RS256, RS384 and RS512")
+ }
+ } else if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 {
+ return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured using grant-type=client_credentials")
}
}
if err := c.ProxyConfig.Validate(); err != nil {
@@ -623,12 +662,6 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon
DialContext: dialContext,
}
if opts.http2Enabled && cfg.EnableHTTP2 {
- // HTTP/2 support is golang had many problematic cornercases where
- // dead connections would be kept and used in connection pools.
- // https://github.com/golang/go/issues/32388
- // https://github.com/golang/go/issues/39337
- // https://github.com/golang/go/issues/39750
-
http2t, err := http2.ConfigureTransports(rt.(*http.Transport))
if err != nil {
return nil, err
@@ -668,11 +701,23 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon
}
if cfg.OAuth2 != nil {
- clientSecret, err := toSecret(opts.secretManager, cfg.OAuth2.ClientSecret, cfg.OAuth2.ClientSecretFile, cfg.OAuth2.ClientSecretRef)
- if err != nil {
- return nil, fmt.Errorf("unable to use client secret: %w", err)
+ var (
+ oauthCredential SecretReader
+ err error
+ )
+
+ if cfg.OAuth2.GrantType == grantTypeJWTBearer {
+ oauthCredential, err = toSecret(opts.secretManager, cfg.OAuth2.ClientCertificateKey, cfg.OAuth2.ClientCertificateKeyFile, cfg.OAuth2.ClientCertificateKeyRef)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use client certificate: %w", err)
+ }
+ } else {
+ oauthCredential, err = toSecret(opts.secretManager, cfg.OAuth2.ClientSecret, cfg.OAuth2.ClientSecretFile, cfg.OAuth2.ClientSecretRef)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use client secret: %w", err)
+ }
}
- rt = NewOAuth2RoundTripper(clientSecret, cfg.OAuth2, rt, &opts)
+ rt = NewOAuth2RoundTripper(oauthCredential, cfg.OAuth2, rt, &opts)
}
if cfg.HTTPHeaders != nil {
@@ -891,27 +936,31 @@ type oauth2RoundTripper struct {
lastSecret string
// Required for interaction with Oauth2 server.
- config *OAuth2
- clientSecret SecretReader
- opts *httpClientOptions
- client *http.Client
+ config *OAuth2
+ oauthCredential SecretReader
+ opts *httpClientOptions
+ client *http.Client
}
-func NewOAuth2RoundTripper(clientSecret SecretReader, config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper {
- if clientSecret == nil {
- clientSecret = NewInlineSecret("")
+func NewOAuth2RoundTripper(oauthCredential SecretReader, config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper {
+ if oauthCredential == nil {
+ oauthCredential = NewInlineSecret("")
}
return &oauth2RoundTripper{
config: config,
// A correct tokenSource will be added later on.
- lastRT: &oauth2.Transport{Base: next},
- opts: opts,
- clientSecret: clientSecret,
+ lastRT: &oauth2.Transport{Base: next},
+ opts: opts,
+ oauthCredential: oauthCredential,
}
}
-func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret string) (client *http.Client, source oauth2.TokenSource, err error) {
+type oauth2TokenSourceConfig interface {
+ TokenSource(ctx context.Context) oauth2.TokenSource
+}
+
+func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, clientCredential string) (client *http.Client, source oauth2.TokenSource, err error) {
tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig, WithSecretManager(rt.opts.secretManager))
if err != nil {
return nil, nil, err
@@ -949,12 +998,49 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str
t = NewUserAgentRoundTripper(ua, t)
}
- config := &clientcredentials.Config{
- ClientID: rt.config.ClientID,
- ClientSecret: secret,
- Scopes: rt.config.Scopes,
- TokenURL: rt.config.TokenURL,
- EndpointParams: mapToValues(rt.config.EndpointParams),
+ var config oauth2TokenSourceConfig
+
+ if rt.config.GrantType == grantTypeJWTBearer {
+ // RFC 7523 3.1 - JWT authorization grants
+ // RFC 7523 3.2 - Client Authentication Processing is not implemented upstream yet,
+ // see https://github.com/golang/oauth2/pull/745
+
+ var sig *jwt.SigningMethodRSA
+ switch rt.config.SignatureAlgorithm {
+ case jwt.SigningMethodRS256.Name:
+ sig = jwt.SigningMethodRS256
+ case jwt.SigningMethodRS384.Name:
+ sig = jwt.SigningMethodRS384
+ case jwt.SigningMethodRS512.Name:
+ sig = jwt.SigningMethodRS512
+ default:
+ sig = jwt.SigningMethodRS256
+ }
+
+ iss := rt.config.Iss
+ if iss == "" {
+ iss = rt.config.ClientID
+ }
+ config = &JwtGrantTypeConfig{
+ PrivateKey: []byte(clientCredential),
+ PrivateKeyID: rt.config.ClientCertificateKeyID,
+ Scopes: rt.config.Scopes,
+ TokenURL: rt.config.TokenURL,
+ SigningAlgorithm: sig,
+ Iss: iss,
+ Subject: rt.config.ClientID,
+ Audience: rt.config.Audience,
+ PrivateClaims: rt.config.Claims,
+ EndpointParams: mapToValues(rt.config.EndpointParams),
+ }
+ } else {
+ config = &clientcredentials.Config{
+ ClientID: rt.config.ClientID,
+ ClientSecret: clientCredential,
+ Scopes: rt.config.Scopes,
+ TokenURL: rt.config.TokenURL,
+ EndpointParams: mapToValues(rt.config.EndpointParams),
+ }
}
client = &http.Client{Transport: t}
ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client)
@@ -973,8 +1059,8 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
rt.mtx.RUnlock()
// Fetch the secret if it's our first run or always if the secret can change.
- if !rt.clientSecret.Immutable() || needsInit {
- newSecret, err := rt.clientSecret.Fetch(req.Context())
+ if !rt.oauthCredential.Immutable() || needsInit {
+ newSecret, err := rt.oauthCredential.Fetch(req.Context())
if err != nil {
return nil, fmt.Errorf("unable to read oauth2 client secret: %w", err)
}
@@ -1383,6 +1469,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
// using GetClientCertificate.
tlsConfig := t.tlsConfig.Clone()
if !updateRootCA(tlsConfig, caData) {
+ if t.settings.CA == nil {
+ return nil, errors.New("unable to use specified CA cert: none configured")
+ }
return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CA.Description())
}
rt, err = t.newRT(tlsConfig)
@@ -1505,19 +1594,19 @@ func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) {
fn = c.proxyFunc
}()
if c.proxyFunc != nil {
- return
+ return fn
}
if c.ProxyFromEnvironment {
proxyFn := httpproxy.FromEnvironment().ProxyFunc()
c.proxyFunc = func(req *http.Request) (*url.URL, error) {
return proxyFn(req.URL)
}
- return
+ return fn
}
if c.ProxyURL.URL != nil && c.ProxyURL.String() != "" {
if c.NoProxy == "" {
c.proxyFunc = http.ProxyURL(c.ProxyURL.URL)
- return
+ return fn
}
proxy := &httpproxy.Config{
HTTPProxy: c.ProxyURL.String(),
@@ -1529,7 +1618,7 @@ func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) {
return proxyFn(req.URL)
}
}
- return
+ return fn
}
// ProxyConnectHeader() returns the Proxy Connect Headers.
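Putting the new fields together, a programmatic configuration of the jwt-bearer grant might look as follows; this is a sketch based only on the fields and validation rules shown above, with placeholder client, key-file, and endpoint values:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.DefaultHTTPClientConfig
	cfg.OAuth2 = &config.OAuth2{
		ClientID:                 "my-client",
		TokenURL:                 "https://idp.example.com/oauth2/token", // placeholder
		GrantType:                "urn:ietf:params:oauth:grant-type:jwt-bearer",
		ClientCertificateKeyFile: "/etc/prom/key.pem", // RSA key used to sign the assertion
		SignatureAlgorithm:       "RS384",             // RS256 (default), RS384 or RS512
		Audience:                 "https://api.example.com",
	}
	// Validate enforces the grant-type specific rules added above, e.g. at most
	// one of client_certificate_key{,_file,_ref} and a known signature algorithm.
	if err := cfg.Validate(); err != nil {
		fmt.Println("config rejected:", err)
		return
	}
	fmt.Println("config OK")
}
```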
diff --git a/vendor/github.com/prometheus/common/config/oauth_assertion.go b/vendor/github.com/prometheus/common/config/oauth_assertion.go
new file mode 100644
index 00000000000..bf4bcb949b6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/oauth_assertion.go
@@ -0,0 +1,194 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt/v5"
+ "github.com/google/uuid"
+ "golang.org/x/oauth2"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ validSignatureAlgorithm = []string{"RS256", "RS384", "RS512"}
+)
+
+// JwtGrantTypeConfig is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type JwtGrantTypeConfig struct {
+ // Iss is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Iss string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // SigningAlgorithm is the RSA algorithm used to sign JWT payloads
+ SigningAlgorithm *jwt.SigningMethodRSA
+
+ // PrivateKeyID contains an optional hint indicating which key is being
+ // used.
+ PrivateKeyID string
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+
+ // EndpointParams specifies additional parameters for requests to the token endpoint.
+ EndpointParams url.Values
+
+ // Expires optionally specifies how long the token is valid for.
+ Expires time.Duration
+
+ // Audience optionally specifies the intended audience of the
+ // request. If empty, the value of TokenURL is used as the
+ // intended audience.
+ Audience string
+
+ // PrivateClaims optionally specifies custom private claims in the JWT.
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ PrivateClaims map[string]any
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *JwtGrantTypeConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *JwtGrantTypeConfig) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx context.Context
+ conf *JwtGrantTypeConfig
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := jwt.ParseRSAPrivateKeyFromPEM(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ audience := js.conf.TokenURL
+ if aud := js.conf.Audience; aud != "" {
+ audience = aud
+ }
+ expiration := time.Now().Add(10 * time.Minute)
+ if t := js.conf.Expires; t > 0 {
+ expiration = time.Now().Add(t)
+ }
+ scopes := strings.Join(js.conf.Scopes, " ")
+
+ claims := jwt.MapClaims{
+ "iss": js.conf.Iss,
+ "sub": js.conf.Subject,
+ "jti": uuid.New(),
+ "aud": audience,
+ "iat": jwt.NewNumericDate(time.Now()),
+ "exp": jwt.NewNumericDate(expiration),
+ }
+
+ if len(scopes) > 0 {
+ claims["scope"] = scopes
+ }
+
+ for k, v := range js.conf.PrivateClaims {
+ claims[k] = v
+ }
+
+ assertion := jwt.NewWithClaims(js.conf.SigningAlgorithm, claims)
+ if js.conf.PrivateKeyID != "" {
+ assertion.Header["kid"] = js.conf.PrivateKeyID
+ }
+ payload, err := assertion.SignedString(pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ if len(scopes) > 0 {
+ v.Set("scope", scopes)
+ }
+
+ for k, p := range js.conf.EndpointParams {
+ // Allow grant_type to be overridden to allow interoperability with
+ // non-compliant implementations.
+ if _, ok := v[k]; ok && k != "grant_type" {
+ return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
+ }
+ v[k] = p
+ }
+
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %w", err)
+ }
+ defer resp.Body.Close()
+ body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %w", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, &oauth2.RetrieveError{
+ Response: resp,
+ Body: body,
+ }
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ oauth2.Token
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %w", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ return token, nil
+}
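A usage sketch for the new token source: it generates a throwaway RSA key so the example is self-contained (in practice `PrivateKey` holds the PEM contents of a provisioned signing key), and the token endpoint is a placeholder, so the final fetch is expected to fail outside a real IdP:

```go
package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/golang-jwt/jwt/v5"
	"github.com/prometheus/common/config"
)

func main() {
	// Throwaway RSA key, for illustration only.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	pemKey := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	conf := &config.JwtGrantTypeConfig{
		Iss:              "my-client",
		Subject:          "my-client",
		PrivateKey:       pemKey,
		SigningAlgorithm: jwt.SigningMethodRS256,
		TokenURL:         "https://idp.example.com/oauth2/token", // placeholder endpoint
		Scopes:           []string{"metrics.read"},
	}

	// TokenSource caches via oauth2.ReuseTokenSource; each refresh signs a
	// fresh assertion and POSTs it with the jwt-bearer grant type.
	ts := conf.TokenSource(context.Background())
	if _, err := ts.Token(); err != nil {
		fmt.Println("token fetch failed (expected against a placeholder URL):", err)
	}
}
```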
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 8c8bbaa6243..21b93bca362 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -160,38 +160,38 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
n, err = w.WriteString("# HELP ")
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeName(w, compliantName)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
- return
+ return written, err
}
n, err = writeEscapedString(w, *in.Help, true)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte('\n')
written++
if err != nil {
- return
+ return written, err
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeName(w, compliantName)
written += n
if err != nil {
- return
+ return written, err
}
switch metricType {
case dto.MetricType_COUNTER:
@@ -215,34 +215,34 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
}
written += n
if err != nil {
- return
+ return written, err
}
if toOM.withUnit && in.Unit != nil {
n, err = w.WriteString("# UNIT ")
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeName(w, compliantName)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
- return
+ return written, err
}
n, err = writeEscapedString(w, *in.Unit, true)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte('\n')
written++
if err != nil {
- return
+ return written, err
}
}
@@ -306,7 +306,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
)
written += n
if err != nil {
- return
+ return written, err
}
}
n, err = writeOpenMetricsSample(
@@ -316,7 +316,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
)
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeOpenMetricsSample(
w, compliantName, "_count", metric, "", 0,
@@ -349,7 +349,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
)
written += n
if err != nil {
- return
+ return written, err
}
if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
@@ -367,7 +367,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
// out if needed).
written += n
if err != nil {
- return
+ return written, err
}
}
n, err = writeOpenMetricsSample(
@@ -377,7 +377,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
)
written += n
if err != nil {
- return
+ return written, err
}
if metric.Histogram.GetSampleCountFloat() > 0 {
return written, fmt.Errorf(
@@ -401,10 +401,10 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
}
written += n
if err != nil {
- return
+ return written, err
}
}
- return
+ return written, err
}
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 7e1d23cabc4..6b897814564 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -108,38 +108,38 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
n, err = w.WriteString("# HELP ")
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeName(w, name)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
- return
+ return written, err
}
n, err = writeEscapedString(w, *in.Help, false)
written += n
if err != nil {
- return
+ return written, err
}
err = w.WriteByte('\n')
written++
if err != nil {
- return
+ return written, err
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeName(w, name)
written += n
if err != nil {
- return
+ return written, err
}
metricType := in.GetType()
switch metricType {
@@ -161,7 +161,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
}
written += n
if err != nil {
- return
+ return written, err
}
// Finally the samples, one line for each.
@@ -211,7 +211,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
)
written += n
if err != nil {
- return
+ return written, err
}
}
n, err = writeSample(
@@ -220,7 +220,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
)
written += n
if err != nil {
- return
+ return written, err
}
n, err = writeSample(
w, name, "_count", metric, "", 0,
@@ -245,7 +245,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
)
written += n
if err != nil {
- return
+ return written, err
}
if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
@@ -263,7 +263,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
)
written += n
if err != nil {
- return
+ return written, err
}
}
n, err = writeSample(
@@ -272,7 +272,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
)
written += n
if err != nil {
- return
+ return written, err
}
v := metric.Histogram.GetSampleCountFloat()
if v == 0 {
@@ -286,10 +286,10 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
}
written += n
if err != nil {
- return
+ return written, err
}
}
- return
+ return written, err
}
// writeSample writes a single sample in text format to w, given the metric
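These hunks replace naked `return` statements with explicit `return written, err`; with named results the two forms behave identically, but the explicit form makes the returned values visible at each early exit. A small sketch of the same shape (the helper and inputs are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// write returns how many bytes were written and the first error encountered.
// Named results let deferred code observe them, but naked returns hide which
// values actually leave the function; the explicit form reads unambiguously.
func write(w *strings.Builder, parts ...string) (written int, err error) {
	for _, p := range parts {
		var n int
		n, err = w.WriteString(p)
		written += n
		if err != nil {
			return written, err // explicit: same values as a naked `return`
		}
	}
	return written, nil
}

func main() {
	var b strings.Builder
	n, err := write(&b, "# HELP ", "metric_name", "\n")
	fmt.Println(n, err, b.String() == "# HELP metric_name\n")
}
```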
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go
index 51da762c957..0a2718d53ea 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go
@@ -24,6 +24,7 @@ import (
"sync"
"golang.org/x/crypto/bcrypt"
+ "golang.org/x/time/rate"
)
// extraHTTPHeaders is a map of HTTP headers that can be added to HTTP
@@ -80,6 +81,7 @@ type webHandler struct {
handler http.Handler
logger *slog.Logger
cache *cache
+ limiter *rate.Limiter
// bcryptMtx is there to ensure that bcrypt.CompareHashAndPassword is run
// only once in parallel as this is CPU intensive.
bcryptMtx sync.Mutex
@@ -93,6 +95,11 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
+ if u.limiter != nil && !u.limiter.Allow() {
+ http.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests)
+ return
+ }
+
// Configure http headers.
for k, v := range c.HTTPConfig.Header {
w.Header().Set(k, v)
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
index 531bfcde167..c760d88ca21 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
@@ -26,12 +26,14 @@ import (
"path/filepath"
"strconv"
"strings"
+ "time"
"github.com/coreos/go-systemd/v22/activation"
"github.com/mdlayher/vsock"
config_util "github.com/prometheus/common/config"
"go.yaml.in/yaml/v2"
"golang.org/x/sync/errgroup"
+ "golang.org/x/time/rate"
)
var (
@@ -40,9 +42,10 @@ var (
)
type Config struct {
- TLSConfig TLSConfig `yaml:"tls_server_config"`
- HTTPConfig HTTPConfig `yaml:"http_server_config"`
- Users map[string]config_util.Secret `yaml:"basic_auth_users"`
+ TLSConfig TLSConfig `yaml:"tls_server_config"`
+ HTTPConfig HTTPConfig `yaml:"http_server_config"`
+ RateLimiterConfig RateLimiterConfig `yaml:"rate_limit"`
+ Users map[string]config_util.Secret `yaml:"basic_auth_users"`
}
type TLSConfig struct {
@@ -109,6 +112,11 @@ type HTTPConfig struct {
Header map[string]string `yaml:"headers,omitempty"`
}
+type RateLimiterConfig struct {
+ Burst int `yaml:"burst"`
+ Interval time.Duration `yaml:"interval"`
+}
+
func getConfig(configPath string) (*Config, error) {
content, err := os.ReadFile(configPath)
if err != nil {
@@ -365,11 +373,18 @@ func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger *slog.
return err
}
+ var limiter *rate.Limiter
+ if c.RateLimiterConfig.Interval != 0 {
+ limiter = rate.NewLimiter(rate.Every(c.RateLimiterConfig.Interval), c.RateLimiterConfig.Burst)
+ logger.Info("Rate Limiter is enabled.", "burst", c.RateLimiterConfig.Burst, "interval", c.RateLimiterConfig.Interval)
+ }
+
server.Handler = &webHandler{
tlsConfigPath: tlsConfigPath,
logger: logger,
handler: handler,
cache: newCache(),
+ limiter: limiter,
}
config, err := ConfigToTLSConfig(&c.TLSConfig)
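The limiter is built with `rate.Every(interval)` as the refill rate and `burst` as the bucket size, and the `Allow()` check in the handler answers 429 once the bucket is empty. A minimal sketch of that behavior (the values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Mirrors the handler wiring above: one token refilled every interval,
	// with a burst of requests allowed up front.
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 2)

	for i := 1; i <= 4; i++ {
		fmt.Printf("request %d allowed: %v\n", i, limiter.Allow())
	}
	// The first 2 requests pass (burst), the rest are rejected until tokens
	// refill - the point at which the webHandler would return 429.
}
```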
diff --git a/vendor/github.com/prometheus/otlptranslator/.golangci.yml b/vendor/github.com/prometheus/otlptranslator/.golangci.yml
index ed5f43f1a6c..c3a00a8fad9 100644
--- a/vendor/github.com/prometheus/otlptranslator/.golangci.yml
+++ b/vendor/github.com/prometheus/otlptranslator/.golangci.yml
@@ -46,8 +46,6 @@ linters:
desc: Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert
- pkg: io/ioutil
desc: Use corresponding 'os' or 'io' functions instead.
- - pkg: regexp
- desc: Use github.com/grafana/regexp instead of regexp
- pkg: github.com/pkg/errors
desc: Use 'errors' or 'fmt' instead of github.com/pkg/errors
- pkg: golang.org/x/exp/slices
diff --git a/vendor/github.com/prometheus/otlptranslator/README.md b/vendor/github.com/prometheus/otlptranslator/README.md
index 3b31a448eca..663d7367160 100644
--- a/vendor/github.com/prometheus/otlptranslator/README.md
+++ b/vendor/github.com/prometheus/otlptranslator/README.md
@@ -1,2 +1,120 @@
-# otlp-prometheus-translator
-Library providing API to convert OTLP metric and attribute names to respectively Prometheus metric and label names.
+# OTLP Prometheus Translator
+
+A Go library for converting [OpenTelemetry Protocol (OTLP)](https://opentelemetry.io/docs/specs/otlp/) metric and attribute names to [Prometheus](https://prometheus.io/)-compliant formats. This is an internal library for both Prometheus and OpenTelemetry, without any stability guarantees for external usage.
+
+Part of the [Prometheus](https://prometheus.io/) ecosystem, following the [OpenTelemetry to Prometheus compatibility specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md).
+
+## Features
+
+- **Metric Name and Label Translation**: Convert OTLP metric names and attributes to Prometheus-compliant format
+- **Unit Handling**: Translate OTLP units to Prometheus unit conventions
+- **Type-Aware Suffixes**: Optionally append `_total`, `_ratio` based on metric type
+- **Namespace Support**: Add configurable namespace prefixes
+- **UTF-8 Support**: Choose between Prometheus legacy scheme compliant metric/label names (`[a-zA-Z0-9:_]`) or untranslated metric/label names
+- **Translation Strategy Configuration**: Select a translation strategy with a standard set of strings.
+
+## Installation
+
+```bash
+go get github.com/prometheus/otlptranslator
+```
+
+## Quick Start
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/prometheus/otlptranslator"
+)
+
+func main() {
+ // Create a metric namer using traditional Prometheus name translation, with suffixes added and UTF-8 disallowed.
+ strategy := otlptranslator.UnderscoreEscapingWithSuffixes
+ namer := otlptranslator.NewMetricNamer("myapp", strategy)
+
+ // Translate OTLP metric to Prometheus format
+ metric := otlptranslator.Metric{
+ Name: "http.server.request.duration",
+ Unit: "s",
+ Type: otlptranslator.MetricTypeHistogram,
+ }
+ fmt.Println(namer.Build(metric)) // Output: myapp_http_server_request_duration_seconds
+
+ // Translate label names
+ labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
+ fmt.Println(labelNamer.Build("http.method")) // Output: http_method
+}
+```
+
+## Usage Examples
+
+### Metric Name Translation
+
+```go
+namer := otlptranslator.MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
+
+// Counter gets _total suffix
+counter := otlptranslator.Metric{
+ Name: "requests.count", Unit: "1", Type: otlptranslator.MetricTypeMonotonicCounter,
+}
+fmt.Println(namer.Build(counter)) // requests_count_total
+
+// Gauge with unit conversion
+gauge := otlptranslator.Metric{
+ Name: "memory.usage", Unit: "By", Type: otlptranslator.MetricTypeGauge,
+}
+fmt.Println(namer.Build(gauge)) // memory_usage_bytes
+
+// Dimensionless gauge gets _ratio suffix
+ratio := otlptranslator.Metric{
+ Name: "cpu.utilization", Unit: "1", Type: otlptranslator.MetricTypeGauge,
+}
+fmt.Println(namer.Build(ratio)) // cpu_utilization_ratio
+```
+
+### Label Translation
+
+```go
+labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
+
+labelNamer.Build("http.method") // http_method
+labelNamer.Build("123invalid") // key_123invalid
+labelNamer.Build("_private") // key_private
+labelNamer.Build("__reserved__") // __reserved__ (preserved)
+labelNamer.Build("label@with$symbols") // label_with_symbols
+```
+
+### Unit Translation
+
+```go
+unitNamer := otlptranslator.UnitNamer{UTF8Allowed: false}
+
+unitNamer.Build("s") // seconds
+unitNamer.Build("By") // bytes
+unitNamer.Build("requests/s") // requests_per_second
+unitNamer.Build("1") // "" (dimensionless)
+```
+
+### Configuration Options
+
+```go
+// Prometheus-compliant mode - supports [a-zA-Z0-9:_]
+compliantNamer := otlptranslator.MetricNamer{UTF8Allowed: false, WithMetricSuffixes: true}
+
+// Transparent pass-through mode, aka "NoTranslation"
+utf8Namer := otlptranslator.MetricNamer{UTF8Allowed: true, WithMetricSuffixes: false}
+utf8Namer = otlptranslator.NewMetricNamer("", otlptranslator.NoTranslation)
+
+// With namespace and suffixes
+productionNamer := otlptranslator.MetricNamer{
+ Namespace: "myservice",
+ WithMetricSuffixes: true,
+ UTF8Allowed: false,
+}
+```
+
+## License
+
+Licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/prometheus/otlptranslator/doc.go b/vendor/github.com/prometheus/otlptranslator/doc.go
new file mode 100644
index 00000000000..a704d819045
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otlptranslator provides utilities for converting OpenTelemetry Protocol (OTLP)
+// metric and attribute names to Prometheus-compliant formats.
+//
+// This package is designed to help users translate OpenTelemetry metrics to Prometheus
+// metrics while following the official OpenTelemetry to Prometheus compatibility specification.
+//
+// Main components:
+// - MetricNamer: Translates OTLP metric names to Prometheus metric names
+// - LabelNamer: Translates OTLP attribute names to Prometheus label names
+// - UnitNamer: Translates OTLP units to Prometheus unit conventions
+package otlptranslator
diff --git a/vendor/github.com/prometheus/otlptranslator/label_namer.go b/vendor/github.com/prometheus/otlptranslator/label_namer.go
new file mode 100644
index 00000000000..368cedaf803
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/label_namer.go
@@ -0,0 +1,100 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The Prometheus Authors
+// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
+
+package otlptranslator
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// LabelNamer is a helper struct to build label names.
+// It translates OpenTelemetry Protocol (OTLP) attribute names to Prometheus-compliant label names.
+//
+// Example usage:
+//
+// namer := LabelNamer{UTF8Allowed: false}
+// result, _ := namer.Build("http.method") // "http_method"
+type LabelNamer struct {
+ UTF8Allowed bool
+ // UnderscoreLabelSanitization, if true, enables prepending 'key' to labels
+ // starting with '_'. Reserved labels starting with `__` are not modified.
+ //
+ // Deprecated: This will be removed in a future version of otlptranslator.
+ UnderscoreLabelSanitization bool
+ // PreserveMultipleUnderscores enables preserving multiple
+ // consecutive underscores in label names when UTF8Allowed is false.
+ // This option is discouraged as it violates the OpenTelemetry to Prometheus
+ // specification (https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus),
+ // but may be needed for compatibility with legacy systems that rely on the old behavior.
+ PreserveMultipleUnderscores bool
+}
+
+// Build normalizes the specified label to follow Prometheus label names standard.
+//
+// Translation rules:
+// - Replaces invalid characters with underscores
+// - Prefixes labels with invalid start characters (numbers or `_`) with "key"
+// - Preserves double underscore labels (reserved names)
+// - If UTF8Allowed is true, returns label as-is
+//
+// Examples:
+//
+// namer := LabelNamer{UTF8Allowed: false}
+// namer.Build("http.method") // "http_method"
+// namer.Build("123invalid") // "key_123invalid"
+// namer.Build("__reserved__") // "__reserved__" (preserved)
+func (ln *LabelNamer) Build(label string) (string, error) {
+ if len(label) == 0 {
+ return "", errors.New("label name is empty")
+ }
+
+ if ln.UTF8Allowed {
+ if hasUnderscoresOnly(label) {
+ return "", fmt.Errorf("label name %q contains only underscores", label)
+ }
+ return label, nil
+ }
+
+ normalizedName := sanitizeLabelName(label, ln.PreserveMultipleUnderscores)
+
+ // If label starts with a number, prepend with "key_".
+ if unicode.IsDigit(rune(normalizedName[0])) {
+ normalizedName = "key_" + normalizedName
+ } else if ln.UnderscoreLabelSanitization && strings.HasPrefix(normalizedName, "_") && !strings.HasPrefix(normalizedName, "__") {
+ normalizedName = "key" + normalizedName
+ }
+
+ if hasUnderscoresOnly(normalizedName) {
+ return "", fmt.Errorf("normalization for label name %q resulted in invalid name %q", label, normalizedName)
+ }
+
+ return normalizedName, nil
+}
+
+func hasUnderscoresOnly(label string) bool {
+ for _, c := range label {
+ if c != '_' {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/prometheus/otlptranslator/metric_namer.go b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
index 21c45fcdab8..d958a0f03ad 100644
--- a/vendor/github.com/prometheus/otlptranslator/metric_namer.go
+++ b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
@@ -20,11 +20,10 @@
package otlptranslator
import (
+ "fmt"
"slices"
"strings"
"unicode"
-
- "github.com/grafana/regexp"
)
// The map to translate OTLP units to Prometheus units
@@ -81,13 +80,48 @@ var perUnitMap = map[string]string{
}
// MetricNamer is a helper struct to build metric names.
+// It converts OpenTelemetry Protocol (OTLP) metric names to Prometheus-compliant metric names.
+//
+// Example usage:
+//
+// namer := MetricNamer{
+// WithMetricSuffixes: true,
+// UTF8Allowed: false,
+// }
+//
+// metric := Metric{
+// Name: "http.server.duration",
+// Unit: "s",
+// Type: MetricTypeHistogram,
+// }
+//
+// result, _ := namer.Build(metric) // "http_server_duration_seconds"
type MetricNamer struct {
Namespace string
WithMetricSuffixes bool
UTF8Allowed bool
}
+// NewMetricNamer creates a MetricNamer with the specified namespace (can be
+// blank) and the requested Translation Strategy.
+func NewMetricNamer(namespace string, strategy TranslationStrategyOption) MetricNamer {
+ return MetricNamer{
+ Namespace: namespace,
+ WithMetricSuffixes: strategy.ShouldAddSuffixes(),
+ UTF8Allowed: !strategy.ShouldEscape(),
+ }
+}
+
// Metric is a helper struct that holds information about a metric.
+// It represents an OpenTelemetry metric with its name, unit, and type.
+//
+// Example:
+//
+// metric := Metric{
+// Name: "http.server.request.duration",
+// Unit: "s",
+// Type: MetricTypeHistogram,
+// }
type Metric struct {
Name string
Unit string
@@ -96,31 +130,70 @@ type Metric struct {
// Build builds a metric name for the specified metric.
//
-// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required.
-// Otherwise the metric name is normalized to be Prometheus-compliant.
-// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
-// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
-func (mn *MetricNamer) Build(metric Metric) string {
+// The method applies different transformations based on the MetricNamer configuration:
+// - If UTF8Allowed is true, doesn't translate names - all characters must be valid UTF-8, however.
+// - If UTF8Allowed is false, translates metric names to comply with legacy Prometheus name scheme by escaping invalid characters to `_`.
+// - If WithMetricSuffixes is true, adds appropriate suffixes based on type and unit.
+//
+// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+//
+// Examples:
+//
+// namer := MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
+//
+// // Counter gets _total suffix
+// counter := Metric{Name: "requests.count", Unit: "1", Type: MetricTypeMonotonicCounter}
+// result, _ := namer.Build(counter) // "requests_count_total"
+//
+// // Gauge with unit suffix
+// gauge := Metric{Name: "memory.usage", Unit: "By", Type: MetricTypeGauge}
+// result, _ = namer.Build(gauge) // "memory_usage_bytes"
+func (mn *MetricNamer) Build(metric Metric) (string, error) {
if mn.UTF8Allowed {
return mn.buildMetricName(metric.Name, metric.Unit, metric.Type)
}
return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type)
}
-func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string {
+func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) (normalizedName string, err error) {
+ defer func() {
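+ // Validate the final result: normalization that yields an empty or
+ // all-underscore name means the input could not be turned into a
+ // usable metric name.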
+ if len(normalizedName) == 0 {
+ err = fmt.Errorf("normalization for metric %q resulted in empty name", name)
+ return
+ }
+
+ if normalizedName == name {
+ return
+ }
+
+ // Check that the resulting normalized name contains at least one non-underscore character
+ for _, c := range normalizedName {
+ if c != '_' {
+ return
+ }
+ }
+ err = fmt.Errorf("normalization for metric %q resulted in invalid name %q", name, normalizedName)
+ normalizedName = ""
+ }()
+
// Full normalization following standard Prometheus naming conventions
if mn.WithMetricSuffixes {
- return normalizeName(name, unit, metricType, mn.Namespace)
+ normalizedName = normalizeName(name, unit, metricType, mn.Namespace)
+ return
}
// Simple case (no full normalization, no units, etc.).
metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool {
- return invalidMetricCharRE.MatchString(string(r))
+ return !isValidCompliantMetricChar(r) && r != '_'
}), "_")
// Namespace?
if mn.Namespace != "" {
- return mn.Namespace + "_" + metricName
+ namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool {
+ return !isValidCompliantMetricChar(r) && r != '_'
+ }), "_")
+ normalizedName = namespace + "_" + metricName
+ return
}
// Metric name starts with a digit? Prefix it with an underscore.
@@ -128,15 +201,10 @@ func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType Me
metricName = "_" + metricName
}
- return metricName
+ normalizedName = metricName
+ return
}
-var (
- // Regexp for metric name characters that should be replaced with _.
- invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
- multipleUnderscoresRE = regexp.MustCompile(`__+`)
-)
-
// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :).
func isValidCompliantMetricChar(r rune) bool {
return (r >= 'a' && r <= 'z') ||
@@ -240,33 +308,54 @@ func removeItem(slice []string, value string) []string {
return newSlice
}
-func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string {
+func (mn *MetricNamer) buildMetricName(inputName, unit string, metricType MetricType) (name string, err error) {
+ name = inputName
if mn.Namespace != "" {
name = mn.Namespace + "_" + name
}
if mn.WithMetricSuffixes {
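+ // Existing suffixes are trimmed and re-appended via deferred functions
+ // so they end up in canonical order. Deferred appends run in LIFO
+ // order, giving name[_mainUnit][_perUnit][_total|_ratio], with the
+ // unit suffixes innermost.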
- mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
- if mainUnitSuffix != "" {
- name = name + "_" + mainUnitSuffix
- }
- if perUnitSuffix != "" {
- name = name + "_" + perUnitSuffix
- }
-
- // Append _total for Counters
- if metricType == MetricTypeMonotonicCounter {
- name += "_total"
- }
-
// Append _ratio for metrics with unit "1"
// Some OTel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
if unit == "1" && metricType == MetricTypeGauge {
- name += "_ratio"
+ name = trimSuffixAndDelimiter(name, "ratio")
+ defer func() {
+ name += "_ratio"
+ }()
+ }
+
+ // Append _total for Counters.
+ if metricType == MetricTypeMonotonicCounter {
+ name = trimSuffixAndDelimiter(name, "total")
+ defer func() {
+ name += "_total"
+ }()
+ }
+
+ mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
+ if perUnitSuffix != "" {
+ name = trimSuffixAndDelimiter(name, perUnitSuffix)
+ defer func() {
+ name = name + "_" + perUnitSuffix
+ }()
}
+ // We don't need to trim and re-append the suffix here because this is
+ // the innermost suffix.
+ if mainUnitSuffix != "" && !strings.HasSuffix(name, mainUnitSuffix) {
+ name = name + "_" + mainUnitSuffix
+ }
+ }
+ return
+}
+
+// trimSuffixAndDelimiter trims a suffix, plus one extra character which is
+// assumed to be a delimiter.
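+// For example, trimSuffixAndDelimiter("requests_total", "total") returns
+// "requests", while trimSuffixAndDelimiter("total", "total") returns "total"
+// unchanged, since nothing would remain before the delimiter.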
+func trimSuffixAndDelimiter(name, suffix string) string {
+ if strings.HasSuffix(name, suffix) && len(name) > len(suffix)+1 {
+ return name[:len(name)-(len(suffix)+1)]
}
return name
}
diff --git a/vendor/github.com/prometheus/otlptranslator/normalize_label.go b/vendor/github.com/prometheus/otlptranslator/normalize_label.go
deleted file mode 100644
index aa771f7840b..00000000000
--- a/vendor/github.com/prometheus/otlptranslator/normalize_label.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2025 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Copyright The Prometheus Authors
-// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
-
-package otlptranslator
-
-import (
- "strings"
- "unicode"
-)
-
-// LabelNamer is a helper struct to build label names.
-type LabelNamer struct {
- UTF8Allowed bool
-}
-
-// Build normalizes the specified label to follow Prometheus label names standard.
-//
-// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
-//
-// Labels that start with non-letter rune will be prefixed with "key_".
-// An exception is made for double-underscores which are allowed.
-//
-// If UTF8Allowed is true, the label is returned as is. This option is provided just to
-// keep a consistent interface with the MetricNamer.
-func (ln *LabelNamer) Build(label string) string {
- // Trivial case.
- if len(label) == 0 || ln.UTF8Allowed {
- return label
- }
-
- label = sanitizeLabelName(label)
-
- // If label starts with a number, prepend with "key_".
- if unicode.IsDigit(rune(label[0])) {
- label = "key_" + label
- } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
- label = "key" + label
- }
-
- return label
-}
diff --git a/vendor/github.com/prometheus/otlptranslator/strategy.go b/vendor/github.com/prometheus/otlptranslator/strategy.go
new file mode 100644
index 00000000000..20fe0197504
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/strategy.go
@@ -0,0 +1,86 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/3602785a89162ccc99a940fb9d862219a2d02241/config/config.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The Prometheus Authors
+
+package otlptranslator
+
+// TranslationStrategyOption is a constant that defines how metric and label
+// names should be handled during translation. The recommended approach is to
+// use either UnderscoreEscapingWithSuffixes for full Prometheus-style
+// compatibility, or NoTranslation for OTel-style names.
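+//
+// For example, UnderscoreEscapingWithSuffixes.ShouldEscape() and
+// UnderscoreEscapingWithSuffixes.ShouldAddSuffixes() both return true, while
+// NoTranslation returns false for both.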
+type TranslationStrategyOption string
+
+var (
+ // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
+ // and type suffixes may be added to metric names, according to certain rules.
+ NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes"
+ // UnderscoreEscapingWithSuffixes is the default option for translating OTLP
+ // to Prometheus. This option will translate metric name characters that are
+ // not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores. Unit and
+ // type suffixes may be appended to metric names, according to certain rules.
+ UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes"
+ // UnderscoreEscapingWithoutSuffixes translates metric name characters that
+ // are not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores, but
+ // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+ // the names.
+ UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
+ // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
+ // and label names. This offers a way for the OTLP users to use native metric
+ // names, reducing confusion.
+ //
+// WARNING: This setting has significant known risks and limitations (see
+// https://prometheus.io/docs/practices/naming/ for details):
+//   - Impaired UX when using PromQL in plain YAML (e.g. alerts, rules,
+//     dashboards, autoscaling configuration).
+//   - Series collisions, which in the best case may result in OOO errors and
+//     in the worst case in silently malformed time series. For instance, you
+//     may end up ingesting a `foo.bar` series with unit `seconds` and a
+//     separate `foo.bar` series with unit `milliseconds`.
+//
+// As a result, this setting is experimental and should not currently be used
+// in production systems.
+ //
+ // TODO(ArthurSens): Mention `type-and-unit-labels` feature
+ // (https://github.com/prometheus/proposals/pull/39) once released, as
+ // potential mitigation of the above risks.
+ NoTranslation TranslationStrategyOption = "NoTranslation"
+)
+
+// ShouldEscape returns true if the translation strategy requires that metric
+// names be escaped.
+func (o TranslationStrategyOption) ShouldEscape() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+ return true
+ case NoTranslation, NoUTF8EscapingWithSuffixes:
+ return false
+ default:
+ return false
+ }
+}
+
+// ShouldAddSuffixes returns a bool deciding whether the given translation
+// strategy should have suffixes added.
+func (o TranslationStrategyOption) ShouldAddSuffixes() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
+ return true
+ case UnderscoreEscapingWithoutSuffixes, NoTranslation:
+ return false
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/prometheus/otlptranslator/strconv.go b/vendor/github.com/prometheus/otlptranslator/strconv.go
index 81d534e8d9e..90404324ea0 100644
--- a/vendor/github.com/prometheus/otlptranslator/strconv.go
+++ b/vendor/github.com/prometheus/otlptranslator/strconv.go
@@ -25,18 +25,92 @@ import (
// sanitizeLabelName replaces any characters not valid according to the
// classical Prometheus label naming scheme with an underscore.
-// Note: this does not handle all Prometheus label name restrictions (such as
-// not starting with a digit 0-9), and hence should only be used if the label
-// name is prefixed with a known valid string.
-func sanitizeLabelName(name string) string {
+// When preserveMultipleUnderscores is true, multiple consecutive underscores are preserved.
+// When false, multiple consecutive underscores are collapsed to a single underscore.
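+//
+// For example, sanitizeLabelName("foo.bar__baz", true) returns "foo_bar__baz",
+// sanitizeLabelName("foo.bar__baz", false) returns "foo_bar_baz", and reserved
+// names such as "__name__" keep their surrounding double underscores in both
+// modes.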
+func sanitizeLabelName(name string, preserveMultipleUnderscores bool) string {
+ nameLength := len(name)
+
+ if preserveMultipleUnderscores {
+ // Simple case: just replace invalid characters, preserve multiple underscores
+ var b strings.Builder
+ b.Grow(nameLength)
+ for _, r := range name {
+ if isValidCompliantLabelChar(r) {
+ b.WriteRune(r)
+ } else {
+ b.WriteRune('_')
+ }
+ }
+ return b.String()
+ }
+
+ isReserved, labelName := isReservedLabel(name)
+ if isReserved {
+ name = labelName
+ }
+
+ // Collapse multiple underscores while replacing invalid characters.
var b strings.Builder
- b.Grow(len(name))
+ b.Grow(nameLength)
+ prevWasUnderscore := false
+
for _, r := range name {
- if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') {
+ if isValidCompliantLabelChar(r) {
b.WriteRune(r)
- } else {
+ prevWasUnderscore = false
+ } else if !prevWasUnderscore {
+ // Invalid character: replace with underscore.
b.WriteRune('_')
+ prevWasUnderscore = true
}
}
+ if isReserved {
+ return "__" + b.String() + "__"
+ }
+ return b.String()
+}
+
+// isValidCompliantLabelChar checks if a rune is a valid label name character (a-z, A-Z, 0-9).
+func isValidCompliantLabelChar(r rune) bool {
+ return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9')
+}
+
+// isReservedLabel checks if a label is a reserved label.
+// Reserved labels are labels that start and end with exactly __.
+// The returned label name is the label name without the __ prefix and suffix.
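+// For example, isReservedLabel("__name__") returns (true, "name").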
+func isReservedLabel(name string) (bool, string) {
+ if len(name) < 4 {
+ return false, ""
+ }
+ if !strings.HasPrefix(name, "__") || !strings.HasSuffix(name, "__") {
+ return false, ""
+ }
+ return true, name[2 : len(name)-2]
+}
+
+// collapseMultipleUnderscores replaces multiple consecutive underscores with a single underscore.
+// This is equivalent to regexp.MustCompile(`__+`).ReplaceAllString(s, "_") but without using regex.
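+// For example, collapseMultipleUnderscores("a__b___c") returns "a_b_c".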
+func collapseMultipleUnderscores(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+
+ var b strings.Builder
+ b.Grow(len(s))
+ prevWasUnderscore := false
+
+ for _, r := range s {
+ if r == '_' {
+ if !prevWasUnderscore {
+ b.WriteRune('_')
+ prevWasUnderscore = true
+ }
+ // Skip consecutive underscores
+ } else {
+ b.WriteRune(r)
+ prevWasUnderscore = false
+ }
+ }
+
return b.String()
}
diff --git a/vendor/github.com/prometheus/otlptranslator/unit_namer.go b/vendor/github.com/prometheus/otlptranslator/unit_namer.go
index 4bbf93ef97c..bb6d4f8cd14 100644
--- a/vendor/github.com/prometheus/otlptranslator/unit_namer.go
+++ b/vendor/github.com/prometheus/otlptranslator/unit_namer.go
@@ -15,14 +15,34 @@ package otlptranslator
import "strings"
// UnitNamer is a helper for building compliant unit names.
+// It processes OpenTelemetry Protocol (OTLP) unit strings and converts them
+// to Prometheus-compliant unit names.
+//
+// Example usage:
+//
+// namer := UnitNamer{UTF8Allowed: false}
+// result := namer.Build("s") // "seconds"
+// result = namer.Build("By/s") // "bytes_per_second"
type UnitNamer struct {
UTF8Allowed bool
}
// Build builds a unit name for the specified unit string.
// It processes the unit by splitting it into main and per components,
-// applying appropriate unit mappings, and cleaning up invalid characters
-// when the whole UTF-8 character set is not allowed.
+// applying unit mappings, and cleaning up invalid characters when UTF8Allowed is false.
+//
+// Unit mappings include:
+// - Time: s→seconds, ms→milliseconds, h→hours
+// - Bytes: By→bytes, KBy→kilobytes, MBy→megabytes
+// - SI: m→meters, V→volts, W→watts
+// - Special: 1→"" (empty), %→percent
+//
+// Examples:
+//
+// namer := UnitNamer{UTF8Allowed: false}
+// namer.Build("s") // "seconds"
+// namer.Build("requests/s") // "requests_per_second"
+// namer.Build("1") // "" (dimensionless)
func (un *UnitNamer) Build(unit string) string {
mainUnit, perUnit := buildUnitSuffixes(unit)
if !un.UTF8Allowed {
@@ -103,8 +123,7 @@ func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) {
func cleanUpUnit(unit string) string {
// Multiple consecutive underscores are replaced with a single underscore.
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
- return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString(
+ return strings.TrimPrefix(collapseMultipleUnderscores(
strings.Map(replaceInvalidMetricChar, unit),
- "_",
), "_")
}
diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE
index 8605c258e32..b2fcf4deb4c 100644
--- a/vendor/github.com/prometheus/prometheus/NOTICE
+++ b/vendor/github.com/prometheus/prometheus/NOTICE
@@ -101,6 +101,11 @@ https://github.com/microsoft/vscode-codicons
Copyright (c) Microsoft Corporation and other contributors
See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details.
+Mantine UI
+https://github.com/mantinedev/mantine
+Copyright (c) 2021 Vitaly Rtishchev
+See https://github.com/mantinedev/mantine/blob/master/LICENSE for license details.
+
We also use code from a large number of npm packages. For details, see:
- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
index 7099ba325ab..30c8a8ed21c 100644
--- a/vendor/github.com/prometheus/prometheus/config/config.go
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -29,10 +29,12 @@ import (
"github.com/alecthomas/units"
"github.com/grafana/regexp"
+ remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
+ "github.com/prometheus/otlptranslator"
"github.com/prometheus/sigv4"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/model/labels"
@@ -104,9 +106,9 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
}
switch cfg.OTLPConfig.TranslationStrategy {
- case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+ case otlptranslator.UnderscoreEscapingWithSuffixes, otlptranslator.UnderscoreEscapingWithoutSuffixes:
case "":
- case NoTranslation, NoUTF8EscapingWithSuffixes:
+ case otlptranslator.NoTranslation, otlptranslator.NoUTF8EscapingWithSuffixes:
if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation {
return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
}
@@ -156,15 +158,22 @@ var (
OTLPConfig: DefaultOTLPConfig,
}
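+ // f is a package-level false value; its address is used as the default
+ // for *bool fields such as ScrapeNativeHistograms below.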
+ f bool
// DefaultGlobalConfig is the default global configuration.
DefaultGlobalConfig = GlobalConfig{
ScrapeInterval: model.Duration(1 * time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(1 * time.Minute),
RuleQueryOffset: model.Duration(0 * time.Minute),
- // When native histogram feature flag is enabled, ScrapeProtocols default
- // changes to DefaultNativeHistogramScrapeProtocols.
- ScrapeProtocols: DefaultScrapeProtocols,
+ // This is nil to be able to distinguish between the case when
+ // the normal default should be used and the case when a
+ // new default is needed due to an enabled feature flag.
+ // E.g. set to `DefaultProtoFirstScrapeProtocols` when
+ // the feature flag `created-timestamp-zero-ingestion` is set.
+ ScrapeProtocols: nil,
+ // When the native histogram feature flag is enabled,
+ // ScrapeNativeHistograms default changes to true.
+ ScrapeNativeHistograms: &f,
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
MetricNameValidationScheme: model.UTF8Validation,
@@ -206,7 +215,7 @@ var (
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
- ProtobufMessage: RemoteWriteProtoMsgV1,
+ ProtobufMessage: remoteapi.WriteV1MessageType,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
@@ -257,7 +266,11 @@ var (
// DefaultOTLPConfig is the default OTLP configuration.
DefaultOTLPConfig = OTLPConfig{
- TranslationStrategy: UnderscoreEscapingWithSuffixes,
+ TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
+ // For backwards compatibility.
+ LabelNameUnderscoreSanitization: true,
+ // For backwards compatibility.
+ LabelNamePreserveMultipleUnderscores: true,
}
)
@@ -366,7 +379,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
-func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultConfig
// We want to set c to the defaults and then overwrite it with the input.
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
@@ -412,6 +425,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
jobNames[scfg.JobName] = struct{}{}
}
+ if err := c.AlertingConfig.Validate(c.GlobalConfig.MetricNameValidationScheme); err != nil {
+ return err
+ }
+
rwNames := map[string]struct{}{}
for _, rwcfg := range c.RemoteWriteConfigs {
if rwcfg == nil {
@@ -421,6 +438,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
if _, ok := rwNames[rwcfg.Name]; ok && rwcfg.Name != "" {
return fmt.Errorf("found multiple remote write configs with job name %q", rwcfg.Name)
}
+ if err := rwcfg.Validate(c.GlobalConfig.MetricNameValidationScheme); err != nil {
+ return err
+ }
rwNames[rwcfg.Name] = struct{}{}
}
rrNames := map[string]struct{}{}
@@ -447,7 +467,7 @@ type GlobalConfig struct {
// The protocols to negotiate during a scrape. It tells clients what
// protocol are accepted by Prometheus and with what weight (most wanted is first).
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
- // OpenMetricsText1.0.0, PrometheusText0.0.4.
+ // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// How frequently to evaluate rules by default.
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
@@ -487,6 +507,8 @@ type GlobalConfig struct {
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
+ // Whether to scrape native histograms.
+ ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to convert all scraped classic histograms into native histograms with custom buckets.
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
@@ -550,8 +572,8 @@ var (
// DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it
// favors protobuf Prometheus exposition format.
- // Used by default for certain feature-flags like
- // "native-histograms" and "created-timestamp-zero-ingestion".
+ // Used by default by the "scrape_native_histograms" option and for certain
+ // feature-flags like "created-timestamp-zero-ingestion".
DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{
PrometheusProto,
OpenMetricsText1_0_0,
@@ -586,7 +608,7 @@ func (c *GlobalConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
// Create a clean global config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
gc := &GlobalConfig{}
@@ -595,8 +617,14 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
+ switch gc.MetricNameValidationScheme {
+ case model.UTF8Validation, model.LegacyValidation:
+ default:
+ gc.MetricNameValidationScheme = DefaultGlobalConfig.MetricNameValidationScheme
+ }
+
if err := gc.ExternalLabels.Validate(func(l labels.Label) error {
- if !model.LabelName(l.Name).IsValid() {
+ if !gc.MetricNameValidationScheme.IsValidLabelName(l.Name) {
return fmt.Errorf("%q is not a valid label name", l.Name)
}
if !model.LabelValue(l.Value).IsValid() {
@@ -616,21 +644,31 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return errors.New("global scrape timeout greater than scrape interval")
}
if gc.ScrapeTimeout == 0 {
- if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval {
- gc.ScrapeTimeout = gc.ScrapeInterval
- } else {
- gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout
- }
+ gc.ScrapeTimeout = min(DefaultGlobalConfig.ScrapeTimeout, gc.ScrapeInterval)
}
if gc.EvaluationInterval == 0 {
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
}
-
- if gc.ScrapeProtocols == nil {
- gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
+ if gc.ScrapeNativeHistograms == nil {
+ gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
}
- if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
- return fmt.Errorf("%w for global config", err)
+ if gc.ScrapeProtocols == nil {
+ if DefaultGlobalConfig.ScrapeProtocols != nil {
+ // This is the case where the defaults are set due to a feature flag.
+ // E.g. if the created-timestamp-zero-ingestion feature flag is
+ // used.
+ gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
+ }
+ // Otherwise, we leave ScrapeProtocols at nil for now. In the
+ // per-job scrape config, we have to recognize the unset case to
+ // correctly set the default depending on the local value of
+ // ScrapeNativeHistograms.
+ }
+ if gc.ScrapeProtocols != nil {
+ // Only validate if not-nil at this point.
+ if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
+ return fmt.Errorf("%w for global config", err)
+ }
}
*c = *gc
@@ -647,6 +685,7 @@ func (c *GlobalConfig) isZero() bool {
c.QueryLogFile == "" &&
c.ScrapeFailureLogFile == "" &&
c.ScrapeProtocols == nil &&
+ c.ScrapeNativeHistograms == nil &&
!c.ConvertClassicHistogramsToNHCB &&
!c.AlwaysScrapeClassicHistograms
}
@@ -709,6 +748,8 @@ type ScrapeConfig struct {
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
+ // Whether to scrape native histograms.
+ ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
@@ -776,7 +817,7 @@ func (c *ScrapeConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultScrapeConfig
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
@@ -827,11 +868,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
}
if c.ScrapeTimeout == 0 {
- if globalConfig.ScrapeTimeout > c.ScrapeInterval {
- c.ScrapeTimeout = c.ScrapeInterval
- } else {
- c.ScrapeTimeout = globalConfig.ScrapeTimeout
- }
+ c.ScrapeTimeout = min(globalConfig.ScrapeTimeout, c.ScrapeInterval)
}
if c.BodySizeLimit == 0 {
c.BodySizeLimit = globalConfig.BodySizeLimit
@@ -857,9 +894,23 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
if c.ScrapeFailureLogFile == "" {
c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile
}
+ if c.ScrapeNativeHistograms == nil {
+ c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms
+ }
if c.ScrapeProtocols == nil {
- c.ScrapeProtocols = globalConfig.ScrapeProtocols
+ switch {
+ case globalConfig.ScrapeProtocols != nil:
+ // global ScrapeProtocols either set explicitly or via a
+ // default triggered by a feature flag. This overrides
+ // the selection based on locally active scraping of
+ // native histograms.
+ c.ScrapeProtocols = globalConfig.ScrapeProtocols
+ case c.ScrapeNativeHistogramsEnabled():
+ c.ScrapeProtocols = DefaultProtoFirstScrapeProtocols
+ default:
+ c.ScrapeProtocols = DefaultScrapeProtocols
+ }
}
if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil {
return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
@@ -877,15 +928,15 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
}
switch globalConfig.MetricNameValidationScheme {
- case model.UnsetValidation:
- globalConfig.MetricNameValidationScheme = model.UTF8Validation
case model.LegacyValidation, model.UTF8Validation:
default:
- return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+ return errors.New("global name validation method must be set")
}
// Scrapeconfig validation scheme matches global if left blank.
+ localValidationUnset := false
switch c.MetricNameValidationScheme {
case model.UnsetValidation:
+ localValidationUnset = true
c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
case model.LegacyValidation, model.UTF8Validation:
default:
@@ -905,8 +956,20 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameEscapingScheme)
}
+ // Similarly, if ScrapeConfig escaping scheme is blank, infer it from the
+ // ScrapeConfig validation scheme if that was set, or the Global validation
+ // scheme if the ScrapeConfig validation scheme was also not set. This ensures
+ // that local ScrapeConfigs that only specify Legacy validation do not inherit
+ // the global AllowUTF8 escaping setting, which is an error.
if c.MetricNameEscapingScheme == "" {
- c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme
+ //nolint:gocritic
+ if localValidationUnset {
+ c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme
+ } else if c.MetricNameValidationScheme == model.LegacyValidation {
+ c.MetricNameEscapingScheme = model.EscapeUnderscores
+ } else {
+ c.MetricNameEscapingScheme = model.AllowUTF8
+ }
}
switch c.MetricNameEscapingScheme {
@@ -929,11 +992,22 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
c.AlwaysScrapeClassicHistograms = &global
}
+ for _, rc := range c.RelabelConfigs {
+ if err := rc.Validate(c.MetricNameValidationScheme); err != nil {
+ return err
+ }
+ }
+ for _, rc := range c.MetricRelabelConfigs {
+ if err := rc.Validate(c.MetricNameValidationScheme); err != nil {
+ return err
+ }
+ }
+
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
-func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
+func (c *ScrapeConfig) MarshalYAML() (any, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
@@ -956,6 +1030,11 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
return model.ToEscapingScheme(s)
}
+// ScrapeNativeHistogramsEnabled returns whether to scrape native histograms.
+func (c *ScrapeConfig) ScrapeNativeHistogramsEnabled() bool {
+ return c.ScrapeNativeHistograms != nil && *c.ScrapeNativeHistograms
+}
+
// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB.
func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool {
return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB
@@ -972,6 +1051,15 @@ type StorageConfig struct {
ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"`
}
+// TSDBRetentionConfig holds the configuration for retention of data in storage.
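+//
+// An illustrative YAML snippet (values are examples only):
+//
+//	retention:
+//	  time: 30d
+//	  size: 512MiB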
+type TSDBRetentionConfig struct {
+ // How long to retain samples in storage.
+ Time model.Duration `yaml:"time,omitempty"`
+
+ // Maximum number of bytes that can be stored for blocks.
+ Size units.Base2Bytes `yaml:"size,omitempty"`
+}
+
// TSDBConfig configures runtime reloadable configuration options.
type TSDBConfig struct {
// OutOfOrderTimeWindow sets how long back in time an out-of-order sample can be inserted
@@ -984,10 +1072,12 @@ type TSDBConfig struct {
// During unmarshall, this is converted into milliseconds and stored in OutOfOrderTimeWindow.
// This should not be used directly and must be converted into OutOfOrderTimeWindow.
OutOfOrderTimeWindowFlag model.Duration `yaml:"out_of_order_time_window,omitempty"`
+
+ Retention *TSDBRetentionConfig `yaml:"retention,omitempty"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (t *TSDBConfig) UnmarshalYAML(unmarshal func(any) error) error {
*t = TSDBConfig{}
type plain TSDBConfig
if err := unmarshal((*plain)(t)); err != nil {
@@ -1009,7 +1099,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (t *TracingClientType) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (t *TracingClientType) UnmarshalYAML(unmarshal func(any) error) error {
*t = TracingClientType("")
type plain TracingClientType
if err := unmarshal((*plain)(t)); err != nil {
@@ -1043,7 +1133,7 @@ func (t *TracingConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (t *TracingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (t *TracingConfig) UnmarshalYAML(unmarshal func(any) error) error {
*t = TracingConfig{
ClientType: TracingClientGRPC,
}
@@ -1081,6 +1171,20 @@ type AlertingConfig struct {
AlertmanagerConfigs AlertmanagerConfigs `yaml:"alertmanagers,omitempty"`
}
+func (c *AlertingConfig) Validate(nameValidationScheme model.ValidationScheme) error {
+ for _, rc := range c.AlertRelabelConfigs {
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ return err
+ }
+ }
+ for _, rc := range c.AlertmanagerConfigs {
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// SetDirectory joins any relative file paths with dir.
func (c *AlertingConfig) SetDirectory(dir string) {
for _, c := range c.AlertmanagerConfigs {
@@ -1089,7 +1193,7 @@ func (c *AlertingConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *AlertingConfig) UnmarshalYAML(unmarshal func(any) error) error {
// Create a clean global config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
*c = AlertingConfig{}
@@ -1124,7 +1228,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
type AlertmanagerAPIVersion string
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(any) error) error {
*v = AlertmanagerAPIVersion("")
type plain AlertmanagerAPIVersion
if err := unmarshal((*plain)(v)); err != nil {
@@ -1183,7 +1287,7 @@ func (c *AlertmanagerConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultAlertmanagerConfig
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
@@ -1225,8 +1329,22 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
return nil
}
+func (c *AlertmanagerConfig) Validate(nameValidationScheme model.ValidationScheme) error {
+ for _, rc := range c.AlertRelabelConfigs {
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ return err
+ }
+ }
+ for _, rc := range c.RelabelConfigs {
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// MarshalYAML implements the yaml.Marshaler interface.
-func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) {
+func (c *AlertmanagerConfig) MarshalYAML() (any, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
@@ -1256,50 +1374,6 @@ func CheckTargetAddress(address model.LabelValue) error {
return nil
}
-// RemoteWriteProtoMsg represents the known protobuf message for the remote write
-// 1.0 and 2.0 specs.
-type RemoteWriteProtoMsg string
-
-// Validate returns error if the given reference for the protobuf message is not supported.
-func (s RemoteWriteProtoMsg) Validate() error {
- switch s {
- case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
- return nil
- default:
- return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
- }
-}
-
-type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
-
-func (m RemoteWriteProtoMsgs) Strings() []string {
- ret := make([]string, 0, len(m))
- for _, typ := range m {
- ret = append(ret, string(typ))
- }
- return ret
-}
-
-func (m RemoteWriteProtoMsgs) String() string {
- return strings.Join(m.Strings(), ", ")
-}
-
-var (
- // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
- // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
- // which will eventually be deprecated.
- //
- // NOTE: This string is used for both HTTP header values and config value, so don't change
- // this reference.
- RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
- // RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
- // message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
- //
- // NOTE: This string is used for both HTTP header values and config value, so don't change
- // this reference.
- RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
-)
-
// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
URL *config.URL `yaml:"url"`
@@ -1312,7 +1386,7 @@ type RemoteWriteConfig struct {
RoundRobinDNS bool `yaml:"round_robin_dns,omitempty"`
// ProtobufMessage specifies the protobuf message to use against the remote
// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
- ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
+ ProtobufMessage remoteapi.WriteMessageType `yaml:"protobuf_message,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -1330,7 +1404,7 @@ func (c *RemoteWriteConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRemoteWriteConfig
type plain RemoteWriteConfig
if err := unmarshal((*plain)(c)); err != nil {
@@ -1362,6 +1436,16 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return validateAuthConfigs(c)
}
+func (c *RemoteWriteConfig) Validate(nameValidationScheme model.ValidationScheme) error {
+ for _, rc := range c.WriteRelabelConfigs {
+ if err := rc.Validate(nameValidationScheme); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured.
func validateAuthConfigs(c *RemoteWriteConfig) error {
var authConfigured []string
@@ -1485,7 +1569,7 @@ func (c *RemoteReadConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRemoteReadConfig
type plain RemoteReadConfig
if err := unmarshal((*plain)(c)); err != nil {
@@ -1531,86 +1615,29 @@ func getGoGC() int {
return DefaultGoGCPercentage
}
-type translationStrategyOption string
-
-var (
- // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
- // and type suffixes may be added to metric names, according to certain rules.
- NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
- // UnderscoreEscapingWithSuffixes is the default option for translating OTLP
- // to Prometheus. This option will translate metric name characters that are
- // not alphanumerics/underscores/colons to underscores, and label name
- // characters that are not alphanumerics/underscores to underscores. Unit and
- // type suffixes may be appended to metric names, according to certain rules.
- UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
- // UnderscoreEscapingWithoutSuffixes translates metric name characters that
- // are not alphanumerics/underscores/colons to underscores, and label name
- // characters that are not alphanumerics/underscores to underscores, but
- // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
- // the names.
- UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
- // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
- // and label names. This offers a way for the OTLP users to use native metric
- // names, reducing confusion.
- //
- // WARNING: This setting has significant known risks and limitations (see
- // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX
- // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling
- // configuration). * Series collisions which in the best case may result in
- // OOO errors, in the worst case a silently malformed time series. For
- // instance, you may end up in situation of ingesting `foo.bar` series with
- // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`.
- //
- // As a result, this setting is experimental and currently, should not be used
- // in production systems.
- //
- // TODO(ArthurSens): Mention `type-and-unit-labels` feature
- // (https://github.com/prometheus/proposals/pull/39) once released, as
- // potential mitigation of the above risks.
- NoTranslation translationStrategyOption = "NoTranslation"
-)
-
-// ShouldEscape returns true if the translation strategy requires that metric
-// names be escaped.
-func (o translationStrategyOption) ShouldEscape() bool {
- switch o {
- case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
- return true
- case NoTranslation, NoUTF8EscapingWithSuffixes:
- return false
- default:
- return false
- }
-}
-
-// ShouldAddSuffixes returns a bool deciding whether the given translation
-// strategy should have suffixes added.
-func (o translationStrategyOption) ShouldAddSuffixes() bool {
- switch o {
- case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
- return true
- case UnderscoreEscapingWithoutSuffixes, NoTranslation:
- return false
- default:
- return false
- }
-}
-
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
- PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"`
- PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
- IgnoreResourceAttributes []string `yaml:"ignore_resource_attributes,omitempty"`
- TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
- KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"`
- ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"`
+ PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"`
+ PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+ IgnoreResourceAttributes []string `yaml:"ignore_resource_attributes,omitempty"`
+ TranslationStrategy otlptranslator.TranslationStrategyOption `yaml:"translation_strategy,omitempty"`
+ KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"`
+ ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"`
// PromoteScopeMetadata controls whether to promote OTel scope metadata (i.e. name, version, schema URL, and attributes) to metric labels.
// As per OTel spec, the aforementioned scope metadata should be identifying, i.e. made into metric labels.
PromoteScopeMetadata bool `yaml:"promote_scope_metadata,omitempty"`
+ // LabelNameUnderscoreSanitization controls whether to prepend 'key_' to labels
+ // starting with '_'. Reserved labels starting with `__` are not modified.
+ // This is only relevant when AllowUTF8 is false (i.e., when using underscore escaping).
+ LabelNameUnderscoreSanitization bool `yaml:"label_name_underscore_sanitization,omitempty"`
+ // LabelNamePreserveMultipleUnderscores controls whether multiple consecutive
+ // underscores in label names are preserved when AllowUTF8 is false. When this
+ // option is false, multiple consecutive underscores are collapsed to a single
+ // underscore during label name sanitization.
+ LabelNamePreserveMultipleUnderscores bool `yaml:"label_name_preserve_multiple_underscores,omitempty"`
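+ // For example, with this option set to true a label like `foo__bar` is kept
+ // as-is, while with it set to false it is collapsed to `foo_bar`.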
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultOTLPConfig
type plain OTLPConfig
if err := unmarshal((*plain)(c)); err != nil {
diff --git a/vendor/github.com/prometheus/prometheus/config/reload.go b/vendor/github.com/prometheus/prometheus/config/reload.go
index cc0cc971586..07a077a6a9d 100644
--- a/vendor/github.com/prometheus/prometheus/config/reload.go
+++ b/vendor/github.com/prometheus/prometheus/config/reload.go
@@ -21,7 +21,7 @@ import (
"path/filepath"
promconfig "github.com/prometheus/common/config"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
type ExternalFilesConfig struct {
diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
index c400de3632f..2157b820b97 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
@@ -108,7 +108,7 @@ func (c *Configs) SetDirectory(dir string) {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *Configs) UnmarshalYAML(unmarshal func(any) error) error {
cfgTyp := reflect.StructOf(configFields)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
@@ -123,7 +123,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (c Configs) MarshalYAML() (interface{}, error) {
+func (c Configs) MarshalYAML() (any, error) {
cfgTyp := reflect.StructOf(configFields)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
@@ -148,7 +148,7 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are
// needed for this service discovery mechanism.
-func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+func (StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
return &NoopDiscovererMetrics{}
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
index 405dba44f7e..24af8f65d96 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go
@@ -82,7 +82,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go
index beea03222bb..e0225891ce3 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log/slog"
+ "maps"
"os"
"path/filepath"
"strings"
@@ -32,7 +33,7 @@ import (
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -78,7 +79,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
@@ -120,9 +121,7 @@ func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) {
t.lock.RLock()
for fileSD := range t.discoverers {
fileSD.lock.RLock()
- for filename, timestamp := range fileSD.timestamps {
- uniqueFiles[filename] = timestamp
- }
+ maps.Copy(uniqueFiles, fileSD.timestamps)
fileSD.lock.RUnlock()
}
t.lock.RUnlock()
diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go
index 51a46ca2317..6688152da98 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/manager.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go
@@ -17,6 +17,7 @@ import (
"context"
"fmt"
"log/slog"
+ "maps"
"reflect"
"sync"
"time"
@@ -37,7 +38,7 @@ type poolKey struct {
type Provider struct {
name string
d Discoverer
- config interface{}
+ config any
cancel context.CancelFunc
// done should be called after cleaning up resources associated with cancelled provider.
@@ -62,7 +63,7 @@ func (p *Provider) IsStarted() bool {
return p.cancel != nil
}
-func (p *Provider) Config() interface{} {
+func (p *Provider) Config() any {
return p.config
}
@@ -255,9 +256,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
}
if l := len(refTargets); l > 0 {
m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
- for k, v := range refTargets {
- m.targets[poolKey{s, prov.name}][k] = v
- }
+ maps.Copy(m.targets[poolKey{s, prov.name}], refTargets)
}
}
m.targetsMtx.Unlock()
diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go
index c13ce533178..19dfd4e2479 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/metrics_k8s_client.go
@@ -176,27 +176,27 @@ func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() {
workqueue.SetProvider(f)
}
-func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
+func (*clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
}
-func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
+func (*clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
}
-func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
+func (*clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
}
-func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
+func (*clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
}
-func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
+func (*clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
}
-func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
+func (*clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go
index 92fa3d3d169..33938cef3e7 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/registry.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go
@@ -23,7 +23,7 @@ import (
"sync"
"github.com/prometheus/client_golang/prometheus"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -42,8 +42,8 @@ var (
configTypesMu sync.Mutex
configTypes = make(map[reflect.Type]reflect.Type)
- emptyStructType = reflect.TypeOf(struct{}{})
- configsType = reflect.TypeOf(Configs{})
+ emptyStructType = reflect.TypeFor[struct{}]()
+ configsType = reflect.TypeFor[Configs]()
)
// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
@@ -54,7 +54,7 @@ func RegisterConfig(config Config) {
func init() {
// N.B.: static_configs is the only Config type implemented by default.
// All other types are registered at init by their implementing packages.
- elemTyp := reflect.TypeOf(&targetgroup.Group{})
+ elemTyp := reflect.TypeFor[*targetgroup.Group]()
registerConfig(staticConfigsKey, elemTyp, StaticConfig{})
}
@@ -110,7 +110,7 @@ func getConfigType(out reflect.Type) reflect.Type {
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
-func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
+func UnmarshalYAMLWithInlineConfigs(out any, unmarshal func(any) error) error {
outVal := reflect.ValueOf(out)
if outVal.Kind() != reflect.Ptr {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
@@ -198,7 +198,7 @@ func readConfigs(structVal reflect.Value, startField int) (Configs, error) {
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
-func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
+func MarshalYAMLWithInlineConfigs(in any) (any, error) {
inVal := reflect.ValueOf(in)
for inVal.Kind() == reflect.Ptr {
inVal = inVal.Elem()
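For context: reflect.TypeFor is the Go 1.22 generic replacement for the reflect.TypeOf(value) idiom removed above; it resolves the type directly from a type parameter instead of allocating a throwaway value. A minimal sketch of the equivalence, using a local Configs type as a stand-in:

    package main

    import (
        "fmt"
        "reflect"
    )

    type Configs []string

    func main() {
        tOld := reflect.TypeOf(Configs{})  // constructs a zero value just to read its type
        tNew := reflect.TypeFor[Configs]() // resolves the type parameter directly
        fmt.Println(tOld == tNew)          // true: both yield the same reflect.Type
    }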
diff --git a/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go b/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go
index e74870f0462..5c3b67d6e8c 100644
--- a/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go
+++ b/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go
@@ -37,7 +37,7 @@ func (tg Group) String() string {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (tg *Group) UnmarshalYAML(unmarshal func(any) error) error {
g := struct {
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels"`
@@ -56,7 +56,7 @@ func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements the yaml.Marshaler interface.
-func (tg Group) MarshalYAML() (interface{}, error) {
+func (tg Group) MarshalYAML() (any, error) {
g := &struct {
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels,omitempty"`
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/convert.go b/vendor/github.com/prometheus/prometheus/model/histogram/convert.go
new file mode 100644
index 00000000000..218fbe197e9
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/convert.go
@@ -0,0 +1,145 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+ "errors"
+ "fmt"
+ "math"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+// ConvertNHCBToClassic converts Native Histogram Custom Buckets (NHCB) to classic histogram series.
+// This conversion is needed in scenarios where an NHCB must be turned back into
+// classic histogram format, such as Remote Write v1 for external-system
+// compatibility and migration use cases.
+//
+// When calling this function, the caller must ensure that the provided nhcb is a
+// valid NHCB histogram.
+func ConvertNHCBToClassic(nhcb any, lset labels.Labels, lsetBuilder *labels.Builder, emitSeriesFn func(labels labels.Labels, value float64) error) error {
+ baseName := lset.Get(model.MetricNameLabel)
+ if baseName == "" {
+ return errors.New("metric name label '__name__' is missing")
+ }
+
+ // We preserve original labels and restore them after conversion.
+ // This is to ensure that no modifications are made to the original labels
+ // that the queue_manager relies on.
+ oldLabels := lsetBuilder.Labels()
+ defer lsetBuilder.Reset(oldLabels)
+
+ var (
+ customValues []float64
+ positiveBuckets []float64
+ count, sum float64
+ idx int // Tracks the bucket index in the classic histogram.
+ currIdx int // Tracks the bucket index in the native histogram.
+ )
+
+ switch h := nhcb.(type) {
+ case *Histogram:
+ if !IsCustomBucketsSchema(h.Schema) {
+ return errors.New("unsupported histogram schema, not a NHCB")
+ }
+
+ // Validate the histogram before conversion.
+ // The caller must ensure that the provided histogram is a valid NHCB.
+ if err := h.Validate(); err != nil {
+ return err
+ }
+
+ customValues = h.CustomValues
+ positiveBuckets = make([]float64, len(customValues)+1)
+
+ // Histograms are in delta format, so we first convert them to absolute values.
+ acc := int64(0)
+ for _, s := range h.PositiveSpans {
+ for i := 0; i < int(s.Offset); i++ {
+ positiveBuckets[idx] = float64(acc)
+ idx++
+ }
+ for i := 0; i < int(s.Length); i++ {
+ acc += h.PositiveBuckets[currIdx]
+ positiveBuckets[idx] = float64(acc)
+ idx++
+ currIdx++
+ }
+ }
+ count = float64(h.Count)
+ sum = h.Sum
+ case *FloatHistogram:
+ if !IsCustomBucketsSchema(h.Schema) {
+ return errors.New("unsupported histogram schema, not a NHCB")
+ }
+
+ // Validate the histogram before conversion.
+ // The caller must ensure that the provided histogram is a valid NHCB.
+ if err := h.Validate(); err != nil {
+ return err
+ }
+ customValues = h.CustomValues
+ positiveBuckets = make([]float64, len(customValues)+1)
+
+ for _, span := range h.PositiveSpans {
+ // Since a float histogram is already in absolute format, we keep
+ // the sparse buckets empty and jump ahead to the next filled
+ // bucket index.
+ idx += int(span.Offset)
+ for i := 0; i < int(span.Length); i++ {
+ positiveBuckets[idx] = h.PositiveBuckets[currIdx]
+ idx++
+ currIdx++
+ }
+ }
+ count = h.Count
+ sum = h.Sum
+ default:
+ return fmt.Errorf("unsupported histogram type: %T", h)
+ }
+
+ currCount := float64(0)
+ for i, val := range customValues {
+ currCount += positiveBuckets[i]
+ lsetBuilder.Reset(lset)
+ lsetBuilder.Set(model.MetricNameLabel, baseName+"_bucket")
+ lsetBuilder.Set(model.BucketLabel, labels.FormatOpenMetricsFloat(val))
+ if err := emitSeriesFn(lsetBuilder.Labels(), currCount); err != nil {
+ return err
+ }
+ }
+
+ currCount += positiveBuckets[len(positiveBuckets)-1]
+
+ lsetBuilder.Reset(lset)
+ lsetBuilder.Set(model.MetricNameLabel, baseName+"_bucket")
+ lsetBuilder.Set(model.BucketLabel, labels.FormatOpenMetricsFloat(math.Inf(1)))
+ if err := emitSeriesFn(lsetBuilder.Labels(), currCount); err != nil {
+ return err
+ }
+
+ lsetBuilder.Reset(lset)
+ lsetBuilder.Set(model.MetricNameLabel, baseName+"_count")
+ if err := emitSeriesFn(lsetBuilder.Labels(), count); err != nil {
+ return err
+ }
+
+ lsetBuilder.Reset(lset)
+ lsetBuilder.Set(model.MetricNameLabel, baseName+"_sum")
+ if err := emitSeriesFn(lsetBuilder.Labels(), sum); err != nil {
+ return err
+ }
+
+ return nil
+}
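A minimal usage sketch for ConvertNHCBToClassic (not part of the diff; the histogram values are invented): it emits one _bucket series per custom bound plus the +Inf bucket, followed by the _count and _sum series.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        // A valid NHCB: two custom bounds plus the implicit +Inf bucket.
        h := &histogram.FloatHistogram{
            Schema:          histogram.CustomBucketsSchema,
            Count:           5,
            Sum:             12.5,
            CustomValues:    []float64{0.1, 0.5},
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
            PositiveBuckets: []float64{2, 2, 1},
        }
        lset := labels.FromStrings("__name__", "http_request_duration_seconds")
        b := labels.NewBuilder(labels.EmptyLabels())

        err := histogram.ConvertNHCBToClassic(h, lset, b, func(ls labels.Labels, v float64) error {
            // Emits _bucket{le="0.1"} 2, _bucket{le="0.5"} 4, _bucket{le="+Inf"} 5,
            // then _count 5 and _sum 12.5.
            fmt.Println(ls, v)
            return nil
        })
        if err != nil {
            panic(err)
        }
    }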
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
index 92f084bdf67..c607448f38e 100644
--- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
@@ -283,7 +283,8 @@ func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
// bucket counts including the zero bucket and the count and the sum of
// observations. The bucket layout stays the same. This method changes the
// receiving histogram directly (rather than acting on a copy). It returns a
-// pointer to the receiving histogram for convenience.
+// pointer to the receiving histogram for convenience. If factor is negative,
+// the counter reset hint is set to GaugeType.
func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
h.ZeroCount *= factor
h.Count *= factor
@@ -294,11 +295,15 @@ func (h *FloatHistogram) Mul(factor float64) *FloatHistogram {
for i := range h.NegativeBuckets {
h.NegativeBuckets[i] *= factor
}
+ if factor < 0 {
+ h.CounterResetHint = GaugeType
+ }
return h
}
// Div works like Mul but divides instead of multiplies.
-// When dividing by 0, everything will be set to Inf.
+// When dividing by 0, everything will be set to Inf. If scalar is negative,
+// the counter reset hint is set to GaugeType.
func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
h.ZeroCount /= scalar
h.Count /= scalar
@@ -317,6 +322,9 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
for i := range h.NegativeBuckets {
h.NegativeBuckets[i] /= scalar
}
+ if scalar < 0 {
+ h.CounterResetHint = GaugeType
+ }
return h
}
@@ -330,41 +338,20 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
// The method reconciles differences in the zero threshold and in the schema, and
// changes them if needed. The other histogram will not be modified in any case.
// Adding is currently only supported between 2 exponential histograms, or between
-// 2 custom buckets histograms with the exact same custom bounds.
+// 2 custom buckets histograms with the exact same custom bounds. If CounterResetHint
+// values conflict, the receiver's hint is set to unknown, and counterResetCollision
+// is returned as true. A counter reset conflict occurs iff one of the two histograms
+// indicates a counter reset (CounterReset) while the other indicates no reset (NotCounterReset).
+//
+// In case of mismatched NHCB bounds, they will be reconciled to the intersection of
+// both histograms, and nhcbBoundsReconciled will be returned as true.
//
// This method returns a pointer to the receiving histogram for convenience.
-func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
- if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
- return nil, ErrHistogramsIncompatibleSchema
- }
- if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
- return nil, ErrHistogramsIncompatibleBounds
- }
-
- switch {
- case other.CounterResetHint == h.CounterResetHint:
- // Adding apples to apples, all good. No need to change anything.
- case h.CounterResetHint == GaugeType:
- // Adding something else to a gauge. That's probably OK. Outcome is a gauge.
- // Nothing to do since the receiver is already marked as gauge.
- case other.CounterResetHint == GaugeType:
- // Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
- h.CounterResetHint = GaugeType
- case h.CounterResetHint == UnknownCounterReset:
- // With the receiver's CounterResetHint being "unknown", this could still be legitimate
- // if the caller knows what they are doing. Outcome is then again "unknown".
- // No need to do anything since the receiver's CounterResetHint is already "unknown".
- case other.CounterResetHint == UnknownCounterReset:
- // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
- h.CounterResetHint = UnknownCounterReset
- default:
- // All other cases shouldn't actually happen.
- // They are a direct collision of CounterReset and NotCounterReset.
- // Conservatively set the CounterResetHint to "unknown" and issue a warning.
- h.CounterResetHint = UnknownCounterReset
- // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
+func (h *FloatHistogram) Add(other *FloatHistogram) (res *FloatHistogram, counterResetCollision, nhcbBoundsReconciled bool, err error) {
+ if err := h.checkSchemaAndBounds(other); err != nil {
+ return nil, false, false, err
}
-
+ counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
@@ -380,8 +367,21 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
)
if h.UsesCustomBuckets() {
- h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
- return h, nil
+ if CustomBucketBoundsMatch(h.CustomValues, other.CustomValues) {
+ h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+ } else {
+ nhcbBoundsReconciled = true
+ intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
+
+ // Add with mapping - maps both histograms to intersected layout.
+ h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ false,
+ hPositiveSpans, hPositiveBuckets, h.CustomValues,
+ otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
+ intersectedBounds)
+ h.CustomValues = intersectedBounds
+ }
+ return h, counterResetCollision, nhcbBoundsReconciled, nil
}
var (
@@ -405,18 +405,19 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
- return h, nil
+ return h, counterResetCollision, nhcbBoundsReconciled, nil
}
-// Sub works like Add but subtracts the other histogram.
-func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
- if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
- return nil, ErrHistogramsIncompatibleSchema
+// Sub works like Add but subtracts the other histogram. It uses the same logic
+// to adjust the counter reset hint. This is useful when the method is used
+// for incremental mean calculation. However, if it is used for the actual "-"
+// operator in PromQL, the counter reset hint needs to be set to GaugeType after
+// calling this method.
+func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counterResetCollision, nhcbBoundsReconciled bool, err error) {
+ if err := h.checkSchemaAndBounds(other); err != nil {
+ return nil, false, false, err
}
- if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
- return nil, ErrHistogramsIncompatibleBounds
- }
-
+ counterResetCollision = h.adjustCounterReset(other)
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount -= otherZeroCount
@@ -432,8 +433,21 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
)
if h.UsesCustomBuckets() {
- h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
- return h, nil
+ if CustomBucketBoundsMatch(h.CustomValues, other.CustomValues) {
+ h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+ } else {
+ nhcbBoundsReconciled = true
+ intersectedBounds := intersectCustomBucketBounds(h.CustomValues, other.CustomValues)
+
+ // Subtract with mapping - maps both histograms to intersected layout.
+ h.PositiveSpans, h.PositiveBuckets = addCustomBucketsWithMismatches(
+ true,
+ hPositiveSpans, hPositiveBuckets, h.CustomValues,
+ otherPositiveSpans, otherPositiveBuckets, other.CustomValues,
+ intersectedBounds)
+ h.CustomValues = intersectedBounds
+ }
+ return h, counterResetCollision, nhcbBoundsReconciled, nil
}
var (
@@ -456,7 +470,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
- return h, nil
+ return h, counterResetCollision, nhcbBoundsReconciled, nil
}
// Equals returns true if the given float histogram matches exactly.
@@ -480,7 +494,7 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
}
if h.UsesCustomBuckets() {
- if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+ if !CustomBucketBoundsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
@@ -493,14 +507,14 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
- if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+ if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false
}
- if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+ if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
@@ -619,11 +633,17 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.Count < previous.Count {
return true
}
- if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
- // Mark that something has changed or that the application has been restarted. However, this does
- // not matter so much since the change in schema will be handled directly in the chunks and PromQL
- // functions.
- return true
+ if h.UsesCustomBuckets() {
+ if !previous.UsesCustomBuckets() {
+ // Mark that something has changed or that the application has been restarted. However, this does
+ // not matter so much since the change in schema will be handled directly in the chunks and PromQL
+ // functions.
+ return true
+ }
+ if !CustomBucketBoundsMatch(h.CustomValues, previous.CustomValues) {
+ // Custom bounds don't match - check if any reconciled bucket value has decreased.
+ return h.detectResetWithMismatchedCustomBounds(previous, h.CustomValues, previous.CustomValues)
+ }
}
if h.Schema > previous.Schema {
return true
@@ -778,23 +798,24 @@ func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
// create false positives here.
func (h *FloatHistogram) Validate() error {
var nCount, pCount float64
- if h.UsesCustomBuckets() {
+ switch {
+ case IsCustomBucketsSchema(h.Schema):
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
- return errors.New("custom buckets: must have zero count of 0")
+ return ErrHistogramCustomBucketsZeroCount
}
if h.ZeroThreshold != 0 {
- return errors.New("custom buckets: must have zero threshold of 0")
+ return ErrHistogramCustomBucketsZeroThresh
}
if len(h.NegativeSpans) > 0 {
- return errors.New("custom buckets: must not have negative spans")
+ return ErrHistogramCustomBucketsNegSpans
}
if len(h.NegativeBuckets) > 0 {
- return errors.New("custom buckets: must not have negative buckets")
+ return ErrHistogramCustomBucketsNegBuckets
}
- } else {
+ case IsExponentialSchema(h.Schema):
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
@@ -805,9 +826,17 @@ func (h *FloatHistogram) Validate() error {
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
+ if h.ZeroCount < 0 {
+ return fmt.Errorf("zero bucket has observation count of %v: %w", h.ZeroCount, ErrHistogramNegativeBucketCount)
+ }
if h.CustomValues != nil {
- return errors.New("histogram with exponential schema must not have custom bounds")
+ return ErrHistogramExpSchemaCustomBounds
}
+ default:
+ return InvalidSchemaError(h.Schema)
+ }
+ if h.Count < 0 {
+ return fmt.Errorf("observation count is %v: %w", h.Count, ErrHistogramNegativeCount)
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil {
@@ -975,7 +1004,7 @@ func (h *FloatHistogram) floatBucketIterator(
return i
}
-// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
+// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator(
spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
) reverseFloatBucketIterator {
@@ -1323,7 +1352,9 @@ func addBuckets(
return spansA, bucketsA
}
-func FloatBucketsMatch(b1, b2 []float64) bool {
+// floatBucketsMatch compares bucket values of two float histograms using binary float comparison
+// and returns true if all values match.
+func floatBucketsMatch(b1, b2 []float64) bool {
if len(b1) != len(b2) {
return false
}
@@ -1335,6 +1366,204 @@ func FloatBucketsMatch(b1, b2 []float64) bool {
return true
}
+// detectResetWithMismatchedCustomBounds checks if any bucket count has decreased when
+// comparing NHCBs with mismatched custom bounds. It maps both histograms
+// to the intersected bounds on-the-fly and compares values without allocating
+// arrays for all mapped buckets.
+// Will panic if called with histograms that are not NHCB.
+func (h *FloatHistogram) detectResetWithMismatchedCustomBounds(
+ previous *FloatHistogram, currBounds, prevBounds []float64,
+) bool {
+ if h.Schema != CustomBucketsSchema || previous.Schema != CustomBucketsSchema {
+ panic("detectResetWithMismatchedCustomBounds called with non-NHCB schema")
+ }
+ currIt := h.floatBucketIterator(true, 0, CustomBucketsSchema)
+ prevIt := previous.floatBucketIterator(true, 0, CustomBucketsSchema)
+
+ rollupSumForBound := func(iter *floatBucketIterator, iterStarted bool, iterBucket Bucket[float64], bound float64) (float64, Bucket[float64], bool) {
+ if !iterStarted {
+ if !iter.Next() {
+ return 0, Bucket[float64]{}, false
+ }
+ iterBucket = iter.At()
+ }
+ var sum float64
+ for iterBucket.Upper <= bound {
+ sum += iterBucket.Count
+ if !iter.Next() {
+ return sum, Bucket[float64]{}, false
+ }
+ iterBucket = iter.At()
+ }
+ return sum, iterBucket, true
+ }
+
+ var (
+ currBoundIdx, prevBoundIdx = 0, 0
+ currBucket, prevBucket Bucket[float64]
+ currIterStarted, currHasMore bool
+ prevIterStarted, prevHasMore bool
+ )
+
+ for currBoundIdx <= len(currBounds) && prevBoundIdx <= len(prevBounds) {
+ currBound := math.Inf(1)
+ if currBoundIdx < len(currBounds) {
+ currBound = currBounds[currBoundIdx]
+ }
+ prevBound := math.Inf(1)
+ if prevBoundIdx < len(prevBounds) {
+ prevBound = prevBounds[prevBoundIdx]
+ }
+
+ switch {
+ case currBound == prevBound:
+ // Check the matching bound, rolling up lesser buckets that have not been accounted for yet.
+ currRollupSum := 0.0
+ if !currIterStarted || currHasMore {
+ currRollupSum, currBucket, currHasMore = rollupSumForBound(&currIt, currIterStarted, currBucket, currBound)
+ currIterStarted = true
+ }
+
+ prevRollupSum := 0.0
+ if !prevIterStarted || prevHasMore {
+ prevRollupSum, prevBucket, prevHasMore = rollupSumForBound(&prevIt, prevIterStarted, prevBucket, currBound)
+ prevIterStarted = true
+ }
+
+ if currRollupSum < prevRollupSum {
+ return true
+ }
+
+ currBoundIdx++
+ prevBoundIdx++
+ case currBound < prevBound:
+ currBoundIdx++
+ default:
+ prevBoundIdx++
+ }
+ }
+
+ return false
+}
+
+// intersectCustomBucketBounds returns the intersection of two custom bucket boundary sets.
+func intersectCustomBucketBounds(boundsA, boundsB []float64) []float64 {
+ if len(boundsA) == 0 || len(boundsB) == 0 {
+ return nil
+ }
+
+ var (
+ result []float64
+ i, j = 0, 0
+ )
+
+ for i < len(boundsA) && j < len(boundsB) {
+ switch {
+ case boundsA[i] == boundsB[j]:
+ if result == nil {
+ // Allocate a new slice because FloatHistogram.CustomValues has to be immutable.
+ result = make([]float64, 0, min(len(boundsA), len(boundsB)))
+ }
+ result = append(result, boundsA[i])
+ i++
+ j++
+ case boundsA[i] < boundsB[j]:
+ i++
+ default:
+ j++
+ }
+ }
+
+ return result
+}
+
+// addCustomBucketsWithMismatches handles adding/subtracting custom bucket histograms
+// with mismatched bucket layouts by mapping both to an intersected layout.
+func addCustomBucketsWithMismatches(
+ negative bool,
+ spansA []Span, bucketsA, boundsA []float64,
+ spansB []Span, bucketsB, boundsB []float64,
+ intersectedBounds []float64,
+) ([]Span, []float64) {
+ targetBuckets := make([]float64, len(intersectedBounds)+1)
+
+ mapBuckets := func(spans []Span, buckets, bounds []float64, negative bool) {
+ srcIdx := 0
+ bucketIdx := 0
+ intersectIdx := 0
+
+ for _, span := range spans {
+ srcIdx += int(span.Offset)
+ for range span.Length {
+ if bucketIdx < len(buckets) {
+ value := buckets[bucketIdx]
+
+ // Find target bucket index.
+ targetIdx := len(targetBuckets) - 1 // Default to +Inf bucket.
+ if srcIdx < len(bounds) {
+ srcBound := bounds[srcIdx]
+ // Since both arrays are sorted, we can continue from where we left off.
+ for intersectIdx < len(intersectedBounds) {
+ if intersectedBounds[intersectIdx] >= srcBound {
+ targetIdx = intersectIdx
+ break
+ }
+ intersectIdx++
+ }
+ }
+
+ if negative {
+ targetBuckets[targetIdx] -= value
+ } else {
+ targetBuckets[targetIdx] += value
+ }
+ }
+ srcIdx++
+ bucketIdx++
+ }
+ }
+ }
+
+ // Map both histograms to the intersected layout.
+ mapBuckets(spansA, bucketsA, boundsA, false)
+ mapBuckets(spansB, bucketsB, boundsB, negative)
+
+ // Build spans and buckets, excluding zero-valued buckets from the final result.
+ destSpans := spansA[:0] // Reuse spansA capacity for destSpans since we don't need it anymore.
+ destBuckets := targetBuckets[:0] // Reuse targetBuckets capacity for destBuckets since it's guaranteed to be large enough.
+ lastIdx := int32(-1)
+
+ for i, count := range targetBuckets {
+ if count == 0 {
+ continue
+ }
+
+ destBuckets = append(destBuckets, count)
+ idx := int32(i)
+
+ if len(destSpans) > 0 && idx == lastIdx+1 {
+ // Consecutive bucket, extend the last span.
+ destSpans[len(destSpans)-1].Length++
+ } else {
+ // New span needed.
+ // TODO: optimize away small gaps.
+ offset := idx
+ if len(destSpans) > 0 {
+ // Convert to relative offset from the end of the last span.
+ prevEnd := lastIdx
+ offset = idx - prevEnd - 1
+ }
+ destSpans = append(destSpans, Span{
+ Offset: offset,
+ Length: 1,
+ })
+ }
+ lastIdx = idx
+ }
+
+ return destSpans, destBuckets
+}
+
// ReduceResolution reduces the float histogram's spans, buckets into target schema.
// The target schema must be smaller than the current float histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
@@ -1356,3 +1585,45 @@ func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
h.Schema = targetSchema
return h
}
+
+// checkSchemaAndBounds checks if two histograms are compatible because they
+// both use a standard exponential schema or because they both are NHCBs.
+func (h *FloatHistogram) checkSchemaAndBounds(other *FloatHistogram) error {
+ if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
+ return ErrHistogramsIncompatibleSchema
+ }
+ return nil
+}
+
+// adjustCounterReset is used for addition and subtraction. Those operations are
+// usually only performed between gauge histograms, but if one or both are
+// counters, we try to at least set the counter reset hint to something
+// meaningful (see code comments below). The returned counterResetCollision is
+// true if one histogram has a counter reset hint of CounterReset and the other
+// NotCounterReset. All other combinations are not considered a collision.
+func (h *FloatHistogram) adjustCounterReset(other *FloatHistogram) (counterResetCollision bool) {
+ switch {
+ case other.CounterResetHint == h.CounterResetHint:
+ // Adding apples to apples, all good. No need to change anything.
+ case h.CounterResetHint == GaugeType:
+ // Adding something else to a gauge. That's probably OK. Outcome is a gauge.
+ // Nothing to do since the receiver is already marked as gauge.
+ case other.CounterResetHint == GaugeType:
+ // Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
+ h.CounterResetHint = GaugeType
+ case h.CounterResetHint == UnknownCounterReset:
+ // With the receiver's CounterResetHint being "unknown", this could still be legitimate
+ // if the caller knows what they are doing. Outcome is then again "unknown".
+ // No need to do anything since the receiver's CounterResetHint is already "unknown".
+ case other.CounterResetHint == UnknownCounterReset:
+ // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
+ h.CounterResetHint = UnknownCounterReset
+ default:
+ // All other cases shouldn't actually happen.
+ // They are a direct collision of CounterReset and NotCounterReset.
+ // Conservatively set the CounterResetHint to "unknown" and issue a warning.
+ h.CounterResetHint = UnknownCounterReset
+ return true
+ }
+ return false
+}
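Since Add and Sub now return two extra booleans instead of failing on mismatched NHCB bounds, call sites change shape. A hedged sketch of the new contract (histogram values invented; with bounds {1, 2} versus {2, 3} the intersection is {2}):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h1 := &histogram.FloatHistogram{
            Schema: histogram.CustomBucketsSchema, Count: 3, Sum: 3,
            CustomValues:    []float64{1, 2},
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
            PositiveBuckets: []float64{1, 1, 1},
        }
        h2 := &histogram.FloatHistogram{
            Schema: histogram.CustomBucketsSchema, Count: 3, Sum: 3,
            CustomValues:    []float64{2, 3},
            PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
            PositiveBuckets: []float64{1, 1, 1},
        }
        res, collision, reconciled, err := h1.Add(h2)
        if err != nil {
            panic(err) // mixing exponential and custom schemas is still an error
        }
        if collision {
            fmt.Println("CounterReset collided with NotCounterReset; hint set to unknown")
        }
        if reconciled {
            fmt.Println("bounds intersected to:", res.CustomValues) // [2]
        }
    }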
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go
index a36b58d0696..cd385407d53 100644
--- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go
@@ -21,24 +21,50 @@ import (
)
const (
- ExponentialSchemaMax int32 = 8
- ExponentialSchemaMin int32 = -4
- CustomBucketsSchema int32 = -53
+ ExponentialSchemaMax int32 = 8
+ ExponentialSchemaMaxReserved int32 = 52
+ ExponentialSchemaMin int32 = -4
+ ExponentialSchemaMinReserved int32 = -9
+ CustomBucketsSchema int32 = -53
)
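+// Error wraps a sentinel histogram error so that callers can match it with
+// errors.Is even after more context is layered on via fmt.Errorf("%w", ...).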
+type Error struct {
+ error
+}
+
+func (e Error) Unwrap() error {
+ return e.error
+}
+
var (
- ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
- ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
- ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
- ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
- ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
- ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
- ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order")
- ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
- ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
- ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
+ ErrHistogramCountNotBigEnough = Error{error: errors.New("histogram's observation count should be at least the number of observations found in the buckets")}
+ ErrHistogramCountMismatch = Error{error: errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")}
+ ErrHistogramNegativeCount = Error{error: errors.New("histogram's observation count is negative")}
+ ErrHistogramNegativeBucketCount = Error{error: errors.New("histogram has a bucket whose observation count is negative")}
+ ErrHistogramSpanNegativeOffset = Error{error: errors.New("histogram has a span whose offset is negative")}
+ ErrHistogramSpansBucketsMismatch = Error{error: errors.New("histogram spans specify different number of buckets than provided")}
+ ErrHistogramCustomBucketsMismatch = Error{error: errors.New("histogram custom bounds are too few")}
+ ErrHistogramCustomBucketsInvalid = Error{error: errors.New("histogram custom bounds must be in strictly increasing order")}
+ ErrHistogramCustomBucketsInfinite = Error{error: errors.New("histogram custom bounds must be finite")}
+ ErrHistogramCustomBucketsNaN = Error{error: errors.New("histogram custom bounds must not be NaN")}
+ ErrHistogramsIncompatibleSchema = Error{error: errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")}
+ ErrHistogramCustomBucketsZeroCount = Error{error: errors.New("custom buckets: must have zero count of 0")}
+ ErrHistogramCustomBucketsZeroThresh = Error{error: errors.New("custom buckets: must have zero threshold of 0")}
+ ErrHistogramCustomBucketsNegSpans = Error{error: errors.New("custom buckets: must not have negative spans")}
+ ErrHistogramCustomBucketsNegBuckets = Error{error: errors.New("custom buckets: must not have negative buckets")}
+ ErrHistogramExpSchemaCustomBounds = Error{error: errors.New("histogram with exponential schema must not have custom bounds")}
+ ErrHistogramsInvalidSchema = Error{error: fmt.Errorf("histogram has an invalid schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMin, ExponentialSchemaMax, CustomBucketsSchema)}
+ ErrHistogramsUnknownSchema = Error{error: fmt.Errorf("histogram has an unknown schema, which must be between %d and %d for exponential buckets, or %d for custom buckets", ExponentialSchemaMinReserved, ExponentialSchemaMaxReserved, CustomBucketsSchema)}
)
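+// InvalidSchemaError returns ErrHistogramsInvalidSchema annotated with the offending schema.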
+func InvalidSchemaError(s int32) error {
+ return Error{error: fmt.Errorf("%w, got schema %d", ErrHistogramsInvalidSchema, s)}
+}
+
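+// UnknownSchemaError returns ErrHistogramsUnknownSchema annotated with the offending schema.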
+func UnknownSchemaError(s int32) error {
+ return Error{error: fmt.Errorf("%w, got schema %d", ErrHistogramsUnknownSchema, s)}
+}
+
func IsCustomBucketsSchema(s int32) bool {
return s == CustomBucketsSchema
}
@@ -47,6 +73,34 @@ func IsExponentialSchema(s int32) bool {
return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
}
+func IsExponentialSchemaReserved(s int32) bool {
+ return s >= ExponentialSchemaMinReserved && s <= ExponentialSchemaMaxReserved
+}
+
+func IsValidSchema(s int32) bool {
+ return IsCustomBucketsSchema(s) || IsExponentialSchema(s)
+}
+
+// IsKnownSchema returns true if we know and accept the schema, even if we
+// may need to reduce resolution to the nearest supported schema.
+func IsKnownSchema(s int32) bool {
+ return IsCustomBucketsSchema(s) || IsExponentialSchemaReserved(s)
+}
+
+// CustomBucketBoundsMatch compares histogram custom bucket bounds (CustomValues)
+// and returns true if all values match.
+func CustomBucketBoundsMatch(c1, c2 []float64) bool {
+ if len(c1) != len(c2) {
+ return false
+ }
+ for i, c := range c1 {
+ if c != c2[i] {
+ return false
+ }
+ }
+ return true
+}
+
// BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface {
@@ -402,7 +456,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
}
var last IBC
- for i := 0; i < len(buckets); i++ {
+ for i := range buckets {
var c IBC
if deltas {
c = last + buckets[i]
@@ -421,8 +475,11 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
prev := math.Inf(-1)
- for _, curr := range bounds {
- if curr <= prev {
+ for i, curr := range bounds {
+ if math.IsNaN(curr) {
+ return ErrHistogramCustomBucketsNaN
+ }
+ if i > 0 && curr <= prev {
return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
}
prev = curr
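These wrapped sentinels let callers branch on specific validation failures with errors.Is rather than matching on error strings; a minimal sketch:

    package main

    import (
        "errors"
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.FloatHistogram{
            Schema:       histogram.CustomBucketsSchema,
            ZeroCount:    1, // invalid: an NHCB must have a zero count of 0
            CustomValues: []float64{1, 2},
        }
        err := h.Validate()
        // Error.Unwrap makes the sentinel matchable even when extra context
        // has been layered on with fmt.Errorf("%w: ...", ...).
        fmt.Println(errors.Is(err, histogram.ErrHistogramCustomBucketsZeroCount)) // true
    }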
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
index cfb63e63416..a7d9ce80f01 100644
--- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
@@ -14,7 +14,6 @@
package histogram
import (
- "errors"
"fmt"
"math"
"slices"
@@ -256,7 +255,7 @@ func (h *Histogram) Equals(h2 *Histogram) bool {
}
if h.UsesCustomBuckets() {
- if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+ if !CustomBucketBoundsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
@@ -425,23 +424,24 @@ func resize[T any](items []T, n int) []T {
// the total h.Count).
func (h *Histogram) Validate() error {
var nCount, pCount uint64
- if h.UsesCustomBuckets() {
+ switch {
+ case IsCustomBucketsSchema(h.Schema):
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
- return errors.New("custom buckets: must have zero count of 0")
+ return ErrHistogramCustomBucketsZeroCount
}
if h.ZeroThreshold != 0 {
- return errors.New("custom buckets: must have zero threshold of 0")
+ return ErrHistogramCustomBucketsZeroThresh
}
if len(h.NegativeSpans) > 0 {
- return errors.New("custom buckets: must not have negative spans")
+ return ErrHistogramCustomBucketsNegSpans
}
if len(h.NegativeBuckets) > 0 {
- return errors.New("custom buckets: must not have negative buckets")
+ return ErrHistogramCustomBucketsNegBuckets
}
- } else {
+ case IsExponentialSchema(h.Schema):
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
@@ -453,8 +453,10 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
- return errors.New("histogram with exponential schema must not have custom bounds")
+ return ErrHistogramExpSchemaCustomBounds
}
+ default:
+ return InvalidSchemaError(h.Schema)
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil {
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
index e6b33863bd4..a4871ada311 100644
--- a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
@@ -22,7 +22,7 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
var histograms []*Histogram
- for i := 0; i < numHistograms; i++ {
+ for i := range numHistograms {
h := &Histogram{
Count: uint64(i) + observationCount,
ZeroCount: uint64(i),
@@ -35,13 +35,13 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
PositiveBuckets: make([]int64, bucketsPerSide),
}
- for j := 0; j < numSpans; j++ {
+ for j := range numSpans {
s := Span{Offset: 1, Length: spanLength}
h.NegativeSpans[j] = s
h.PositiveSpans[j] = s
}
- for j := 0; j < bucketsPerSide; j++ {
+ for j := range bucketsPerSide {
h.NegativeBuckets[j] = 1
h.PositiveBuckets[j] = 1
}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/float.go b/vendor/github.com/prometheus/prometheus/model/labels/float.go
new file mode 100644
index 00000000000..c526a5b2a6a
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/float.go
@@ -0,0 +1,60 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+ "bytes"
+ "math"
+ "strconv"
+ "sync"
+)
+
+// floatFormatBufPool is exclusively used in FormatOpenMetricsFloat.
+var floatFormatBufPool = sync.Pool{
+ New: func() any {
+ // To contain at most 17 digits and additional syntax for a float64.
+ b := make([]byte, 0, 24)
+ return &b
+ },
+}
+
+// FormatOpenMetricsFloat works like the usual Go string formatting of a float
+// but appends ".0" if the resulting number would otherwise contain neither a
+// "." nor an "e".
+func FormatOpenMetricsFloat(f float64) string {
+ // A few common cases hardcoded.
+ switch {
+ case f == 1:
+ return "1.0"
+ case f == 0:
+ return "0.0"
+ case f == -1:
+ return "-1.0"
+ case math.IsNaN(f):
+ return "NaN"
+ case math.IsInf(f, +1):
+ return "+Inf"
+ case math.IsInf(f, -1):
+ return "-Inf"
+ }
+ bp := floatFormatBufPool.Get().(*[]byte)
+ defer floatFormatBufPool.Put(bp)
+
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ if bytes.ContainsAny(*bp, "e.") {
+ return string(*bp)
+ }
+ *bp = append(*bp, '.', '0')
+ return string(*bp)
+}
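A quick sketch of the formatting behavior (values invented):

    package main

    import (
        "fmt"
        "math"

        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        fmt.Println(labels.FormatOpenMetricsFloat(100))         // "100.0" (".0" appended)
        fmt.Println(labels.FormatOpenMetricsFloat(0.25))        // "0.25" (already has a '.')
        fmt.Println(labels.FormatOpenMetricsFloat(1e21))        // "1e+21" (already has an 'e')
        fmt.Println(labels.FormatOpenMetricsFloat(math.Inf(1))) // "+Inf"
    }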
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
index 5f46d6c35f4..5a3979784cb 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
@@ -25,6 +25,7 @@ import (
const (
// MetricName is a special label name that represent a metric name.
+ //
// Deprecated: Use schema.Metadata structure and its methods.
MetricName = "__name__"
@@ -43,6 +44,15 @@ type Label struct {
}
func (ls Labels) String() string {
+ return ls.stringImpl(true)
+}
+
+// StringNoSpace is like String but does not add a space after commas.
+func (ls Labels) StringNoSpace() string {
+ return ls.stringImpl(false)
+}
+
+func (ls Labels) stringImpl(addSpace bool) string {
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
@@ -51,9 +61,11 @@ func (ls Labels) String() string {
ls.Range(func(l Label) {
if i > 0 {
b.WriteByte(',')
- b.WriteByte(' ')
+ if addSpace {
+ b.WriteByte(' ')
+ }
}
- if !model.LabelName(l.Name).IsValidLegacy() {
+ if !model.LegacyValidation.IsValidLabelName(l.Name) {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name))
} else {
b.WriteString(l.Name)
@@ -84,12 +96,12 @@ func (ls *Labels) UnmarshalJSON(b []byte) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (ls Labels) MarshalYAML() (interface{}, error) {
+func (ls Labels) MarshalYAML() (any, error) {
return ls.Map(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (ls *Labels) UnmarshalYAML(unmarshal func(any) error) error {
var m map[string]string
if err := unmarshal(&m); err != nil {
@@ -106,18 +118,11 @@ func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool {
if l.Name == model.MetricNameLabel {
// If the default validation scheme has been overridden with legacy mode,
// we need to call the special legacy validation checker.
- if validationScheme == model.LegacyValidation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) {
- return strconv.ErrSyntax
- }
- if !model.IsValidMetricName(model.LabelValue(l.Value)) {
+ if !validationScheme.IsValidMetricName(l.Value) {
return strconv.ErrSyntax
}
}
- if validationScheme == model.LegacyValidation {
- if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() {
- return strconv.ErrSyntax
- }
- } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
+ if !validationScheme.IsValidLabelName(l.Name) || !model.LabelValue(l.Value).IsValid() {
return strconv.ErrSyntax
}
return nil
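The scheme-aware helpers above replace the old free functions, so validity now hinges on a single model.ValidationScheme value. A sketch, assuming the LegacyValidation/UTF8Validation constants from prometheus/common:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // Dots are rejected by legacy validation but accepted under UTF-8 validation.
        fmt.Println(model.LegacyValidation.IsValidLabelName("foo.bar")) // false
        fmt.Println(model.UTF8Validation.IsValidLabelName("foo.bar"))   // true
    }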
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go
index edc6ff8e825..1e736c832ea 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go
@@ -55,7 +55,7 @@ func NewSymbolTable() *SymbolTable {
nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)},
byName: make(map[string]int, defaultSymbolTableSize),
}
- t.nameTable.symbolTable = t
+ t.symbolTable = t
return t
}
@@ -95,8 +95,8 @@ func (t *SymbolTable) toNumUnlocked(name string) int {
func (t *SymbolTable) checkNum(name string) (int, bool) {
t.mx.Lock()
defer t.mx.Unlock()
- i, bool := t.byName[name]
- return i, bool
+ i, ok := t.byName[name]
+ return i, ok
}
// ToName maps an integer to a string.
@@ -117,7 +117,7 @@ func decodeVarint(data string, index int) (int, int) {
}
func decodeVarintRest(b int, data string, index int) (int, int) {
- value := int(b & 0x7FFF)
+ value := b & 0x7FFF
b = int(data[index])
index++
if b < 0x80 {
@@ -165,7 +165,7 @@ func (ls Labels) IsZero() bool {
// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
-// TODO: This is only used in printing an error message
+// TODO: This is only used in printing an error message.
func (ls Labels) MatchLabels(on bool, names ...string) Labels {
b := NewBuilder(ls)
if on {
@@ -506,7 +506,7 @@ func Compare(a, b Labels) int {
return (la - ia) - (lb - ib)
}
-// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+// CopyFrom copies labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
func (ls *Labels) CopyFrom(b Labels) {
*ls = b // Straightforward memberwise copy is all we need.
}
@@ -552,16 +552,17 @@ func (ls Labels) Validate(f func(l Label) error) error {
}
// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
-func (ls *Labels) InternStrings(intern func(string) string) {
+func (*Labels) InternStrings(func(string) string) {
// TODO: remove these calls as there is nothing to do.
}
// ReleaseStrings calls release on every string value inside ls.
-func (ls Labels) ReleaseStrings(release func(string)) {
+func (Labels) ReleaseStrings(func(string)) {
// TODO: remove these calls as there is nothing to do.
}
// DropMetricName returns Labels with the "__name__" removed.
+//
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
@@ -701,7 +702,7 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int {
return base
}
-// Special code for the common case that a value is less than 32768
+// Special code for the common case that a value is less than 32768.
func encodeVarint(data []byte, offset, v int) int {
if v < 1<<15 {
offset -= 2
@@ -747,7 +748,7 @@ func appendLabelTo(nameNum, valueNum int, buf []byte) []byte {
}
i := sizeRequired
i = encodeVarint(buf, i, valueNum)
- i = encodeVarint(buf, i, nameNum)
+ encodeVarint(buf, i, nameNum)
return buf
}
@@ -775,6 +776,14 @@ func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) {
b.syms = s
}
+// SetUnsafeAdd allows turning on/off the assumption that added strings are unsafe
+// for reuse. ScratchBuilder implementations that do reuse strings must clone
+// them.
+//
+// The dedupelabels implementation copies any new strings to the symbolTable when
+// Labels() is called, so this operation is a no-op.
+func (ScratchBuilder) SetUnsafeAdd(bool) {}
+
func (b *ScratchBuilder) Reset() {
b.add = b.add[:0]
b.output = EmptyLabels()
@@ -782,16 +791,11 @@ func (b *ScratchBuilder) Reset() {
// Add a name/value pair.
// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+// The values must remain live until Labels() is called.
func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value})
}
-// Add a name/value pair, using []byte instead of string to reduce memory allocations.
-// The values must remain live until Labels() is called.
-func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
- b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
-}
-
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
@@ -815,7 +819,7 @@ func (b *ScratchBuilder) Labels() Labels {
return b.output
}
-// Write the newly-built Labels out to ls, reusing an internal buffer.
+// Overwrite writes the newly-built Labels into ls, reusing an internal buffer.
// Callers must ensure that there are no other references to ls, or any strings fetched from it.
func (b *ScratchBuilder) Overwrite(ls *Labels) {
var size int
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go
index a6e5654fa70..21ad145c1cc 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_slicelabels.go
@@ -19,6 +19,7 @@ import (
"bytes"
"slices"
"strings"
+ "unique"
"unsafe"
"github.com/cespare/xxhash/v2"
@@ -252,7 +253,7 @@ func (ls Labels) WithoutEmpty() Labels {
// the two string headers size for name and value.
// Slice header size is ignored because it should be amortized to zero.
func (ls Labels) ByteSize() uint64 {
- var size uint64 = 0
+ var size uint64
for _, l := range ls {
size += uint64(len(l.Name)+len(l.Value)) + 2*uint64(unsafe.Sizeof(""))
}
@@ -348,6 +349,7 @@ func (ls Labels) Validate(f func(l Label) error) error {
}
// DropMetricName returns Labels with the "__name__" removed.
+//
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
@@ -437,7 +439,8 @@ func (b *Builder) Labels() Labels {
// ScratchBuilder allows efficient construction of a Labels from scratch.
type ScratchBuilder struct {
- add Labels
+ add Labels
+ unsafeAdd bool
}
// SymbolTable is no-op, just for api parity with dedupelabels.
@@ -445,7 +448,7 @@ type SymbolTable struct{}
func NewSymbolTable() *SymbolTable { return nil }
-func (t *SymbolTable) Len() int { return 0 }
+func (*SymbolTable) Len() int { return 0 }
// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
func NewScratchBuilder(n int) ScratchBuilder {
@@ -453,7 +456,7 @@ func NewScratchBuilder(n int) ScratchBuilder {
}
// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels.
-func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder {
+func NewBuilderWithSymbolTable(*SymbolTable) *Builder {
return NewBuilder(EmptyLabels())
}
@@ -462,27 +465,36 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder {
return NewScratchBuilder(n)
}
-func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) {
+func (*ScratchBuilder) SetSymbolTable(*SymbolTable) {
// no-op
}
+// SetUnsafeAdd allows turning on/off the assumption that added strings are unsafe
+// for reuse. ScratchBuilder implementations that do reuse strings must clone
+// them.
+//
+// The slicelabels implementation clones all added strings when this option is true.
+func (b *ScratchBuilder) SetUnsafeAdd(unsafeAdd bool) {
+ b.unsafeAdd = unsafeAdd
+}
+
func (b *ScratchBuilder) Reset() {
b.add = b.add[:0]
}
// Add a name/value pair.
// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+// If SetUnsafeAdd was set to false, the values must remain live until Labels() is called.
func (b *ScratchBuilder) Add(name, value string) {
+ if b.unsafeAdd {
+ // Underlying label structure for slicelabels shares memory, so we need to
+ // copy it if the input is unsafe.
+ name = unique.Make(name).Value()
+ value = unique.Make(value).Value()
+ }
b.add = append(b.add, Label{Name: name, Value: value})
}
-// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
-// The default version of this function is unsafe, hence the name.
-// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
-func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
- b.add = append(b.add, Label{Name: string(name), Value: string(value)})
-}
-
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
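With UnsafeAddBytes gone, callers that alias reusable buffers signal it via SetUnsafeAdd instead. A hedged sketch of the new contract, where unsafe.String stands in for a parser's zero-copy byte-to-string conversion:

    package main

    import (
        "fmt"
        "unsafe"

        "github.com/prometheus/prometheus/model/labels"
    )

    func main() {
        buf := []byte("prometheus")           // reusable parser buffer
        v := unsafe.String(&buf[0], len(buf)) // string aliasing buf's memory

        b := labels.NewScratchBuilder(1)
        b.SetUnsafeAdd(true) // strings are unsafe: slicelabels clones them via unique.Make
        b.Add("job", v)
        b.Sort()
        ls := b.Labels()

        copy(buf, "XXXXXXXXXX")    // clobber the buffer after Labels() was built
        fmt.Println(ls.Get("job")) // still "prometheus"
    }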
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
index 4b9bfd15afb..f0872238029 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
@@ -421,6 +421,7 @@ func (ls Labels) Validate(f func(l Label) error) error {
}
// DropMetricName returns Labels with the "__name__" removed.
+//
// Deprecated: Use DropReserved instead.
func (ls Labels) DropMetricName() Labels {
return ls.DropReserved(func(n string) bool { return n == MetricName })
@@ -449,11 +450,11 @@ func (ls Labels) DropReserved(shouldDropFn func(name string) bool) Labels {
}
// InternStrings is a no-op because it would only save when the whole set of labels is identical.
-func (ls *Labels) InternStrings(_ func(string) string) {
+func (*Labels) InternStrings(func(string) string) {
}
// ReleaseStrings is a no-op for the same reason as InternStrings.
-func (ls Labels) ReleaseStrings(_ func(string)) {
+func (Labels) ReleaseStrings(func(string)) {
}
// Builder allows modifying Labels.
@@ -614,16 +615,11 @@ func (b *ScratchBuilder) Reset() {
// Add a name/value pair.
// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+// The values must remain live until Labels() is called.
func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value})
}
-// UnsafeAddBytes adds a name/value pair using []byte instead of string to reduce memory allocations.
-// The values must remain live until Labels() is called.
-func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
- b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
-}
-
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
@@ -664,10 +660,10 @@ type SymbolTable struct{}
func NewSymbolTable() *SymbolTable { return nil }
-func (t *SymbolTable) Len() int { return 0 }
+func (*SymbolTable) Len() int { return 0 }
// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels.
-func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder {
+func NewBuilderWithSymbolTable(*SymbolTable) *Builder {
return NewBuilder(EmptyLabels())
}
@@ -676,10 +672,17 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder {
return NewScratchBuilder(n)
}
-func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) {
+func (*ScratchBuilder) SetSymbolTable(*SymbolTable) {
// no-op
}
+// SetUnsafeAdd allows turning on/off the assumption that added strings are unsafe
+// for reuse. ScratchBuilder implementations that do reuse strings must clone
+// them.
+//
+// The stringlabels implementation copies all strings when Labels() is called, so this operation is a no-op.
+func (ScratchBuilder) SetUnsafeAdd(bool) {}
+
// SizeOfLabels returns the approximate space required for n copies of a label.
func SizeOfLabels(name, value string, n uint64) uint64 {
return uint64(labelSize(&Label{Name: name, Value: value})) * n
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
index 1636aacc21d..47b50e703a5 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
@@ -67,8 +67,6 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if err != nil {
return nil, err
}
- // Simplify the syntax tree to run faster.
- parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$")
if err != nil {
return nil, err
@@ -372,7 +370,7 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []st
}
if len(sub) == 0 {
- return
+ return prefix, suffix, contains
}
// Given Prometheus regex matchers are always anchored to the begin/end
@@ -393,7 +391,7 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []st
}
}
- return
+ return prefix, suffix, contains
}
// StringMatcher is a matcher that matches a string in place of a regular expression.
@@ -695,7 +693,7 @@ func (m *literalSuffixStringMatcher) Matches(s string) bool {
// emptyStringMatcher matches an empty string.
type emptyStringMatcher struct{}
-func (m emptyStringMatcher) Matches(s string) bool {
+func (emptyStringMatcher) Matches(s string) bool {
return len(s) == 0
}
@@ -756,7 +754,7 @@ func (m *equalMultiStringSliceMatcher) add(s string) {
m.values = append(m.values, s)
}
-func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
+func (*equalMultiStringSliceMatcher) addPrefix(string, bool, StringMatcher) {
panic("not implemented")
}
@@ -897,7 +895,7 @@ func toNormalisedLowerSlow(s string, i int, a []byte) string {
// (including an empty one) as far as it doesn't contain any newline character.
type anyStringWithoutNewlineMatcher struct{}
-func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
+func (anyStringWithoutNewlineMatcher) Matches(s string) bool {
// We need to make sure it doesn't contain a newline. Since the newline is
// an ASCII character, we can use strings.IndexByte().
return strings.IndexByte(s, '\n') == -1
@@ -947,7 +945,7 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
// trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{}
-func (m trueMatcher) Matches(_ string) bool {
+func (trueMatcher) Matches(string) bool {
return true
}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
index d060def4811..66020799e9a 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
@@ -62,8 +62,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
r := strings.NewReplacer("\"", "", "{", "", "}", "")
s := r.Replace(scanner.Text())
- labelChunks := strings.Split(s, ",")
- for _, labelChunk := range labelChunks {
+ for labelChunk := range strings.SplitSeq(s, ",") {
split := strings.Split(labelChunk, ":")
b.Add(split[0], split[1])
}
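strings.SplitSeq (Go 1.24+) yields substrings lazily, avoiding the []string allocation that strings.Split makes; a small sketch with assumed input:

```go
import "strings"

// parsePairs splits "a:1,b:2" into a map without allocating an
// intermediate slice of chunks.
func parsePairs(s string) map[string]string {
	out := map[string]string{}
	for chunk := range strings.SplitSeq(s, ",") {
		if kv := strings.SplitN(chunk, ":", 2); len(kv) == 2 {
			out[kv[0]] = kv[1]
		}
	}
	return out
}
```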
diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
index 70daef426f5..f7085037fdb 100644
--- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
+++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
@@ -69,7 +69,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (a *Action) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -86,7 +86,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
type Config struct {
// A list of labels from which values are taken and concatenated
// with the configured separator in order.
- SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"sourceLabels,omitempty"`
+ SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"source_labels,omitempty"`
// Separator is the string between concatenated values from the source labels.
Separator string `yaml:"separator,omitempty" json:"separator,omitempty"`
// Regex against which the concatenation is matched.
@@ -95,15 +95,17 @@ type Config struct {
Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"`
// TargetLabel is the label to which the resulting string is written in a replacement.
// Regexp interpolation is allowed for the replace action.
- TargetLabel string `yaml:"target_label,omitempty" json:"targetLabel,omitempty"`
+ TargetLabel string `yaml:"target_label,omitempty" json:"target_label,omitempty"`
// Replacement is the regex replacement pattern to be used.
Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"`
// Action is the action to be performed for the relabeling.
Action Action `yaml:"action,omitempty" json:"action,omitempty"`
+ // NameValidationScheme to use when validating labels.
+ NameValidationScheme model.ValidationScheme `yaml:"-" json:"-"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRelabelConfig
type plain Config
if err := unmarshal((*plain)(c)); err != nil {
@@ -112,10 +114,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.Regex.Regexp == nil {
c.Regex = MustNewRegexp("")
}
- return c.Validate()
+ return nil
}
-func (c *Config) Validate() error {
+func (c *Config) Validate(nameValidationScheme model.ValidationScheme) error {
if c.Action == "" {
return errors.New("relabel action cannot be empty")
}
@@ -125,7 +127,17 @@ func (c *Config) Validate() error {
if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
}
- if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() {
+
+ // Relabel config validation scheme matches global if left blank.
+ switch c.NameValidationScheme {
+ case model.LegacyValidation, model.UTF8Validation:
+ case model.UnsetValidation:
+ c.NameValidationScheme = nameValidationScheme
+ default:
+ return fmt.Errorf("unknown relabel config name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.NameValidationScheme)
+ }
+
+ if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
@@ -133,12 +145,18 @@ func (c *Config) Validate() error {
// UTF-8 allows ${} characters, so standard validation allows $variables by default.
// TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values.
// Design escaping mechanism to allow that, once a valid use case appears.
- return model.LabelName(value).IsValid()
+ switch c.NameValidationScheme {
+ case model.UTF8Validation:
+ return c.NameValidationScheme.IsValidLabelName(value)
+ default:
+ // For legacy validation, use the legacy regex that allows $variables.
+ return relabelTargetLegacy.MatchString(value)
+ }
}
if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
- if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() {
+ if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
@@ -147,7 +165,7 @@ func (c *Config) Validate() error {
if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) {
return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
}
- if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
+ if c.Action == HashMod && !c.NameValidationScheme.IsValidLabelName(c.TargetLabel) {
return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
}
@@ -195,7 +213,7 @@ func MustNewRegexp(s string) Regexp {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (re *Regexp) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -209,7 +227,7 @@ func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements the yaml.Marshaler interface.
-func (re Regexp) MarshalYAML() (interface{}, error) {
+func (re Regexp) MarshalYAML() (any, error) {
if re.String() != "" {
return re.String(), nil
}
@@ -318,16 +336,16 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
if indexes == nil {
break
}
- target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes))
- if !target.IsValid() {
+ target := string(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes))
+ if !cfg.NameValidationScheme.IsValidLabelName(target) {
break
}
res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes)
if len(res) == 0 {
- lb.Del(string(target))
+ lb.Del(target)
break
}
- lb.Set(string(target), string(res))
+ lb.Set(target, string(res))
case Lowercase:
lb.Set(cfg.TargetLabel, strings.ToLower(val))
case Uppercase:
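With Validate now taking the fallback scheme explicitly, callers look roughly like the following sketch (field values are illustrative):

```go
import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/relabel"
)

func validateRule() error {
	cfg := relabel.Config{
		SourceLabels: model.LabelNames{"__name__"},
		Regex:        relabel.MustNewRegexp("(.+)"),
		TargetLabel:  "metric_name",
		Replacement:  "$1",
		Action:       relabel.Replace,
		// NameValidationScheme left unset: Validate falls back to the
		// scheme passed in below.
	}
	return cfg.Validate(model.UTF8Validation)
}
```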
diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
index 9b1c897a983..83203ba7691 100644
--- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
+++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go
@@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/template"
+ "github.com/prometheus/prometheus/util/namevalidationutil"
)
// Error represents semantic errors on parsing rule groups.
@@ -96,7 +97,12 @@ type ruleGroups struct {
}
// Validate validates all rules in the rule groups.
-func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
+func (g *RuleGroups) Validate(node ruleGroups, nameValidationScheme model.ValidationScheme) (errs []error) {
+ if err := namevalidationutil.CheckNameValidationScheme(nameValidationScheme); err != nil {
+ errs = append(errs, err)
+ return errs
+ }
+
set := map[string]struct{}{}
for j, g := range g.Groups {
@@ -112,7 +118,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
}
for k, v := range g.Labels {
- if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+ if !nameValidationScheme.IsValidLabelName(k) || k == model.MetricNameLabel {
errs = append(
errs, fmt.Errorf("invalid label name: %s", k),
)
@@ -128,7 +134,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
set[g.Name] = struct{}{}
for i, r := range g.Rules {
- for _, node := range r.Validate(node.Groups[j].Rules[i]) {
+ for _, node := range r.Validate(node.Groups[j].Rules[i], nameValidationScheme) {
var ruleName string
if r.Alert != "" {
ruleName = r.Alert
@@ -192,7 +198,7 @@ type RuleNode struct {
}
// Validate the rule and return a list of encountered errors.
-func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
+func (r *Rule) Validate(node RuleNode, nameValidationScheme model.ValidationScheme) (nodes []WrappedError) {
if r.Record != "" && r.Alert != "" {
nodes = append(nodes, WrappedError{
err: errors.New("only one of 'record' and 'alert' must be set"),
@@ -238,7 +244,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
node: &node.Record,
})
}
- if !model.IsValidMetricName(model.LabelValue(r.Record)) {
+ if !nameValidationScheme.IsValidMetricName(r.Record) {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid recording rule name: %s", r.Record),
node: &node.Record,
@@ -255,7 +261,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
}
for k, v := range r.Labels {
- if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+ if !nameValidationScheme.IsValidLabelName(k) || k == model.MetricNameLabel {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid label name: %s", k),
})
@@ -269,7 +275,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
}
for k := range r.Annotations {
- if !model.LabelName(k).IsValid() {
+ if !nameValidationScheme.IsValidLabelName(k) {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid annotation name: %s", k),
})
@@ -280,7 +286,7 @@ func (r *Rule) Validate(node RuleNode) (nodes []WrappedError) {
nodes = append(nodes, WrappedError{err: err})
}
- return
+ return nodes
}
// testTemplateParsing checks if the templates used in labels and annotations
@@ -333,7 +339,7 @@ func testTemplateParsing(rl *Rule) (errs []error) {
}
// Parse parses and validates a set of rules.
-func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) {
+func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
var (
groups RuleGroups
node ruleGroups
@@ -358,16 +364,16 @@ func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) {
return nil, errs
}
- return &groups, groups.Validate(node)
+ return &groups, groups.Validate(node, nameValidationScheme)
}
// ParseFile reads and parses rules from a file.
-func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) {
+func ParseFile(file string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*RuleGroups, []error) {
b, err := os.ReadFile(file)
if err != nil {
return nil, []error{fmt.Errorf("%s: %w", file, err)}
}
- rgs, errs := Parse(b, ignoreUnknownFields)
+ rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme)
for i := range errs {
errs[i] = fmt.Errorf("%s: %w", file, errs[i])
}
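Correspondingly, rule-file callers now choose the validation scheme at parse time; a hedged sketch:

```go
import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/rulefmt"
)

// loadRules parses a rule file, validating label and metric names under
// UTF-8 rules; pass model.LegacyValidation instead to keep the older behavior.
func loadRules(path string) (*rulefmt.RuleGroups, []error) {
	return rulefmt.ParseFile(path, false, model.UTF8Validation)
}
```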
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go
index c97e1f02eee..37b1b761a03 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go
@@ -122,6 +122,39 @@ func extractMediaType(contentType, fallbackType string) (string, error) {
return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType)
}
+type ParserOptions struct {
+ // EnableTypeAndUnitLabels enables parsing and inclusion of type and unit labels
+ // in the parsed metrics.
+ EnableTypeAndUnitLabels bool
+
+ // IgnoreNativeHistograms causes the parser to completely ignore all
+ // parts of native histograms, but to keep the ability to convert
+ // classic histograms to NHCB. This has the implication that even a
+ // histogram that has some native parts but not a single classic bucket
+ // will be parsed as a classic histogram (with only the +Inf bucket and
+ // count and sum). Setting this also allows converting a classic
+ // histogram that already has a native representation to an NHCB. This
+ // option has no effect on parsers for formats that do not support
+ // native histograms.
+ IgnoreNativeHistograms bool
+
+ // ConvertClassicHistogramsToNHCB enables conversion of classic histograms
+ // to native histogram custom buckets (NHCB) format.
+ ConvertClassicHistogramsToNHCB bool
+
+ // KeepClassicOnClassicAndNativeHistograms causes the parser to also output the
+ // classic histogram when it is present alongside a native histogram. (Proto parsing only.)
+ KeepClassicOnClassicAndNativeHistograms bool
+
+ // OpenMetricsSkipCTSeries determines whether to skip `_created` timestamp series
+ // during parsing. (OpenMetrics parsing only.)
+ OpenMetricsSkipCTSeries bool
+
+ // FallbackContentType specifies the fallback content type to use when the provided
+ // Content-Type header cannot be parsed or is not supported.
+ FallbackContentType string
+}
+
// New returns a new parser of the byte slice.
//
// This function no longer guarantees to return a valid parser.
@@ -130,23 +163,41 @@ func extractMediaType(contentType, fallbackType string) (string, error) {
// An error may also be returned if fallbackType had to be used or there was some
// other error parsing the supplied Content-Type.
// If the returned parser is nil then the scrape must fail.
-func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries, enableTypeAndUnitLabels bool, st *labels.SymbolTable) (Parser, error) {
- mediaType, err := extractMediaType(contentType, fallbackType)
+func New(b []byte, contentType string, st *labels.SymbolTable, opts ParserOptions) (Parser, error) {
+ if st == nil {
+ st = labels.NewSymbolTable()
+ }
+
+ mediaType, err := extractMediaType(contentType, opts.FallbackContentType)
// err may be nil or something we want to warn about.
+ var baseParser Parser
switch mediaType {
case "application/openmetrics-text":
- return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
- o.skipCTSeries = skipOMCTSeries
- o.enableTypeAndUnitLabels = enableTypeAndUnitLabels
- }), err
+ baseParser = NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
+ o.skipCTSeries = opts.OpenMetricsSkipCTSeries
+ o.enableTypeAndUnitLabels = opts.EnableTypeAndUnitLabels
+ })
case "application/vnd.google.protobuf":
- return NewProtobufParser(b, parseClassicHistograms, enableTypeAndUnitLabels, st), err
+ return NewProtobufParser(
+ b,
+ opts.IgnoreNativeHistograms,
+ opts.KeepClassicOnClassicAndNativeHistograms,
+ opts.ConvertClassicHistogramsToNHCB,
+ opts.EnableTypeAndUnitLabels,
+ st,
+ ), err
case "text/plain":
- return NewPromParser(b, st, enableTypeAndUnitLabels), err
+ baseParser = NewPromParser(b, st, opts.EnableTypeAndUnitLabels)
default:
return nil, err
}
+
+ if baseParser != nil && opts.ConvertClassicHistogramsToNHCB {
+ baseParser = NewNHCBParser(baseParser, st, opts.KeepClassicOnClassicAndNativeHistograms)
+ }
+
+ return baseParser, err
}
// Entry represents the type of a parsed entry.
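The boolean-heavy New signature is gone; a sketch of the options-struct call, with illustrative option values:

```go
import "github.com/prometheus/prometheus/model/textparse"

func newScrapeParser(payload []byte, contentType string) (textparse.Parser, error) {
	// A nil SymbolTable is now permitted; New allocates one internally.
	return textparse.New(payload, contentType, nil, textparse.ParserOptions{
		ConvertClassicHistogramsToNHCB: true,
		FallbackContentType:            "text/plain",
	})
}
```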
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go
index e7cfcc028ef..8ec541de8ab 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go
@@ -18,7 +18,6 @@ import (
"io"
"math"
"strconv"
- "strings"
"github.com/prometheus/common/model"
@@ -373,7 +372,16 @@ func (p *NHCBParser) processNHCB() bool {
p.hNHCB = nil
p.fhNHCB = fh
}
- p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",")
+
+ lblsWithoutMetricName := p.tempLsetNHCB.DropReserved(func(n string) bool { return n == labels.MetricName })
+ // Ensure we return `metric` instead of `metric{}` for name-only
+ // series, for consistency with wrapped parsers.
+ if lblsWithoutMetricName.IsEmpty() {
+ p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName)
+ } else {
+ p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + lblsWithoutMetricName.StringNoSpace()
+ }
+
p.bytesNHCB = []byte(p.metricStringNHCB)
p.lsetNHCB = p.tempLsetNHCB
p.swapExemplars()
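A sketch of the string shape the new branch produces, using only helpers visible in the hunk (the metric name and labels are assumed examples):

```go
import "github.com/prometheus/prometheus/model/labels"

func metricString(lset labels.Labels) string {
	name := lset.Get(labels.MetricName)
	rest := lset.DropReserved(func(n string) bool { return n == labels.MetricName })
	if rest.IsEmpty() {
		return name // "metric", never "metric{}"
	}
	// StringNoSpace renders {code="200",method="get"} without spaces
	// after the commas, matching the wrapped parsers' output.
	return name + rest.StringNoSpace()
}
```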
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go
index c0b2fcdb4d8..a99bb9df8ee 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go
@@ -74,10 +74,10 @@ yystart1:
yystate2:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate3
}
@@ -101,43 +101,43 @@ yystate5:
yystate6:
c = l.next()
yystart6:
- switch {
+ switch c {
default:
goto yyabort
- case c == 'E':
+ case 'E':
goto yystate7
- case c == 'H':
+ case 'H':
goto yystate11
- case c == 'T':
+ case 'T':
goto yystate16
- case c == 'U':
+ case 'U':
goto yystate21
}
yystate7:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'O':
+ case 'O':
goto yystate8
}
yystate8:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'F':
+ case 'F':
goto yystate9
}
yystate9:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule5
- case c == '\n':
+ case '\n':
goto yystate10
}
@@ -147,37 +147,37 @@ yystate10:
yystate11:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'E':
+ case 'E':
goto yystate12
}
yystate12:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'L':
+ case 'L':
goto yystate13
}
yystate13:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'P':
+ case 'P':
goto yystate14
}
yystate14:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate15
}
@@ -187,37 +187,37 @@ yystate15:
yystate16:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'Y':
+ case 'Y':
goto yystate17
}
yystate17:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'P':
+ case 'P':
goto yystate18
}
yystate18:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'E':
+ case 'E':
goto yystate19
}
yystate19:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate20
}
@@ -227,37 +227,37 @@ yystate20:
yystate21:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'N':
+ case 'N':
goto yystate22
}
yystate22:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'I':
+ case 'I':
goto yystate23
}
yystate23:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'T':
+ case 'T':
goto yystate24
}
yystate24:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate25
}
@@ -315,10 +315,10 @@ yystate30:
yystate31:
c = l.next()
yystart31:
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate32
}
@@ -405,10 +405,10 @@ yystate41:
yystate42:
c = l.next()
yystart42:
- switch {
+ switch c {
default:
goto yyabort
- case c == '"':
+ case '"':
goto yystate43
}
@@ -441,12 +441,12 @@ yystate45:
yystate46:
c = l.next()
yystart46:
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate47
- case c == '{':
+ case '{':
goto yystate49
}
@@ -475,12 +475,12 @@ yystate49:
yystate50:
c = l.next()
yystart50:
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate52
- case c == '\n':
+ case '\n':
goto yystate51
}
@@ -521,10 +521,10 @@ yystate54:
yystate55:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == '{':
+ case '{':
goto yystate56
}
@@ -600,12 +600,12 @@ yystate64:
yystate65:
c = l.next()
yystart65:
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate66
- case c == '"':
+ case '"':
goto yystate68
}
@@ -656,12 +656,12 @@ yystate70:
yystate71:
c = l.next()
yystart71:
- switch {
+ switch c {
default:
goto yyabort
- case c == ' ':
+ case ' ':
goto yystate73
- case c == '\n':
+ case '\n':
goto yystate72
}
@@ -696,61 +696,61 @@ yyrule2: // HELP{S}
{
l.state = sMeta1
return tHelp
- goto yystate0
+
}
yyrule3: // TYPE{S}
{
l.state = sMeta1
return tType
- goto yystate0
+
}
yyrule4: // UNIT{S}
{
l.state = sMeta1
return tUnit
- goto yystate0
+
}
yyrule5: // "EOF"\n?
{
l.state = sInit
return tEOFWord
- goto yystate0
+
}
yyrule6: // \"(\\.|[^\\"])*\"
{
l.state = sMeta2
return tMName
- goto yystate0
+
}
yyrule7: // {M}({M}|{D})*
{
l.state = sMeta2
return tMName
- goto yystate0
+
}
yyrule8: // {S}{C}*\n
{
l.state = sInit
return tText
- goto yystate0
+
}
yyrule9: // {M}({M}|{D})*
{
l.state = sValue
return tMName
- goto yystate0
+
}
yyrule10: // \{
{
l.state = sLabels
return tBraceOpen
- goto yystate0
+
}
yyrule11: // \{
{
l.state = sLabels
return tBraceOpen
- goto yystate0
+
}
yyrule12: // {L}({L}|{D})*
{
@@ -760,19 +760,19 @@ yyrule13: // \"(\\.|[^\\"])*\"
{
l.state = sLabels
return tQString
- goto yystate0
+
}
yyrule14: // \}
{
l.state = sValue
return tBraceClose
- goto yystate0
+
}
yyrule15: // =
{
l.state = sLValue
return tEqual
- goto yystate0
+
}
yyrule16: // ,
{
@@ -782,13 +782,13 @@ yyrule17: // \"(\\.|[^\\"\n])*\"
{
l.state = sLabels
return tLValue
- goto yystate0
+
}
yyrule18: // {S}[^ \n]+
{
l.state = sTimestamp
return tValue
- goto yystate0
+
}
yyrule19: // {S}[^ \n]+
{
@@ -798,13 +798,13 @@ yyrule20: // \n
{
l.state = sInit
return tLinebreak
- goto yystate0
+
}
yyrule21: // {S}#{S}\{
{
l.state = sExemplar
return tComment
- goto yystate0
+
}
yyrule22: // {L}({L}|{D})*
{
@@ -814,25 +814,25 @@ yyrule23: // \"(\\.|[^\\"\n])*\"
{
l.state = sExemplar
return tQString
- goto yystate0
+
}
yyrule24: // \}
{
l.state = sEValue
return tBraceClose
- goto yystate0
+
}
yyrule25: // =
{
l.state = sEValue
return tEqual
- goto yystate0
+
}
yyrule26: // \"(\\.|[^\\"\n])*\"
{
l.state = sExemplar
return tLValue
- goto yystate0
+
}
yyrule27: // ,
{
@@ -842,7 +842,7 @@ yyrule28: // {S}[^ \n]+
{
l.state = sETimestamp
return tValue
- goto yystate0
+
}
yyrule29: // {S}[^ \n]+
{
@@ -852,7 +852,7 @@ yyrule30: // \n
if true { // avoid go vet determining the below panic will not be reached
l.state = sInit
return tLinebreak
- goto yystate0
+
}
panic("unreachable")
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go
index a0d259ce7c4..505e45fc40e 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go
@@ -172,7 +172,7 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not
// support sparse histograms yet.
-func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+func (*OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil
}
@@ -436,10 +436,7 @@ func (p *OpenMetricsParser) nextToken() token {
}
func (p *OpenMetricsParser) parseError(exp string, got token) error {
- e := p.l.i + 1
- if len(p.l.b) < e {
- e = len(p.l.b)
- }
+ e := min(len(p.l.b), p.l.i+1)
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}
@@ -776,7 +773,7 @@ func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string {
if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) {
f, err := strconv.ParseFloat(v, 64)
if err == nil {
- return formatOpenMetricsFloat(f)
+ return labels.FormatOpenMetricsFloat(f)
}
}
return v
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go
index a083e5549b6..9ff5d6c9f86 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go
@@ -92,10 +92,10 @@ yystate2:
yystate3:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule3
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate3
}
@@ -105,19 +105,19 @@ yystate4:
yystate5:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule5
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate6
}
yystate6:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule4
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate6
}
@@ -137,104 +137,104 @@ yystate8:
yystate9:
c = l.next()
yystart9:
- switch {
+ switch c {
default:
goto yyabort
- case c == 'H':
+ case 'H':
goto yystate10
- case c == 'T':
+ case 'T':
goto yystate15
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate3
}
yystate10:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'E':
+ case 'E':
goto yystate11
}
yystate11:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'L':
+ case 'L':
goto yystate12
}
yystate12:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'P':
+ case 'P':
goto yystate13
}
yystate13:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate14
}
yystate14:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule6
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate14
}
yystate15:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'Y':
+ case 'Y':
goto yystate16
}
yystate16:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'P':
+ case 'P':
goto yystate17
}
yystate17:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == 'E':
+ case 'E':
goto yystate18
}
yystate18:
c = l.next()
- switch {
+ switch c {
default:
goto yyabort
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate19
}
yystate19:
c = l.next()
- switch {
+ switch c {
default:
goto yyrule7
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate19
}
@@ -389,12 +389,12 @@ yystate35:
yystate36:
c = l.next()
yystart36:
- switch {
+ switch c {
default:
goto yyabort
- case c == '"':
+ case '"':
goto yystate37
- case c == '\t' || c == ' ':
+ case '\t', ' ':
goto yystate3
}
@@ -486,7 +486,7 @@ yyrule2: // \n
{
l.state = sInit
return tLinebreak
- goto yystate0
+
}
yyrule3: // [ \t]+
{
@@ -505,49 +505,49 @@ yyrule6: // HELP[\t ]+
{
l.state = sMeta1
return tHelp
- goto yystate0
+
}
yyrule7: // TYPE[\t ]+
{
l.state = sMeta1
return tType
- goto yystate0
+
}
yyrule8: // \"(\\.|[^\\"])*\"
{
l.state = sMeta2
return tMName
- goto yystate0
+
}
yyrule9: // {M}({M}|{D})*
{
l.state = sMeta2
return tMName
- goto yystate0
+
}
yyrule10: // {C}*
{
l.state = sInit
return tText
- goto yystate0
+
}
yyrule11: // {M}({M}|{D})*
{
l.state = sValue
return tMName
- goto yystate0
+
}
yyrule12: // \{
{
l.state = sLabels
return tBraceOpen
- goto yystate0
+
}
yyrule13: // \{
{
l.state = sLabels
return tBraceOpen
- goto yystate0
+
}
yyrule14: // {L}({L}|{D})*
{
@@ -557,19 +557,19 @@ yyrule15: // \"(\\.|[^\\"])*\"
{
l.state = sLabels
return tQString
- goto yystate0
+
}
yyrule16: // \}
{
l.state = sValue
return tBraceClose
- goto yystate0
+
}
yyrule17: // =
{
l.state = sLValue
return tEqual
- goto yystate0
+
}
yyrule18: // ,
{
@@ -579,13 +579,13 @@ yyrule19: // \"(\\.|[^\\"])*\"
{
l.state = sLabels
return tLValue
- goto yystate0
+
}
yyrule20: // [^{ \t\n]+
{
l.state = sTimestamp
return tValue
- goto yystate0
+
}
yyrule21: // {D}+
{
@@ -595,7 +595,7 @@ yyrule22: // \n
if true { // avoid go vet determining the below panic will not be reached
l.state = sInit
return tLinebreak
- goto yystate0
+
}
panic("unreachable")
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go
index 5f828d26dda..2b4b750b4d7 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go
@@ -189,7 +189,7 @@ func (p *PromParser) Series() ([]byte, *int64, float64) {
// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text
// format does not support sparse histograms yet.
-func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+func (*PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
return nil, nil, nil, nil
}
@@ -216,7 +216,7 @@ func (p *PromParser) Type() ([]byte, model.MetricType) {
// Unit returns the metric name and unit in the current entry.
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
-func (p *PromParser) Unit() ([]byte, []byte) {
+func (*PromParser) Unit() ([]byte, []byte) {
// The Prometheus format does not have units.
return nil, nil
}
@@ -270,13 +270,13 @@ func (p *PromParser) Labels(l *labels.Labels) {
// Exemplar implements the Parser interface. However, since the classic
// Prometheus text format does not support exemplars, this implementation simply
// returns false and does nothing else.
-func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
+func (*PromParser) Exemplar(*exemplar.Exemplar) bool {
return false
}
// CreatedTimestamp returns 0 as it's not implemented yet.
// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
-func (p *PromParser) CreatedTimestamp() int64 {
+func (*PromParser) CreatedTimestamp() int64 {
return 0
}
@@ -291,10 +291,7 @@ func (p *PromParser) nextToken() token {
}
func (p *PromParser) parseError(exp string, got token) error {
- e := p.l.i + 1
- if len(p.l.b) < e {
- e = len(p.l.b)
- }
+ e := min(len(p.l.b), p.l.i+1)
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
index 2ca6c03af71..800f02085e2 100644
--- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
@@ -19,9 +19,7 @@ import (
"fmt"
"io"
"math"
- "strconv"
"strings"
- "sync"
"unicode/utf8"
"github.com/gogo/protobuf/types"
@@ -32,17 +30,9 @@ import (
"github.com/prometheus/prometheus/model/labels"
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
"github.com/prometheus/prometheus/schema"
+ "github.com/prometheus/prometheus/util/convertnhcb"
)
-// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
-var floatFormatBufPool = sync.Pool{
- New: func() interface{} {
- // To contain at most 17 digits and additional syntax for a float64.
- b := make([]byte, 0, 24)
- return &b
- },
-}
-
// ProtobufParser parses the old Prometheus protobuf format and present it
// as the text-style textparse.Parser interface.
//
@@ -76,22 +66,42 @@ type ProtobufParser struct {
// that we have to decode the next MetricDescriptor.
state Entry
+ // Whether to completely ignore any native parts of histograms.
+ ignoreNativeHistograms bool
// Whether to also parse a classic histogram that is also present as a
// native histogram.
- parseClassicHistograms bool
+ parseClassicHistograms bool
+ // Whether to add type and unit labels.
enableTypeAndUnitLabels bool
+
+ // Whether to convert classic histograms to native histograms with custom buckets.
+ convertClassicHistogramsToNHCB bool
+ // Reusable classic-to-NHCB converter.
+ tmpNHCB convertnhcb.TempHistogram
+ // We need to preload NHCB since we cannot do error handling in Histogram().
+ nhcbH *histogram.Histogram
+ nhcbFH *histogram.FloatHistogram
}
// NewProtobufParser returns a parser for the payload in the byte slice.
-func NewProtobufParser(b []byte, parseClassicHistograms, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser {
+func NewProtobufParser(
+ b []byte,
+ ignoreNativeHistograms, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels bool,
+ st *labels.SymbolTable,
+) Parser {
+ builder := labels.NewScratchBuilderWithSymbolTable(st, 16)
+ builder.SetUnsafeAdd(true)
return &ProtobufParser{
dec: dto.NewMetricStreamingDecoder(b),
entryBytes: &bytes.Buffer{},
- builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder.
-
- state: EntryInvalid,
- parseClassicHistograms: parseClassicHistograms,
- enableTypeAndUnitLabels: enableTypeAndUnitLabels,
+ builder: builder,
+
+ state: EntryInvalid,
+ ignoreNativeHistograms: ignoreNativeHistograms,
+ parseClassicHistograms: parseClassicHistograms,
+ enableTypeAndUnitLabels: enableTypeAndUnitLabels,
+ convertClassicHistogramsToNHCB: convertClassicHistogramsToNHCB,
+ tmpNHCB: convertnhcb.NewTempHistogram(),
}
}
@@ -182,6 +192,15 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
h = p.dec.GetHistogram()
)
+ if p.ignoreNativeHistograms || !isNativeHistogram(h) {
+ // This only happens if we have a classic histogram that
+ // we already converted to NHCB in Next.
+ if *ts != 0 {
+ return p.entryBytes.Bytes(), ts, p.nhcbH, p.nhcbFH
+ }
+ return p.entryBytes.Bytes(), nil, p.nhcbH, p.nhcbFH
+ }
+
if p.parseClassicHistograms && len(h.GetBucket()) > 0 {
p.redoClassic = true
}
@@ -299,7 +318,7 @@ func (p *ProtobufParser) Unit() ([]byte, []byte) {
// Comment always returns nil because comments aren't supported by the protobuf
// format.
-func (p *ProtobufParser) Comment() []byte {
+func (*ProtobufParser) Comment() []byte {
return nil
}
@@ -406,6 +425,8 @@ func (p *ProtobufParser) CreatedTimestamp() int64 {
// read.
func (p *ProtobufParser) Next() (Entry, error) {
p.exemplarReturned = false
+ p.nhcbH = nil
+ p.nhcbFH = nil
switch p.state {
// Invalid state occurs on:
// * First Next() call.
@@ -428,7 +449,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
// We are at the beginning of a metric family. Put only the name
// into entryBytes and validate only name, help, and type for now.
name := p.dec.GetName()
- if !model.IsValidMetricName(model.LabelValue(name)) {
+ if !model.UTF8Validation.IsValidMetricName(name) {
return EntryInvalid, fmt.Errorf("invalid metric name: %s", name)
}
if help := p.dec.GetHelp(); !utf8.ValidString(help) {
@@ -468,8 +489,12 @@ func (p *ProtobufParser) Next() (Entry, error) {
p.state = EntryType
case EntryType:
t := p.dec.GetType()
- if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
- isNativeHistogram(p.dec.GetHistogram()) {
+ if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
+ if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
+ p.state = EntrySeries
+ p.fieldPos = -3 // We have not returned anything, let p.Next() increment it to -2.
+ return p.Next()
+ }
p.state = EntryHistogram
} else {
p.state = EntrySeries
@@ -480,14 +505,19 @@ func (p *ProtobufParser) Next() (Entry, error) {
case EntrySeries:
// Potentially a second series in the metric family.
t := p.dec.GetType()
+ decodeNext := true
if t == dto.MetricType_SUMMARY ||
t == dto.MetricType_HISTOGRAM ||
t == dto.MetricType_GAUGE_HISTOGRAM {
// Non-trivial series (complex metrics, with magic suffixes).
+ isClassicHistogram := (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+ (p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()))
+ skipSeries := p.convertClassicHistogramsToNHCB && isClassicHistogram && !p.parseClassicHistograms
+
// Did we iterate over all the classic representation's fields?
// NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate.
- if !p.fieldsDone {
+ if !p.fieldsDone && !skipSeries {
// Still some fields to iterate over.
p.fieldPos++
if err := p.onSeriesOrHistogramUpdate(); err != nil {
@@ -504,25 +534,39 @@ func (p *ProtobufParser) Next() (Entry, error) {
// If this is a metric family containing native
// histograms, it means we are here thanks to redoClassic state.
// Return to native histograms for the consistent flow.
- if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
- isNativeHistogram(p.dec.GetHistogram()) {
- p.state = EntryHistogram
+ // If this is a metric family containing classic histograms,
+ // it means we might need to do NHCB conversion.
+ if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
+ if !isClassicHistogram {
+ p.state = EntryHistogram
+ } else if p.convertClassicHistogramsToNHCB {
+ // We still need to spit out the NHCB.
+ var err error
+ p.nhcbH, p.nhcbFH, err = p.convertToNHCB(t)
+ if err != nil {
+ return EntryInvalid, err
+ }
+ p.state = EntryHistogram
+ // We have an NHCB to emit, no need to decode the next series.
+ decodeNext = false
+ }
}
}
// Is there another series?
- if err := p.dec.NextMetric(); err != nil {
- if errors.Is(err, io.EOF) {
- p.state = EntryInvalid
- return p.Next()
+ if decodeNext {
+ if err := p.dec.NextMetric(); err != nil {
+ if errors.Is(err, io.EOF) {
+ p.state = EntryInvalid
+ return p.Next()
+ }
+ return EntryInvalid, err
}
- return EntryInvalid, err
}
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
case EntryHistogram:
- // Was Histogram() called and parseClassicHistograms is true?
- if p.redoClassic {
+ switchToClassic := func() (Entry, error) {
p.redoClassic = false
p.fieldPos = -3
p.fieldsDone = false
@@ -530,6 +574,11 @@ func (p *ProtobufParser) Next() (Entry, error) {
return p.Next() // Switch to classic histogram.
}
+ // Was Histogram() called and parseClassicHistograms is true?
+ if p.redoClassic {
+ return switchToClassic()
+ }
+
// Is there another series?
if err := p.dec.NextMetric(); err != nil {
if errors.Is(err, io.EOF) {
@@ -538,6 +587,15 @@ func (p *ProtobufParser) Next() (Entry, error) {
}
return EntryInvalid, err
}
+
+ // If this metric is not a native histogram or we are ignoring
+ // native histograms, it means we are here thanks to NHCB
+ // conversion. Return to classic histograms for the consistent
+ // flow.
+ if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
+ return switchToClassic()
+ }
+
if err := p.onSeriesOrHistogramUpdate(); err != nil {
return EntryInvalid, err
}
@@ -564,10 +622,7 @@ func (p *ProtobufParser) onSeriesOrHistogramUpdate() error {
Unit: p.dec.GetUnit(),
}
m.AddToLabels(&p.builder)
- if err := p.dec.Label(schema.IgnoreOverriddenMetadataLabelsScratchBuilder{
- Overwrite: m,
- ScratchBuilder: &p.builder,
- }); err != nil {
+ if err := p.dec.Label(m.NewIgnoreOverriddenMetadataLabelScratchBuilder(&p.builder)); err != nil {
return err
}
} else {
@@ -632,7 +687,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
qq := p.dec.GetSummary().GetQuantile()
q := qq[p.fieldPos]
p.fieldsDone = p.fieldPos == len(qq)-1
- return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
+ return true, model.QuantileLabel, labels.FormatOpenMetricsFloat(q.GetQuantile())
case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
bb := p.dec.GetHistogram().GetBucket()
if p.fieldPos >= len(bb) {
@@ -641,41 +696,11 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
}
b := bb[p.fieldPos]
p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
- return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
+ return true, model.BucketLabel, labels.FormatOpenMetricsFloat(b.GetUpperBound())
}
return false, "", ""
}
-// formatOpenMetricsFloat works like the usual Go string formatting of a float
-// but appends ".0" if the resulting number would otherwise contain neither a
-// "." nor an "e".
-func formatOpenMetricsFloat(f float64) string {
- // A few common cases hardcoded.
- switch {
- case f == 1:
- return "1.0"
- case f == 0:
- return "0.0"
- case f == -1:
- return "-1.0"
- case math.IsNaN(f):
- return "NaN"
- case math.IsInf(f, +1):
- return "+Inf"
- case math.IsInf(f, -1):
- return "-Inf"
- }
- bp := floatFormatBufPool.Get().(*[]byte)
- defer floatFormatBufPool.Put(bp)
-
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- if bytes.ContainsAny(*bp, "e.") {
- return string(*bp)
- }
- *bp = append(*bp, '.', '0')
- return string(*bp)
-}
-
// isNativeHistogram returns false iff the provided histogram has no spans at
// all (neither positive nor negative) and a zero threshold of 0 and a zero
// count of 0. In principle, this could still be meant to be a native histogram
@@ -690,3 +715,43 @@ func isNativeHistogram(h *dto.Histogram) bool {
h.GetZeroThreshold() > 0 ||
h.GetZeroCount() > 0
}
+
+func (p *ProtobufParser) convertToNHCB(t dto.MetricType) (*histogram.Histogram, *histogram.FloatHistogram, error) {
+ h := p.dec.GetHistogram()
+ p.tmpNHCB.Reset()
+ // TODO(krajorama): convertnhcb should support setting integer mode up
+ // front since we know it here. That would avoid the converter having
+ // to guess it based on counts.
+ v := h.GetSampleCountFloat()
+ if v == 0 {
+ v = float64(h.GetSampleCount())
+ }
+ if err := p.tmpNHCB.SetCount(v); err != nil {
+ return nil, nil, err
+ }
+
+ if err := p.tmpNHCB.SetSum(h.GetSampleSum()); err != nil {
+ return nil, nil, err
+ }
+ for _, b := range h.GetBucket() {
+ v := b.GetCumulativeCountFloat()
+ if v == 0 {
+ v = float64(b.GetCumulativeCount())
+ }
+ if err := p.tmpNHCB.SetBucketCount(b.GetUpperBound(), v); err != nil {
+ return nil, nil, err
+ }
+ }
+ ch, cfh, err := p.tmpNHCB.Convert()
+ if err != nil {
+ return nil, nil, err
+ }
+ if t == dto.MetricType_GAUGE_HISTOGRAM {
+ if ch != nil {
+ ch.CounterResetHint = histogram.GaugeType
+ } else {
+ cfh.CounterResetHint = histogram.GaugeType
+ }
+ }
+ return ch, cfh, nil
+}
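A sketch of the convertnhcb flow that convertToNHCB relies on, with made-up bucket data; the API calls are those used in the hunk above:

```go
import (
	"math"

	"github.com/prometheus/prometheus/util/convertnhcb"
)

func classicToNHCB() error {
	th := convertnhcb.NewTempHistogram()
	if err := th.SetCount(12); err != nil {
		return err
	}
	if err := th.SetSum(3.7); err != nil {
		return err
	}
	// Cumulative classic buckets, ending with the mandatory +Inf bucket.
	for _, b := range []struct{ le, count float64 }{
		{0.1, 4}, {1, 9}, {math.Inf(1), 12},
	} {
		if err := th.SetBucketCount(b.le, b.count); err != nil {
			return err
		}
	}
	h, fh, err := th.Convert()
	_, _ = h, fh // integer NHCB when counts are integral, float NHCB otherwise
	return err
}
```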
diff --git a/vendor/github.com/prometheus/prometheus/notifier/alert.go b/vendor/github.com/prometheus/prometheus/notifier/alert.go
index 88245c9a7f2..83e7a97fe03 100644
--- a/vendor/github.com/prometheus/prometheus/notifier/alert.go
+++ b/vendor/github.com/prometheus/prometheus/notifier/alert.go
@@ -84,7 +84,18 @@ func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Label
if !keep {
continue
}
- a.Labels = lb.Labels()
+
+ // If relabeling has altered the labels, create a new Alert to preserve immutability.
+ if !labels.Equal(a.Labels, lb.Labels()) {
+ a = &Alert{
+ Labels: lb.Labels(),
+ Annotations: a.Annotations,
+ StartsAt: a.StartsAt,
+ EndsAt: a.EndsAt,
+ GeneratorURL: a.GeneratorURL,
+ }
+ }
+
relabeledAlerts = append(relabeledAlerts, a)
}
return relabeledAlerts
diff --git a/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go b/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go
index 50471098add..c47c9ea23ad 100644
--- a/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go
+++ b/vendor/github.com/prometheus/prometheus/notifier/alertmanagerset.go
@@ -22,7 +22,7 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/sigv4"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
diff --git a/vendor/github.com/prometheus/prometheus/notifier/manager.go b/vendor/github.com/prometheus/prometheus/notifier/manager.go
index c9463b24a8d..65adfd5c3ea 100644
--- a/vendor/github.com/prometheus/prometheus/notifier/manager.go
+++ b/vendor/github.com/prometheus/prometheus/notifier/manager.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version"
@@ -92,7 +93,7 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
}
// NewManager is the manager constructor.
-func NewManager(o *Options, logger *slog.Logger) *Manager {
+func NewManager(o *Options, nameValidationScheme model.ValidationScheme, logger *slog.Logger) *Manager {
if o.Do == nil {
o.Do = do
}
@@ -104,6 +105,14 @@ func NewManager(o *Options, logger *slog.Logger) *Manager {
logger = promslog.NewNopLogger()
}
+ for _, rc := range o.RelabelConfigs {
+ switch rc.NameValidationScheme {
+ case model.LegacyValidation, model.UTF8Validation:
+ default:
+ rc.NameValidationScheme = nameValidationScheme
+ }
+ }
+
n := &Manager{
queue: make([]*Alert, 0, o.QueueCapacity),
more: make(chan struct{}, 1),
@@ -133,6 +142,13 @@ func (n *Manager) ApplyConfig(conf *config.Config) error {
n.opts.ExternalLabels = conf.GlobalConfig.ExternalLabels
n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs
+ for i, rc := range n.opts.RelabelConfigs {
+ switch rc.NameValidationScheme {
+ case model.LegacyValidation, model.UTF8Validation:
+ default:
+ n.opts.RelabelConfigs[i].NameValidationScheme = conf.GlobalConfig.MetricNameValidationScheme
+ }
+ }
amSets := make(map[string]*alertmanagerSet)
// configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig,
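Constructing the manager now requires the fallback scheme up front; a minimal sketch (queue size illustrative):

```go
import (
	"log/slog"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/notifier"
)

func newNotifier(logger *slog.Logger) *notifier.Manager {
	// The scheme seeds any alert relabel configs whose
	// NameValidationScheme was left unset.
	return notifier.NewManager(&notifier.Options{QueueCapacity: 10000}, model.UTF8Validation, logger)
}
```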
diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md
index a33d7bfb881..ed6d0fc0e3a 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/README.md
+++ b/vendor/github.com/prometheus/prometheus/prompb/README.md
@@ -1,9 +1,53 @@
-The compiled protobufs are version controlled and you won't normally need to
-re-compile them when building Prometheus.
+This directory contains Protocol Buffer (protobuf) definitions for Prometheus'
+remote read and write protocols. These definitions are used to serialize and
+deserialize time series data, such as metrics, labels, samples, and queries,
+for network communication with Prometheus.
-If however you have modified the defs and do need to re-compile, run
-`make proto` from the parent dir.
+The files here are synced to [buf.build](https://buf.build/prometheus/prometheus),
+a public protobuf schema registry, from the `main` branch of the Prometheus
+repository.
-In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in
-your PATH.
+## What This Package/Directory Hosts
+Protobuf messages and services for:
+- Remote Write: Sending time series data to Prometheus (e.g., `WriteRequest`,
+ `TimeSeries`).
+- Remote Read: Querying data from Prometheus (e.g., `ReadRequest`, `Query`,
+ `ChunkedReadResponse`).
+- Core types: Shared definitions like `Label`, `MetricMetadata`, and exemplars.
+
+Key files include:
+- `remote.proto`: Defines the remote read/write services and messages.
+- `types.proto`: Common types used across protocols.
+- `io/prometheus/client/metrics.proto`: Client metrics definitions.
+- `io/prometheus/write/v2/types.proto`: Remote Write v2 protocol types.
+
+## Stability Guarantees
+
+These protobuf definitions follow the stability policies of the Prometheus
+project. Backward-compatible changes may occur in minor releases, but breaking
+changes are reserved for major versions (e.g., Prometheus 3.0). Experimental
+or unstable features are clearly marked in the documentation.
+
+## Related Specifications
+
+- Remote Write Spec v1:
+ [https://prometheus.io/docs/specs/remote_write_spec/](https://prometheus.io/docs/specs/remote_write_spec/).
+- Remote Write Spec v2:
+ [https://prometheus.io/docs/specs/prw/remote_write_spec_2_0/](https://prometheus.io/docs/specs/prw/remote_write_spec_2_0/).
+- Experimental Prometheus client packages:
+ [https://github.com/prometheus/client_golang/tree/main/exp](https://github.com/prometheus/client_golang/tree/main/exp).
+
+## How to Change or Contribute
+
+To modify these definitions, view and edit the source in the Prometheus GitHub
+repository: [https://github.com/prometheus/prometheus/tree/main/prompb](https://github.com/prometheus/prometheus/tree/main/prompb).
+
+## How to Use
+
+### Steps
+
+- Run `make proto` in the root directory to regenerate the compiled protobuf
+ code.
+- The compiled Go code is version-controlled in the repository, so you
+ typically don't need to re-generate unless making changes.
diff --git a/vendor/github.com/prometheus/prometheus/prompb/codec.go b/vendor/github.com/prometheus/prometheus/prompb/codec.go
index b2574fd9e1f..6cc0cdc861f 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/codec.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/codec.go
@@ -130,7 +130,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
@@ -183,7 +183,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
index 983803846e8..6bc9600ab68 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go
@@ -146,25 +146,32 @@ func (m *MetricStreamingDecoder) resetMetric() {
}
}
-func (m *MetricStreamingDecoder) GetMetric() {
+func (*MetricStreamingDecoder) GetMetric() {
panic("don't use GetMetric, use Metric directly")
}
-func (m *MetricStreamingDecoder) GetLabel() {
+func (*MetricStreamingDecoder) GetLabel() {
panic("don't use GetLabel, use Label instead")
}
-type scratchBuilder interface {
+// unsafeLabelAdder adds labels for a single metric.
+// The "unsafe" word highlights that some strings must not be retained on the
+// caller's side. When used with labels.ScratchBuilder, ensure it's used
+// with SetUnsafeAdd set to true.
+type unsafeLabelAdder interface {
Add(name, value string)
- UnsafeAddBytes(name, value []byte)
}
-// Label parses labels into labels scratch builder. Metric name is missing
+// Label parses labels into unsafeLabelAdder. Metric name is missing
// given the protobuf metric model and has to be deduced from the metric family name.
-// TODO: The method name intentionally hide MetricStreamingDecoder.Metric.Label
+//
+// TODO: The Label method name intentionally hides the MetricStreamingDecoder.Metric.Label
// field to avoid direct use (it's not parsed). In the future, the generator will generate
// structs tailored for streaming decoding.
-func (m *MetricStreamingDecoder) Label(b scratchBuilder) error {
+//
+// Unsafe in this context means that bytes and strings are reused across iterations.
+// They are live only until the next NextMetric() or NextMetricFamily() call.
+func (m *MetricStreamingDecoder) Label(b unsafeLabelAdder) error {
for _, l := range m.labels {
if err := parseLabel(m.mData[l.start:l.end], b); err != nil {
return err
@@ -173,10 +180,9 @@ func (m *MetricStreamingDecoder) Label(b scratchBuilder) error {
return nil
}
-// parseLabel is essentially LabelPair.Unmarshal but directly adding into scratch builder
-// via UnsafeAddBytes method to reuse strings.
-func parseLabel(dAtA []byte, b scratchBuilder) error {
- var name, value []byte
+// parseLabel is essentially LabelPair.Unmarshal but directly adding into unsafeLabelAdder.
+func parseLabel(dAtA []byte, b unsafeLabelAdder) error {
+ var unsafeName, unsafeValue string
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -235,9 +241,9 @@ func parseLabel(dAtA []byte, b scratchBuilder) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- name = dAtA[iNdEx:postIndex]
- if !model.LabelName(name).IsValid() {
- return fmt.Errorf("invalid label name: %s", name)
+ unsafeName = yoloString(dAtA[iNdEx:postIndex])
+ if !model.UTF8Validation.IsValidLabelName(unsafeName) {
+ return fmt.Errorf("invalid label name: %s", unsafeName)
}
iNdEx = postIndex
case 2:
@@ -270,9 +276,9 @@ func parseLabel(dAtA []byte, b scratchBuilder) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- value = dAtA[iNdEx:postIndex]
- if !utf8.ValidString(yoloString(value)) {
- return fmt.Errorf("invalid label value: %s", value)
+ unsafeValue = yoloString(dAtA[iNdEx:postIndex])
+ if !utf8.ValidString(unsafeValue) {
+ return fmt.Errorf("invalid label value: %s", unsafeValue)
}
iNdEx = postIndex
default:
@@ -293,7 +299,7 @@ func parseLabel(dAtA []byte, b scratchBuilder) error {
if iNdEx > l {
return io.ErrUnexpectedEOF
}
- b.UnsafeAddBytes(name, value)
+ b.Add(unsafeName, unsafeValue)
return nil
}
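yoloString itself is not part of this hunk; for illustration, a common zero-copy implementation on Go 1.20+ looks like the following, which is exactly why the decoded strings must not outlive the buffer:

```go
import "unsafe"

// yoloString reinterprets b as a string without copying. The result is only
// valid while b stays alive and unmodified, hence the decoder's rule that
// labels are live only until the next NextMetric/NextMetricFamily call.
func yoloString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}
```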
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
index fe55638bb78..681554a7caf 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
@@ -81,8 +81,7 @@ message Histogram {
google.protobuf.Timestamp created_timestamp = 15;
- // Everything below here is for native histograms (also known as sparse histograms).
- // Native histograms are an experimental feature without stability guarantees.
+ // Everything below here is for native histograms (formerly known as sparse histograms).
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go
index 4434c525fcb..71196edb885 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/codec.go
@@ -25,7 +25,7 @@ import (
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
// ToLabels return model labels.Labels from timeseries' remote labels.
-func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) (labels.Labels, error) {
return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
}
@@ -142,7 +142,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
@@ -200,20 +200,25 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan {
return nil
}
spans := make([]BucketSpan, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
-func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
+func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) (exemplar.Exemplar, error) {
timestamp := m.Timestamp
+ lbls, err := desymbolizeLabels(b, m.LabelsRefs, symbols)
+ if err != nil {
+ return exemplar.Exemplar{}, err
+ }
+
return exemplar.Exemplar{
- Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
+ Labels: lbls,
Value: m.Value,
Ts: timestamp,
HasTs: timestamp != 0,
- }
+ }, nil
}
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/custom.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/custom.go
index 3aa778eb606..5721aec532d 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/custom.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/custom.go
@@ -80,11 +80,6 @@ func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.CreatedTimestamp != 0 {
- i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
- i--
- dAtA[i] = 0x30
- }
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/symbols.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/symbols.go
index f316a976f26..7c7feca2397 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/symbols.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/symbols.go
@@ -13,7 +13,11 @@
package writev2
-import "github.com/prometheus/prometheus/model/labels"
+import (
+ "fmt"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
// SymbolsTable implements table for easy symbol use.
type SymbolsTable struct {
@@ -73,11 +77,22 @@ func (t *SymbolsTable) Reset() {
}
// desymbolizeLabels decodes label references, with given symbols to labels.
-func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+// This function requires labelRefs to have an even number of elements (name-value pairs) and
+// all references to be valid indices within the symbols table. It returns an error if
+// either invariant is violated.
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) (labels.Labels, error) {
+ if len(labelRefs)%2 != 0 {
+ return labels.EmptyLabels(), fmt.Errorf("invalid labelRefs length %d", len(labelRefs))
+ }
+
b.Reset()
for i := 0; i < len(labelRefs); i += 2 {
- b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+ nameRef, valueRef := labelRefs[i], labelRefs[i+1]
+ if int(nameRef) >= len(symbols) || int(valueRef) >= len(symbols) {
+ return labels.EmptyLabels(), fmt.Errorf("labelRefs name %d or value %d outside of symbols table (size %d)", nameRef, valueRef, len(symbols))
+ }
+ b.Add(symbols[nameRef], symbols[valueRef])
}
b.Sort()
- return b.Labels()
+ return b.Labels(), nil
}
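For reference, a small worked example of the symbols-table encoding that desymbolizeLabels now validates (values are illustrative):

package example

// symbolsExample returns a tiny symbols table and a matching labelRefs slice.
func symbolsExample() ([]string, []uint32) {
	// symbols[0] is the empty string by remote write 2.x convention.
	symbols := []string{"", "__name__", "up", "job", "prometheus"}
	// Alternating name/value references: __name__="up", job="prometheus".
	labelRefs := []uint32{1, 2, 3, 4}
	// An odd number of references, or any reference >= len(symbols), now
	// yields an error instead of an index-out-of-range panic.
	return symbols, labelRefs
}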
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go
index 1419de217ea..a726efb5b5b 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.pb.go
@@ -106,6 +106,8 @@ func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
// The canonical Content-Type request header value for this message is
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
//
+// Version: v2.0-rc.4
+//
// NOTE: gogoproto options might change in future for this file, they
// are not part of the spec proto (they only modify the generated Go code, not
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
@@ -181,7 +183,7 @@ type TimeSeries struct {
//
// Note that there might be multiple TimeSeries objects in the same
// Requests with the same labels e.g. for different exemplars, metadata
- // or created timestamp.
+ // or start timestamp.
LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
// Timeseries messages can either specify samples or (native) histogram samples
// (histogram field), but not both. For a typical sender (real-time metric
@@ -193,24 +195,7 @@ type TimeSeries struct {
// exemplars represents an optional set of exemplars attached to this series' samples.
Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
// metadata represents the metadata associated with the given series' samples.
- Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
- // created_timestamp represents an optional created timestamp associated with
- // this series' samples in ms format, typically for counter or histogram type
- // metrics. Created timestamp represents the time when the counter started
- // counting (sometimes referred to as start timestamp), which can increase
- // the accuracy of query results.
- //
- // Note that some receivers might require this and in return fail to
- // ingest such samples within the Request.
- //
- // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
- // for conversion from/to time.Time to Prometheus timestamp.
- //
- // Note that the "optional" keyword is omitted due to
- // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
- // Zero value means value not set. If you need to use exactly zero value for
- // the timestamp, use 1 millisecond before or after.
- CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
+ Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -284,13 +269,6 @@ func (m *TimeSeries) GetMetadata() Metadata {
return Metadata{}
}
-func (m *TimeSeries) GetCreatedTimestamp() int64 {
- if m != nil {
- return m.CreatedTimestamp
- }
- return 0
-}
-
// Exemplar is an additional information attached to some series' samples.
// It is typically used to attach an example trace or request ID associated with
// the metric changes.
@@ -375,7 +353,27 @@ type Sample struct {
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
- Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // start_timestamp represents an optional start timestamp for the sample,
+ // in ms format. This information is typically used for counter, histogram (cumulative)
+ // or delta type metrics.
+ //
+ // For cumulative metrics, the start timestamp represents the time when the
+ // counter started counting (sometimes referred to as start timestamp), which
+ // can increase the accuracy of certain processing and query semantics (e.g. rates).
+ //
+ // Note:
+ // * Some receivers might require start timestamps for certain metric
+ // types and may reject samples within the Request that lack them.
+ // * "start timestamp" is the same concept as the "created timestamp" name Prometheus used in the past.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to efficiency and consistency.
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ StartTimestamp int64 `protobuf:"varint,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -428,6 +426,13 @@ func (m *Sample) GetTimestamp() int64 {
return 0
}
+func (m *Sample) GetStartTimestamp() int64 {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return 0
+}
+
// Metadata represents the metadata associated with the given series' samples.
type Metadata struct {
Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"`
@@ -498,12 +503,11 @@ func (m *Metadata) GetUnitRef() uint32 {
return 0
}
-// A native histogram, also known as a sparse histogram.
-// Original design doc:
-// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
-// The appendix of this design doc also explains the concept of float
-// histograms. This Histogram message can represent both, the usual
-// integer histogram as well as a float histogram.
+// A native histogram message, supporting:
+// * sparse exponential bucketing and custom bucketing.
+// * float or integer histograms.
+//
+// See the full spec: https://prometheus.io/docs/specs/native_histograms/
type Histogram struct {
// Types that are valid to be assigned to Count:
//
@@ -581,10 +585,27 @@ type Histogram struct {
//
// The last element is not only the upper inclusive bound of the last regular
// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
- CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
+ // start_timestamp represents an optional start timestamp for the histogram sample,
+ // in ms format. The start timestamp represents the time when the histogram
+ // started counting, which can increase the accuracy of certain processing and
+ // query semantics (e.g. rates).
+ //
+ // Note:
+ // * Some receivers might require start timestamps for certain metric
+ // types and may reject samples within the Request that lack them.
+ // * "start timestamp" is the same concept as the "created timestamp" name Prometheus used in the past.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to efficiency and consistency.
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ StartTimestamp int64 `protobuf:"varint,17,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
@@ -774,6 +795,13 @@ func (m *Histogram) GetCustomValues() []float64 {
return nil
}
+func (m *Histogram) GetStartTimestamp() int64 {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return 0
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Histogram) XXX_OneofWrappers() []interface{} {
return []interface{}{
@@ -861,65 +889,66 @@ func init() {
}
var fileDescriptor_f139519efd9fa8d7 = []byte{
- // 926 bytes of a gzipped FileDescriptorProto
+ // 931 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44,
- 0x14, 0xed, 0xc4, 0x69, 0x3e, 0x6e, 0x9a, 0xac, 0x33, 0xb4, 0x5d, 0x6f, 0x81, 0x6c, 0xd6, 0x08,
- 0x88, 0x58, 0x29, 0x91, 0xc2, 0xeb, 0x0a, 0xd4, 0xb4, 0x6e, 0x93, 0x95, 0x92, 0xac, 0x26, 0x2e,
- 0x52, 0x79, 0xb1, 0xdc, 0x64, 0x92, 0x58, 0xd8, 0xb1, 0xf1, 0x4c, 0x02, 0xe5, 0xf7, 0xf1, 0xb0,
- 0x8f, 0xfc, 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0x42, 0xbb, 0xe2, 0x6d, 0xe6,
- 0xdc, 0x73, 0xee, 0x3d, 0xb9, 0xbe, 0x77, 0x02, 0xba, 0xe3, 0xf7, 0x82, 0xd0, 0xf7, 0x28, 0x5f,
- 0xd3, 0x2d, 0xeb, 0xfd, 0x14, 0x3a, 0x9c, 0xf6, 0x76, 0xfd, 0x1e, 0xbf, 0x0d, 0x28, 0xeb, 0x06,
- 0xa1, 0xcf, 0x7d, 0x7c, 0xec, 0xf8, 0xdd, 0x8c, 0xd3, 0x95, 0x9c, 0xee, 0xae, 0x7f, 0x72, 0xb8,
- 0xf2, 0x57, 0xbe, 0xa4, 0xf4, 0xc4, 0x29, 0x62, 0xeb, 0x0c, 0xca, 0x84, 0xfe, 0xb8, 0xa5, 0x8c,
- 0x63, 0x0d, 0xca, 0xec, 0xd6, 0xbb, 0xf1, 0x5d, 0xa6, 0x15, 0xdb, 0x4a, 0xa7, 0x4a, 0x92, 0x2b,
- 0x1e, 0x02, 0x70, 0xc7, 0xa3, 0x8c, 0x86, 0x0e, 0x65, 0xda, 0x7e, 0x5b, 0xe9, 0xd4, 0xfa, 0x7a,
- 0xf7, 0xbf, 0xeb, 0x74, 0x4d, 0xc7, 0xa3, 0x33, 0xc9, 0x1c, 0x14, 0xdf, 0xff, 0xf1, 0x72, 0x8f,
- 0xe4, 0xb4, 0x6f, 0x8b, 0x15, 0xa4, 0x16, 0xf5, 0xbf, 0x0b, 0x00, 0x19, 0x0d, 0xbf, 0x84, 0x9a,
- 0x6b, 0xdf, 0x50, 0x97, 0x59, 0x21, 0x5d, 0x32, 0x0d, 0xb5, 0x95, 0x4e, 0x9d, 0x40, 0x04, 0x11,
- 0xba, 0x64, 0xf8, 0x1b, 0x28, 0x33, 0xdb, 0x0b, 0x5c, 0xca, 0xb4, 0x82, 0x2c, 0xde, 0x7a, 0xac,
- 0xf8, 0x4c, 0xd2, 0xe2, 0xc2, 0x89, 0x08, 0x5f, 0x02, 0xac, 0x1d, 0xc6, 0xfd, 0x55, 0x68, 0x7b,
- 0x4c, 0x53, 0x64, 0x8a, 0x57, 0x8f, 0xa5, 0x18, 0x26, 0xcc, 0xc4, 0x7e, 0x26, 0xc5, 0xe7, 0x50,
- 0xa5, 0x3f, 0x53, 0x2f, 0x70, 0xed, 0x30, 0x6a, 0x52, 0xad, 0xdf, 0x7e, 0x2c, 0x8f, 0x11, 0x13,
- 0xe3, 0x34, 0x99, 0x10, 0x0f, 0xa0, 0xe2, 0x51, 0x6e, 0x2f, 0x6c, 0x6e, 0x6b, 0xfb, 0x6d, 0xf4,
- 0x54, 0x92, 0x71, 0xcc, 0x8b, 0x93, 0xa4, 0x3a, 0xfc, 0x1a, 0x9a, 0xf3, 0x90, 0xda, 0x9c, 0x2e,
- 0x2c, 0xd9, 0x5e, 0x6e, 0x7b, 0x81, 0x56, 0x6a, 0xa3, 0x8e, 0x42, 0xd4, 0x38, 0x60, 0x26, 0xb8,
- 0x6e, 0x41, 0x25, 0x71, 0xf3, 0xe1, 0x66, 0x1f, 0xc2, 0xfe, 0xce, 0x76, 0xb7, 0x54, 0x2b, 0xb4,
- 0x51, 0x07, 0x91, 0xe8, 0x82, 0x3f, 0x81, 0x6a, 0x56, 0x47, 0x91, 0x75, 0x32, 0x40, 0x7f, 0x03,
- 0xa5, 0xa8, 0xf3, 0x99, 0x1a, 0x3d, 0xaa, 0x2e, 0x3c, 0x54, 0xff, 0x55, 0x80, 0x4a, 0xf2, 0x43,
- 0xf1, 0xb7, 0x50, 0x14, 0xd3, 0x2c, 0xf5, 0x8d, 0xfe, 0xeb, 0x0f, 0x35, 0x46, 0x1c, 0x42, 0x67,
- 0x6e, 0xde, 0x06, 0x94, 0x48, 0x21, 0x7e, 0x01, 0x95, 0x35, 0x75, 0x03, 0xf1, 0xf3, 0xa4, 0xd1,
- 0x3a, 0x29, 0x8b, 0x3b, 0xa1, 0x4b, 0x11, 0xda, 0x6e, 0x1c, 0x2e, 0x43, 0xc5, 0x28, 0x24, 0xee,
- 0x84, 0x2e, 0xf5, 0xdf, 0x11, 0x40, 0x96, 0x0a, 0x7f, 0x0c, 0xcf, 0xc7, 0x86, 0x49, 0x46, 0x67,
- 0x96, 0x79, 0xfd, 0xce, 0xb0, 0xae, 0x26, 0xb3, 0x77, 0xc6, 0xd9, 0xe8, 0x62, 0x64, 0x9c, 0xab,
- 0x7b, 0xf8, 0x39, 0x7c, 0x94, 0x0f, 0x9e, 0x4d, 0xaf, 0x26, 0xa6, 0x41, 0x54, 0x84, 0x8f, 0xa0,
- 0x99, 0x0f, 0x5c, 0x9e, 0x5e, 0x5d, 0x1a, 0x6a, 0x01, 0xbf, 0x80, 0xa3, 0x3c, 0x3c, 0x1c, 0xcd,
- 0xcc, 0xe9, 0x25, 0x39, 0x1d, 0xab, 0x0a, 0x6e, 0xc1, 0xc9, 0xbf, 0x14, 0x59, 0xbc, 0xf8, 0xb0,
- 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x25, 0xd7, 0xea, 0x3e, 0x3e, 0x04, 0x35, 0x1f, 0x18, 0x4d, 0x2e,
- 0xa6, 0x6a, 0x09, 0x6b, 0x70, 0x78, 0x8f, 0x6e, 0x9e, 0x9a, 0xc6, 0xcc, 0x30, 0xd5, 0xb2, 0xfe,
- 0x6b, 0x09, 0xaa, 0xe9, 0x64, 0xe3, 0x4f, 0xa1, 0x3a, 0xf7, 0xb7, 0x1b, 0x6e, 0x39, 0x1b, 0x2e,
- 0x3b, 0x5d, 0x1c, 0xee, 0x91, 0x8a, 0x84, 0x46, 0x1b, 0x8e, 0x5f, 0x41, 0x2d, 0x0a, 0x2f, 0x5d,
- 0xdf, 0xe6, 0xd1, 0x20, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x42, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x3d,
- 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x43, 0x89, 0xcd, 0xd7, 0xd4, 0xb3, 0x65, 0x6b, 0x9b, 0x24,
- 0xbe, 0xe1, 0xcf, 0xa1, 0xf1, 0x0b, 0x0d, 0x7d, 0x8b, 0xaf, 0x43, 0xca, 0xd6, 0xbe, 0xbb, 0x90,
- 0x33, 0x8f, 0x48, 0x5d, 0xa0, 0x66, 0x02, 0xe2, 0x2f, 0x62, 0x5a, 0xe6, 0xab, 0x24, 0x7d, 0x21,
- 0x72, 0x20, 0xf0, 0xb3, 0xc4, 0xdb, 0x57, 0xa0, 0xe6, 0x78, 0x91, 0xc1, 0xb2, 0x34, 0x88, 0x48,
- 0x23, 0x65, 0x46, 0x26, 0xa7, 0xd0, 0xd8, 0xd0, 0x95, 0xcd, 0x9d, 0x1d, 0xb5, 0x58, 0x60, 0x6f,
- 0x98, 0x56, 0x79, 0xfa, 0xed, 0x1a, 0x6c, 0xe7, 0x3f, 0x50, 0x3e, 0x0b, 0xec, 0x4d, 0xbc, 0x70,
- 0xf5, 0x44, 0x2f, 0x30, 0x86, 0xbf, 0x84, 0x67, 0x69, 0xc2, 0x05, 0x75, 0xb9, 0xcd, 0xb4, 0x6a,
- 0x5b, 0xe9, 0x60, 0x92, 0xd6, 0x39, 0x97, 0xe8, 0x3d, 0xa2, 0x74, 0xca, 0x34, 0x68, 0x2b, 0x1d,
- 0x94, 0x11, 0xa5, 0x4d, 0x26, 0x2c, 0x06, 0x3e, 0x73, 0x72, 0x16, 0x6b, 0xff, 0xd7, 0x62, 0xa2,
- 0x4f, 0x2d, 0xa6, 0x09, 0x63, 0x8b, 0x07, 0x91, 0xc5, 0x04, 0xce, 0x2c, 0xa6, 0xc4, 0xd8, 0x62,
- 0x3d, 0xb2, 0x98, 0xc0, 0xb1, 0xc5, 0xb7, 0x00, 0x21, 0x65, 0x94, 0x5b, 0x6b, 0xf1, 0x55, 0x1a,
- 0x4f, 0xef, 0x65, 0x3a, 0x63, 0x5d, 0x22, 0x34, 0x43, 0x67, 0xc3, 0x49, 0x35, 0x4c, 0x8e, 0xf7,
- 0x1f, 0x82, 0x67, 0x0f, 0x1e, 0x02, 0xfc, 0x19, 0xd4, 0xe7, 0x5b, 0xc6, 0x7d, 0xcf, 0x92, 0xcf,
- 0x06, 0xd3, 0x54, 0x69, 0xe8, 0x20, 0x02, 0xbf, 0x93, 0x98, 0xbe, 0x80, 0x6a, 0x9a, 0x1a, 0x9f,
- 0xc0, 0x31, 0x11, 0x13, 0x6e, 0x0d, 0x47, 0x13, 0xf3, 0xc1, 0x9a, 0x62, 0x68, 0xe4, 0x62, 0xd7,
- 0xc6, 0x4c, 0x45, 0xb8, 0x09, 0xf5, 0x1c, 0x36, 0x99, 0xaa, 0x05, 0xb1, 0x49, 0x39, 0x28, 0xda,
- 0x59, 0x65, 0x50, 0x86, 0x7d, 0xd9, 0x94, 0xc1, 0x01, 0x40, 0x36, 0x6f, 0xfa, 0x1b, 0x80, 0xec,
- 0x03, 0x88, 0x91, 0xf7, 0x97, 0x4b, 0x46, 0xa3, 0x1d, 0x6a, 0x92, 0xf8, 0x26, 0x70, 0x97, 0x6e,
- 0x56, 0x7c, 0x2d, 0x57, 0xa7, 0x4e, 0xe2, 0xdb, 0xe0, 0xe8, 0xfd, 0x5d, 0x0b, 0xfd, 0x76, 0xd7,
- 0x42, 0x7f, 0xde, 0xb5, 0xd0, 0xf7, 0x65, 0xd9, 0xb4, 0x5d, 0xff, 0xa6, 0x24, 0xff, 0x8a, 0xbf,
- 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xfc, 0x93, 0x1c, 0xde, 0x07, 0x00, 0x00,
+ 0x14, 0xed, 0xc4, 0xf9, 0xbc, 0x69, 0xb2, 0xce, 0xd0, 0x76, 0xbd, 0x05, 0xb2, 0xd9, 0x20, 0x20,
+ 0x02, 0x29, 0x91, 0xc2, 0x2b, 0x02, 0x35, 0xad, 0xdb, 0xa4, 0x52, 0x92, 0xd5, 0xc4, 0x45, 0x2a,
+ 0x2f, 0x96, 0x9b, 0x4e, 0x12, 0x0b, 0x3b, 0x36, 0x9e, 0x49, 0xa0, 0xfc, 0x40, 0xb4, 0x8f, 0xfc,
+ 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0xd0, 0x76, 0xb5, 0x6f, 0x33, 0xe7, 0x9e,
+ 0x73, 0xef, 0xc9, 0xf5, 0xbd, 0x13, 0x68, 0xdb, 0x5e, 0xcf, 0x0f, 0x3c, 0x97, 0xf2, 0x15, 0xdd,
+ 0xb0, 0xde, 0x2f, 0x81, 0xcd, 0x69, 0x6f, 0xdb, 0xef, 0xf1, 0x3b, 0x9f, 0xb2, 0xae, 0x1f, 0x78,
+ 0xdc, 0xc3, 0x47, 0xb6, 0xd7, 0x4d, 0x39, 0x5d, 0xc9, 0xe9, 0x6e, 0xfb, 0xc7, 0x07, 0x4b, 0x6f,
+ 0xe9, 0x49, 0x4a, 0x4f, 0x9c, 0x42, 0x76, 0x9b, 0x41, 0x89, 0xd0, 0x9f, 0x37, 0x94, 0x71, 0xac,
+ 0x41, 0x89, 0xdd, 0xb9, 0x37, 0x9e, 0xc3, 0xb4, 0x7c, 0x4b, 0xe9, 0x54, 0x48, 0x7c, 0xc5, 0x43,
+ 0x00, 0x6e, 0xbb, 0x94, 0xd1, 0xc0, 0xa6, 0x4c, 0x2b, 0xb4, 0x94, 0x4e, 0xb5, 0xdf, 0xee, 0x3e,
+ 0x5e, 0xa7, 0x6b, 0xd8, 0x2e, 0x9d, 0x49, 0xe6, 0x20, 0xff, 0xee, 0xaf, 0xd7, 0x7b, 0x24, 0xa3,
+ 0xbd, 0xcc, 0x97, 0x91, 0x9a, 0x6f, 0xff, 0x9e, 0x03, 0x48, 0x69, 0xf8, 0x35, 0x54, 0x1d, 0xeb,
+ 0x86, 0x3a, 0xcc, 0x0c, 0xe8, 0x82, 0x69, 0xa8, 0xa5, 0x74, 0x6a, 0x04, 0x42, 0x88, 0xd0, 0x05,
+ 0xc3, 0xdf, 0x41, 0x89, 0x59, 0xae, 0xef, 0x50, 0xa6, 0xe5, 0x64, 0xf1, 0xe6, 0x53, 0xc5, 0x67,
+ 0x92, 0x16, 0x15, 0x8e, 0x45, 0xf8, 0x02, 0x60, 0x65, 0x33, 0xee, 0x2d, 0x03, 0xcb, 0x65, 0x9a,
+ 0x22, 0x53, 0xbc, 0x79, 0x2a, 0xc5, 0x30, 0x66, 0xc6, 0xf6, 0x53, 0x29, 0x3e, 0x83, 0x0a, 0xfd,
+ 0x95, 0xba, 0xbe, 0x63, 0x05, 0x61, 0x93, 0xaa, 0xfd, 0xd6, 0x53, 0x79, 0xf4, 0x88, 0x18, 0xa5,
+ 0x49, 0x85, 0x78, 0x00, 0x65, 0x97, 0x72, 0xeb, 0xd6, 0xe2, 0x96, 0x56, 0x68, 0xa1, 0xe7, 0x92,
+ 0x8c, 0x23, 0x5e, 0x94, 0x24, 0xd1, 0x5d, 0xe6, 0xcb, 0x45, 0xb5, 0xd4, 0x36, 0xa1, 0x1c, 0x97,
+ 0x79, 0x7f, 0x17, 0x0f, 0xa0, 0xb0, 0xb5, 0x9c, 0x0d, 0xd5, 0x72, 0x2d, 0xd4, 0x41, 0x24, 0xbc,
+ 0xe0, 0x4f, 0xa0, 0x22, 0xbf, 0x0f, 0xb7, 0x5c, 0x5f, 0x53, 0x5a, 0xa8, 0xa3, 0x90, 0x14, 0x68,
+ 0x53, 0x28, 0x86, 0x2d, 0x4d, 0xd5, 0xe8, 0x49, 0x75, 0x6e, 0x47, 0x8d, 0xbf, 0x84, 0x17, 0x8c,
+ 0x5b, 0x01, 0x37, 0x77, 0x2b, 0xd4, 0x25, 0x6c, 0x24, 0x65, 0xfe, 0xc9, 0x41, 0x39, 0xfe, 0xa9,
+ 0xf8, 0x7b, 0xc8, 0x8b, 0x79, 0x96, 0x85, 0xea, 0xfd, 0xaf, 0xdf, 0xd7, 0x1a, 0x71, 0x08, 0xec,
+ 0xb9, 0x71, 0xe7, 0x53, 0x22, 0x85, 0xf8, 0x15, 0x94, 0x57, 0xd4, 0xf1, 0x45, 0x1f, 0x64, 0xbd,
+ 0x1a, 0x29, 0x89, 0x3b, 0xa1, 0x0b, 0x11, 0xda, 0xac, 0x6d, 0x2e, 0x43, 0xf9, 0x30, 0x24, 0xee,
+ 0x84, 0x2e, 0xda, 0x7f, 0x22, 0x80, 0x34, 0x15, 0xfe, 0x18, 0x5e, 0x8e, 0x75, 0x83, 0x8c, 0x4e,
+ 0x4d, 0xe3, 0xfa, 0xad, 0x6e, 0x5e, 0x4d, 0x66, 0x6f, 0xf5, 0xd3, 0xd1, 0xf9, 0x48, 0x3f, 0x53,
+ 0xf7, 0xf0, 0x4b, 0xf8, 0x28, 0x1b, 0x3c, 0x9d, 0x5e, 0x4d, 0x0c, 0x9d, 0xa8, 0x08, 0x1f, 0x42,
+ 0x23, 0x1b, 0xb8, 0x38, 0xb9, 0xba, 0xd0, 0xd5, 0x1c, 0x7e, 0x05, 0x87, 0x59, 0x78, 0x38, 0x9a,
+ 0x19, 0xd3, 0x0b, 0x72, 0x32, 0x56, 0x15, 0xdc, 0x84, 0xe3, 0xff, 0x29, 0xd2, 0x78, 0x7e, 0xb7,
+ 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x21, 0xd7, 0x6a, 0x01, 0x1f, 0x80, 0x9a, 0x0d, 0x8c, 0x26, 0xe7,
+ 0x53, 0xb5, 0x88, 0x35, 0x38, 0x78, 0x40, 0x37, 0x4e, 0x0c, 0x7d, 0xa6, 0x1b, 0x6a, 0xa9, 0xfd,
+ 0x6f, 0x11, 0x2a, 0xc9, 0x6c, 0xe3, 0x4f, 0xa1, 0x32, 0xf7, 0x36, 0x6b, 0x6e, 0xda, 0x6b, 0x2e,
+ 0x3b, 0x9d, 0x1f, 0xee, 0x91, 0xb2, 0x84, 0x46, 0x6b, 0x8e, 0xdf, 0x40, 0x35, 0x0c, 0x2f, 0x1c,
+ 0xcf, 0xe2, 0xe1, 0xc4, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x5c, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x5c,
+ 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x41, 0x91, 0xcd, 0x57, 0xd4, 0xb5, 0x64, 0x6b, 0x1b, 0x24,
+ 0xba, 0xe1, 0xcf, 0xa1, 0xfe, 0x1b, 0x0d, 0x3c, 0x93, 0xaf, 0x02, 0xca, 0x56, 0x9e, 0x73, 0x2b,
+ 0xa7, 0x1e, 0x91, 0x9a, 0x40, 0x8d, 0x18, 0xc4, 0x5f, 0x44, 0xb4, 0xd4, 0x57, 0x51, 0xfa, 0x42,
+ 0x64, 0x5f, 0xe0, 0xa7, 0xb1, 0xb7, 0xaf, 0x40, 0xcd, 0xf0, 0x42, 0x83, 0x25, 0x69, 0x10, 0x91,
+ 0x7a, 0xc2, 0x0c, 0x4d, 0x4e, 0xa1, 0xbe, 0xa6, 0x4b, 0x8b, 0xdb, 0x5b, 0x6a, 0x32, 0xdf, 0x5a,
+ 0x33, 0xad, 0xfc, 0xfc, 0xeb, 0x35, 0xd8, 0xcc, 0x7f, 0xa2, 0x7c, 0xe6, 0x5b, 0xeb, 0x68, 0xe5,
+ 0x6a, 0xb1, 0x5e, 0x60, 0x4c, 0x8c, 0x74, 0x92, 0xf0, 0x96, 0x3a, 0xdc, 0x62, 0x5a, 0xa5, 0xa5,
+ 0x74, 0x30, 0x49, 0xea, 0x9c, 0x49, 0xf4, 0x01, 0x51, 0x3a, 0x65, 0x1a, 0xb4, 0x94, 0x0e, 0x4a,
+ 0x89, 0xd2, 0x26, 0x13, 0x16, 0x7d, 0x8f, 0xd9, 0x19, 0x8b, 0xd5, 0x0f, 0xb5, 0x18, 0xeb, 0x13,
+ 0x8b, 0x49, 0xc2, 0xc8, 0xe2, 0x7e, 0x68, 0x31, 0x86, 0x53, 0x8b, 0x09, 0x31, 0xb2, 0x58, 0x0b,
+ 0x2d, 0xc6, 0x70, 0x64, 0xf1, 0x12, 0x20, 0xa0, 0x8c, 0x72, 0x73, 0x25, 0xbe, 0x4a, 0xfd, 0xf9,
+ 0xbd, 0x4c, 0x66, 0xac, 0x4b, 0x84, 0x66, 0x68, 0xaf, 0x39, 0xa9, 0x04, 0xf1, 0xf1, 0xe1, 0x8b,
+ 0xf1, 0x62, 0xf7, 0xc5, 0xf8, 0x0c, 0x6a, 0xf3, 0x0d, 0xe3, 0x9e, 0x6b, 0xca, 0xf7, 0x85, 0x69,
+ 0xaa, 0x34, 0xb4, 0x1f, 0x82, 0x3f, 0x48, 0xec, 0xb1, 0x67, 0xa5, 0xf1, 0xe8, 0xb3, 0x72, 0x0b,
+ 0x95, 0xc4, 0x03, 0x3e, 0x86, 0x23, 0x22, 0x56, 0xc1, 0x1c, 0x8e, 0x26, 0xc6, 0xce, 0x3e, 0x63,
+ 0xa8, 0x67, 0x62, 0xd7, 0xfa, 0x4c, 0x45, 0xb8, 0x01, 0xb5, 0x0c, 0x36, 0x99, 0xaa, 0x39, 0xb1,
+ 0x72, 0x19, 0x28, 0x5c, 0x6e, 0x65, 0x50, 0x82, 0x82, 0xec, 0xde, 0x60, 0x1f, 0x20, 0x1d, 0xcc,
+ 0xf6, 0xb7, 0x00, 0xe9, 0x97, 0x12, 0xbb, 0xe1, 0x2d, 0x16, 0x8c, 0x86, 0xcb, 0xd6, 0x20, 0xd1,
+ 0x4d, 0xe0, 0x0e, 0x5d, 0x2f, 0xf9, 0x4a, 0xee, 0x58, 0x8d, 0x44, 0xb7, 0xc1, 0xe1, 0xbb, 0xfb,
+ 0x26, 0xfa, 0xe3, 0xbe, 0x89, 0xfe, 0xbe, 0x6f, 0xa2, 0x1f, 0x4b, 0xb2, 0xbb, 0xdb, 0xfe, 0x4d,
+ 0x51, 0xfe, 0x6b, 0x7f, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x62, 0x8f, 0x36, 0x4b, 0x09,
+ 0x08, 0x00, 0x00,
}
func (m *Request) Marshal() (dAtA []byte, err error) {
@@ -996,11 +1025,6 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.CreatedTimestamp != 0 {
- i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
- i--
- dAtA[i] = 0x30
- }
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -1154,6 +1178,11 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if m.StartTimestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.StartTimestamp))
+ i--
+ dAtA[i] = 0x18
+ }
if m.Timestamp != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
i--
@@ -1234,6 +1263,13 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if m.StartTimestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.StartTimestamp))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
+ }
if len(m.CustomValues) > 0 {
for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- {
f6 := math.Float64bits(float64(m.CustomValues[iNdEx]))
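The literal tag bytes above follow the protobuf wire format, key = (field_number << 3) | wire_type. A quick sanity check in Go:

package example

import "fmt"

func tagBytes() {
	const wireVarint = 0
	// Sample.start_timestamp is field 3: (3<<3)|0 = 0x18, one tag byte.
	fmt.Printf("%#x\n", 3<<3|wireVarint)
	// Histogram.start_timestamp is field 17: (17<<3)|0 = 136 = 0x88. The high
	// bit is set, so the varint key takes two bytes, 0x88 0x01; the marshal
	// code emits 0x1 then 0x88 because it fills the buffer back to front.
	fmt.Printf("%#x\n", 17<<3|wireVarint)
}

The same arithmetic explains the Size hunks below: the field-3 tag costs one byte (n += 1 + ...) while the field-17 tag costs two (n += 2 + ...).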
@@ -1535,9 +1571,6 @@ func (m *TimeSeries) Size() (n int) {
}
l = m.Metadata.Size()
n += 1 + l + sovTypes(uint64(l))
- if m.CreatedTimestamp != 0 {
- n += 1 + sovTypes(uint64(m.CreatedTimestamp))
- }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -1581,6 +1614,9 @@ func (m *Sample) Size() (n int) {
if m.Timestamp != 0 {
n += 1 + sovTypes(uint64(m.Timestamp))
}
+ if m.StartTimestamp != 0 {
+ n += 1 + sovTypes(uint64(m.StartTimestamp))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -1670,6 +1706,9 @@ func (m *Histogram) Size() (n int) {
if len(m.CustomValues) > 0 {
n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8
}
+ if m.StartTimestamp != 0 {
+ n += 2 + sovTypes(uint64(m.StartTimestamp))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -2093,25 +2132,6 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
- }
- m.CreatedTimestamp = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTypes
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CreatedTimestamp |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
@@ -2350,6 +2370,25 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
break
}
}
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType)
+ }
+ m.StartTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartTimestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
@@ -3038,6 +3077,25 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
} else {
return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType)
}
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType)
+ }
+ m.StartTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartTimestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto
index ff6c4936bb3..c1ae04d2068 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/write/v2/types.proto
@@ -14,6 +14,7 @@
// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2
syntax = "proto3";
+
package io.prometheus.write.v2;
option go_package = "writev2";
@@ -27,6 +28,8 @@ import "gogoproto/gogo.proto";
// The canonical Content-Type request header value for this message is
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
//
+// Version: v2.0-rc.4
+//
// NOTE: gogoproto options might change in future for this file, they
// are not part of the spec proto (they only modify the generated Go code, not
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
@@ -59,7 +62,7 @@ message TimeSeries {
//
// Note that there might be multiple TimeSeries objects in the same
// Requests with the same labels e.g. for different exemplars, metadata
- // or created timestamp.
+ // or start timestamp.
repeated uint32 labels_refs = 1;
// Timeseries messages can either specify samples or (native) histogram samples
@@ -76,23 +79,9 @@ message TimeSeries {
// metadata represents the metadata associated with the given series' samples.
Metadata metadata = 5 [(gogoproto.nullable) = false];
- // created_timestamp represents an optional created timestamp associated with
- // this series' samples in ms format, typically for counter or histogram type
- // metrics. Created timestamp represents the time when the counter started
- // counting (sometimes referred to as start timestamp), which can increase
- // the accuracy of query results.
- //
- // Note that some receivers might require this and in return fail to
- // ingest such samples within the Request.
- //
- // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
- // for conversion from/to time.Time to Prometheus timestamp.
- //
- // Note that the "optional" keyword is omitted due to
- // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
- // Zero value means value not set. If you need to use exactly zero value for
- // the timestamp, use 1 millisecond before or after.
- int64 created_timestamp = 6;
+ // This field number is reserved for backward compatibility with the deprecated
+ // created_timestamp field present during the experimental remote write period.
+ reserved 6;
}
// Exemplar is an additional information attached to some series' samples.
@@ -123,6 +112,26 @@ message Sample {
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
int64 timestamp = 2;
+ // start_timestamp represents an optional start timestamp for the sample,
+ // in ms format. This information is typically used for counter, histogram (cumulative)
+ // or delta type metrics.
+ //
+ // For cumulative metrics, the start timestamp represents the time when the
+ // counter started counting (sometimes referred to as start timestamp), which
+ // can increase the accuracy of certain processing and query semantics (e.g. rates).
+ //
+ // Note:
+ // * Some receivers might require start timestamps for certain metric
+ // types and may reject samples within the Request that lack them.
+ // * "start timestamp" is the same concept as the "created timestamp" name Prometheus used in the past.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to efficiency and consistency.
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ int64 start_timestamp = 3;
}
// Metadata represents the metadata associated with the given series' samples.
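For illustration, a hypothetical Sample carrying the new field (values made up): a counter observed at t whose start timestamp is one hour earlier, both in Unix milliseconds.

package example

import writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"

func counterSample(t int64, v float64) writev2.Sample {
	return writev2.Sample{
		Value:          v,
		Timestamp:      t,
		StartTimestamp: t - 3600*1000, // counter started counting an hour earlier
	}
}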
@@ -148,12 +157,11 @@ message Metadata {
uint32 unit_ref = 4;
}
-// A native histogram, also known as a sparse histogram.
-// Original design doc:
-// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
-// The appendix of this design doc also explains the concept of float
-// histograms. This Histogram message can represent both, the usual
-// integer histogram as well as a float histogram.
+// A native histogram message, supporting:
+// * sparse exponential bucketing and custom bucketing.
+// * float or integer histograms.
+//
+// See the full spec: https://prometheus.io/docs/specs/native_histograms/
message Histogram {
enum ResetHint {
RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
@@ -242,6 +250,24 @@ message Histogram {
// The last element is not only the upper inclusive bound of the last regular
// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
repeated double custom_values = 16;
+
+ // start_timestamp represents an optional start timestamp for the histogram sample,
+ // in ms format. The start timestamp represents the time when the histogram
+ // started counting, which can increase the accuracy of certain processing and
+ // query semantics (e.g. rates).
+ //
+ // Note:
+ // * Some receivers might require start timestamps for certain metric
+ // types and may reject samples within the Request that lack them.
+ // * "start timestamp" is the same concept as the "created timestamp" name Prometheus used in the past.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to efficiency and consistency.
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ int64 start_timestamp = 17;
}
// A BucketSpan defines a number of consecutive buckets with their
diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go
index 3cdf299dffc..75fc9b05d34 100644
--- a/vendor/github.com/prometheus/prometheus/promql/engine.go
+++ b/vendor/github.com/prometheus/prometheus/promql/engine.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"log/slog"
+ "maps"
"math"
"reflect"
"runtime"
@@ -124,6 +125,8 @@ var _ QueryLogger = (*logging.JSONFileLogger)(nil)
// QueryLogger is an interface that can be used to log all the queries logged
// by the engine.
+// logging.JSONFileLogger implements this interface; downstream users may
+// provide different implementations.
type QueryLogger interface {
slog.Handler
io.Closer
@@ -633,7 +636,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
logger := slog.New(l)
f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append.
- params := make(map[string]interface{}, 4)
+ params := make(map[string]any, 4)
params["query"] = q.q
if eq, ok := q.Statement().(*parser.EvalStmt); ok {
params["start"] = formatDate(eq.Start)
@@ -650,7 +653,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
f = append(f, slog.Any("spanID", span.SpanContext().SpanID()))
}
if origin := ctx.Value(QueryOrigin{}); origin != nil {
- for k, v := range origin.(map[string]interface{}) {
+ for k, v := range origin.(map[string]any) {
f = append(f, slog.Any(k, v))
}
}
@@ -926,13 +929,27 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
start -= durationMilliseconds(s.LookbackDelta) - 1
+ if n.Smoothed {
+ end += durationMilliseconds(s.LookbackDelta)
+ }
} else {
- // For all matrix queries we want to ensure that we have
- // (end-start) + range selected this way we have `range` data
- // before the start time. We subtract one from the range to
- // exclude samples positioned directly at the lower boundary of
- // the range.
- start -= durationMilliseconds(evalRange) - 1
+ // For matrix queries, adjust the start and end times to ensure the
+ // correct range of data is selected. For "anchored" selectors, extend
+ // the start time backwards by the lookback delta plus the evaluation
+ // range. For "smoothed" selectors, extend both the start and end times
+ // by the lookback delta, and also extend the start time by the
+ // evaluation range to cover the smoothing window. For standard range
+ // queries, extend the start time backwards by the range (minus one
+ // millisecond) to exclude samples exactly at the lower boundary.
+ switch {
+ case n.Anchored:
+ start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
+ case n.Smoothed:
+ start -= durationMilliseconds(s.LookbackDelta+evalRange) - 1
+ end += durationMilliseconds(s.LookbackDelta)
+ default:
+ start -= durationMilliseconds(evalRange) - 1
+ }
}
offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
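A sketch restating the selection-window arithmetic above in time.Time form (function and parameter names are illustrative, not from the diff); the +1ms keeps samples sitting exactly on the lower boundary out of the window:

package example

import "time"

func adjustedWindow(start, end time.Time, lookback, evalRange time.Duration, anchored, smoothed bool) (time.Time, time.Time) {
	switch {
	case anchored:
		start = start.Add(-(lookback + evalRange) + time.Millisecond)
	case smoothed:
		start = start.Add(-(lookback + evalRange) + time.Millisecond)
		end = end.Add(lookback)
	default:
		start = start.Add(-evalRange + time.Millisecond)
	}
	return start, end
}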
@@ -979,7 +996,6 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
evalRange = 0
hints.By, hints.Grouping = extractGroupsFromPath(path)
n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...)
-
case *parser.MatrixSelector:
evalRange = n.Range
}
@@ -1082,12 +1098,12 @@ type evaluator struct {
}
// errorf causes a panic with the input formatted into an error.
-func (ev *evaluator) errorf(format string, args ...interface{}) {
+func (ev *evaluator) errorf(format string, args ...any) {
ev.error(fmt.Errorf(format, args...))
}
// error causes a panic with the given error.
-func (ev *evaluator) error(err error) {
+func (*evaluator) error(err error) {
panic(err)
}
@@ -1190,7 +1206,11 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
sample.Metric.Get(model.BucketLabel), 64,
)
if err != nil {
- annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), arg.PositionRange()))
+ if enh.enableDelayedNameRemoval {
+ annos.Add(annotations.NewBadBucketLabelWarning(sample.Metric.Get(labels.MetricName), sample.Metric.Get(model.BucketLabel), arg.PositionRange()))
+ } else {
+ annos.Add(annotations.NewBadBucketLabelWarning("", sample.Metric.Get(model.BucketLabel), arg.PositionRange()))
+ }
continue
}
enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel)
@@ -1213,7 +1233,11 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
// At this data point, we have classic histogram
// buckets and a native histogram with the same name and
// labels. Do not evaluate anything.
- annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange()))
+ if enh.enableDelayedNameRemoval {
+ annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange()))
+ } else {
+ annos.Add(annotations.NewMixedClassicNativeHistogramsWarning("", arg.PositionRange()))
+ }
delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
enh.nativeHistogramSamples[idx].H = nil
continue
@@ -1516,6 +1540,76 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
return result, annos
}
+// smoothSeries is a helper function that smooths each series by interpolating
+// a value for every step from the samples before and after the step's timestamp.
+func (ev *evaluator) smoothSeries(series []storage.Series, offset time.Duration) Matrix {
+ dur := ev.endTimestamp - ev.startTimestamp
+
+ it := storage.NewBuffer(dur + 2*durationMilliseconds(ev.lookbackDelta))
+
+ offMS := offset.Milliseconds()
+ start := ev.startTimestamp - offMS
+ end := ev.endTimestamp - offMS
+ step := ev.interval
+ lb := durationMilliseconds(ev.lookbackDelta)
+
+ var chkIter chunkenc.Iterator
+ mat := make(Matrix, 0, len(series))
+
+ for _, s := range series {
+ ss := Series{Metric: s.Labels()}
+
+ chkIter = s.Iterator(chkIter)
+ it.Reset(chkIter)
+
+ var floats []FPoint
+ var hists []HPoint
+
+ for ts := start; ts <= end; ts += step {
+ matrixStart := ts - lb
+ matrixEnd := ts + lb
+
+ floats, hists = ev.matrixIterSlice(it, matrixStart, matrixEnd, floats, hists)
+ if len(floats) == 0 && len(hists) == 0 {
+ continue
+ }
+
+ if len(hists) > 0 {
+ // TODO: support native histograms.
+ ev.errorf("smoothed and anchored modifiers do not work with native histograms")
+ }
+
+ // Binary search for the first index with T >= ts.
+ i := sort.Search(len(floats), func(i int) bool { return floats[i].T >= ts })
+
+ switch {
+ case i < len(floats) && floats[i].T == ts:
+ // Exact match.
+ ss.Floats = append(ss.Floats, floats[i])
+
+ case i > 0 && i < len(floats):
+ // Interpolate between prev and next.
+ // TODO: detect if the sample is a counter, based on __type__ or metadata.
+ prev, next := floats[i-1], floats[i]
+ val := interpolate(prev, next, ts, false, false)
+ ss.Floats = append(ss.Floats, FPoint{F: val, T: ts})
+
+ case i > 0:
+ // No next point yet; carry forward previous value.
+ prev := floats[i-1]
+ ss.Floats = append(ss.Floats, FPoint{F: prev.F, T: ts})
+
+ default:
+ // i == 0 and floats[0].T > ts: there is no previous data yet; skip.
+ }
+ }
+
+ mat = append(mat, ss)
+ }
+
+ return mat
+}
+
// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
// For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series.
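smoothSeries above delegates the actual value computation to an interpolate helper that is not part of this hunk. A hypothetical sketch of the gauge case, assuming plain linear interpolation (the real helper also takes two boolean flags, presumably for counter handling):

package example

// interpolateLinear computes a value at ts from the two samples bracketing
// it, assuming prevT <= ts <= nextT.
func interpolateLinear(prevT, nextT int64, prevF, nextF float64, ts int64) float64 {
	if nextT == prevT {
		return prevF
	}
	frac := float64(ts-prevT) / float64(nextT-prevT)
	return prevF + (nextF-prevF)*frac
}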
@@ -1670,13 +1764,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
sortedGrouping := e.Grouping
slices.Sort(sortedGrouping)
- unwrapParenExpr(&e.Param)
- param := unwrapStepInvariantExpr(e.Param)
- unwrapParenExpr(¶m)
-
if e.Op == parser.COUNT_VALUES {
- valueLabel := param.(*parser.StringLiteral)
- if !model.LabelName(valueLabel.Val).IsValid() {
+ valueLabel := e.Param.(*parser.StringLiteral)
+ if !model.UTF8Validation.IsValidLabelName(valueLabel.Val) {
ev.errorf("invalid label name %s", valueLabel)
}
if !e.Without {
@@ -1690,8 +1780,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
var warnings annotations.Annotations
originalNumSamples := ev.currentSamples
- // param is the number k for topk/bottomk, or q for quantile.
- fp, ws := newFParams(ctx, ev, param)
+ // e.Param is the number k for topk/bottomk, or q for quantile.
+ fp, ws := newFParams(ctx, ev, e.Param)
warnings.Merge(ws)
// Now fetch the data to be aggregated.
val, ws := ev.eval(ctx, e.Expr)
@@ -1711,9 +1801,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
// Matrix evaluation always returns the evaluation time,
// so this function needs special handling when given
// a vector selector.
- unwrapParenExpr(&e.Args[0])
arg := unwrapStepInvariantExpr(e.Args[0])
- unwrapParenExpr(&arg)
vs, ok := arg.(*parser.VectorSelector)
if ok {
return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e)
@@ -1727,9 +1815,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
warnings annotations.Annotations
)
for i := range e.Args {
- unwrapParenExpr(&e.Args[i])
- a := unwrapStepInvariantExpr(e.Args[i])
- unwrapParenExpr(&a)
+ a := e.Args[i]
if _, ok := a.(*parser.MatrixSelector); ok {
matrixArgIndex = i
matrixArg = true
@@ -1780,12 +1866,21 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
}
}
- unwrapParenExpr(&e.Args[matrixArgIndex])
- arg := unwrapStepInvariantExpr(e.Args[matrixArgIndex])
- unwrapParenExpr(&arg)
+ arg := e.Args[matrixArgIndex]
sel := arg.(*parser.MatrixSelector)
selVS := sel.VectorSelector.(*parser.VectorSelector)
+ switch {
+ case selVS.Anchored:
+ if _, ok := AnchoredSafeFunctions[e.Func.Name]; !ok {
+ ev.errorf("anchored modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(AnchoredSafeFunctions)), ", "), e.Func.Name)
+ }
+ case selVS.Smoothed:
+ if _, ok := SmoothedSafeFunctions[e.Func.Name]; !ok {
+ ev.errorf("smoothed modifier can only be used with: %s - not with %s", strings.Join(slices.Sorted(maps.Keys(SmoothedSafeFunctions)), ", "), e.Func.Name)
+ }
+ }
+
ws, err := checkAndExpandSeriesSet(ctx, sel)
warnings.Merge(ws)
if err != nil {
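AnchoredSafeFunctions and SmoothedSafeFunctions are defined outside this hunk; the check above only needs set membership plus sorted keys for the error message. A hypothetical shape (the entries shown are assumptions, not taken from this diff):

package example

var anchoredSafeFunctions = map[string]struct{}{
	// Illustrative entries only; the real safelist lives elsewhere in promql.
	"rate":     {},
	"increase": {},
}

func isAnchoredSafe(name string) bool {
	_, ok := anchoredSafeFunctions[name]
	return ok
}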
@@ -1794,10 +1889,17 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
offset := durationMilliseconds(selVS.Offset)
selRange := durationMilliseconds(sel.Range)
- stepRange := selRange
- if stepRange > ev.interval {
- stepRange = ev.interval
+
+ var stepRange int64
+ switch {
+ case selVS.Anchored:
+ stepRange = min(selRange+durationMilliseconds(ev.lookbackDelta), ev.interval)
+ case selVS.Smoothed:
+ stepRange = min(selRange+durationMilliseconds(2*ev.lookbackDelta), ev.interval)
+ default:
+ stepRange = min(selRange, ev.interval)
}
+
// Reuse objects across steps to save memory allocations.
var floats []FPoint
var histograms []HPoint
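A worked example of the stepRange selection above, under assumed values selRange = 5m and lookbackDelta = 5m: with a 1m interval all three branches clamp to 1m, while a 30m interval yields 10m (anchored), 15m (smoothed) and 5m (default).

package example

func stepRanges(selRange, lookback, interval int64) (anchored, smoothed, plain int64) {
	anchored = min(selRange+lookback, interval)
	smoothed = min(selRange+2*lookback, interval)
	plain = min(selRange, interval)
	return anchored, smoothed, plain
}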
@@ -1805,14 +1907,25 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
inMatrix := make(Matrix, 1)
enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
// Process all the calls for one time series at a time.
- it := storage.NewBuffer(selRange)
+ // For anchored and smoothed selectors, we need to iterate over a
+ // larger range than the query range to account for the lookback delta.
+ // For standard range queries, we iterate over the query range.
+ bufferRange := selRange
+ switch {
+ case selVS.Anchored:
+ bufferRange += durationMilliseconds(ev.lookbackDelta)
+ case selVS.Smoothed:
+ bufferRange += durationMilliseconds(2 * ev.lookbackDelta)
+ }
+
+ it := storage.NewBuffer(bufferRange)
var chkIter chunkenc.Iterator
- // The last_over_time function acts like offset; thus, it
- // should keep the metric name. For all the other range
- // vector functions, the only change needed is to drop the
- // metric name in the output.
- dropName := e.Func.Name != "last_over_time"
+ // The last_over_time and first_over_time functions act like
+ // offset; thus, they should keep the metric name. For all the
+ // other range vector functions, the only change needed is to
+ // drop the metric name in the output.
+ dropName := (e.Func.Name != "last_over_time" && e.Func.Name != "first_over_time")
vectorVals := make([]Vector, len(e.Args)-1)
for i, s := range selVS.Series {
if err := contextDone(ctx, "expression evaluation"); err != nil {
@@ -1854,11 +1967,24 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if ts == ev.startTimestamp || selVS.Timestamp == nil {
maxt := ts - offset
mint := maxt - selRange
+ switch {
+ case selVS.Anchored:
+ mint -= durationMilliseconds(ev.lookbackDelta)
+ case selVS.Smoothed:
+ mint -= durationMilliseconds(ev.lookbackDelta)
+ maxt += durationMilliseconds(ev.lookbackDelta)
+ }
floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms)
}
if len(floats)+len(histograms) == 0 {
continue
}
+ if selVS.Anchored || selVS.Smoothed {
+ if len(histograms) > 0 {
+ // TODO: support native histograms.
+ ev.errorf("smoothed and anchored modifiers do not work with native histograms")
+ }
+ }
inMatrix[0].Floats = floats
inMatrix[0].Histograms = histograms
enh.Ts = ts
@@ -2057,6 +2183,10 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}
+ if e.Smoothed {
+ mat := ev.smoothSeries(e.Series, e.Offset)
+ return mat, ws
+ }
mat := ev.evalSeries(ctx, e.Series, e.Offset, false)
return mat, ws
@@ -2108,11 +2238,6 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples)
return res, ws
case *parser.StepInvariantExpr:
- switch ce := e.Expr.(type) {
- case *parser.StringLiteral, *parser.NumberLiteral:
- return ev.eval(ctx, ce)
- }
-
newEv := &evaluator{
startTimestamp: ev.startTimestamp,
endTimestamp: ev.startTimestamp, // Always a single evaluation.
@@ -2185,7 +2310,7 @@ func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) {
if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 {
r = prevSS.Histograms[len(prevSS.Histograms):]
prevSS.Histograms = prevSS.Histograms[0:len(prevSS.Histograms):len(prevSS.Histograms)]
- return
+ return r
}
return getHPointSlice(numSteps)
@@ -2197,7 +2322,7 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) {
if prevSS != nil && cap(prevSS.Floats)-2*len(prevSS.Floats) > 0 {
r = prevSS.Floats[len(prevSS.Floats):]
prevSS.Floats = prevSS.Floats[0:len(prevSS.Floats):len(prevSS.Floats)]
- return
+ return r
}
return getFPointSlice(numSteps)
@@ -2358,10 +2483,23 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
offset = durationMilliseconds(vs.Offset)
maxt = ev.startTimestamp - offset
mint = maxt - durationMilliseconds(node.Range)
- matrix = make(Matrix, 0, len(vs.Series))
-
- it = storage.NewBuffer(durationMilliseconds(node.Range))
+ // matrixMint keeps the original mint for smoothed and anchored selectors.
+ matrixMint = mint
+ // matrixMaxt keeps the original maxt for smoothed and anchored selectors.
+ matrixMaxt = maxt
+ matrix = make(Matrix, 0, len(vs.Series))
+ bufferRange = durationMilliseconds(node.Range)
)
+ switch {
+ case vs.Anchored:
+ bufferRange += durationMilliseconds(ev.lookbackDelta)
+ mint -= durationMilliseconds(ev.lookbackDelta)
+ case vs.Smoothed:
+ bufferRange += 2 * durationMilliseconds(ev.lookbackDelta)
+ mint -= durationMilliseconds(ev.lookbackDelta)
+ maxt += durationMilliseconds(ev.lookbackDelta)
+ }
+ it := storage.NewBuffer(bufferRange)
ws, err := checkAndExpandSeriesSet(ctx, node)
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@@ -2380,6 +2518,18 @@ func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSele
}
ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil)
+ switch {
+ case vs.Anchored:
+ if ss.Histograms != nil {
+ ev.errorf("anchored modifier is not supported with histograms")
+ }
+ ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, false)
+ case vs.Smoothed:
+ if ss.Histograms != nil {
+ ev.errorf("anchored modifier is not supported with histograms")
+ }
+ ss.Floats = extendFloats(ss.Floats, matrixMint, matrixMaxt, true)
+ }
totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms))
ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize)
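extendFloats is called above but not defined in this hunk. A hypothetical reading, with only the call shape (floats, mint, maxt, smooth flag) taken from the diff: pad the series so it covers the original [mint, maxt] window.

package example

// FPoint mirrors promql.FPoint for this sketch.
type FPoint struct {
	T int64
	F float64
}

// extendFloatsSketch synthesizes boundary points by carrying the nearest
// value outward; a smoothed variant would interpolate at the boundaries
// instead. The real helper's semantics may differ.
func extendFloatsSketch(points []FPoint, mint, maxt int64, smooth bool) []FPoint {
	if len(points) == 0 {
		return points
	}
	if points[0].T > mint {
		points = append([]FPoint{{T: mint, F: points[0].F}}, points...)
	}
	if last := points[len(points)-1]; last.T < maxt {
		points = append(points, FPoint{T: maxt, F: last.F})
	}
	_ = smooth // boundary interpolation for the smoothed case is omitted here
	return points
}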
@@ -2562,7 +2712,7 @@ loop:
return floats, histograms
}
-func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
+func (*evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
panic("set operations must only use many-to-many matching")
}
@@ -2586,7 +2736,7 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching,
return enh.Out
}
-func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
+func (*evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
switch {
case matching.Card != parser.CardManyToMany:
panic("set operations must only use many-to-many matching")
@@ -2613,7 +2763,7 @@ func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching,
return enh.Out
}
-func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
+func (*evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
panic("set operations must only use many-to-many matching")
}
@@ -2712,11 +2862,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
fl, fr = fr, fl
hl, hr = hr, hl
}
- floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos)
+ floatValue, histogramValue, keep, info, err := vectorElemBinop(op, fl, fr, hl, hr, pos)
if err != nil {
lastErr = err
continue
}
+ if info != nil {
+ lastErr = info
+ }
switch {
case returnBool:
histogramValue = nil
@@ -2836,8 +2989,8 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
lf, rf = rf, lf
lh, rh = rh, lh
}
- float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos)
- if err != nil {
+ float, histogram, keep, _, err := vectorElemBinop(op, lf, rf, lh, rh, pos)
+ if err != nil && !errors.Is(err, annotations.PromQLWarning) {
lastErr = err
continue
}
@@ -2904,83 +3057,98 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
}
// vectorElemBinop evaluates a binary operation between two Vector elements.
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) {
+func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (res float64, resH *histogram.FloatHistogram, keep bool, info, err error) {
opName := parser.ItemTypeStr[op]
switch {
case hlhs == nil && hrhs == nil:
{
switch op {
case parser.ADD:
- return lhs + rhs, nil, true, nil
+ return lhs + rhs, nil, true, nil, nil
case parser.SUB:
- return lhs - rhs, nil, true, nil
+ return lhs - rhs, nil, true, nil, nil
case parser.MUL:
- return lhs * rhs, nil, true, nil
+ return lhs * rhs, nil, true, nil, nil
case parser.DIV:
- return lhs / rhs, nil, true, nil
+ return lhs / rhs, nil, true, nil, nil
case parser.POW:
- return math.Pow(lhs, rhs), nil, true, nil
+ return math.Pow(lhs, rhs), nil, true, nil, nil
case parser.MOD:
- return math.Mod(lhs, rhs), nil, true, nil
+ return math.Mod(lhs, rhs), nil, true, nil, nil
case parser.EQLC:
- return lhs, nil, lhs == rhs, nil
+ return lhs, nil, lhs == rhs, nil, nil
case parser.NEQ:
- return lhs, nil, lhs != rhs, nil
+ return lhs, nil, lhs != rhs, nil, nil
case parser.GTR:
- return lhs, nil, lhs > rhs, nil
+ return lhs, nil, lhs > rhs, nil, nil
case parser.LSS:
- return lhs, nil, lhs < rhs, nil
+ return lhs, nil, lhs < rhs, nil, nil
case parser.GTE:
- return lhs, nil, lhs >= rhs, nil
+ return lhs, nil, lhs >= rhs, nil, nil
case parser.LTE:
- return lhs, nil, lhs <= rhs, nil
+ return lhs, nil, lhs <= rhs, nil, nil
case parser.ATAN2:
- return math.Atan2(lhs, rhs), nil, true, nil
+ return math.Atan2(lhs, rhs), nil, true, nil, nil
}
}
case hlhs == nil && hrhs != nil:
{
switch op {
case parser.MUL:
- return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil
+ return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil, nil
case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, "histogram", pos)
+ return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, "histogram", pos)
}
}
case hlhs != nil && hrhs == nil:
{
switch op {
case parser.MUL:
- return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil
+ return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil, nil
case parser.DIV:
- return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil
+ return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil
case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos)
+ return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos)
}
}
case hlhs != nil && hrhs != nil:
{
switch op {
case parser.ADD:
- res, err := hlhs.Copy().Add(hrhs)
+ res, counterResetCollision, nhcbBoundsReconciled, err := hlhs.Copy().Add(hrhs)
if err != nil {
- return 0, nil, false, err
+ return 0, nil, false, nil, err
}
- return 0, res.Compact(0), true, nil
+ if counterResetCollision {
+ err = annotations.NewHistogramCounterResetCollisionWarning(pos, annotations.HistogramAdd)
+ }
+ if nhcbBoundsReconciled {
+ info = annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramAdd)
+ }
+ return 0, res.Compact(0), true, info, err
case parser.SUB:
- res, err := hlhs.Copy().Sub(hrhs)
+ res, counterResetCollision, nhcbBoundsReconciled, err := hlhs.Copy().Sub(hrhs)
if err != nil {
- return 0, nil, false, err
+ return 0, nil, false, nil, err
+ }
+ // The result must be marked as gauge.
+ res.CounterResetHint = histogram.GaugeType
+ if counterResetCollision {
+ err = annotations.NewHistogramCounterResetCollisionWarning(pos, annotations.HistogramSub)
+ }
+ if nhcbBoundsReconciled {
+ info = annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramSub)
}
- return 0, res.Compact(0), true, nil
+
+ return 0, res.Compact(0), true, info, err
case parser.EQLC:
// This operation expects that both histograms are compacted.
- return 0, hlhs, hlhs.Equals(hrhs), nil
+ return 0, hlhs, hlhs.Equals(hrhs), nil, nil
case parser.NEQ:
// This operation expects that both histograms are compacted.
- return 0, hlhs, !hlhs.Equals(hrhs), nil
+ return 0, hlhs, !hlhs.Equals(hrhs), nil, nil
case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos)
+ return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos)
}
}
}
@@ -2999,9 +3167,11 @@ type groupedAggregation struct {
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
- incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
+ incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets.
groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
incrementalMean bool // True after reverting to incremental calculation of the mean value.
+ counterResetSeen bool // Counter reset hint CounterReset seen. Currently only used for histogram samples.
+ notCounterResetSeen bool // Counter reset hint NotCounterReset seen. Currently only used for histogram samples.
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@@ -3038,6 +3208,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
} else {
group.histogramValue = h.Copy()
group.hasHistogram = true
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ group.counterResetSeen = true
+ case histogram.NotCounterReset:
+ group.notCounterResetSeen = true
+ }
}
case parser.STDVAR, parser.STDDEV:
switch {
@@ -3085,11 +3261,20 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil {
group.hasHistogram = true
if group.histogramValue != nil {
- _, err := group.histogramValue.Add(h)
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ group.counterResetSeen = true
+ case histogram.NotCounterReset:
+ group.notCounterResetSeen = true
+ }
+ _, _, nhcbBoundsReconciled, err := group.histogramValue.Add(h)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
}
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
}
// Otherwise the aggregation contained floats
// previously and will be invalid anyway. No
@@ -3136,20 +3321,34 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil {
group.hasHistogram = true
if group.histogramValue != nil {
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ group.counterResetSeen = true
+ case histogram.NotCounterReset:
+ group.notCounterResetSeen = true
+ }
left := h.Copy().Div(group.groupCount)
right := group.histogramValue.Copy().Div(group.groupCount)
- toAdd, err := left.Sub(right)
+
+ toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
- _, err = group.histogramValue.Add(toAdd)
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
+
+ _, _, nhcbBoundsReconciled, err = group.histogramValue.Add(toAdd)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
group.incompatibleHistograms = true
continue
}
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
}
// Otherwise the aggregation contained floats
// previously and will be invalid anyway. No
@@ -3281,10 +3480,18 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
default:
aggr.floatValue += aggr.floatKahanC
}
+
default:
// For other aggregations, we already have the right value.
}
+ // This is only relevant for AVG and SUM with native histograms
+ // involved, but since those booleans aren't touched in other
+ // cases, we can just do it here in general.
+ if aggr.counterResetSeen && aggr.notCounterResetSeen {
+ annos.Add(annotations.NewHistogramCounterResetCollisionWarning(e.Expr.PositionRange(), annotations.HistogramAgg))
+ }
+
ss := &outputMatrix[ri]
addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps)
ss.DropName = inputMatrix[ri].DropName
@@ -3328,10 +3535,7 @@ seriesLoop:
var r float64
switch op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
- k = int64(fParam)
- if k > int64(len(inputMatrix)) {
- k = int64(len(inputMatrix))
- }
+ k = min(int64(fParam), int64(len(inputMatrix)))
if k < 1 {
if enh.Ts != ev.endTimestamp {
advanceRemainingSeries(enh.Ts, si+1)
@@ -3523,7 +3727,7 @@ seriesLoop:
// aggregationCountValues evaluates count_values on vec.
// Outputs as many series per group as there are values in the input.
-func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func (*evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
type groupCount struct {
labels labels.Labels
count int
@@ -3607,7 +3811,7 @@ func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, n
ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: h})
}
-func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) {
+func (*evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) {
switch {
case len(series.Floats) > 0 && series.Floats[0].T == ts:
f = series.Floats[0].F
@@ -3626,8 +3830,6 @@ func handleAggregationError(err error, e *parser.AggregateExpr, metricName strin
pos := e.Expr.PositionRange()
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
@@ -3642,7 +3844,7 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat
return annotations.New().Add(err)
}
// TODO(NeerajGartia21): Test the exact annotation output once the testing framework can do so.
- if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) || errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+ if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return annotations.New().Add(annotations.NewIncompatibleBucketLayoutInBinOpWarning(op, pos))
}
return nil
@@ -3698,7 +3900,7 @@ func changesMetricSchema(op parser.ItemType) bool {
}
// NewOriginContext returns a new context with data about the origin attached.
-func NewOriginContext(ctx context.Context, data map[string]interface{}) context.Context {
+func NewOriginContext(ctx context.Context, data map[string]any) context.Context {
return context.WithValue(ctx, QueryOrigin{}, data)
}
@@ -3725,8 +3927,8 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
}
// PreprocessExpr wraps all possible step invariant parts of the given expression with
-// StepInvariantExpr. It also resolves the preprocessors and evaluates duration expressions
-// into their numeric values.
+// StepInvariantExpr. It also resolves the preprocessors, evaluates duration expressions
+// into their numeric values, and removes superfluous parentheses from parameters to functions and aggregations.
func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) {
detectHistogramStatsDecoding(expr)
@@ -3734,18 +3936,20 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration)
return nil, err
}
- isStepInvariant := preprocessExprHelper(expr, start, end)
- if isStepInvariant {
+ _, shouldWrap := preprocessExprHelper(expr, start, end)
+ if shouldWrap {
return newStepInvariantExpr(expr), nil
}
return expr, nil
}
-// preprocessExprHelper wraps the child nodes of the expression
-// with a StepInvariantExpr wherever it's step invariant. The returned boolean is true if the
-// passed expression qualifies to be wrapped by StepInvariantExpr.
-// It also resolves the preprocessors.
-func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
+// preprocessExprHelper wraps child nodes of expr with a StepInvariantExpr
+// at the highest level within the tree that is step invariant.
+// It also resolves start() and end() on selector and subquery nodes,
+// and removes superfluous parentheses from parameters to functions and aggregations.
+// The returned isStepInvariant is true when the whole subexpression is step invariant.
+// The returned shouldWrap is false for cases like MatrixSelector and StringLiteral that never need to be wrapped.
+func preprocessExprHelper(expr parser.Expr, start, end time.Time) (isStepInvariant, shouldWrap bool) {
switch n := expr.(type) {
case *parser.VectorSelector:
switch n.StartOrEnd {
@@ -3754,49 +3958,56 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
case parser.END:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
}
- return n.Timestamp != nil
+ return n.Timestamp != nil, n.Timestamp != nil
case *parser.AggregateExpr:
+ unwrapParenExpr(&n.Expr)
+ unwrapParenExpr(&n.Param)
return preprocessExprHelper(n.Expr, start, end)
case *parser.BinaryExpr:
- isInvariant1, isInvariant2 := preprocessExprHelper(n.LHS, start, end), preprocessExprHelper(n.RHS, start, end)
- if isInvariant1 && isInvariant2 {
- return true
+ isInvariantLHS, shouldWrapLHS := preprocessExprHelper(n.LHS, start, end)
+ isInvariantRHS, shouldWrapRHS := preprocessExprHelper(n.RHS, start, end)
+ if isInvariantLHS && isInvariantRHS {
+ return true, true
}
- if isInvariant1 {
+ if shouldWrapLHS {
n.LHS = newStepInvariantExpr(n.LHS)
}
- if isInvariant2 {
+ if shouldWrapRHS {
n.RHS = newStepInvariantExpr(n.RHS)
}
- return false
+ return false, false
case *parser.Call:
_, ok := AtModifierUnsafeFunctions[n.Func.Name]
isStepInvariant := !ok
- isStepInvariantSlice := make([]bool, len(n.Args))
+ shouldWrap := make([]bool, len(n.Args))
for i := range n.Args {
- isStepInvariantSlice[i] = preprocessExprHelper(n.Args[i], start, end)
- isStepInvariant = isStepInvariant && isStepInvariantSlice[i]
+ unwrapParenExpr(&n.Args[i])
+ var argIsStepInvariant bool
+ argIsStepInvariant, shouldWrap[i] = preprocessExprHelper(n.Args[i], start, end)
+ isStepInvariant = isStepInvariant && argIsStepInvariant
}
if isStepInvariant {
// The function and all arguments are step invariant.
- return true
+ return true, true
}
- for i, isi := range isStepInvariantSlice {
+ for i, isi := range shouldWrap {
if isi {
n.Args[i] = newStepInvariantExpr(n.Args[i])
}
}
- return false
+ return false, false
case *parser.MatrixSelector:
- return preprocessExprHelper(n.VectorSelector, start, end)
+ // We don't need to wrap a MatrixSelector because functions over range vectors evaluate those directly.
+ isStepInvariant, _ := preprocessExprHelper(n.VectorSelector, start, end)
+ return isStepInvariant, false
case *parser.SubqueryExpr:
// Since we adjust offset for the @ modifier evaluation,
@@ -3804,7 +4015,7 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
// Hence we wrap the inside of subquery irrespective of
// @ on subquery (given it is also step invariant) so that
// it is evaluated only once w.r.t. the start time of subquery.
- isInvariant := preprocessExprHelper(n.Expr, start, end)
+ isInvariant, _ := preprocessExprHelper(n.Expr, start, end)
if isInvariant {
n.Expr = newStepInvariantExpr(n.Expr)
}
@@ -3814,7 +4025,7 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
case parser.END:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
}
- return n.Timestamp != nil
+ return n.Timestamp != nil, n.Timestamp != nil
case *parser.ParenExpr:
return preprocessExprHelper(n.Expr, start, end)
@@ -3823,7 +4034,7 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
return preprocessExprHelper(n.Expr, start, end)
case *parser.StringLiteral, *parser.NumberLiteral:
- return true
+ return true, false
}
panic(fmt.Sprintf("found unexpected node %#v", expr))
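As a quick illustration of the new two-value contract, a minimal sketch driving the exported entry point; the query string and time range are invented for the example:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`rate(http_requests_total[5m] @ end())`)
	if err != nil {
		panic(err)
	}
	start, end := time.Now().Add(-time.Hour), time.Now()
	// PreprocessExpr resolves @ start()/end(), evaluates duration
	// expressions, strips superfluous parentheses, and wraps
	// step-invariant subtrees in StepInvariantExpr.
	pre, err := promql.PreprocessExpr(expr, start, end, time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(pre.String())
}
```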
@@ -3880,22 +4091,36 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
return nil
}
- for i := len(path) - 1; i > 0; i-- { // Walk backwards up the path.
+ pathLoop:
+ for i := len(path) - 1; i >= 0; i-- { // Walk backwards up the path.
+ if _, ok := path[i].(*parser.SubqueryExpr); ok {
+ // If we ever see a subquery in the path, we
+ // will not skip the buckets. We need the
+ // buckets for correct counter reset detection.
+ n.SkipHistogramBuckets = false
+ break pathLoop
+ }
call, ok := path[i].(*parser.Call)
if !ok {
- continue
+ continue pathLoop
}
switch call.Func.Name {
case "histogram_count", "histogram_sum", "histogram_avg":
+ // We allow skipping buckets preliminarily. But
+ // we will continue through the path to see if
+ // we find a subquery (or a histogram function)
+ // further up (the latter wouldn't make sense,
+ // but no harm in detecting it).
n.SkipHistogramBuckets = true
case "histogram_quantile", "histogram_fraction":
+ // If we ever see a function that needs the
+ // whole histogram, we will not skip the
+ // buckets.
n.SkipHistogramBuckets = false
- default:
- continue
+ break pathLoop
}
- break
}
- return errors.New("stop")
+ return nil
})
}
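The subquery rule above can be observed through the public API; a hedged sketch, assuming the SkipHistogramBuckets field on VectorSelector as used in this hunk:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

// skipsBuckets reports whether preprocessing marked the vector
// selector in q to decode only histogram stats (count/sum).
func skipsBuckets(q string) bool {
	expr, err := parser.ParseExpr(q)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if _, err := promql.PreprocessExpr(expr, now, now, time.Minute); err != nil {
		panic(err)
	}
	var skips bool
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if vs, ok := node.(*parser.VectorSelector); ok {
			skips = vs.SkipHistogramBuckets
		}
		return nil
	})
	return skips
}

func main() {
	fmt.Println(skipsBuckets(`histogram_count(rate(m[5m]))`))                       // true
	fmt.Println(skipsBuckets(`histogram_count(max_over_time(rate(m[5m])[1h:5m]))`)) // false: subquery on the path
}
```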
@@ -3922,7 +4147,7 @@ func NewHashRatioSampler() *HashRatioSampler {
return &HashRatioSampler{}
}
-func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 {
+func (*HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 {
const (
float64MaxUint64 = float64(math.MaxUint64)
)
@@ -4010,3 +4235,39 @@ func (ev *evaluator) gatherVector(ts int64, input Matrix, output Vector, bufHelp
return output, bufHelpers
}
+
+// extendFloats extends the floats to the given mint and maxt.
+// This function is used with matrix selectors that are smoothed or anchored.
+func extendFloats(floats []FPoint, mint, maxt int64, smoothed bool) []FPoint {
+ lastSampleIndex := len(floats) - 1
+
+ firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T > mint })-1)
+ if smoothed {
+ lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return floats[i].T >= maxt })
+ }
+
+ if floats[lastSampleIndex].T <= mint {
+ return []FPoint{}
+ }
+
+ // TODO: detect if the sample is a counter, based on __type__ or metadata.
+ left := pickOrInterpolateLeft(floats, firstSampleIndex, mint, smoothed, false)
+ right := pickOrInterpolateRight(floats, lastSampleIndex, maxt, smoothed, false)
+
+ // Filter out samples at boundaries or outside the range.
+ if floats[firstSampleIndex].T <= mint {
+ firstSampleIndex++
+ }
+ if floats[lastSampleIndex].T >= maxt {
+ lastSampleIndex--
+ }
+
+ // TODO: Preallocate the length of the new list.
+ out := make([]FPoint, 0)
+ // Create the new floats list with the boundary samples and the inner samples.
+ out = append(out, FPoint{T: mint, F: left})
+ out = append(out, floats[firstSampleIndex:lastSampleIndex+1]...)
+ out = append(out, FPoint{T: maxt, F: right})
+
+ return out
+}
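A worked example of the boundary handling in extendFloats (values invented, anchored mode, smoothed=false):

```go
// floats = [{T:5, F:1}, {T:15, F:4}], mint = 10, maxt = 20.
//
//   firstSampleIndex = 0     (last sample at or before mint)
//   left  = floats[0].F = 1  (picked, not interpolated)
//   right = floats[1].F = 4
//
//   result: [{T:10, F:1}, {T:15, F:4}, {T:20, F:4}]
//
// Synthetic boundary points are materialized at exactly mint and maxt,
// and the sample just before the range start supplies the left value.
```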
diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go
index 6b038fe336e..ca8cfdce154 100644
--- a/vendor/github.com/prometheus/prometheus/promql/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/functions.go
@@ -65,13 +65,127 @@ func funcTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeHelper) (
}}, nil
}
+// pickOrInterpolateLeft returns the value at the left boundary of the range.
+// If interpolation is needed (when smoothed is true and the first sample is before the range start),
+// it returns the interpolated value at the left boundary; otherwise, it returns the first sample's value.
+func pickOrInterpolateLeft(floats []FPoint, first int, rangeStart int64, smoothed, isCounter bool) float64 {
+ if smoothed && floats[first].T < rangeStart {
+ return interpolate(floats[first], floats[first+1], rangeStart, isCounter, true)
+ }
+ return floats[first].F
+}
+
+// pickOrInterpolateRight returns the value at the right boundary of the range.
+// If interpolation is needed (when smoothed is true and the last sample is after the range end),
+// it returns the interpolated value at the right boundary; otherwise, it returns the last sample's value.
+func pickOrInterpolateRight(floats []FPoint, last int, rangeEnd int64, smoothed, isCounter bool) float64 {
+ if smoothed && last > 0 && floats[last].T > rangeEnd {
+ return interpolate(floats[last-1], floats[last], rangeEnd, isCounter, false)
+ }
+ return floats[last].F
+}
+
+// interpolate performs linear interpolation between two points.
+// If isCounter is true and there is a counter reset:
+// - on the left edge, it sets the value to 0.
+// - on the right edge, it adds the left value to the right value.
+// It then calculates the interpolated value at the given timestamp.
+func interpolate(p1, p2 FPoint, t int64, isCounter, leftEdge bool) float64 {
+ y1 := p1.F
+ y2 := p2.F
+ if isCounter && y2 < y1 {
+ if leftEdge {
+ y1 = 0
+ } else {
+ y2 += y1
+ }
+ }
+
+ return y1 + (y2-y1)*float64(t-p1.T)/float64(p2.T-p1.T)
+}
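A worked instance of the arithmetic above, restated locally since interpolate is unexported; the numbers are invented:

```go
package main

import "fmt"

// lerp restates the interpolate arithmetic for illustration only.
func lerp(t1 int64, y1 float64, t2 int64, y2 float64, t int64, isCounter, leftEdge bool) float64 {
	if isCounter && y2 < y1 { // a counter reset lies between the two points
		if leftEdge {
			y1 = 0 // the counter restarted from zero before the range
		} else {
			y2 += y1 // undo the reset on the right edge
		}
	}
	return y1 + (y2-y1)*float64(t-t1)/float64(t2-t1)
}

func main() {
	fmt.Println(lerp(10, 5, 20, 15, 14, false, false)) // 9: plain linear interpolation
	fmt.Println(lerp(10, 8, 20, 2, 20, true, false))   // 10: reset-adjusted right edge (2+8)
}
```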
+
+// correctForCounterResets calculates the correction for counter resets.
+// This function is only used for extendedRate functions with smoothed or anchored rates.
+func correctForCounterResets(left, right float64, points []FPoint) float64 {
+ var correction float64
+ prev := left
+ for _, p := range points {
+ if p.F < prev {
+ correction += prev
+ }
+ prev = p.F
+ }
+ if right < prev {
+ correction += prev
+ }
+ return correction
+}
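A worked run of the reset correction, restated locally since the helper is unexported; numbers invented:

```go
package main

import "fmt"

// resetCorrection restates correctForCounterResets: every time the
// series drops, the value before the drop is added to the correction,
// mirroring how rate() compensates for counter resets.
func resetCorrection(left, right float64, points []float64) float64 {
	var correction float64
	prev := left
	for _, p := range points {
		if p < prev {
			correction += prev
		}
		prev = p
	}
	if right < prev {
		correction += prev
	}
	return correction
}

func main() {
	// left=5, in-range samples 7, 2 (reset after 7), 4, right=1 (reset
	// after 4): correction = 7 + 4 = 11.
	fmt.Println(resetCorrection(5, 1, []float64{7, 2, 4}))
}
```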
+
+// extendedRate is a utility function for anchored/smoothed rate/increase/delta.
+// It calculates the rate (allowing for counter resets if isCounter is true),
+// extrapolates the first/last sample if needed, and returns
+// the result as either per-second (if isRate is true) or overall.
+func extendedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
+ var (
+ ms = args[0].(*parser.MatrixSelector)
+ vs = ms.VectorSelector.(*parser.VectorSelector)
+ samples = vals[0]
+ f = samples.Floats
+ lastSampleIndex = len(f) - 1
+ rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
+ rangeEnd = enh.Ts - durationMilliseconds(vs.Offset)
+ annos annotations.Annotations
+ smoothed = vs.Smoothed
+ )
+
+ firstSampleIndex := max(0, sort.Search(lastSampleIndex, func(i int) bool { return f[i].T > rangeStart })-1)
+ if smoothed {
+ lastSampleIndex = sort.Search(lastSampleIndex, func(i int) bool { return f[i].T >= rangeEnd })
+ }
+
+ if f[lastSampleIndex].T <= rangeStart {
+ return enh.Out, annos
+ }
+
+ left := pickOrInterpolateLeft(f, firstSampleIndex, rangeStart, smoothed, isCounter)
+ right := pickOrInterpolateRight(f, lastSampleIndex, rangeEnd, smoothed, isCounter)
+
+ resultFloat := right - left
+
+ if isCounter {
+ // We only need to consider samples exactly within the range
+ // for counter reset correction, as pickOrInterpolateLeft and
+ // pickOrInterpolateRight already handle the resets at boundaries.
+ if f[firstSampleIndex].T <= rangeStart {
+ firstSampleIndex++
+ }
+ if f[lastSampleIndex].T >= rangeEnd {
+ lastSampleIndex--
+ }
+
+ resultFloat += correctForCounterResets(left, right, f[firstSampleIndex:lastSampleIndex+1])
+ }
+ if isRate {
+ resultFloat /= ms.Range.Seconds()
+ }
+
+ return append(enh.Out, Sample{F: resultFloat}), annos
+}
+
// extrapolatedRate is a utility function for rate/increase/delta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
+//
+// Note: If the vector selector is smoothed or anchored, it will use the
+// extendedRate function instead.
func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) (Vector, annotations.Annotations) {
ms := args[0].(*parser.MatrixSelector)
vs := ms.VectorSelector.(*parser.VectorSelector)
+ if vs.Anchored || vs.Smoothed {
+ return extendedRate(vals, args, enh, isCounter, isRate)
+ }
+
var (
samples = vals[0]
rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
@@ -230,10 +344,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
- minSchema := prev.Schema
- if last.Schema < minSchema {
- minSchema = last.Schema
- }
+ minSchema := min(last.Schema, prev.Schema)
for _, currPoint := range points[1 : len(points)-1] {
curr := currPoint.H
if curr == nil {
@@ -254,28 +365,34 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
h := last.CopyToSchema(minSchema)
- _, err := h.Sub(prev)
+ // This subtraction may deliberately include conflicting counter resets.
+ // Counter resets are treated explicitly in this function, so the
+ // information about conflicting counter resets is ignored here.
+ _, _, nhcbBoundsReconciled, err := h.Sub(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramSub))
+ }
if isCounter {
// Second iteration to deal with counter resets.
for _, currPoint := range points[1:] {
curr := currPoint.H
if curr.DetectReset(prev) {
- _, err := h.Add(prev)
+ // Counter reset conflict ignored here for the same reason as above.
+ _, _, nhcbBoundsReconciled, err := h.Add(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
}
}
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(pos, annotations.HistogramAdd))
+ }
}
prev = curr
}
@@ -283,7 +400,6 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, pos))
}
- h.CounterResetHint = histogram.GaugeType
return h.Compact(0), annos
}
@@ -390,11 +506,16 @@ func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool)
annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, args.PositionRange()))
}
if !isRate || !ss[1].H.DetectReset(ss[0].H) {
- _, err := resultSample.H.Sub(ss[0].H)
+ // This subtraction may deliberately include conflicting
+ // counter resets. Counter resets are treated explicitly
+ // in this function, so the information about
+ // conflicting counter resets is ignored here.
+ _, _, nhcbBoundsReconciled, err := resultSample.H.Sub(ss[0].H)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args.PositionRange()))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return out, annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args.PositionRange()))
+ }
+ if nhcbBoundsReconciled {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args.PositionRange(), annotations.HistogramSub))
}
}
resultSample.H.CounterResetHint = histogram.GaugeType
@@ -700,20 +821,51 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
// the current implementation is accurate enough for practical purposes.
if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms.
+ var annos annotations.Annotations
vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) {
+ var counterResetSeen, notCounterResetSeen, nhcbBoundsReconciledSeen bool
+
+ trackCounterReset := func(h *histogram.FloatHistogram) {
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ counterResetSeen = true
+ case histogram.NotCounterReset:
+ notCounterResetSeen = true
+ }
+ }
+
+ defer func() {
+ if counterResetSeen && notCounterResetSeen {
+ annos.Add(annotations.NewHistogramCounterResetCollisionWarning(args[0].PositionRange(), annotations.HistogramAgg))
+ }
+ if nhcbBoundsReconciledSeen {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args[0].PositionRange(), annotations.HistogramAgg))
+ }
+ }()
+
mean := s.Histograms[0].H.Copy()
+ trackCounterReset(mean)
for i, h := range s.Histograms[1:] {
+ trackCounterReset(h.H)
count := float64(i + 2)
left := h.H.Copy().Div(count)
right := mean.Copy().Div(count)
- toAdd, err := left.Sub(right)
+
+ toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
if err != nil {
return mean, err
}
- _, err = mean.Add(toAdd)
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
+
+ _, _, nhcbBoundsReconciled, err = mean.Add(toAdd)
if err != nil {
return mean, err
}
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
}
return mean, nil
})
@@ -721,11 +873,9 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
- return vec, nil
+ return vec, annos
}
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
var (
@@ -767,6 +917,34 @@ func funcCountOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh
}), nil
}
+// === first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
+func funcFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ el := matrixVal[0]
+
+ var f FPoint
+ if len(el.Floats) > 0 {
+ f = el.Floats[0]
+ }
+
+ var h HPoint
+ if len(el.Histograms) > 0 {
+ h = el.Histograms[0]
+ }
+
+ // If a float data point exists and is older than any histogram data
+ // points, return it.
+ if h.H == nil || (len(el.Floats) > 0 && f.T < h.T) {
+ return append(enh.Out, Sample{
+ Metric: el.Metric,
+ F: f.F,
+ }), nil
+ }
+ return append(enh.Out, Sample{
+ Metric: el.Metric,
+ H: h.H.Copy(),
+ }), nil
+}
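A comment-only sketch of the selection rule above (data invented):

```go
// Floats:     [{T:100, F:3}, ...]
// Histograms: [{T:200, H:h}, ...]
//
// The float at T=100 is older than the first histogram at T=200, so
// first_over_time returns the float sample. With the timestamps
// swapped, the histogram wins and is returned as a copy so callers
// cannot mutate the stored histogram.
```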
+
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
el := matrixVal[0]
@@ -818,6 +996,26 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
}), annos
}
+// === ts_of_first_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
+func funcTsOfFirstOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+ el := matrixVal[0]
+
+ var tf int64 = math.MaxInt64
+ if len(el.Floats) > 0 {
+ tf = el.Floats[0].T
+ }
+
+ var th int64 = math.MaxInt64
+ if len(el.Histograms) > 0 {
+ th = el.Histograms[0].T
+ }
+
+ return append(enh.Out, Sample{
+ Metric: el.Metric,
+ F: float64(min(tf, th)) / 1000,
+ }), nil
+}
+
// === ts_of_last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcTsOfLastOverTime(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
el := matrixVal[0]
@@ -902,13 +1100,39 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
}
if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms.
+ var annos annotations.Annotations
vec, err := aggrHistOverTime(matrixVal, enh, func(s Series) (*histogram.FloatHistogram, error) {
+ var counterResetSeen, notCounterResetSeen, nhcbBoundsReconciledSeen bool
+
+ trackCounterReset := func(h *histogram.FloatHistogram) {
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ counterResetSeen = true
+ case histogram.NotCounterReset:
+ notCounterResetSeen = true
+ }
+ }
+
+ defer func() {
+ if counterResetSeen && notCounterResetSeen {
+ annos.Add(annotations.NewHistogramCounterResetCollisionWarning(args[0].PositionRange(), annotations.HistogramAgg))
+ }
+ if nhcbBoundsReconciledSeen {
+ annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args[0].PositionRange(), annotations.HistogramAgg))
+ }
+ }()
+
sum := s.Histograms[0].H.Copy()
+ trackCounterReset(sum)
for _, h := range s.Histograms[1:] {
- _, err := sum.Add(h.H)
+ trackCounterReset(h.H)
+ _, _, nhcbBoundsReconciled, err := sum.Add(h.H)
if err != nil {
return sum, err
}
+ if nhcbBoundsReconciled {
+ nhcbBoundsReconciledSeen = true
+ }
}
return sum, nil
})
@@ -916,11 +1140,9 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
- } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
}
}
- return vec, nil
+ return vec, annos
}
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
var sum, c float64
@@ -1018,7 +1240,7 @@ func funcAbsentOverTime(_ []Vector, _ Matrix, _ parser.Expressions, enh *EvalNod
// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcPresentOverTime(_ []Vector, matrixVals Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
- return aggrOverTime(matrixVals, enh, func(_ Series) float64 {
+ return aggrOverTime(matrixVals, enh, func(Series) float64 {
return 1
}), nil
}
@@ -1154,7 +1376,7 @@ func funcDeg(vectorVals []Vector, _ Matrix, _ parser.Expressions, enh *EvalNodeH
}
// === pi() Scalar ===
-func funcPi(_ []Vector, _ Matrix, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcPi([]Vector, Matrix, parser.Expressions, *EvalNodeHelper) (Vector, annotations.Annotations) {
return Vector{Sample{F: math.Pi}}, nil
}
@@ -1304,7 +1526,7 @@ func simpleHistogramFunc(vectorVals []Vector, enh *EvalNodeHelper, f func(h *his
for _, el := range vectorVals[0] {
if el.H != nil { // Process only histogram samples.
if !enh.enableDelayedNameRemoval {
- el.Metric = el.Metric.DropMetricName()
+ el.Metric = el.Metric.DropReserved(func(n string) bool { return n == labels.MetricName })
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
@@ -1463,7 +1685,11 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
if len(mb.buckets) > 0 {
res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)
if forcedMonotonicity {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
+ if enh.enableDelayedNameRemoval {
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
+ } else {
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo("", args[1].PositionRange()))
+ }
}
if !enh.enableDelayedNameRemoval {
@@ -1481,8 +1707,21 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
return enh.Out, annos
}
+// pickFirstSampleIndex returns the index of the last sample before
+// or at the range start, or 0 if none exist before the range start.
+// If the vector selector is not anchored, it always returns 0.
+func pickFirstSampleIndex(floats []FPoint, args parser.Expressions, enh *EvalNodeHelper) int {
+ ms := args[0].(*parser.MatrixSelector)
+ vs := ms.VectorSelector.(*parser.VectorSelector)
+ if !vs.Anchored {
+ return 0
+ }
+ rangeStart := enh.Ts - durationMilliseconds(ms.Range+vs.Offset)
+ return max(0, sort.Search(len(floats)-1, func(i int) bool { return floats[i].T > rangeStart })-1)
+}
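Worked example for the sort.Search above (anchored selector, invented timestamps):

```go
// floats at T = 10, 20, 30 and rangeStart = 15:
//
//   sort.Search(len(floats)-1, f(i)) with f(i) = floats[i].T > 15
//   returns 1 (T=20 is the first sample strictly after the range
//   start), so pickFirstSampleIndex returns max(0, 1-1) = 0: the
//   sample at T=10, just before the range, seeds the resets/changes
//   comparison loop.
```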
+
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcResets(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
resets := 0
@@ -1491,7 +1730,8 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
}
var prevSample, curSample Sample
- for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+ firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
+ for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
switch {
// Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier.
@@ -1504,7 +1744,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
iHistogram++
}
// Skip the comparison for the first sample, just initialize prevSample.
- if iFloat+iHistogram == 1 {
+ if iFloat+iHistogram == 1+firstSampleIndex {
prevSample = curSample
continue
}
@@ -1527,7 +1767,7 @@ func funcResets(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNod
}
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
-func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+func funcChanges(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := matrixVal[0].Floats
histograms := matrixVal[0].Histograms
changes := 0
@@ -1536,7 +1776,8 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
}
var prevSample, curSample Sample
- for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
+ firstSampleIndex := pickFirstSampleIndex(floats, args, enh)
+ for iFloat, iHistogram := firstSampleIndex, 0; iFloat < len(floats) || iHistogram < len(histograms); {
switch {
// Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier.
@@ -1549,7 +1790,7 @@ func funcChanges(_ []Vector, matrixVal Matrix, _ parser.Expressions, enh *EvalNo
iHistogram++
}
// Skip the comparison for the first sample, just initialize prevSample.
- if iFloat+iHistogram == 1 {
+ if iFloat+iHistogram == 1+firstSampleIndex {
prevSample = curSample
continue
}
@@ -1584,7 +1825,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}
- if !model.LabelName(dst).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(dst) {
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
}
@@ -1632,12 +1873,12 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions)
)
for i := 3; i < len(args); i++ {
src := stringFromArg(args[i])
- if !model.LabelName(src).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(src) {
panic(fmt.Errorf("invalid source label name in label_join(): %s", src))
}
srcLabels[i-3] = src
}
- if !model.LabelName(dst).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(dst) {
panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
}
@@ -1780,6 +2021,7 @@ var FunctionCalls = map[string]FunctionCall{
"delta": funcDelta,
"deriv": funcDeriv,
"exp": funcExp,
+ "first_over_time": funcFirstOverTime,
"floor": funcFloor,
"histogram_avg": funcHistogramAvg,
"histogram_count": funcHistogramCount,
@@ -1803,6 +2045,7 @@ var FunctionCalls = map[string]FunctionCall{
"mad_over_time": funcMadOverTime,
"max_over_time": funcMaxOverTime,
"min_over_time": funcMinOverTime,
+ "ts_of_first_over_time": funcTsOfFirstOverTime,
"ts_of_last_over_time": funcTsOfLastOverTime,
"ts_of_max_over_time": funcTsOfMaxOverTime,
"ts_of_min_over_time": funcTsOfMinOverTime,
@@ -1851,6 +2094,26 @@ var AtModifierUnsafeFunctions = map[string]struct{}{
"timestamp": {},
}
+// AnchoredSafeFunctions are the functions that can be used with the anchored
+// modifier. Anchored modifier returns matrices with samples outside of the
+// boundaries, so not every function can be used with it.
+var AnchoredSafeFunctions = map[string]struct{}{
+ "resets": {},
+ "changes": {},
+ "rate": {},
+ "increase": {},
+ "delta": {},
+}
+
+// SmoothedSafeFunctions are the functions that can be used with the smoothed
+// modifier. The smoothed modifier returns matrices with samples outside of
+// the range boundaries, so not every function can be used with it.
+var SmoothedSafeFunctions = map[string]struct{}{
+ "rate": {},
+ "increase": {},
+ "delta": {},
+}
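A hedged sketch of how a caller might consult these sets; validateModifiers is a hypothetical helper, not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql"
)

// validateModifiers gates the new modifiers per function name.
func validateModifiers(funcName string, anchored, smoothed bool) error {
	if anchored {
		if _, ok := promql.AnchoredSafeFunctions[funcName]; !ok {
			return fmt.Errorf("function %q does not support the anchored modifier", funcName)
		}
	}
	if smoothed {
		if _, ok := promql.SmoothedSafeFunctions[funcName]; !ok {
			return fmt.Errorf("function %q does not support the smoothed modifier", funcName)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateModifiers("rate", true, false))   // <nil>
	fmt.Println(validateModifiers("resets", false, true)) // error: resets is not smoothed-safe
}
```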
+
type vectorByValueHeap Vector
func (s vectorByValueHeap) Len() int {
@@ -1869,11 +2132,11 @@ func (s vectorByValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s *vectorByValueHeap) Push(x interface{}) {
+func (s *vectorByValueHeap) Push(x any) {
*s = append(*s, *(x.(*Sample)))
}
-func (s *vectorByValueHeap) Pop() interface{} {
+func (s *vectorByValueHeap) Pop() any {
old := *s
n := len(old)
el := old[n-1]
@@ -1899,11 +2162,11 @@ func (s vectorByReverseValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s *vectorByReverseValueHeap) Push(x interface{}) {
+func (s *vectorByReverseValueHeap) Push(x any) {
*s = append(*s, *(x.(*Sample)))
}
-func (s *vectorByReverseValueHeap) Pop() interface{} {
+func (s *vectorByReverseValueHeap) Pop() any {
old := *s
n := len(old)
el := old[n-1]
@@ -1946,14 +2209,12 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
}
func stringFromArg(e parser.Expr) string {
- tmp := unwrapStepInvariantExpr(e) // Unwrap StepInvariant
- unwrapParenExpr(&tmp) // Optionally unwrap ParenExpr
- return tmp.(*parser.StringLiteral).Val
+ return e.(*parser.StringLiteral).Val
}
func stringSliceFromArgs(args parser.Expressions) []string {
tmp := make([]string, len(args))
- for i := 0; i < len(args); i++ {
+ for i := range args {
tmp[i] = stringFromArg(args[i])
}
return tmp
diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
index 362b33301de..a71a63f8eb6 100644
--- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go
+++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go
@@ -61,7 +61,7 @@ const (
var symbolTable = labels.NewSymbolTable()
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
- p, warning := textparse.New(in, contentType, "", false, false, false, symbolTable)
+ p, warning := textparse.New(in, contentType, symbolTable, textparse.ParserOptions{})
if p == nil || warning != nil {
// An invalid content type is being passed, which should not happen
// in this context.
diff --git a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go
index cbc717cac0e..e58cc7d8483 100644
--- a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go
+++ b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go
@@ -19,157 +19,144 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
-// HistogramStatsIterator is an iterator that returns histogram objects
-// which have only their sum and count values populated. The iterator handles
-// counter reset detection internally and sets the counter reset hint accordingly
-// in each returned histogram object.
+// HistogramStatsIterator is an iterator that returns histogram objects that
+// have only their sum and count values populated. The iterator handles counter
+// reset detection internally and sets the counter reset hint accordingly in
+// each returned histogram object. The Next and Seek methods of the iterator
+// will never return ValHistogram, but ValFloatHistogram instead. Effectively,
+// the iterator enforces conversion of (integer) Histogram to FloatHistogram.
+// The AtHistogram method must not be called (and will panic).
type HistogramStatsIterator struct {
chunkenc.Iterator
- currentH *histogram.Histogram
- lastH *histogram.Histogram
-
- currentFH *histogram.FloatHistogram
- lastFH *histogram.FloatHistogram
-
- currentSeriesRead bool
+ current *histogram.FloatHistogram
+ last *histogram.FloatHistogram
+ lastIsCurrent bool
}
// NewHistogramStatsIterator creates a new HistogramStatsIterator.
func NewHistogramStatsIterator(it chunkenc.Iterator) *HistogramStatsIterator {
return &HistogramStatsIterator{
- Iterator: it,
- currentH: &histogram.Histogram{},
- currentFH: &histogram.FloatHistogram{},
+ Iterator: it,
+ current: &histogram.FloatHistogram{},
}
}
// Reset resets this iterator for use with a new underlying iterator, reusing
// objects already allocated where possible.
-func (f *HistogramStatsIterator) Reset(it chunkenc.Iterator) {
- f.Iterator = it
- f.currentSeriesRead = false
+func (hsi *HistogramStatsIterator) Reset(it chunkenc.Iterator) {
+ hsi.Iterator = it
+ hsi.last = nil
+ hsi.lastIsCurrent = false
}
-// AtHistogram returns the next timestamp/histogram pair. The counter reset
-// detection is guaranteed to be correct only when the caller does not switch
-// between AtHistogram and AtFloatHistogram calls.
-func (f *HistogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
- var t int64
- t, f.currentH = f.Iterator.AtHistogram(f.currentH)
- if value.IsStaleNaN(f.currentH.Sum) {
- h = &histogram.Histogram{Sum: f.currentH.Sum}
- return t, h
+// Next mostly relays to the underlying iterator, but changes a ValHistogram
+// return into a ValFloatHistogram return.
+func (hsi *HistogramStatsIterator) Next() chunkenc.ValueType {
+ hsi.lastIsCurrent = false
+ vt := hsi.Iterator.Next()
+ if vt == chunkenc.ValHistogram {
+ return chunkenc.ValFloatHistogram
}
+ return vt
+}
- if h == nil {
- h = &histogram.Histogram{
- CounterResetHint: f.getResetHint(f.currentH),
- Count: f.currentH.Count,
- Sum: f.currentH.Sum,
- }
- f.setLastH(f.currentH)
- return t, h
+// Seek mostly relays to the underlying iterator, but changes a ValHistogram
+// return into a ValFloatHistogram return.
+func (hsi *HistogramStatsIterator) Seek(t int64) chunkenc.ValueType {
+ // If the Seek is going to move the iterator, we have to forget the
+ // stored last histogram and clear the lastIsCurrent flag.
+ if t > hsi.AtT() {
+ hsi.last = nil
+ hsi.lastIsCurrent = false
}
-
- returnValue := histogram.Histogram{
- CounterResetHint: f.getResetHint(f.currentH),
- Count: f.currentH.Count,
- Sum: f.currentH.Sum,
+ vt := hsi.Iterator.Seek(t)
+ if vt == chunkenc.ValHistogram {
+ return chunkenc.ValFloatHistogram
}
- returnValue.CopyTo(h)
-
- f.setLastH(f.currentH)
- return t, h
+ return vt
}
-// AtFloatHistogram returns the next timestamp/float histogram pair. The counter
-// reset detection is guaranteed to be correct only when the caller does not
-// switch between AtHistogram and AtFloatHistogram calls.
-func (f *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
- var t int64
- t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
- if value.IsStaleNaN(f.currentFH.Sum) {
- return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
- }
+// AtHistogram must never be called.
+func (*HistogramStatsIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+ panic("HistogramStatsIterator.AtHistogram must never be called")
+}
- if fh == nil {
- fh = &histogram.FloatHistogram{
- CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
- Count: f.currentFH.Count,
- Sum: f.currentFH.Sum,
+// AtFloatHistogram returns the next timestamp/float histogram pair. The method
+// performs a counter reset detection on the fly. It will return an explicit
+// hint (not UnknownCounterReset) if the previous sample has been accessed with
+// the same iterator.
+func (hsi *HistogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+ populateFH := func(src *histogram.FloatHistogram, detectReset bool) {
+ h := histogram.FloatHistogram{
+ CounterResetHint: src.CounterResetHint,
+ Count: src.Count,
+ Sum: src.Sum,
+ }
+ if detectReset {
+ h.CounterResetHint = hsi.getResetHint(src.CounterResetHint)
+ }
+ if fh == nil {
+ // Note that we cannot simply write `fh = &h` here
+ // because that would let h escape to the heap.
+ fh = &histogram.FloatHistogram{}
+ *fh = h
+ } else {
+ h.CopyTo(fh)
}
- f.setLastFH(f.currentFH)
- return t, fh
}
- returnValue := histogram.FloatHistogram{
- CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
- Count: f.currentFH.Count,
- Sum: f.currentFH.Sum,
+ if hsi.lastIsCurrent {
+ // Nothing changed since last AtFloatHistogram call. Return a
+ // copy of the stored last histogram rather than doing counter
+ // reset detection again (which would yield a potentially wrong
+ // result of "no counter reset").
+ populateFH(hsi.last, false)
+ return hsi.AtT(), fh
}
- returnValue.CopyTo(fh)
-
- f.setLastFH(f.currentFH)
- return t, fh
-}
-func (f *HistogramStatsIterator) setLastH(h *histogram.Histogram) {
- f.lastFH = nil
- if f.lastH == nil {
- f.lastH = h.Copy()
- } else {
- h.CopyTo(f.lastH)
+ var t int64
+ t, hsi.current = hsi.Iterator.AtFloatHistogram(hsi.current)
+ if value.IsStaleNaN(hsi.current.Sum) {
+ populateFH(hsi.current, false)
+ return t, fh
}
-
- f.currentSeriesRead = true
+ populateFH(hsi.current, true)
+ hsi.setLastFromCurrent(fh.CounterResetHint)
+ return t, fh
}
-func (f *HistogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) {
- f.lastH = nil
- if f.lastFH == nil {
- f.lastFH = fh.Copy()
+// setLastFromCurrent stores a copy of hsi.current as hsi.last. The
+// CounterResetHint of hsi.last is set to the provided value, though. This is
+// meant to store the value we have calculated on the fly so that we can return
+// the same without re-calculation in case AtFloatHistogram is called multiple
+// times.
+func (hsi *HistogramStatsIterator) setLastFromCurrent(hint histogram.CounterResetHint) {
+ if hsi.last == nil {
+ hsi.last = hsi.current.Copy()
} else {
- fh.CopyTo(f.lastFH)
+ hsi.current.CopyTo(hsi.last)
}
-
- f.currentSeriesRead = true
+ hsi.last.CounterResetHint = hint
+ hsi.lastIsCurrent = true
}
-func (f *HistogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
+func (hsi *HistogramStatsIterator) getResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
if hint != histogram.UnknownCounterReset {
return hint
}
- prevFH := f.lastFH
- if prevFH == nil || !f.currentSeriesRead {
- if f.lastH == nil || !f.currentSeriesRead {
- // We don't know if there's a counter reset.
- return histogram.UnknownCounterReset
- }
- prevFH = f.lastH.ToFloat(nil)
- }
- if f.currentFH.DetectReset(prevFH) {
- return histogram.CounterReset
- }
- return histogram.NotCounterReset
-}
-
-func (f *HistogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint {
- if h.CounterResetHint != histogram.UnknownCounterReset {
- return h.CounterResetHint
- }
- var prevFH *histogram.FloatHistogram
- if f.lastH == nil || !f.currentSeriesRead {
- if f.lastFH == nil || !f.currentSeriesRead {
- // We don't know if there's a counter reset.
- return histogram.UnknownCounterReset
- }
- prevFH = f.lastFH
- } else {
- prevFH = f.lastH.ToFloat(nil)
+ if hsi.last == nil {
+ // We don't know if there's a counter reset. Note that this
+ // generally will trigger an explicit counter reset detection by
+ // the PromQL engine, which in turn isn't as reliable in this
+ // case because the PromQL engine will not see the buckets.
+ // However, we can assume that in cases where the counter reset
+ // detection is relevant, an iteration through the series has
+ // happened, and therefore we do not end up here in the first
+ // place.
+ return histogram.UnknownCounterReset
}
- fh := h.ToFloat(nil)
- if fh.DetectReset(prevFH) {
+ if hsi.current.DetectReset(hsi.last) {
return histogram.CounterReset
}
return histogram.NotCounterReset
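Putting the new contract together, a consumption sketch (not part of the patch; assumes imports of promql and tsdb/chunkenc, and an existing iterator over a histogram series):

```go
// consumeStats shows the contract: Next/Seek never report
// ValHistogram, AtHistogram panics, and AtFloatHistogram yields
// histograms with only Count, Sum and the counter reset hint set.
func consumeStats(underlying chunkenc.Iterator) {
	hsi := promql.NewHistogramStatsIterator(underlying)
	for vt := hsi.Next(); vt != chunkenc.ValNone; vt = hsi.Next() {
		if vt != chunkenc.ValFloatHistogram {
			continue // e.g. plain float samples, read via At()
		}
		t, fh := hsi.AtFloatHistogram(nil)
		_, _ = t, fh
	}
}
```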
diff --git a/vendor/github.com/prometheus/prometheus/promql/info.go b/vendor/github.com/prometheus/prometheus/promql/info.go
index 0197330822c..0067fce845b 100644
--- a/vendor/github.com/prometheus/prometheus/promql/info.go
+++ b/vendor/github.com/prometheus/prometheus/promql/info.go
@@ -55,13 +55,13 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par
}
// Don't try to enrich info series.
- ignoreSeries := map[int]struct{}{}
+ ignoreSeries := map[uint64]struct{}{}
loop:
- for i, s := range mat {
+ for _, s := range mat {
name := s.Metric.Get(labels.MetricName)
for _, m := range infoNameMatchers {
if m.Matches(name) {
- ignoreSeries[i] = struct{}{}
+ ignoreSeries[s.Metric.Hash()] = struct{}{}
continue loop
}
}
@@ -121,11 +121,11 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
// fetchInfoSeries fetches info series given matching identifying labels in mat.
// Series in ignoreSeries are not fetched.
// dataLabelMatchers may be mutated.
-func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) {
+func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) {
// A map of values for all identifying labels we are interested in.
idLblValues := map[string]map[string]struct{}{}
- for i, s := range mat {
- if _, exists := ignoreSeries[i]; exists {
+ for _, s := range mat {
+ if _, exists := ignoreSeries[s.Metric.Hash()]; exists {
continue
}
@@ -197,7 +197,7 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri
}
// combineWithInfoSeries combines mat with select data labels from infoMat.
-func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) {
+func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) {
buf := make([]byte, 0, 1024)
lb := labels.NewScratchBuilder(0)
sigFunction := func(name string) func(labels.Labels) string {
@@ -249,19 +249,19 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
tempNumSamples := ev.currentSamples
// For every base series, compute signature per info metric.
- baseSigs := make([]map[string]string, 0, len(mat))
+ baseSigs := make(map[uint64]map[string]string, len(mat))
for _, s := range mat {
sigs := make(map[string]string, len(infoMetrics))
for infoName := range infoMetrics {
sigs[infoName] = sigfs[infoName](s.Metric)
}
- baseSigs = append(baseSigs, sigs)
+ baseSigs[s.Metric.Hash()] = sigs
}
- infoSigs := make([]string, 0, len(infoMat))
+ infoSigs := make(map[uint64]string, len(infoMat))
for _, s := range infoMat {
name := s.Metric.Map()[labels.MetricName]
- infoSigs = append(infoSigs, sigfs[name](s.Metric))
+ infoSigs[s.Metric.Hash()] = sigfs[name](s.Metric)
}
var warnings annotations.Annotations
@@ -331,7 +331,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat
// combineWithInfoVector combines base and info Vectors.
// Base series in ignoreSeries are not combined.
-func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[int]struct{}, baseSigs []map[string]string, infoSigs []string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) {
+func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[uint64]struct{}, baseSigs map[uint64]map[string]string, infoSigs map[uint64]string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) {
if len(base) == 0 {
return nil, nil // Short-circuit: nothing is going to match.
}
@@ -343,14 +343,14 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[i
clear(enh.rightSigs)
}
- for i, s := range info {
+ for _, s := range info {
if s.H != nil {
ev.error(errors.New("info sample should be float"))
}
// We encode original info sample timestamps via the float value.
origT := int64(s.F)
- sig := infoSigs[i]
+ sig := infoSigs[s.Metric.Hash()]
if existing, exists := enh.rightSigs[sig]; exists {
// We encode original info sample timestamps via the float value.
existingOrigT := int64(existing.F)
@@ -362,16 +362,18 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[i
enh.rightSigs[sig] = s
default:
// The two info samples have the same timestamp - conflict.
- name := s.Metric.Map()[labels.MetricName]
- ev.errorf("found duplicate series for info metric %s", name)
+ ev.errorf("found duplicate series for info metric: existing %s @ %d, new %s @ %d",
+ existing.Metric.String(), existingOrigT, s.Metric.String(), origT)
}
} else {
enh.rightSigs[sig] = s
}
}
- for i, bs := range base {
- if _, exists := ignoreSeries[i]; exists {
+ for _, bs := range base {
+ hash := bs.Metric.Hash()
+
+ if _, exists := ignoreSeries[hash]; exists {
// This series should not be enriched with info metric data labels.
enh.Out = append(enh.Out, Sample{
Metric: bs.Metric,
@@ -386,7 +388,7 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[i
// For every info metric name, try to find an info series with the same signature.
seenInfoMetrics := map[string]struct{}{}
- for infoName, sig := range baseSigs[i] {
+ for infoName, sig := range baseSigs[hash] {
is, exists := enh.rightSigs[sig]
if !exists {
continue
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index dc3e36b5b58..67ecb190fea 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -226,6 +226,11 @@ type VectorSelector struct {
// This is the case when VectorSelector is used to represent the info function's second argument.
BypassEmptyMatcherCheck bool
+ // Anchored is true when the VectorSelector is anchored.
+ Anchored bool
+ // Smoothed is true when the VectorSelector is smoothed.
+ Smoothed bool
+
PosRange posrange.PositionRange
}
@@ -243,15 +248,15 @@ func (TestStmt) PositionRange() posrange.PositionRange {
End: -1,
}
}
-func (e *AggregateExpr) Type() ValueType { return ValueTypeVector }
-func (e *Call) Type() ValueType { return e.Func.ReturnType }
-func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix }
-func (e *SubqueryExpr) Type() ValueType { return ValueTypeMatrix }
-func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar }
-func (e *ParenExpr) Type() ValueType { return e.Expr.Type() }
-func (e *StringLiteral) Type() ValueType { return ValueTypeString }
-func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() }
-func (e *VectorSelector) Type() ValueType { return ValueTypeVector }
+func (*AggregateExpr) Type() ValueType { return ValueTypeVector }
+func (e *Call) Type() ValueType { return e.Func.ReturnType }
+func (*MatrixSelector) Type() ValueType { return ValueTypeMatrix }
+func (*SubqueryExpr) Type() ValueType { return ValueTypeMatrix }
+func (*NumberLiteral) Type() ValueType { return ValueTypeScalar }
+func (e *ParenExpr) Type() ValueType { return e.Expr.Type() }
+func (*StringLiteral) Type() ValueType { return ValueTypeString }
+func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() }
+func (*VectorSelector) Type() ValueType { return ValueTypeVector }
func (e *BinaryExpr) Type() ValueType {
if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar {
return ValueTypeScalar
@@ -259,7 +264,7 @@ func (e *BinaryExpr) Type() ValueType {
return ValueTypeVector
}
func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
-func (e *DurationExpr) Type() ValueType { return ValueTypeScalar }
+func (*DurationExpr) Type() ValueType { return ValueTypeScalar }
func (*AggregateExpr) PromQLExpr() {}
func (*BinaryExpr) PromQLExpr() {}
@@ -334,10 +339,13 @@ func Walk(v Visitor, node Node, path []Node) error {
if v, err = v.Visit(node, path); v == nil || err != nil {
return err
}
- path = append(path, node)
+ var pathToHere []Node // Initialized only when needed.
- for _, e := range Children(node) {
- if err := Walk(v, e, path); err != nil {
+ for e := range ChildrenIter(node) {
+ if pathToHere == nil {
+ pathToHere = append(path, node)
+ }
+ if err := Walk(v, e, pathToHere); err != nil {
return err
}
}
@@ -371,61 +379,71 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
// Inspect traverses an AST in depth-first order: It starts by calling
// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
// for all the non-nil children of node, recursively.
+// Note: path may be overwritten after f returns; copy path if you need to retain it.
func Inspect(node Node, f inspector) {
- Walk(f, node, nil) //nolint:errcheck
+ var pathBuf [4]Node // To reduce allocations during recursion.
+ Walk(f, node, pathBuf[:0]) //nolint:errcheck
+}
+
+// ChildrenIter returns an iterator over all child nodes of a syntax tree node.
+func ChildrenIter(node Node) func(func(Node) bool) {
+ return func(yield func(Node) bool) {
+ // Historically, these type switches have performed significantly better than interfaces.
+ switch n := node.(type) {
+ case *EvalStmt:
+ yield(n.Expr)
+ case Expressions:
+ for _, e := range n {
+ if !yield(e) {
+ return
+ }
+ }
+ case *AggregateExpr:
+ if n.Expr != nil {
+ if !yield(n.Expr) {
+ return
+ }
+ }
+ if n.Param != nil {
+ yield(n.Param)
+ }
+ case *BinaryExpr:
+ if !yield(n.LHS) {
+ return
+ }
+ yield(n.RHS)
+ case *Call:
+ for _, e := range n.Args {
+ if !yield(e) {
+ return
+ }
+ }
+ case *SubqueryExpr:
+ yield(n.Expr)
+ case *ParenExpr:
+ yield(n.Expr)
+ case *UnaryExpr:
+ yield(n.Expr)
+ case *MatrixSelector:
+ yield(n.VectorSelector)
+ case *StepInvariantExpr:
+ yield(n.Expr)
+ case *NumberLiteral, *StringLiteral, *VectorSelector:
+ // nothing to do
+ default:
+ panic(fmt.Errorf("promql.ChildrenIter: unhandled node type %T", node))
+ }
+ }
}
// Children returns a list of all child nodes of a syntax tree node.
+// Retained for backward compatibility; prefer ChildrenIter().
func Children(node Node) []Node {
- // For some reasons these switches have significantly better performance than interfaces
- switch n := node.(type) {
- case *EvalStmt:
- return []Node{n.Expr}
- case Expressions:
- // golang cannot convert slices of interfaces
- ret := make([]Node, len(n))
- for i, e := range n {
- ret[i] = e
- }
- return ret
- case *AggregateExpr:
- // While this does not look nice, it should avoid unnecessary allocations
- // caused by slice resizing
- switch {
- case n.Expr == nil && n.Param == nil:
- return nil
- case n.Expr == nil:
- return []Node{n.Param}
- case n.Param == nil:
- return []Node{n.Expr}
- default:
- return []Node{n.Expr, n.Param}
- }
- case *BinaryExpr:
- return []Node{n.LHS, n.RHS}
- case *Call:
- // golang cannot convert slices of interfaces
- ret := make([]Node, len(n.Args))
- for i, e := range n.Args {
- ret[i] = e
- }
- return ret
- case *SubqueryExpr:
- return []Node{n.Expr}
- case *ParenExpr:
- return []Node{n.Expr}
- case *UnaryExpr:
- return []Node{n.Expr}
- case *MatrixSelector:
- return []Node{n.VectorSelector}
- case *StepInvariantExpr:
- return []Node{n.Expr}
- case *NumberLiteral, *StringLiteral, *VectorSelector:
- // nothing to do
- return []Node{}
- default:
- panic(fmt.Errorf("promql.Children: unhandled node type %T", node))
+ ret := []Node{}
+ for e := range ChildrenIter(node) {
+ ret = append(ret, e)
}
+ return ret
}
// mergeRanges is a helper function to merge the PositionRanges of two Nodes.
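ChildrenIter returns a Go 1.23-style push iterator, so callers can range over children without allocating a slice and can stop early with break (break makes yield return false, which the switch arms above check). Two short usage sketches against the new API; the copy in collectPaths follows the caveat Inspect now documents about its reused path buffer:

    // Early-terminating scan over direct children, no intermediate slice.
    func hasStringLiteralChild(n Node) bool {
        for child := range ChildrenIter(n) {
            if _, ok := child.(*StringLiteral); ok {
                return true // break: yield returns false and iteration stops
            }
        }
        return false
    }

    // Inspect reuses its path buffer across visits; copy before retaining.
    func collectPaths(expr Expr) [][]Node {
        var paths [][]Node
        Inspect(expr, func(node Node, path []Node) error {
            paths = append(paths, append([]Node(nil), path...))
            return nil
        })
        return paths
    }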
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index dfb181833f2..a471cb3a6df 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -162,6 +162,12 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
+ "first_over_time": {
+ Name: "first_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ Experimental: true,
+ },
"floor": {
Name: "floor",
ArgTypes: []ValueType{ValueTypeVector},
@@ -283,6 +289,12 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
+ "ts_of_first_over_time": {
+ Name: "ts_of_first_over_time",
+ ArgTypes: []ValueType{ValueTypeMatrix},
+ ReturnType: ValueTypeVector,
+ Experimental: true,
+ },
"ts_of_max_over_time": {
Name: "ts_of_max_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
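Both new functions are flagged Experimental, so they parse only when experimental PromQL functions are switched on. A hedged end-to-end sketch using the parser's public toggle (parser.EnableExperimentalFunctions is the existing switch that gates Experimental: true entries):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        parser.EnableExperimentalFunctions = true // off by default; required here
        expr, err := parser.ParseExpr(`first_over_time(http_requests_total[5m])`)
        if err != nil {
            panic(err)
        }
        fmt.Println(expr.Type()) // "vector", per the ReturnType registered above
    }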
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index e7e16cd0330..d9bbb10b28a 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -141,6 +141,8 @@ GROUP_LEFT
GROUP_RIGHT
IGNORING
OFFSET
+SMOOTHED
+ANCHORED
ON
WITHOUT
%token keywordsEnd
@@ -187,7 +189,7 @@ START_METRIC_SELECTOR
%type int
%type uint
%type number series_value signed_number signed_or_unsigned_number
-%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
+%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr anchored_expr smoothed_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector duration_expr paren_duration_expr positive_duration_expr offset_duration_expr
%start start
@@ -230,6 +232,8 @@ expr :
| matrix_selector
| number_duration_literal
| offset_expr
+ | anchored_expr
+ | smoothed_expr
| paren_expr
| string_literal
| subquery_expr
@@ -244,29 +248,15 @@ expr :
*/
aggregate_expr : aggregate_op aggregate_modifier function_call_body
- {
- // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input
- // like 'sum (some_metric) by test'
- if len(yylex.(*parser).closingParens) > 1 {
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
- }
- $$ = yylex.(*parser).newAggregateExpr($1, $2, $3)
- }
+ { $$ = yylex.(*parser).newAggregateExpr($1, $2, $3, false) }
| aggregate_op function_call_body aggregate_modifier
- {
- // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input
- // like 'sum by test (some_metric)'
- if len(yylex.(*parser).closingParens) > 1 {
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
- }
- $$ = yylex.(*parser).newAggregateExpr($1, $3, $2)
- }
+ { $$ = yylex.(*parser).newAggregateExpr($1, $3, $2, false) }
| aggregate_op function_call_body
- { $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, $2) }
+ { $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, $2, true) }
| aggregate_op error
{
yylex.(*parser).unexpected("aggregation","");
- $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, Expressions{})
+ $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, Expressions{}, false)
}
;
@@ -378,14 +368,14 @@ grouping_label_list:
grouping_label : maybe_label
{
- if !model.LabelName($1.Val).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName($1.Val) {
yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val)
}
$$ = $1
}
| STRING {
unquoted := yylex.(*parser).unquoteString($1.Val)
- if !model.LabelName(unquoted).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(unquoted) {
yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted)
}
$$ = $1
@@ -414,10 +404,9 @@ function_call : IDENTIFIER function_call_body
Args: $2.(Expressions),
PosRange: posrange.PositionRange{
Start: $1.Pos,
- End: yylex.(*parser).closingParens[0],
+ End: yylex.(*parser).lastClosing,
},
}
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
}
;
@@ -443,10 +432,7 @@ function_call_args: function_call_args COMMA expr
*/
paren_expr : LEFT_PAREN expr RIGHT_PAREN
- {
- $$ = &ParenExpr{Expr: $2.(Expr), PosRange: mergeRanges(&$1, &$3)}
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
- }
+ { $$ = &ParenExpr{Expr: $2.(Expr), PosRange: mergeRanges(&$1, &$3)} }
;
/*
@@ -482,6 +468,20 @@ offset_expr: expr OFFSET offset_duration_expr
{ yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 }
;
+/*
+ * Anchored and smoothed modifiers
+ */
+
+anchored_expr: expr ANCHORED
+ {
+ yylex.(*parser).setAnchored($1)
+ }
+
+smoothed_expr: expr SMOOTHED
+ {
+ yylex.(*parser).setSmoothed($1)
+ }
+
/*
* @ modifiers.
*/
@@ -696,7 +696,7 @@ metric : metric_identifier label_set
;
-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED;
label_set : LEFT_BRACE label_set_list RIGHT_BRACE
{ $$ = labels.New($2...) }
@@ -953,7 +953,7 @@ counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET |
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP;
+maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED;
unary_op : ADD | SUB;
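The grammar wires ANCHORED and SMOOTHED as postfix modifiers on an arbitrary expr and delegates to setAnchored/setSmoothed, whose bodies are not part of this patch. A plausible, explicitly hypothetical sketch of what such a method could look like, assuming it marks every vector selector under the expression (mirroring the new Anchored/Smoothed fields declared on VectorSelector above):

    // Hypothetical: the real setAnchored lives in the parser and may differ,
    // e.g. by rejecting the modifier on expressions without a selector.
    func (p *parser) setAnchored(e Node) {
        Inspect(e, func(node Node, _ []Node) error {
            if vs, ok := node.(*VectorSelector); ok {
                vs.Anchored = true
            }
            return nil
        })
    }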
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
index e93d1b3de6b..eb4b32129a9 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go
@@ -115,26 +115,28 @@ const GROUP_LEFT = 57422
const GROUP_RIGHT = 57423
const IGNORING = 57424
const OFFSET = 57425
-const ON = 57426
-const WITHOUT = 57427
-const keywordsEnd = 57428
-const preprocessorStart = 57429
-const START = 57430
-const END = 57431
-const STEP = 57432
-const preprocessorEnd = 57433
-const counterResetHintsStart = 57434
-const UNKNOWN_COUNTER_RESET = 57435
-const COUNTER_RESET = 57436
-const NOT_COUNTER_RESET = 57437
-const GAUGE_TYPE = 57438
-const counterResetHintsEnd = 57439
-const startSymbolsStart = 57440
-const START_METRIC = 57441
-const START_SERIES_DESCRIPTION = 57442
-const START_EXPRESSION = 57443
-const START_METRIC_SELECTOR = 57444
-const startSymbolsEnd = 57445
+const SMOOTHED = 57426
+const ANCHORED = 57427
+const ON = 57428
+const WITHOUT = 57429
+const keywordsEnd = 57430
+const preprocessorStart = 57431
+const START = 57432
+const END = 57433
+const STEP = 57434
+const preprocessorEnd = 57435
+const counterResetHintsStart = 57436
+const UNKNOWN_COUNTER_RESET = 57437
+const COUNTER_RESET = 57438
+const NOT_COUNTER_RESET = 57439
+const GAUGE_TYPE = 57440
+const counterResetHintsEnd = 57441
+const startSymbolsStart = 57442
+const START_METRIC = 57443
+const START_SERIES_DESCRIPTION = 57444
+const START_EXPRESSION = 57445
+const START_METRIC_SELECTOR = 57446
+const startSymbolsEnd = 57447
var yyToknames = [...]string{
"$end",
@@ -220,6 +222,8 @@ var yyToknames = [...]string{
"GROUP_RIGHT",
"IGNORING",
"OFFSET",
+ "SMOOTHED",
+ "ANCHORED",
"ON",
"WITHOUT",
"keywordsEnd",
@@ -252,489 +256,502 @@ var yyExca = [...]int16{
-1, 1,
1, -1,
-2, 0,
- -1, 38,
- 1, 143,
- 10, 143,
- 24, 143,
+ -1, 40,
+ 1, 149,
+ 10, 149,
+ 24, 149,
-2, 0,
- -1, 66,
- 2, 186,
- 15, 186,
- 79, 186,
- 85, 186,
- -2, 103,
- -1, 67,
- 2, 187,
- 15, 187,
- 79, 187,
- 85, 187,
- -2, 104,
- -1, 68,
- 2, 188,
- 15, 188,
- 79, 188,
- 85, 188,
- -2, 106,
- -1, 69,
- 2, 189,
- 15, 189,
- 79, 189,
- 85, 189,
- -2, 107,
-1, 70,
- 2, 190,
- 15, 190,
- 79, 190,
- 85, 190,
- -2, 108,
- -1, 71,
- 2, 191,
- 15, 191,
- 79, 191,
- 85, 191,
- -2, 113,
- -1, 72,
2, 192,
15, 192,
79, 192,
- 85, 192,
- -2, 115,
- -1, 73,
+ 87, 192,
+ -2, 107,
+ -1, 71,
2, 193,
15, 193,
79, 193,
- 85, 193,
- -2, 117,
- -1, 74,
+ 87, 193,
+ -2, 108,
+ -1, 72,
2, 194,
15, 194,
79, 194,
- 85, 194,
- -2, 118,
- -1, 75,
+ 87, 194,
+ -2, 110,
+ -1, 73,
2, 195,
15, 195,
79, 195,
- 85, 195,
- -2, 119,
- -1, 76,
+ 87, 195,
+ -2, 111,
+ -1, 74,
2, 196,
15, 196,
79, 196,
- 85, 196,
- -2, 120,
- -1, 77,
+ 87, 196,
+ -2, 112,
+ -1, 75,
2, 197,
15, 197,
79, 197,
- 85, 197,
- -2, 121,
- -1, 78,
+ 87, 197,
+ -2, 117,
+ -1, 76,
2, 198,
15, 198,
79, 198,
- 85, 198,
- -2, 125,
- -1, 79,
+ 87, 198,
+ -2, 119,
+ -1, 77,
2, 199,
15, 199,
79, 199,
- 85, 199,
- -2, 126,
- -1, 129,
- 41, 262,
- 42, 262,
- 52, 262,
- 53, 262,
- 57, 262,
- -2, 20,
- -1, 239,
- 9, 249,
- 12, 249,
- 13, 249,
- 18, 249,
- 19, 249,
- 25, 249,
- 41, 249,
- 47, 249,
- 48, 249,
- 51, 249,
- 57, 249,
- 62, 249,
- 63, 249,
- 64, 249,
- 65, 249,
- 66, 249,
- 67, 249,
- 68, 249,
- 69, 249,
- 70, 249,
- 71, 249,
- 72, 249,
- 73, 249,
- 74, 249,
- 75, 249,
- 79, 249,
- 83, 249,
- 85, 249,
- 88, 249,
- 89, 249,
- 90, 249,
+ 87, 199,
+ -2, 121,
+ -1, 78,
+ 2, 200,
+ 15, 200,
+ 79, 200,
+ 87, 200,
+ -2, 122,
+ -1, 79,
+ 2, 201,
+ 15, 201,
+ 79, 201,
+ 87, 201,
+ -2, 123,
+ -1, 80,
+ 2, 202,
+ 15, 202,
+ 79, 202,
+ 87, 202,
+ -2, 124,
+ -1, 81,
+ 2, 203,
+ 15, 203,
+ 79, 203,
+ 87, 203,
+ -2, 125,
+ -1, 82,
+ 2, 204,
+ 15, 204,
+ 79, 204,
+ 87, 204,
+ -2, 129,
+ -1, 83,
+ 2, 205,
+ 15, 205,
+ 79, 205,
+ 87, 205,
+ -2, 130,
+ -1, 135,
+ 41, 270,
+ 42, 270,
+ 52, 270,
+ 53, 270,
+ 57, 270,
+ -2, 22,
+ -1, 245,
+ 9, 257,
+ 12, 257,
+ 13, 257,
+ 18, 257,
+ 19, 257,
+ 25, 257,
+ 41, 257,
+ 47, 257,
+ 48, 257,
+ 51, 257,
+ 57, 257,
+ 62, 257,
+ 63, 257,
+ 64, 257,
+ 65, 257,
+ 66, 257,
+ 67, 257,
+ 68, 257,
+ 69, 257,
+ 70, 257,
+ 71, 257,
+ 72, 257,
+ 73, 257,
+ 74, 257,
+ 75, 257,
+ 79, 257,
+ 83, 257,
+ 84, 257,
+ 85, 257,
+ 87, 257,
+ 90, 257,
+ 91, 257,
+ 92, 257,
-2, 0,
- -1, 240,
- 9, 249,
- 12, 249,
- 13, 249,
- 18, 249,
- 19, 249,
- 25, 249,
- 41, 249,
- 47, 249,
- 48, 249,
- 51, 249,
- 57, 249,
- 62, 249,
- 63, 249,
- 64, 249,
- 65, 249,
- 66, 249,
- 67, 249,
- 68, 249,
- 69, 249,
- 70, 249,
- 71, 249,
- 72, 249,
- 73, 249,
- 74, 249,
- 75, 249,
- 79, 249,
- 83, 249,
- 85, 249,
- 88, 249,
- 89, 249,
- 90, 249,
+ -1, 246,
+ 9, 257,
+ 12, 257,
+ 13, 257,
+ 18, 257,
+ 19, 257,
+ 25, 257,
+ 41, 257,
+ 47, 257,
+ 48, 257,
+ 51, 257,
+ 57, 257,
+ 62, 257,
+ 63, 257,
+ 64, 257,
+ 65, 257,
+ 66, 257,
+ 67, 257,
+ 68, 257,
+ 69, 257,
+ 70, 257,
+ 71, 257,
+ 72, 257,
+ 73, 257,
+ 74, 257,
+ 75, 257,
+ 79, 257,
+ 83, 257,
+ 84, 257,
+ 85, 257,
+ 87, 257,
+ 90, 257,
+ 91, 257,
+ 92, 257,
-2, 0,
}
const yyPrivate = 57344
-const yyLast = 1045
+const yyLast = 1071
var yyAct = [...]int16{
- 53, 393, 391, 176, 398, 324, 272, 231, 187, 179,
- 45, 338, 89, 135, 215, 87, 118, 122, 339, 64,
- 121, 60, 180, 123, 150, 120, 119, 405, 406, 407,
- 408, 62, 237, 124, 238, 112, 239, 240, 116, 388,
- 387, 358, 315, 208, 184, 356, 145, 377, 117, 115,
- 57, 6, 118, 346, 183, 344, 175, 356, 210, 127,
- 56, 129, 94, 96, 97, 314, 98, 99, 100, 101,
- 102, 103, 104, 105, 106, 107, 185, 108, 109, 111,
- 95, 125, 80, 392, 138, 40, 307, 312, 313, 184,
- 364, 318, 186, 130, 136, 137, 342, 122, 81, 183,
- 365, 306, 309, 123, 110, 363, 319, 91, 174, 173,
- 311, 172, 362, 189, 193, 194, 195, 196, 197, 198,
- 235, 168, 320, 190, 190, 190, 190, 190, 190, 190,
- 171, 192, 169, 211, 191, 191, 191, 191, 191, 191,
- 191, 201, 204, 190, 126, 199, 128, 200, 2, 3,
- 4, 5, 221, 57, 191, 233, 223, 316, 341, 259,
- 227, 112, 216, 56, 217, 131, 113, 116, 234, 264,
- 260, 420, 185, 399, 409, 382, 263, 117, 115, 352,
- 256, 118, 114, 226, 351, 80, 190, 421, 186, 258,
- 419, 191, 260, 418, 381, 265, 266, 191, 146, 350,
- 262, 81, 190, 108, 219, 111, 113, 116, 113, 116,
- 207, 174, 173, 191, 218, 220, 203, 117, 115, 117,
- 115, 118, 114, 118, 114, 217, 137, 310, 261, 202,
- 110, 225, 236, 124, 257, 132, 82, 241, 242, 243,
- 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
- 254, 255, 340, 317, 224, 36, 336, 337, 7, 376,
- 343, 375, 90, 345, 138, 219, 93, 269, 10, 189,
- 190, 268, 88, 190, 136, 218, 220, 347, 84, 190,
- 222, 191, 134, 374, 191, 91, 267, 91, 373, 372,
- 191, 417, 371, 370, 151, 152, 153, 154, 155, 156,
- 157, 158, 159, 160, 161, 162, 163, 164, 165, 354,
- 113, 116, 369, 368, 367, 366, 144, 355, 357, 190,
- 359, 117, 115, 90, 214, 118, 114, 360, 361, 213,
- 191, 349, 385, 88, 51, 8, 37, 184, 57, 38,
- 83, 86, 212, 378, 175, 1, 91, 183, 56, 422,
- 348, 113, 116, 190, 166, 143, 141, 142, 384, 65,
- 386, 140, 117, 115, 191, 50, 118, 114, 390, 185,
- 80, 394, 395, 396, 139, 49, 401, 400, 403, 402,
- 397, 410, 48, 47, 149, 186, 81, 46, 275, 411,
- 412, 190, 44, 353, 413, 379, 174, 173, 285, 147,
- 205, 43, 191, 415, 291, 148, 42, 41, 383, 61,
- 416, 274, 9, 9, 113, 116, 52, 230, 414, 192,
- 190, 321, 423, 92, 228, 117, 115, 270, 85, 118,
- 114, 191, 404, 287, 288, 177, 273, 289, 54, 133,
- 0, 0, 0, 0, 0, 302, 0, 0, 276, 278,
- 280, 281, 282, 290, 292, 295, 296, 297, 298, 299,
- 303, 304, 275, 0, 277, 279, 283, 284, 286, 293,
- 294, 209, 285, 0, 300, 301, 305, 0, 291, 0,
- 0, 188, 271, 0, 0, 274, 0, 0, 57, 0,
- 113, 116, 0, 0, 175, 0, 0, 0, 56, 0,
- 0, 117, 115, 0, 0, 118, 114, 287, 288, 0,
- 0, 289, 0, 0, 0, 0, 0, 0, 0, 302,
- 80, 0, 276, 278, 280, 281, 282, 290, 292, 295,
- 296, 297, 298, 299, 303, 304, 81, 0, 277, 279,
- 283, 284, 286, 293, 294, 0, 174, 173, 300, 301,
- 305, 57, 0, 112, 55, 82, 0, 58, 0, 0,
- 22, 56, 0, 167, 206, 0, 0, 59, 0, 192,
- 57, 0, 0, 0, 0, 0, 175, 0, 0, 0,
- 56, 96, 0, 80, 0, 0, 0, 0, 0, 18,
- 19, 105, 106, 20, 0, 108, 0, 111, 95, 81,
- 0, 0, 80, 0, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79, 81, 0,
- 0, 13, 110, 0, 380, 24, 0, 30, 174, 173,
- 31, 32, 63, 57, 39, 0, 55, 82, 0, 58,
- 0, 0, 22, 56, 0, 0, 178, 0, 0, 59,
- 0, 170, 0, 184, 0, 0, 0, 0, 113, 116,
- 0, 0, 0, 183, 0, 80, 0, 0, 0, 117,
- 115, 18, 19, 118, 114, 20, 0, 0, 0, 0,
- 0, 81, 0, 0, 0, 185, 66, 67, 68, 69,
+ 57, 182, 401, 399, 185, 406, 278, 237, 193, 332,
+ 93, 47, 346, 141, 68, 221, 91, 413, 414, 415,
+ 416, 127, 128, 64, 156, 186, 66, 126, 347, 326,
+ 129, 243, 122, 125, 130, 244, 245, 246, 119, 122,
+ 118, 124, 123, 121, 327, 151, 124, 118, 214, 123,
+ 121, 396, 373, 124, 120, 364, 395, 366, 323, 385,
+ 328, 354, 352, 133, 216, 135, 6, 98, 100, 101,
+ 364, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 324, 112, 113, 117, 99, 42, 131, 315, 112,
+ 144, 117, 136, 400, 241, 350, 191, 143, 128, 349,
+ 142, 137, 270, 314, 322, 320, 129, 268, 317, 114,
+ 116, 115, 192, 95, 233, 178, 114, 116, 115, 195,
+ 199, 200, 201, 202, 203, 204, 174, 321, 319, 177,
+ 196, 196, 196, 196, 196, 196, 196, 232, 175, 217,
+ 267, 130, 197, 197, 197, 197, 197, 197, 197, 132,
+ 196, 134, 138, 205, 390, 407, 239, 207, 210, 227,
+ 206, 223, 197, 229, 428, 2, 3, 4, 5, 360,
+ 190, 194, 429, 389, 359, 7, 266, 240, 61, 86,
+ 189, 231, 269, 427, 181, 150, 426, 262, 60, 358,
+ 264, 119, 122, 196, 425, 209, 271, 272, 266, 197,
+ 152, 225, 123, 121, 230, 197, 124, 120, 208, 196,
+ 84, 224, 226, 119, 122, 38, 384, 213, 222, 383,
+ 223, 197, 10, 382, 123, 121, 85, 235, 124, 120,
+ 143, 190, 88, 318, 238, 381, 180, 179, 241, 242,
+ 380, 189, 379, 378, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 348,
+ 225, 198, 325, 191, 94, 377, 351, 376, 97, 353,
+ 224, 226, 344, 345, 92, 195, 375, 196, 374, 192,
+ 196, 39, 228, 355, 61, 55, 196, 95, 1, 197,
+ 181, 87, 197, 149, 60, 148, 172, 69, 197, 54,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 417, 84, 362, 65, 53,
+ 190, 9, 9, 144, 52, 51, 363, 365, 196, 367,
+ 189, 155, 85, 142, 275, 368, 369, 184, 274, 50,
+ 197, 140, 180, 179, 190, 49, 95, 48, 372, 119,
+ 122, 386, 191, 273, 189, 8, 46, 153, 211, 40,
+ 123, 121, 196, 371, 124, 120, 392, 198, 192, 394,
+ 370, 388, 94, 45, 197, 154, 191, 402, 403, 404,
+ 398, 44, 92, 405, 43, 409, 408, 411, 410, 418,
+ 90, 281, 192, 56, 236, 95, 422, 316, 419, 420,
+ 196, 291, 361, 421, 393, 119, 122, 297, 329, 423,
+ 96, 391, 197, 234, 280, 276, 123, 121, 424, 89,
+ 124, 120, 412, 119, 122, 187, 188, 183, 431, 196,
+ 279, 119, 122, 58, 123, 121, 293, 294, 124, 120,
+ 295, 197, 123, 121, 139, 0, 124, 120, 308, 0,
+ 0, 282, 284, 286, 287, 288, 296, 298, 301, 302,
+ 303, 304, 305, 309, 310, 0, 281, 283, 285, 289,
+ 290, 292, 299, 313, 312, 300, 291, 0, 220, 306,
+ 307, 311, 297, 219, 0, 0, 277, 387, 0, 280,
+ 147, 0, 190, 61, 0, 146, 218, 0, 0, 265,
+ 0, 0, 189, 60, 430, 0, 119, 122, 145, 0,
+ 0, 293, 294, 0, 0, 295, 0, 123, 121, 0,
+ 0, 124, 120, 308, 191, 84, 282, 284, 286, 287,
+ 288, 296, 298, 301, 302, 303, 304, 305, 309, 310,
+ 192, 85, 283, 285, 289, 290, 292, 299, 313, 312,
+ 300, 180, 179, 0, 306, 307, 311, 61, 0, 118,
+ 59, 86, 0, 62, 0, 0, 22, 60, 0, 0,
+ 212, 0, 0, 63, 0, 0, 263, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 98, 100, 0, 84,
+ 0, 0, 0, 0, 0, 18, 19, 109, 110, 20,
+ 0, 112, 113, 117, 99, 85, 0, 0, 0, 0,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 0, 186, 0, 13, 0, 0, 0, 24, 0, 30,
- 0, 0, 31, 32, 63, 57, 0, 112, 55, 82,
- 0, 58, 0, 0, 22, 56, 0, 0, 0, 0,
- 0, 59, 181, 182, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 94, 96, 0, 80, 0, 0,
- 0, 0, 0, 18, 19, 105, 106, 20, 308, 108,
- 109, 111, 95, 81, 0, 0, 0, 0, 66, 67,
- 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
- 78, 79, 17, 82, 0, 13, 110, 0, 22, 24,
- 0, 30, 113, 116, 31, 32, 63, 0, 0, 0,
- 0, 0, 0, 117, 115, 0, 0, 118, 114, 0,
- 0, 0, 229, 0, 0, 0, 184, 18, 19, 232,
- 0, 20, 0, 235, 0, 0, 183, 0, 0, 0,
+ 80, 81, 82, 83, 0, 0, 0, 13, 114, 116,
+ 115, 24, 37, 36, 215, 30, 0, 0, 31, 32,
+ 67, 61, 41, 0, 59, 86, 0, 62, 0, 0,
+ 22, 60, 0, 119, 122, 0, 0, 63, 0, 0,
+ 0, 0, 0, 0, 123, 121, 0, 0, 124, 120,
+ 0, 357, 0, 84, 0, 0, 0, 0, 61, 18,
+ 19, 0, 0, 20, 181, 0, 0, 0, 60, 85,
+ 356, 0, 0, 0, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 0, 0,
+ 84, 13, 0, 0, 0, 24, 37, 36, 0, 30,
+ 0, 0, 31, 32, 67, 61, 85, 0, 59, 86,
+ 0, 62, 331, 0, 22, 60, 180, 179, 0, 330,
+ 0, 63, 0, 334, 335, 333, 340, 342, 339, 341,
+ 336, 337, 338, 343, 0, 0, 0, 84, 0, 0,
+ 0, 198, 0, 18, 19, 0, 0, 20, 0, 0,
+ 0, 0, 0, 85, 0, 0, 0, 0, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 17, 86, 0, 13, 0, 0, 22, 24,
+ 37, 36, 397, 30, 0, 0, 31, 32, 67, 0,
+ 0, 0, 0, 334, 335, 333, 340, 342, 339, 341,
+ 336, 337, 338, 343, 0, 0, 0, 18, 19, 0,
+ 0, 20, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
- 26, 27, 28, 29, 33, 34, 17, 36, 185, 13,
- 0, 0, 22, 24, 323, 30, 0, 0, 31, 32,
- 35, 322, 0, 0, 186, 326, 327, 325, 332, 334,
- 331, 333, 328, 329, 330, 335, 0, 0, 0, 0,
+ 26, 27, 28, 29, 33, 34, 17, 38, 0, 13,
+ 0, 0, 22, 24, 37, 36, 0, 30, 0, 0,
+ 31, 32, 35, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
16, 21, 23, 25, 26, 27, 28, 29, 33, 34,
- 112, 0, 0, 13, 0, 0, 0, 24, 0, 30,
- 0, 0, 31, 32, 35, 0, 0, 0, 0, 112,
- 0, 0, 0, 0, 0, 0, 0, 94, 96, 97,
- 0, 98, 99, 100, 101, 102, 103, 104, 105, 106,
- 107, 0, 108, 109, 111, 95, 94, 96, 97, 0,
- 98, 99, 100, 0, 102, 103, 104, 105, 106, 107,
- 389, 108, 109, 111, 95, 112, 0, 0, 0, 110,
- 0, 326, 327, 325, 332, 334, 331, 333, 328, 329,
- 330, 335, 0, 0, 0, 0, 0, 0, 110, 0,
- 0, 0, 94, 96, 97, 0, 98, 99, 0, 0,
- 102, 103, 0, 105, 106, 107, 0, 108, 109, 111,
- 95, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 110,
+ 118, 0, 0, 13, 0, 0, 0, 24, 37, 36,
+ 0, 30, 0, 0, 31, 32, 35, 0, 0, 118,
+ 0, 0, 0, 0, 0, 0, 0, 98, 100, 101,
+ 0, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 0, 112, 113, 117, 99, 98, 100, 101, 0,
+ 102, 103, 104, 0, 106, 107, 108, 109, 110, 111,
+ 173, 112, 113, 117, 99, 118, 0, 61, 0, 114,
+ 116, 115, 0, 181, 118, 0, 0, 60, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 114, 116,
+ 115, 0, 98, 100, 101, 0, 102, 103, 0, 84,
+ 106, 107, 100, 109, 110, 111, 0, 112, 113, 117,
+ 99, 0, 109, 110, 0, 85, 112, 0, 117, 99,
+ 0, 0, 0, 0, 0, 180, 179, 0, 0, 0,
+ 0, 0, 0, 0, 114, 116, 115, 0, 0, 0,
+ 0, 0, 0, 114, 116, 115, 0, 0, 0, 0,
+ 176,
}
var yyPact = [...]int16{
- 49, 248, 834, 834, 624, 770, -1000, -1000, -1000, 242,
+ 64, 165, 844, 844, 632, 780, -1000, -1000, -1000, 202,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 321, -1000, 264, -1000,
- 896, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 125, 18, 218, -1000, -1000, 706, -1000,
- 706, 223, -1000, 150, 220, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 370, -1000,
+ 266, -1000, 906, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -3, 19, 126,
+ -1000, -1000, 716, -1000, 716, 166, -1000, 86, 137, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 262, -1000, -1000, 354, -1000, -1000, 353, 312,
- -1000, -1000, 22, -1000, -54, -54, -54, -54, -54, -54,
+ -1000, -1000, -1000, -1000, -1000, -1000, 321, -1000, -1000, 488,
+ -1000, -1000, 291, 181, -1000, -1000, 21, -1000, -54, -54,
-54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
- 561, 644, 479, 41, 41, 41, 41, 41, 41, 218,
- -62, -1000, 214, 214, 542, -1000, 21, 449, 147, -40,
- -1000, 36, 41, 322, -1000, -1000, 160, 221, -1000, -1000,
- 260, -1000, 229, -1000, 158, 807, 706, -1000, -50, -44,
- -1000, 706, 706, 706, 706, 706, 706, 706, 706, 706,
- 706, 706, 706, 706, 706, 706, -1000, -1000, -1000, 144,
- 213, 185, 125, -1000, -1000, 41, -1000, 154, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 80, 80, 265, -1000, 125,
- -1000, 41, 150, -4, -4, -40, -40, -40, -40, -1000,
- -1000, -1000, 460, -1000, -1000, 79, -1000, 896, -1000, -1000,
- -1000, 751, -1000, 82, -1000, 85, -1000, -1000, -1000, -1000,
- -1000, 63, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 16,
- 131, 65, -1000, -1000, -1000, 837, 539, 214, 214, 214,
- 214, 147, 147, 703, 703, 703, 961, 915, 703, 703,
- 961, 147, 147, 703, 147, 539, -1000, 143, 81, 41,
- -40, 33, 41, 449, 31, -1000, -1000, -1000, 329, -1000,
- 177, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -54, -54, -54, -54, 978, -1000, -1000, 335, 169, 275,
+ 275, 275, 275, 275, 275, 126, -57, -1000, 193, 193,
+ 548, -1000, 26, 612, 33, -15, -1000, 42, 275, 476,
+ -1000, -1000, 216, 157, -1000, -1000, 262, -1000, 179, -1000,
+ 112, 222, 716, -1000, -51, -44, -1000, 716, 716, 716,
+ 716, 716, 716, 716, 716, 716, 716, 716, 716, 716,
+ 716, 716, -1000, -1000, -1000, 484, 125, 92, -3, -1000,
+ -1000, 275, -1000, 87, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 161, 161, 332, -1000, -3, -1000, 275, 86, -10,
+ -10, -15, -15, -15, -15, -1000, -1000, -1000, 464, -1000,
+ -1000, 81, -1000, 906, -1000, -1000, -1000, 390, -1000, 88,
+ -1000, 103, -1000, -1000, -1000, -1000, -1000, 102, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 32, 55, 3, -1000, -1000,
+ -1000, 715, 980, 193, 193, 193, 193, 33, 33, 545,
+ 545, 545, 971, 925, 545, 545, 971, 33, 33, 545,
+ 33, 980, -1000, 84, 80, 275, -15, 40, 275, 612,
+ 39, -1000, -1000, -1000, 669, -1000, 167, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 706, 41, -1000,
- -1000, -1000, -1000, -1000, -1000, 38, 38, 15, 38, 104,
- 104, 88, 83, -1000, -1000, 309, 308, 307, 306, 287,
- 286, 283, 282, 277, 255, 253, -1000, -1000, -1000, -1000,
- -1000, 25, 41, 373, -1000, 617, -1000, 173, -1000, -1000,
- -1000, 386, -1000, 896, 310, -1000, -1000, -1000, 38, -1000,
- 14, 13, 953, -1000, -1000, -1000, 26, 35, 35, 35,
- 80, 159, 159, 26, 159, 26, -66, -1000, 167, -1000,
- 41, -1000, -1000, -1000, -1000, -1000, -1000, 38, 38, -1000,
- -1000, -1000, 38, -1000, -1000, -1000, -1000, -1000, -1000, 35,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 41,
- 269, -1000, -1000, -1000, 169, -1000, 165, -1000, 328, -1000,
- -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 716, 275, -1000, -1000, -1000,
+ -1000, -1000, -1000, 51, 51, 31, 51, 78, 78, 346,
+ 35, -1000, -1000, 272, 270, 261, 259, 237, 236, 234,
+ 229, 217, 213, 210, -1000, -1000, -1000, -1000, -1000, 37,
+ 275, 465, -1000, 364, -1000, 152, -1000, -1000, -1000, 389,
+ -1000, 906, 382, -1000, -1000, -1000, 51, -1000, 30, 25,
+ 785, -1000, -1000, -1000, 36, 311, 311, 311, 161, 141,
+ 141, 36, 141, 36, -78, -1000, 308, -1000, 275, -1000,
+ -1000, -1000, -1000, -1000, -1000, 51, 51, -1000, -1000, -1000,
+ 51, -1000, -1000, -1000, -1000, -1000, -1000, 311, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 275, 172, -1000,
+ -1000, -1000, 162, -1000, 150, -1000, 483, -1000, -1000, -1000,
+ -1000, -1000,
}
var yyPgo = [...]int16{
- 0, 439, 13, 438, 6, 14, 436, 409, 21, 435,
- 12, 432, 19, 268, 335, 428, 15, 427, 18, 11,
- 424, 423, 7, 421, 5, 4, 418, 2, 1, 9,
- 417, 22, 3, 416, 407, 26, 198, 406, 405, 85,
- 401, 400, 25, 399, 31, 392, 10, 387, 384, 383,
- 382, 375, 365, 334, 0, 359, 8, 354, 345, 336,
+ 0, 444, 13, 433, 6, 15, 430, 318, 23, 427,
+ 10, 422, 14, 222, 355, 419, 16, 415, 28, 12,
+ 413, 410, 7, 408, 9, 5, 396, 3, 2, 4,
+ 394, 25, 1, 393, 384, 33, 200, 381, 375, 86,
+ 373, 358, 27, 357, 26, 356, 11, 347, 345, 339,
+ 331, 325, 324, 319, 299, 285, 0, 297, 8, 296,
+ 288, 281,
}
var yyR1 = [...]int8{
- 0, 58, 58, 58, 58, 58, 58, 58, 39, 39,
+ 0, 60, 60, 60, 60, 60, 60, 60, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
- 39, 34, 34, 34, 34, 35, 35, 37, 37, 37,
+ 39, 39, 39, 34, 34, 34, 34, 35, 35, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
- 37, 37, 37, 36, 38, 38, 48, 48, 43, 43,
- 43, 43, 18, 18, 18, 18, 17, 17, 17, 4,
- 4, 4, 40, 42, 42, 41, 41, 41, 49, 56,
- 47, 47, 33, 33, 33, 9, 9, 45, 51, 51,
- 51, 51, 51, 51, 52, 53, 53, 53, 44, 44,
- 44, 1, 1, 1, 2, 2, 2, 2, 2, 2,
- 2, 14, 14, 7, 7, 7, 7, 7, 7, 7,
+ 37, 37, 37, 37, 37, 36, 38, 38, 50, 50,
+ 43, 43, 43, 43, 18, 18, 18, 18, 17, 17,
+ 17, 4, 4, 4, 40, 42, 42, 41, 41, 41,
+ 51, 58, 47, 47, 48, 49, 33, 33, 33, 9,
+ 9, 45, 53, 53, 53, 53, 53, 53, 54, 55,
+ 55, 55, 44, 44, 44, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 14, 14, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 13, 13,
- 13, 13, 15, 15, 15, 16, 16, 16, 16, 16,
- 16, 16, 59, 21, 21, 21, 21, 20, 20, 20,
- 20, 20, 20, 20, 20, 20, 30, 30, 30, 22,
- 22, 22, 22, 23, 23, 23, 24, 24, 24, 24,
- 24, 24, 24, 24, 24, 24, 24, 25, 25, 26,
- 26, 26, 11, 11, 11, 11, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 13, 13, 13, 13, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 16, 61, 21,
+ 21, 21, 21, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 30, 30, 30, 22, 22, 22, 22, 23,
+ 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 25, 25, 26, 26, 26, 11, 11,
+ 11, 11, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 8, 8, 5, 5, 5, 5, 46, 46, 29, 29,
- 31, 31, 32, 32, 28, 27, 27, 50, 10, 19,
- 19, 57, 57, 57, 57, 57, 57, 57, 57, 12,
- 12, 54, 54, 54, 54, 54, 54, 54, 54, 54,
- 54, 54, 55,
+ 6, 6, 6, 6, 6, 6, 6, 6, 8, 8,
+ 5, 5, 5, 5, 46, 46, 29, 29, 31, 31,
+ 32, 32, 28, 27, 27, 52, 10, 19, 19, 59,
+ 59, 59, 59, 59, 59, 59, 59, 12, 12, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 57,
}
var yyR2 = [...]int8{
0, 2, 2, 2, 2, 2, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 3, 3, 2, 2, 2, 2, 4, 4, 4,
+ 1, 1, 1, 3, 3, 2, 2, 2, 2, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 1, 0, 1, 3, 3, 1, 1,
- 3, 3, 3, 4, 2, 1, 3, 1, 2, 1,
- 1, 1, 2, 3, 2, 3, 1, 2, 3, 1,
- 3, 3, 3, 5, 3, 1, 1, 4, 6, 5,
- 6, 5, 4, 3, 2, 2, 1, 1, 3, 4,
- 2, 3, 1, 2, 3, 3, 1, 3, 3, 2,
- 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 4, 4, 4, 4, 4, 1, 0, 1, 3, 3,
+ 1, 1, 3, 3, 3, 4, 2, 1, 3, 1,
+ 2, 1, 1, 1, 2, 3, 2, 3, 1, 2,
+ 3, 1, 3, 3, 2, 2, 3, 5, 3, 1,
+ 1, 4, 6, 5, 6, 5, 4, 3, 2, 2,
+ 1, 1, 3, 4, 2, 3, 1, 2, 3, 3,
+ 1, 3, 3, 2, 1, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 3, 4,
- 2, 0, 3, 1, 2, 3, 3, 1, 3, 3,
- 2, 1, 2, 0, 3, 2, 1, 1, 3, 1,
- 3, 4, 1, 3, 5, 5, 1, 1, 1, 4,
- 3, 3, 2, 3, 1, 2, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 4, 3, 3,
- 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 3, 4, 2, 0, 3, 1,
+ 2, 3, 3, 1, 3, 3, 2, 1, 2, 0,
+ 3, 2, 1, 1, 3, 1, 3, 4, 1, 3,
+ 5, 5, 1, 1, 1, 4, 3, 3, 2, 3,
+ 1, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 3, 3, 1, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 1, 1, 1, 2, 1, 1, 1, 0,
- 1, 1, 2, 3, 4, 6, 7, 4, 1, 1,
- 1, 1, 2, 3, 3, 3, 3, 3, 3, 3,
- 6, 1, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 1, 1, 1, 2, 1, 1, 1, 0, 1, 1,
+ 2, 3, 4, 6, 7, 4, 1, 1, 1, 1,
+ 2, 3, 3, 3, 3, 3, 3, 3, 6, 1,
+ 3,
}
var yyChk = [...]int16{
- -1000, -58, 99, 100, 101, 102, 2, 10, -14, -7,
+ -1000, -60, 101, 102, 103, 104, 2, 10, -14, -7,
-13, 62, 63, 79, 64, 65, 66, 12, 47, 48,
51, 67, 18, 68, 83, 69, 70, 71, 72, 73,
- 85, 88, 89, 74, 75, 90, 13, -59, -14, 10,
- -39, -34, -37, -40, -45, -46, -47, -49, -50, -51,
- -52, -53, -33, -54, -3, 12, 19, 9, 15, 25,
- -8, -7, -44, 90, -12, -55, 62, 63, 64, 65,
- 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
- 41, 57, 13, -53, -13, -15, 20, -16, 12, -10,
- 2, 25, -21, 2, 41, 59, 42, 43, 45, 46,
- 47, 48, 49, 50, 51, 52, 53, 54, 56, 57,
- 83, 58, 14, 41, 57, 53, 42, 52, 56, -35,
- -42, 2, 79, 85, 15, -42, -39, -54, -39, -54,
- -44, 15, 15, -1, 20, -2, 12, -10, 2, 20,
- 7, 2, 4, 2, 4, 24, -36, -43, -38, -48,
- 78, -36, -36, -36, -36, -36, -36, -36, -36, -36,
- -36, -36, -36, -36, -36, -36, -57, 2, -46, -8,
- 90, -12, -54, 68, 67, 15, -32, -9, 2, -29,
- -31, 88, 89, 19, 9, 41, 57, -56, 2, -54,
- -46, -8, 90, -54, -54, -54, -54, -54, -54, -42,
- -35, -18, 15, 2, -18, -41, 22, -39, 22, 22,
- 22, -54, 20, 7, 2, -5, 2, 4, 54, 44,
- 55, -5, 20, -16, 25, 2, 25, 2, -20, 5,
- -30, -22, 12, -29, -31, 16, -39, 82, 84, 80,
- 81, -39, -39, -39, -39, -39, -39, -39, -39, -39,
- -39, -39, -39, -39, -39, -39, -46, 90, -12, 15,
- -54, 15, 15, -54, 15, -29, -29, 21, 6, 2,
- -17, 22, -4, -6, 25, 2, 62, 78, 63, 79,
- 64, 65, 66, 80, 81, 12, 82, 47, 48, 51,
- 67, 18, 68, 83, 84, 69, 70, 71, 72, 73,
- 88, 89, 59, 74, 75, 90, 22, 7, 7, 20,
- -2, 25, 2, 25, 2, 26, 26, -31, 26, 41,
- 57, -23, 24, 17, -24, 30, 28, 29, 35, 36,
- 37, 33, 31, 34, 32, 38, -18, -18, -19, -18,
- -19, 15, 15, -54, 22, -54, 22, -56, 21, 2,
- 22, 7, 2, -39, -54, -28, 19, -28, 26, -28,
- -22, -22, 24, 17, 2, 17, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 22, -54, 22,
- 7, 21, 2, 22, -4, 22, -28, 26, 26, 17,
- -24, -27, 57, -28, -32, -32, -32, -29, -25, 14,
- -25, -27, -25, -27, -11, 93, 94, 95, 96, 7,
- -54, -28, -28, -28, -26, -32, -54, 22, 24, 21,
- 2, 22, 21, -32,
+ 87, 90, 91, 74, 75, 92, 85, 84, 13, -61,
+ -14, 10, -39, -34, -37, -40, -45, -46, -47, -48,
+ -49, -51, -52, -53, -54, -55, -33, -56, -3, 12,
+ 19, 9, 15, 25, -8, -7, -44, 92, -12, -57,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 41, 57, 13, -55, -13, -15,
+ 20, -16, 12, -10, 2, 25, -21, 2, 41, 59,
+ 42, 43, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 56, 57, 83, 85, 84, 58, 14, 41,
+ 57, 53, 42, 52, 56, -35, -42, 2, 79, 87,
+ 15, -42, -39, -56, -39, -56, -44, 15, 15, -1,
+ 20, -2, 12, -10, 2, 20, 7, 2, 4, 2,
+ 4, 24, -36, -43, -38, -50, 78, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -59, 2, -46, -8, 92, -12, -56, 68,
+ 67, 15, -32, -9, 2, -29, -31, 90, 91, 19,
+ 9, 41, 57, -58, 2, -56, -46, -8, 92, -56,
+ -56, -56, -56, -56, -56, -42, -35, -18, 15, 2,
+ -18, -41, 22, -39, 22, 22, 22, -56, 20, 7,
+ 2, -5, 2, 4, 54, 44, 55, -5, 20, -16,
+ 25, 2, 25, 2, -20, 5, -30, -22, 12, -29,
+ -31, 16, -39, 82, 86, 80, 81, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39, -39, -39, -39,
+ -39, -39, -46, 92, -12, 15, -56, 15, 15, -56,
+ 15, -29, -29, 21, 6, 2, -17, 22, -4, -6,
+ 25, 2, 62, 78, 63, 79, 64, 65, 66, 80,
+ 81, 12, 82, 47, 48, 51, 67, 18, 68, 83,
+ 86, 69, 70, 71, 72, 73, 90, 91, 59, 74,
+ 75, 92, 85, 84, 22, 7, 7, 20, -2, 25,
+ 2, 25, 2, 26, 26, -31, 26, 41, 57, -23,
+ 24, 17, -24, 30, 28, 29, 35, 36, 37, 33,
+ 31, 34, 32, 38, -18, -18, -19, -18, -19, 15,
+ 15, -56, 22, -56, 22, -58, 21, 2, 22, 7,
+ 2, -39, -56, -28, 19, -28, 26, -28, -22, -22,
+ 24, 17, 2, 17, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 22, -56, 22, 7, 21,
+ 2, 22, -4, 22, -28, 26, 26, 17, -24, -27,
+ 57, -28, -32, -32, -32, -29, -25, 14, -25, -27,
+ -25, -27, -11, 95, 96, 97, 98, 7, -56, -28,
+ -28, -28, -26, -32, -56, 22, 24, 21, 2, 22,
+ 21, -32,
}
var yyDef = [...]int16{
- 0, -2, 131, 131, 0, 0, 7, 6, 1, 131,
- 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 0, 2, -2, 3,
- 4, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 0, 109, 236, 237, 0, 247,
- 0, 86, 87, 127, 0, 271, -2, -2, -2, -2,
+ 0, -2, 137, 137, 0, 0, 7, 6, 1, 137,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 0, 2,
+ -2, 3, 4, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 0, 113,
+ 244, 245, 0, 255, 0, 90, 91, 131, 0, 279,
-2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- 230, 231, 0, 5, 101, 0, 130, 133, 0, 137,
- 141, 248, 142, 146, 44, 44, 44, 44, 44, 44,
- 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+ -2, -2, -2, -2, 238, 239, 0, 5, 105, 0,
+ 136, 139, 0, 143, 147, 256, 148, 152, 46, 46,
+ 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 46, 0, 74, 75, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 25, 26, 0, 0,
+ 0, 64, 0, 22, 88, -2, 89, 0, 0, 0,
+ 94, 96, 0, 100, 104, 134, 0, 140, 0, 146,
+ 0, 151, 0, 45, 50, 51, 47, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 23, 24, 0, 0, 0, 62, 0, 20, 84, -2,
- 85, 0, 0, 0, 90, 92, 0, 96, 100, 128,
- 0, 134, 0, 140, 0, 145, 0, 43, 48, 49,
- 45, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 70, 71, 251, 0,
- 0, 0, 258, 259, 260, 0, 72, 0, 74, 242,
- 243, 75, 76, 238, 239, 0, 0, 0, 83, 69,
- 261, 0, 0, 263, 264, 265, 266, 267, 268, 21,
- 22, 25, 0, 55, 26, 0, 64, 66, 68, 272,
- 269, 0, 88, 0, 93, 0, 99, 232, 233, 234,
- 235, 0, 129, 132, 135, 138, 136, 139, 144, 147,
- 149, 152, 156, 157, 158, 0, 27, 0, 0, -2,
- -2, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 252, 0, 0, 0,
- 262, 0, 0, 0, 0, 240, 241, 77, 0, 82,
- 0, 54, 57, 59, 60, 61, 200, 201, 202, 203,
- 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ 0, 0, 72, 73, 259, 0, 0, 0, 266, 267,
+ 268, 0, 76, 0, 78, 250, 251, 79, 80, 246,
+ 247, 0, 0, 0, 87, 71, 269, 0, 0, 271,
+ 272, 273, 274, 275, 276, 23, 24, 27, 0, 57,
+ 28, 0, 66, 68, 70, 280, 277, 0, 92, 0,
+ 97, 0, 103, 240, 241, 242, 243, 0, 135, 138,
+ 141, 144, 142, 145, 150, 153, 155, 158, 162, 163,
+ 164, 0, 29, 0, 0, -2, -2, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 260, 0, 0, 0, 270, 0, 0, 0,
+ 0, 248, 249, 81, 0, 86, 0, 56, 59, 61,
+ 62, 63, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
- 224, 225, 226, 227, 228, 229, 63, 67, 0, 89,
- 91, 94, 98, 95, 97, 0, 0, 0, 0, 0,
- 0, 0, 0, 162, 164, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 46, 47, 50, 250,
- 51, 0, 0, 0, 253, 0, 73, 0, 79, 81,
- 52, 0, 58, 65, 0, 148, 244, 150, 0, 153,
- 0, 0, 0, 160, 165, 161, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 254, 0, 257,
- 0, 78, 80, 53, 56, 270, 151, 0, 0, 159,
- 163, 166, 0, 246, 167, 168, 169, 170, 171, 0,
- 172, 173, 174, 175, 176, 182, 183, 184, 185, 0,
- 0, 154, 155, 245, 0, 180, 0, 255, 0, 178,
- 181, 256, 177, 179,
+ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 65, 69, 0, 93, 95, 98,
+ 102, 99, 101, 0, 0, 0, 0, 0, 0, 0,
+ 0, 168, 170, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 48, 49, 52, 258, 53, 0,
+ 0, 0, 261, 0, 77, 0, 83, 85, 54, 0,
+ 60, 67, 0, 154, 252, 156, 0, 159, 0, 0,
+ 0, 166, 171, 167, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 262, 0, 265, 0, 82,
+ 84, 55, 58, 278, 157, 0, 0, 165, 169, 172,
+ 0, 254, 173, 174, 175, 176, 177, 0, 178, 179,
+ 180, 181, 182, 188, 189, 190, 191, 0, 0, 160,
+ 161, 253, 0, 186, 0, 263, 0, 184, 187, 264,
+ 183, 185,
}
var yyTok1 = [...]int8{
@@ -752,7 +769,7 @@ var yyTok2 = [...]int8{
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- 102, 103,
+ 102, 103, 104, 105,
}
var yyTok3 = [...]int8{
@@ -1119,45 +1136,35 @@ yydefault:
{
yylex.(*parser).unexpected("", "")
}
- case 21:
+ case 23:
yyDollar = yyS[yypt-3 : yypt+1]
{
- // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input
- // like 'sum (some_metric) by test'
- if len(yylex.(*parser).closingParens) > 1 {
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
- }
- yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node)
+ yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node, false)
}
- case 22:
+ case 24:
yyDollar = yyS[yypt-3 : yypt+1]
{
- // Need to consume the position of the first RIGHT_PAREN. It might not exist on garbage input
- // like 'sum by test (some_metric)'
- if len(yylex.(*parser).closingParens) > 1 {
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
- }
- yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node)
+ yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node, false)
}
- case 23:
+ case 25:
yyDollar = yyS[yypt-2 : yypt+1]
{
- yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node)
+ yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node, true)
}
- case 24:
+ case 26:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("aggregation", "")
- yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{})
+ yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{}, false)
}
- case 25:
+ case 27:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = &AggregateExpr{
Grouping: yyDollar[2].strings,
}
}
- case 26:
+ case 28:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = &AggregateExpr{
@@ -1165,16 +1172,6 @@ yydefault:
Without: true,
}
}
- case 27:
- yyDollar = yyS[yypt-4 : yypt+1]
- {
- yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
- }
- case 28:
- yyDollar = yyS[yypt-4 : yypt+1]
- {
- yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
- }
case 29:
yyDollar = yyS[yypt-4 : yypt+1]
{
@@ -1245,14 +1242,24 @@ yydefault:
{
yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
}
+ case 43:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ {
+ yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
+ }
case 44:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ {
+ yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node)
+ }
+ case 46:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.node = &BinaryExpr{
VectorMatching: &VectorMatching{Card: CardOneToOne},
}
}
- case 45:
+ case 47:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &BinaryExpr{
@@ -1260,96 +1267,96 @@ yydefault:
ReturnBool: true,
}
}
- case 46:
+ case 48:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[1].node
yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings
}
- case 47:
+ case 49:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[1].node
yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings
yyVAL.node.(*BinaryExpr).VectorMatching.On = true
}
- case 50:
+ case 52:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[1].node
yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne
yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings
}
- case 51:
+ case 53:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[1].node
yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany
yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings
}
- case 52:
+ case 54:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.strings = yyDollar[2].strings
}
- case 53:
+ case 55:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.strings = yyDollar[2].strings
}
- case 54:
+ case 56:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.strings = []string{}
}
- case 55:
+ case 57:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "\"(\"")
yyVAL.strings = nil
}
- case 56:
+ case 58:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val)
}
- case 57:
+ case 59:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.strings = []string{yyDollar[1].item.Val}
}
- case 58:
+ case 60:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"")
yyVAL.strings = yyDollar[1].strings
}
- case 59:
+ case 61:
yyDollar = yyS[yypt-1 : yypt+1]
{
- if !model.LabelName(yyDollar[1].item.Val).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(yyDollar[1].item.Val) {
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val)
}
yyVAL.item = yyDollar[1].item
}
- case 60:
+ case 62:
yyDollar = yyS[yypt-1 : yypt+1]
{
unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val)
- if !model.LabelName(unquoted).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(unquoted) {
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted)
}
yyVAL.item = yyDollar[1].item
yyVAL.item.Pos++
yyVAL.item.Val = unquoted
}
- case 61:
+ case 63:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("grouping opts", "label")
yyVAL.item = Item{}
}
- case 62:
+ case 64:
yyDollar = yyS[yypt-2 : yypt+1]
{
fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions)
@@ -1364,44 +1371,42 @@ yydefault:
Args: yyDollar[2].node.(Expressions),
PosRange: posrange.PositionRange{
Start: yyDollar[1].item.Pos,
- End: yylex.(*parser).closingParens[0],
+ End: yylex.(*parser).lastClosing,
},
}
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
}
- case 63:
+ case 65:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = yyDollar[2].node
}
- case 64:
+ case 66:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = Expressions{}
}
- case 65:
+ case 67:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr))
}
- case 66:
+ case 68:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = Expressions{yyDollar[1].node.(Expr)}
}
- case 67:
+ case 69:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args")
yyVAL.node = yyDollar[1].node
}
- case 68:
+ case 70:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)}
- yylex.(*parser).closingParens = yylex.(*parser).closingParens[1:]
}
- case 69:
+ case 71:
yyDollar = yyS[yypt-1 : yypt+1]
{
if numLit, ok := yyDollar[1].node.(*NumberLiteral); ok {
@@ -1415,7 +1420,7 @@ yydefault:
}
yyVAL.node = yyDollar[1].node
}
- case 70:
+ case 72:
yyDollar = yyS[yypt-3 : yypt+1]
{
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
@@ -1426,31 +1431,41 @@ yydefault:
yylex.(*parser).addOffsetExpr(yyDollar[1].node, yyDollar[3].node.(*DurationExpr))
yyVAL.node = yyDollar[1].node
}
- case 71:
+ case 73:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("offset", "number, duration, or step()")
yyVAL.node = yyDollar[1].node
}
- case 72:
+ case 74:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ {
+ yylex.(*parser).setAnchored(yyDollar[1].node)
+ }
+ case 75:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ {
+ yylex.(*parser).setSmoothed(yyDollar[1].node)
+ }
+ case 76:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float)
yyVAL.node = yyDollar[1].node
}
- case 73:
+ case 77:
yyDollar = yyS[yypt-5 : yypt+1]
{
yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item)
yyVAL.node = yyDollar[1].node
}
- case 74:
+ case 78:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("@", "timestamp")
yyVAL.node = yyDollar[1].node
}
- case 77:
+ case 81:
yyDollar = yyS[yypt-4 : yypt+1]
{
var errMsg string
@@ -1480,7 +1495,7 @@ yydefault:
EndPos: yylex.(*parser).lastClosing,
}
}
- case 78:
+ case 82:
yyDollar = yyS[yypt-6 : yypt+1]
{
var rangeNl time.Duration
@@ -1502,7 +1517,7 @@ yydefault:
EndPos: yyDollar[6].item.Pos + 1,
}
}
- case 79:
+ case 83:
yyDollar = yyS[yypt-5 : yypt+1]
{
var rangeNl time.Duration
@@ -1517,31 +1532,31 @@ yydefault:
EndPos: yyDollar[5].item.Pos + 1,
}
}
- case 80:
+ case 84:
yyDollar = yyS[yypt-6 : yypt+1]
{
yylex.(*parser).unexpected("subquery selector", "\"]\"")
yyVAL.node = yyDollar[1].node
}
- case 81:
+ case 85:
yyDollar = yyS[yypt-5 : yypt+1]
{
yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 82:
+ case 86:
yyDollar = yyS[yypt-4 : yypt+1]
{
yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"")
yyVAL.node = yyDollar[1].node
}
- case 83:
+ case 87:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()")
yyVAL.node = yyDollar[1].node
}
- case 84:
+ case 88:
yyDollar = yyS[yypt-2 : yypt+1]
{
if nl, ok := yyDollar[2].node.(*NumberLiteral); ok {
@@ -1554,7 +1569,7 @@ yydefault:
yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos}
}
}
- case 85:
+ case 89:
yyDollar = yyS[yypt-2 : yypt+1]
{
vs := yyDollar[2].node.(*VectorSelector)
@@ -1563,7 +1578,7 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 86:
+ case 90:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := &VectorSelector{
@@ -1574,14 +1589,14 @@ yydefault:
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 87:
+ case 91:
yyDollar = yyS[yypt-1 : yypt+1]
{
vs := yyDollar[1].node.(*VectorSelector)
yylex.(*parser).assembleVectorSelector(vs)
yyVAL.node = vs
}
- case 88:
+ case 92:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1589,7 +1604,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item),
}
}
- case 89:
+ case 93:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1597,7 +1612,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item),
}
}
- case 90:
+ case 94:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = &VectorSelector{
@@ -1605,7 +1620,7 @@ yydefault:
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item),
}
}
- case 91:
+ case 95:
yyDollar = yyS[yypt-3 : yypt+1]
{
if yyDollar[1].matchers != nil {
@@ -1614,144 +1629,144 @@ yydefault:
yyVAL.matchers = yyDollar[1].matchers
}
}
- case 92:
+ case 96:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher}
}
- case 93:
+ case 97:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "\",\" or \"}\"")
yyVAL.matchers = yyDollar[1].matchers
}
- case 94:
+ case 98:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
- case 95:
+ case 99:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item)
}
- case 96:
+ case 100:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item)
}
- case 97:
+ case 101:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "string")
yyVAL.matcher = nil
}
- case 98:
+ case 102:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "string")
yyVAL.matcher = nil
}
- case 99:
+ case 103:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "label matching operator")
yyVAL.matcher = nil
}
- case 100:
+ case 104:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label matching", "identifier or \"}\"")
yyVAL.matcher = nil
}
- case 101:
+ case 105:
yyDollar = yyS[yypt-2 : yypt+1]
{
b := labels.NewBuilder(yyDollar[2].labels)
b.Set(labels.MetricName, yyDollar[1].item.Val)
yyVAL.labels = b.Labels()
}
- case 102:
+ case 106:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.labels = yyDollar[1].labels
}
- case 128:
+ case 134:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
- case 129:
+ case 135:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
- case 130:
+ case 136:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 131:
+ case 137:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.labels = labels.New()
}
- case 132:
+ case 138:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
}
- case 133:
+ case 139:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.lblList = []labels.Label{yyDollar[1].label}
}
- case 134:
+ case 140:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.lblList = yyDollar[1].lblList
}
- case 135:
+ case 141:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 136:
+ case 142:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
- case 137:
+ case 143:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}
}
- case 138:
+ case 144:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 139:
+ case 145:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
- case 140:
+ case 146:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\"=\"")
yyVAL.label = labels.Label{}
}
- case 141:
+ case 147:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label set", "identifier or \"}\"")
yyVAL.label = labels.Label{}
}
- case 142:
+ case 148:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).generatedParserResult = &seriesDescription{
@@ -1759,33 +1774,33 @@ yydefault:
values: yyDollar[2].series,
}
}
- case 143:
+ case 149:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.series = []SequenceValue{}
}
- case 144:
+ case 150:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
}
- case 145:
+ case 151:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.series = yyDollar[1].series
}
- case 146:
+ case 152:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("series values", "")
yyVAL.series = nil
}
- case 147:
+ case 153:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Omitted: true}}
}
- case 148:
+ case 154:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1793,12 +1808,12 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
}
}
- case 149:
+ case 155:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
}
- case 150:
+ case 156:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1807,7 +1822,7 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
}
}
- case 151:
+ case 157:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1817,12 +1832,12 @@ yydefault:
yyDollar[1].float += yyDollar[2].float
}
}
- case 152:
+ case 158:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}}
}
- case 153:
+ case 159:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1832,7 +1847,7 @@ yydefault:
//$1 += $2
}
}
- case 154:
+ case 160:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1841,7 +1856,7 @@ yydefault:
}
yyVAL.series = val
}
- case 155:
+ case 161:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1850,7 +1865,7 @@ yydefault:
}
yyVAL.series = val
}
- case 156:
+ case 162:
yyDollar = yyS[yypt-1 : yypt+1]
{
if yyDollar[1].item.Val != "stale" {
@@ -1858,130 +1873,130 @@ yydefault:
}
yyVAL.float = math.Float64frombits(value.StaleNaN)
}
- case 159:
+ case 165:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
- case 160:
+ case 166:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
- case 161:
+ case 167:
yyDollar = yyS[yypt-3 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
- case 162:
+ case 168:
yyDollar = yyS[yypt-2 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
- case 163:
+ case 169:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors))
}
- case 164:
+ case 170:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.descriptors = yyDollar[1].descriptors
}
- case 165:
+ case 171:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
}
- case 166:
+ case 172:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["schema"] = yyDollar[3].int
}
- case 167:
+ case 173:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["sum"] = yyDollar[3].float
}
- case 168:
+ case 174:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["count"] = yyDollar[3].float
}
- case 169:
+ case 175:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["z_bucket"] = yyDollar[3].float
}
- case 170:
+ case 176:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
}
- case 171:
+ case 177:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
}
- case 172:
+ case 178:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
}
- case 173:
+ case 179:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["offset"] = yyDollar[3].int
}
- case 174:
+ case 180:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
}
- case 175:
+ case 181:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["n_offset"] = yyDollar[3].int
}
- case 176:
+ case 182:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
}
- case 177:
+ case 183:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
- case 178:
+ case 184:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
- case 179:
+ case 185:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
}
- case 180:
+ case 186:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.bucket_set = []float64{yyDollar[1].float}
}
- case 236:
+ case 244:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &NumberLiteral{
@@ -1989,7 +2004,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 237:
+ case 245:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2004,12 +2019,12 @@ yydefault:
Duration: true,
}
}
- case 238:
+ case 246:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
}
- case 239:
+ case 247:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2020,17 +2035,17 @@ yydefault:
}
yyVAL.float = dur.Seconds()
}
- case 240:
+ case 248:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = yyDollar[2].float
}
- case 241:
+ case 249:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = -yyDollar[2].float
}
- case 244:
+ case 252:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2039,17 +2054,17 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
}
}
- case 245:
+ case 253:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.int = -int64(yyDollar[2].uint)
}
- case 246:
+ case 254:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.int = int64(yyDollar[1].uint)
}
- case 247:
+ case 255:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &StringLiteral{
@@ -2057,7 +2072,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 248:
+ case 256:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.item = Item{
@@ -2066,12 +2081,12 @@ yydefault:
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
}
}
- case 249:
+ case 257:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.strings = nil
}
- case 251:
+ case 259:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2082,7 +2097,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 252:
+ case 260:
yyDollar = yyS[yypt-2 : yypt+1]
{
nl := yyDollar[2].node.(*NumberLiteral)
@@ -2097,7 +2112,7 @@ yydefault:
nl.PosRange.Start = yyDollar[1].item.Pos
yyVAL.node = nl
}
- case 253:
+ case 261:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2106,7 +2121,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 254:
+ case 262:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2119,7 +2134,7 @@ yydefault:
StartPos: yyDollar[1].item.Pos,
}
}
- case 255:
+ case 263:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2130,7 +2145,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 256:
+ case 264:
yyDollar = yyS[yypt-7 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2146,7 +2161,7 @@ yydefault:
},
}
}
- case 257:
+ case 265:
yyDollar = yyS[yypt-4 : yypt+1]
{
de := yyDollar[3].node.(*DurationExpr)
@@ -2161,7 +2176,7 @@ yydefault:
}
yyVAL.node = yyDollar[3].node
}
- case 261:
+ case 269:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2172,7 +2187,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 262:
+ case 270:
yyDollar = yyS[yypt-2 : yypt+1]
{
switch expr := yyDollar[2].node.(type) {
@@ -2205,25 +2220,25 @@ yydefault:
break
}
}
- case 263:
+ case 271:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 264:
+ case 272:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 265:
+ case 273:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 266:
+ case 274:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2234,7 +2249,7 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 267:
+ case 275:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2245,13 +2260,13 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 268:
+ case 276:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 269:
+ case 277:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2260,7 +2275,7 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 270:
+ case 278:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2271,7 +2286,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 272:
+ case 280:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr))
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
index 2b3eecbadd6..296b91d1aea 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go
@@ -129,6 +129,8 @@ var key = map[string]ItemType{
// Keywords.
"offset": OFFSET,
+ "smoothed": SMOOTHED,
+ "anchored": ANCHORED,
"by": BY,
"without": WITHOUT,
"on": ON,
@@ -197,6 +199,7 @@ var ItemTypeStr = map[ItemType]string{
EQL_REGEX: "=~",
NEQ_REGEX: "!~",
POW: "^",
+ AT: "@",
}
func init() {
@@ -347,7 +350,7 @@ func (l *Lexer) acceptRun(valid string) {
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.NextItem.
-func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
+func (l *Lexer) errorf(format string, args ...any) stateFn {
*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
l.scannedItem = true
@@ -1185,3 +1188,34 @@ func lexDurationExpr(l *Lexer) stateFn {
return l.errorf("unexpected character in duration expression: %q", r)
}
}
+
+// findPrevRightParen finds the previous right parenthesis.
+// Use this when the parser had to read ahead to find the next right
+// parenthesis to decide whether to continue, and so lost track of the
+// previous right parenthesis position.
+// Only use when outside string literals as those can have runes made up of
+// multiple bytes, which would break the position calculation.
+// Falls back to the given fallback position on any problem.
+// https://github.com/prometheus/prometheus/issues/16053
+func (l *Lexer) findPrevRightParen(fallbackPos posrange.Pos) posrange.Pos {
+ // Early return on:
+ // - invalid fallback position,
+ // - not enough space for a second right parenthesis,
+ // - last read position is at or past the end, since then we stopped due
+ // to the end of the input, not a parenthesis,
+ // - last position doesn't hold a right parenthesis.
+ if fallbackPos <= 0 || fallbackPos > posrange.Pos(len(l.input)) || l.lastPos <= 0 || l.lastPos >= posrange.Pos(len(l.input)) || l.input[l.lastPos] != ')' {
+ return fallbackPos
+ }
+ for i := l.lastPos - 1; i > 0; i-- {
+ switch {
+ case l.input[i] == ')':
+ return i + 1
+ case isSpace(rune(l.input[i])):
+ default:
+ return fallbackPos
+ }
+ }
+ return fallbackPos
+}
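
As an aside on the scan-back above: the function only skips whitespace while walking left from the last-read `)`, so it recovers the parenthesis that actually closed the expression when the parser over-read by one token. A minimal, self-contained sketch of the same logic (plain ints instead of `posrange.Pos`, and a hypothetical `prevRightParen` name):

```go
package main

import "fmt"

// prevRightParen mirrors findPrevRightParen: given that the last-read byte
// is ')', scan left over whitespace for the ')' that actually closed the
// expression; fall back on anything unexpected.
func prevRightParen(input string, lastPos, fallback int) int {
	if fallback <= 0 || fallback > len(input) || lastPos <= 0 ||
		lastPos >= len(input) || input[lastPos] != ')' {
		return fallback
	}
	for i := lastPos - 1; i > 0; i-- {
		switch {
		case input[i] == ')':
			return i + 1 // just past the inner ')'
		case input[i] == ' ' || input[i] == '\t':
			// keep scanning over whitespace
		default:
			return fallback
		}
	}
	return fallback
}

func main() {
	// In "(sum(foo) )" the lexer's last-read ')' is at index 10, but the
	// aggregation really ended at the ')' at index 8.
	fmt.Println(prevRightParen("(sum(foo) )", 10, 9)) // 9
}
```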
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
index e99f5f4570d..bcd511f4670 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go
@@ -34,7 +34,7 @@ import (
)
var parserPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &parser{}
},
}
@@ -42,6 +42,9 @@ var parserPool = sync.Pool{
// ExperimentalDurationExpr is a flag to enable experimental duration expression parsing.
var ExperimentalDurationExpr bool
+// EnableExtendedRangeSelectors is a flag to enable experimental extended range selectors.
+var EnableExtendedRangeSelectors bool
+
type Parser interface {
ParseExpr() (Expr, error)
Close()
@@ -59,17 +62,10 @@ type parser struct {
// Every time an Item is lexed that could be the end
// of certain expressions, its end position is stored here.
lastClosing posrange.Pos
- // Keep track of closing parentheses in addition, because sometimes the
- // parser needs to read past a closing parenthesis to find the end of an
- // expression, e.g. reading ony '(sum(foo)' cannot tell the end of the
- // aggregation expression, since it could continue with either
- // '(sum(foo))' or '(sum(foo) by (bar))' by which time we set lastClosing
- // to the last paren.
- closingParens []posrange.Pos
yyParser yyParserImpl
- generatedParserResult interface{}
+ generatedParserResult any
parseErrors ParseErrors
}
@@ -89,7 +85,7 @@ func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexporte
p.injecting = false
p.parseErrors = nil
p.generatedParserResult = nil
- p.closingParens = make([]posrange.Pos, 0)
+ p.lastClosing = posrange.Pos(0)
// Clear lexer struct before reusing.
p.lex = Lexer{
@@ -179,11 +175,6 @@ func EnrichParseError(err error, enrich func(parseErr *ParseErr)) {
func ParseExpr(input string) (expr Expr, err error) {
p := NewParser(input)
defer p.Close()
-
- if len(p.closingParens) > 0 {
- return nil, fmt.Errorf("internal parser error, not all closing parens consumed: %v", p.closingParens)
- }
-
return p.ParseExpr()
}
@@ -285,7 +276,7 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
}
// addParseErrf formats the error and appends it to the list of parsing errors.
-func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) {
+func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...any) {
p.addParseErr(positionRange, fmt.Errorf(format, args...))
}
@@ -334,7 +325,7 @@ func (p *parser) unexpected(context, expected string) {
var errUnexpected = errors.New("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
-func (p *parser) recover(errp *error) {
+func (*parser) recover(errp *error) {
e := recover()
switch _, ok := e.(runtime.Error); {
case ok:
@@ -387,10 +378,7 @@ func (p *parser) Lex(lval *yySymType) int {
case EOF:
lval.item.Typ = EOF
p.InjectItem(0)
- case RIGHT_PAREN:
- p.closingParens = append(p.closingParens, lval.item.Pos+posrange.Pos(len(lval.item.Val)))
- fallthrough
- case RIGHT_BRACE, RIGHT_BRACKET, DURATION, NUMBER:
+ case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER:
p.lastClosing = lval.item.Pos + posrange.Pos(len(lval.item.Val))
}
@@ -402,7 +390,7 @@ func (p *parser) Lex(lval *yySymType) int {
// It is a no-op since the parsers error routines are triggered
// by mechanisms that allow more fine-grained control
// For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
-func (p *parser) Error(string) {
+func (*parser) Error(string) {
}
// InjectItem allows injecting a single Item at the beginning of the token stream
@@ -425,7 +413,7 @@ func (p *parser) InjectItem(typ ItemType) {
p.injecting = true
}
-func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
+func (*parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
ret := modifiers.(*BinaryExpr)
ret.LHS = lhs.(Expr)
@@ -435,7 +423,7 @@ func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *Bi
return ret
}
-func (p *parser) assembleVectorSelector(vs *VectorSelector) {
+func (*parser) assembleVectorSelector(vs *VectorSelector) {
// If the metric name was set outside the braces, add a matcher for it.
// If the metric name was inside the braces we don't need to do anything.
if vs.Name != "" {
@@ -447,20 +435,17 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) {
}
}
-func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) {
+func (p *parser) newAggregateExpr(op Item, modifier, args Node, overread bool) (ret *AggregateExpr) {
ret = modifier.(*AggregateExpr)
arguments := args.(Expressions)
- if len(p.closingParens) == 0 {
- // Prevents invalid array accesses.
- // The error is already captured by the parser.
- return
- }
ret.PosRange = posrange.PositionRange{
Start: op.Pos,
- End: p.closingParens[0],
+ End: p.lastClosing,
+ }
+ if overread {
+ ret.PosRange.End = p.lex.findPrevRightParen(p.lastClosing)
}
- p.closingParens = p.closingParens[1:]
ret.Op = op.Typ
@@ -468,14 +453,14 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
p.addParseErrf(ret.PositionRange(), "no arguments for aggregate expression provided")
// Prevents invalid array accesses.
- return
+ return ret
}
desiredArgs := 1
if ret.Op.IsAggregatorWithParam() {
if !EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() {
p.addParseErrf(ret.PositionRange(), "%s() is experimental and must be enabled with --enable-feature=promql-experimental-functions", ret.Op)
- return
+ return ret
}
desiredArgs = 2
@@ -484,7 +469,7 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
if len(arguments) != desiredArgs {
p.addParseErrf(ret.PositionRange(), "wrong number of arguments for aggregate expression provided, expected %d, got %d", desiredArgs, len(arguments))
- return
+ return ret
}
ret.Expr = arguments[desiredArgs-1]
@@ -493,13 +478,13 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
}
// newMap is used when building the FloatHistogram from a map.
-func (p *parser) newMap() (ret map[string]interface{}) {
- return map[string]interface{}{}
+func (*parser) newMap() (ret map[string]any) {
+ return map[string]any{}
}
// mergeMaps is used to combine maps as they're used to later build the Float histogram.
// This will merge the right map into the left map.
-func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
+func (p *parser) mergeMaps(left, right *map[string]any) (ret *map[string]any) {
for key, value := range *right {
if _, ok := (*left)[key]; ok {
p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
@@ -512,17 +497,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string
func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
- return a.Add(b)
+ res, _, _, err := a.Add(b)
+ return res, err
})
}
func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
- return a.Sub(b)
+ res, _, _, err := a.Sub(b)
+ return res, err
})
}
-func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
+func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
) ([]SequenceValue, error) {
ret := make([]SequenceValue, times+1)
@@ -546,7 +533,7 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin
}
// buildHistogramFromMap is used in the grammar to take the individual parts of the histogram and complete it.
-func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
+func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHistogram {
output := &histogram.FloatHistogram{}
val, ok := (*desc)["schema"]
@@ -639,7 +626,7 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
return output
}
-func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
+func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]any, bucketsKey, offsetKey string,
) (buckets []float64, spans []histogram.Span) {
bucketCount := 0
val, ok := (*desc)[bucketsKey]
@@ -665,7 +652,7 @@ func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, buc
if bucketCount > 0 {
spans = []histogram.Span{{Offset: offset, Length: uint32(bucketCount)}}
}
- return
+ return buckets, spans
}
// number parses a number.
@@ -743,7 +730,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
}
for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- {
}
- return
+ return r
}
if n.ReturnBool && !n.Op.IsComparisonOperator() {
@@ -816,11 +803,14 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
p.addParseErrf(node.PositionRange(), "expected type %s in %s, got %s", DocumentedType(ValueTypeVector), fmt.Sprintf("call to function %q", n.Func.Name), DocumentedType(n.Args[1].Type()))
}
// Check the vector selector in the input doesn't contain a metric name
- if n.Args[1].(*VectorSelector).Name != "" {
+ if vs, ok := n.Args[1].(*VectorSelector); ok && vs.Name != "" {
p.addParseErrf(n.Args[1].PositionRange(), "expected label selectors only, got vector selector instead")
+ } else if ok {
+ // Set Vector Selector flag to bypass empty matcher check
+ vs.BypassEmptyMatcherCheck = true
+ } else {
+ p.addParseErrf(n.Args[1].PositionRange(), "expected label selectors only")
}
- // Set Vector Selector flag to bypass empty matcher check
- n.Args[1].(*VectorSelector).BypassEmptyMatcherCheck = true
}
for i, arg := range n.Args {
@@ -832,7 +822,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
}
i = len(n.Func.ArgTypes) - 1
}
- p.expectType(arg, n.Func.ArgTypes[i], fmt.Sprintf("call to function %q", n.Func.Name))
+ if t := p.checkAST(arg); t != n.Func.ArgTypes[i] {
+ p.addParseErrf(arg.PositionRange(), "expected type %s in call to function %q, got %s", DocumentedType(n.Func.ArgTypes[i]), n.Func.Name, DocumentedType(t))
+ }
}
case *ParenExpr:
@@ -890,7 +882,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
default:
p.addParseErrf(n.PositionRange(), "unknown node type: %T", node)
}
- return
+ return typ
}
func (p *parser) unquoteString(s string) string {
@@ -912,7 +904,7 @@ func parseDuration(ds string) (time.Duration, error) {
// parseGenerated invokes the yacc generated parser.
// The generated parser gets the provided startSymbol injected into
// the lexer stream, based on which grammar will be used.
-func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
+func (p *parser) parseGenerated(startSymbol ItemType) any {
p.InjectItem(startSymbol)
p.yyParser.Parse(p)
@@ -1037,6 +1029,52 @@ func (p *parser) addOffsetExpr(e Node, expr *DurationExpr) {
*endPosp = p.lastClosing
}
+func (p *parser) setAnchored(e Node) {
+ if !EnableExtendedRangeSelectors {
+ p.addParseErrf(e.PositionRange(), "anchored modifier is experimental and not enabled")
+ return
+ }
+ switch s := e.(type) {
+ case *VectorSelector:
+ s.Anchored = true
+ if s.Smoothed {
+ p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+ }
+ case *MatrixSelector:
+ s.VectorSelector.(*VectorSelector).Anchored = true
+ if s.VectorSelector.(*VectorSelector).Smoothed {
+ p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+ }
+ case *SubqueryExpr:
+ p.addParseErrf(e.PositionRange(), "anchored modifier is not supported for subqueries")
+ default:
+ p.addParseErrf(e.PositionRange(), "anchored modifier not implemented")
+ }
+}
+
+func (p *parser) setSmoothed(e Node) {
+ if !EnableExtendedRangeSelectors {
+ p.addParseErrf(e.PositionRange(), "smoothed modifier is experimental and not enabled")
+ return
+ }
+ switch s := e.(type) {
+ case *VectorSelector:
+ s.Smoothed = true
+ if s.Anchored {
+ p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+ }
+ case *MatrixSelector:
+ s.VectorSelector.(*VectorSelector).Smoothed = true
+ if s.VectorSelector.(*VectorSelector).Anchored {
+ p.addParseErrf(e.PositionRange(), "anchored and smoothed modifiers cannot be used together")
+ }
+ case *SubqueryExpr:
+ p.addParseErrf(e.PositionRange(), "smoothed modifier is not supported for subqueries")
+ default:
+ p.addParseErrf(e.PositionRange(), "smoothed modifier not implemented")
+ }
+}
+
// setTimestamp is used to set the timestamp from the @ modifier in the generated parser.
func (p *parser) setTimestamp(e Node, ts float64) {
if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) ||
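
To see the two new modifiers end to end, here is a hedged sketch (not part of the patch; it assumes the generated grammar routes `anchored`/`smoothed` through `setAnchored`/`setSmoothed` as shown above):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Extended range selectors are gated behind the new flag.
	parser.EnableExtendedRangeSelectors = true

	expr, err := parser.ParseExpr(`rate(http_requests_total[5m] anchored)`)
	if err != nil {
		panic(err)
	}
	fmt.Println(expr.String()) // the printer re-emits " anchored" (see printer.go below)

	// Combining both modifiers is rejected, either by the grammar itself or
	// by the "cannot be used together" checks in setAnchored/setSmoothed.
	_, err = parser.ParseExpr(`rate(http_requests_total[5m] anchored smoothed)`)
	fmt.Println(err != nil) // true
}
```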
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go
index 531fd8a30c0..f883a91bbb3 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/posrange/posrange.go
@@ -23,8 +23,8 @@ type Pos int
// PositionRange describes a position in the input string of the parser.
type PositionRange struct {
- Start Pos
- End Pos
+ Start Pos // Start of the range, zero-indexed.
+ End Pos // End of the range, zero-indexed.
}
// StartPosInput uses the query string to convert the PositionRange into a
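
A quick illustration of the zero-indexed convention documented above (a sketch; the exact End value follows from lastClosing pointing just past the final token, per parse.go):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr("sum(foo)")
	if err != nil {
		panic(err)
	}
	// Start is the zero-indexed offset of 's'; End points just past ')'.
	fmt.Println(expr.PositionRange()) // {0 8}
}
```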
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go
index eefa3f490be..90fb7a0cf9b 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go
@@ -54,7 +54,7 @@ func (e *AggregateExpr) Pretty(level int) string {
return s
}
- s += e.getAggOpStr()
+ s += e.ShortString()
s += "(\n"
if e.Op.IsAggregatorWithParam() {
@@ -105,7 +105,7 @@ func (e *Call) Pretty(level int) string {
return s
}
-func (e *EvalStmt) Pretty(_ int) string {
+func (e *EvalStmt) Pretty(int) string {
return "EVAL " + e.Expr.String()
}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
index 9dae10a70e3..a562b88044d 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go
@@ -41,7 +41,7 @@ func tree(node Node, level string) string {
level += " · · ·"
- for _, e := range Children(node) {
+ for e := range ChildrenIter(node) {
t += tree(e, level)
}
@@ -53,60 +53,79 @@ func (node *EvalStmt) String() string {
}
func (es Expressions) String() (s string) {
- if len(es) == 0 {
+ switch len(es) {
+ case 0:
return ""
+ case 1:
+ return es[0].String()
}
- for _, e := range es {
- s += e.String()
- s += ", "
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteString(es[0].String())
+ for _, e := range es[1:] {
+ b.WriteString(", ")
+ b.WriteString(e.String())
}
- return s[:len(s)-2]
+ return b.String()
}
func (node *AggregateExpr) String() string {
- aggrString := node.getAggOpStr()
- aggrString += "("
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ node.writeAggOpStr(b)
+ b.WriteString("(")
if node.Op.IsAggregatorWithParam() {
- aggrString += fmt.Sprintf("%s, ", node.Param)
+ b.WriteString(node.Param.String())
+ b.WriteString(", ")
}
- aggrString += fmt.Sprintf("%s)", node.Expr)
+ b.WriteString(node.Expr.String())
+ b.WriteString(")")
- return aggrString
+ return b.String()
}
func (node *AggregateExpr) ShortString() string {
- aggrString := node.getAggOpStr()
- return aggrString
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ node.writeAggOpStr(b)
+ return b.String()
}
-func (node *AggregateExpr) getAggOpStr() string {
- aggrString := node.Op.String()
+func (node *AggregateExpr) writeAggOpStr(b *bytes.Buffer) {
+ b.WriteString(node.Op.String())
switch {
case node.Without:
- aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping))
+ b.WriteString(" without (")
+ writeLabels(b, node.Grouping)
+ b.WriteString(") ")
case len(node.Grouping) > 0:
- aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping))
+ b.WriteString(" by (")
+ writeLabels(b, node.Grouping)
+ b.WriteString(") ")
}
-
- return aggrString
}
-func joinLabels(ss []string) string {
- var bytea [1024]byte // On stack to avoid memory allocation while building the output.
- b := bytes.NewBuffer(bytea[:0])
-
+func writeLabels(b *bytes.Buffer, ss []string) {
for i, s := range ss {
if i > 0 {
b.WriteString(", ")
}
- if !model.IsValidLegacyMetricName(string(model.LabelValue(s))) {
+ if !model.LegacyValidation.IsValidMetricName(s) {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), s))
} else {
b.WriteString(s)
}
}
- return b.String()
+}
+
+// writeStringsJoin is like strings.Join but appending to a bytes.Buffer.
+func writeStringsJoin(b *bytes.Buffer, elems []string, sep string) {
+ if len(elems) == 0 {
+ return
+ }
+ b.WriteString(elems[0])
+ for _, s := range elems[1:] {
+ b.WriteString(sep)
+ b.WriteString(s)
+ }
}
func (node *BinaryExpr) returnBool() string {
@@ -118,11 +137,11 @@ func (node *BinaryExpr) returnBool() string {
func (node *BinaryExpr) String() string {
matching := node.getMatchingStr()
- return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS)
+ return node.LHS.String() + " " + node.Op.String() + node.returnBool() + matching + " " + node.RHS.String()
}
func (node *BinaryExpr) ShortString() string {
- return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr())
+ return node.Op.String() + node.returnBool() + node.getMatchingStr()
}
func (node *BinaryExpr) getMatchingStr() string {
@@ -147,32 +166,54 @@ func (node *BinaryExpr) getMatchingStr() string {
}
func (node *DurationExpr) String() string {
- var expr string
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ node.writeTo(b)
+ return b.String()
+}
+
+func (node *DurationExpr) writeTo(b *bytes.Buffer) {
+ if node.Wrapped {
+ b.WriteByte('(')
+ }
+
switch {
case node.Op == STEP:
- expr = "step()"
+ b.WriteString("step()")
case node.Op == MIN:
- expr = fmt.Sprintf("min(%s, %s)", node.LHS, node.RHS)
+ b.WriteString("min(")
+ b.WriteString(node.LHS.String())
+ b.WriteString(", ")
+ b.WriteString(node.RHS.String())
+ b.WriteByte(')')
case node.Op == MAX:
- expr = fmt.Sprintf("max(%s, %s)", node.LHS, node.RHS)
+ b.WriteString("max(")
+ b.WriteString(node.LHS.String())
+ b.WriteString(", ")
+ b.WriteString(node.RHS.String())
+ b.WriteByte(')')
case node.LHS == nil:
// This is a unary duration expression.
switch node.Op {
case SUB:
- expr = fmt.Sprintf("%s%s", node.Op, node.RHS)
+ b.WriteString(node.Op.String())
+ b.WriteString(node.RHS.String())
case ADD:
- expr = node.RHS.String()
+ b.WriteString(node.RHS.String())
default:
// This should never happen.
panic(fmt.Sprintf("unexpected unary duration expression: %s", node.Op))
}
default:
- expr = fmt.Sprintf("%s %s %s", node.LHS, node.Op, node.RHS)
+ b.WriteString(node.LHS.String())
+ b.WriteByte(' ')
+ b.WriteString(node.Op.String())
+ b.WriteByte(' ')
+ b.WriteString(node.RHS.String())
}
+
if node.Wrapped {
- return fmt.Sprintf("(%s)", expr)
+ b.WriteByte(')')
}
- return expr
}
func (node *DurationExpr) ShortString() string {
@@ -180,7 +221,7 @@ func (node *DurationExpr) ShortString() string {
}
func (node *Call) String() string {
- return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args)
+ return node.Func.Name + "(" + node.Args.String() + ")"
}
func (node *Call) ShortString() string {
@@ -188,7 +229,6 @@ func (node *Call) ShortString() string {
}
func (node *MatrixSelector) atOffset() (string, string) {
- // Copy the Vector selector before changing the offset
vecSelector := node.VectorSelector.(*VectorSelector)
offset := ""
switch {
@@ -213,22 +253,28 @@ func (node *MatrixSelector) atOffset() (string, string) {
func (node *MatrixSelector) String() string {
at, offset := node.atOffset()
- // Copy the Vector selector before changing the offset
+ // Copy the Vector selector so we can modify it to not print @, offset, and other modifiers twice.
vecSelector := *node.VectorSelector.(*VectorSelector)
- // Do not print the @ and offset twice.
- offsetVal, offsetExprVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd
+ anchored, smoothed := vecSelector.Anchored, vecSelector.Smoothed
vecSelector.OriginalOffset = 0
vecSelector.OriginalOffsetExpr = nil
vecSelector.Timestamp = nil
vecSelector.StartOrEnd = 0
+ vecSelector.Anchored = false
+ vecSelector.Smoothed = false
+ extendedAttribute := ""
+ switch {
+ case anchored:
+ extendedAttribute = " anchored"
+ case smoothed:
+ extendedAttribute = " smoothed"
+ }
rangeStr := model.Duration(node.Range).String()
if node.RangeExpr != nil {
rangeStr = node.RangeExpr.String()
}
- str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), rangeStr, at, offset)
-
- vecSelector.OriginalOffset, vecSelector.OriginalOffsetExpr, vecSelector.Timestamp, vecSelector.StartOrEnd = offsetVal, offsetExprVal, atVal, preproc
+ str := fmt.Sprintf("%s[%s]%s%s%s", vecSelector.String(), rangeStr, extendedAttribute, at, offset)
return str
}
@@ -294,15 +340,15 @@ func (node *NumberLiteral) String() string {
}
func (node *ParenExpr) String() string {
- return fmt.Sprintf("(%s)", node.Expr)
+ return "(" + node.Expr.String() + ")"
}
func (node *StringLiteral) String() string {
- return fmt.Sprintf("%q", node.Val)
+ return strconv.Quote(node.Val)
}
func (node *UnaryExpr) String() string {
- return fmt.Sprintf("%s%s", node.Op, node.Expr)
+ return node.Op.String() + node.Expr.String()
}
func (node *UnaryExpr) ShortString() string {
@@ -321,28 +367,39 @@ func (node *VectorSelector) String() string {
}
labelStrings = append(labelStrings, matcher.String())
}
- offset := ""
- switch {
- case node.OriginalOffsetExpr != nil:
- offset = fmt.Sprintf(" offset %s", node.OriginalOffsetExpr)
- case node.OriginalOffset > time.Duration(0):
- offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
- case node.OriginalOffset < time.Duration(0):
- offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteString(node.Name)
+ if len(labelStrings) != 0 {
+ b.WriteByte('{')
+ sort.Strings(labelStrings)
+ writeStringsJoin(b, labelStrings, ",")
+ b.WriteByte('}')
}
- at := ""
switch {
case node.Timestamp != nil:
- at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
+ b.WriteString(" @ ")
+ b.Write(strconv.AppendFloat(b.AvailableBuffer(), float64(*node.Timestamp)/1000.0, 'f', 3, 64))
case node.StartOrEnd == START:
- at = " @ start()"
+ b.WriteString(" @ start()")
case node.StartOrEnd == END:
- at = " @ end()"
+ b.WriteString(" @ end()")
}
-
- if len(labelStrings) == 0 {
- return fmt.Sprintf("%s%s%s", node.Name, at, offset)
+ switch {
+ case node.Anchored:
+ b.WriteString(" anchored")
+ case node.Smoothed:
+ b.WriteString(" smoothed")
+ }
+ switch {
+ case node.OriginalOffsetExpr != nil:
+ b.WriteString(" offset ")
+ node.OriginalOffsetExpr.writeTo(b)
+ case node.OriginalOffset > time.Duration(0):
+ b.WriteString(" offset ")
+ b.WriteString(model.Duration(node.OriginalOffset).String())
+ case node.OriginalOffset < time.Duration(0):
+ b.WriteString(" offset -")
+ b.WriteString(model.Duration(-node.OriginalOffset).String())
}
- sort.Strings(labelStrings)
- return fmt.Sprintf("%s{%s}%s%s", node.Name, strings.Join(labelStrings, ","), at, offset)
+ return b.String()
}
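
The buffer-based rewrite is meant to preserve output byte for byte relative to the old fmt.Sprintf paths; for instance (a sketch), an aggregate with grouping still round-trips through writeAggOpStr unchanged:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum by (job) (rate(http_requests_total[5m]))`)
	if err != nil {
		panic(err)
	}
	fmt.Println(expr.String()) // sum by (job) (rate(http_requests_total[5m]))
}
```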
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md b/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md
index 84a0e69f3ad..d26c01c6f1e 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md
@@ -106,8 +106,44 @@ eval range from <from> to <to> step <step> <query>
* `<from>` and `<to>` specify the time range of the range query, and use the same syntax as `<time>`
* `<step>` is the step of the range query, and uses the same syntax as `<time>` (e.g. `30s`)
* `<expect specifier>` (optional) specifies expected annotations, errors, or result ordering.
+* `<expect range vector specifier>` (optional) for an instant query you can specify expected range vector timestamps
+* `<expect string specifier> "<string>"` (optional) for matching a string literal
* `<series>` and `<points>` specify the expected values, and follow the same syntax as for `load` above
+### `expect string`
+
+This can be used to specify that a string literal is the expected result.
+
+Note that this is only supported on instant queries.
+
+For example:
+
+```
+eval instant at 50m ("Foo")
+ expect string "Foo"
+```
+
+The expected string value must be within quotes. Double or back quotes are supported.
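+
+For example, the same expectation can be written with back quotes, since the
+value is parsed with Go's `strconv.Unquote`:
+
+```
+eval instant at 50m ("Foo")
+ expect string `Foo`
+```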
+
+### `expect range vector`
+
+This can be used to specify the expected timestamps on a range vector resulting from an instant query.
+
+```
+expect range vector from <from> to <to> step <step>
+```
+
+For example:
+```
+load 10s
+ some_metric{env="a"} 1+1x5
+ some_metric{env="b"} 2+2x5
+eval instant at 1m some_metric[1m]
+ expect range vector from 10s to 1m step 10s
+ some_metric{env="a"} 2 3 4 5 6
+ some_metric{env="b"} 4 6 8 10 12
+```
+
### `expect` Syntax
```
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go
index 1754f6635d1..41d8cdde20a 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go
@@ -53,11 +53,14 @@ var (
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
patExpect = regexp.MustCompile(`^expect\s+(ordered|fail|warn|no_warn|info|no_info)(?:\s+(regex|msg):(.+))?$`)
patMatchAny = regexp.MustCompile(`^.*$`)
+ patExpectRange = regexp.MustCompile(`^` + rangeVectorPrefix + `\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+)$`)
)
const (
defaultEpsilon = 0.000001 // Relative error allowed for sample values.
DefaultMaxSamplesPerQuery = 10000
+ rangeVectorPrefix = "expect range vector"
+ expectStringPrefix = "expect string"
)
type TBRun interface {
@@ -120,9 +123,11 @@ func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage f
t.Cleanup(func() {
parser.EnableExperimentalFunctions = false
parser.ExperimentalDurationExpr = false
+ parser.EnableExtendedRangeSelectors = false
})
parser.EnableExperimentalFunctions = true
parser.ExperimentalDurationExpr = true
+ parser.EnableExtendedRangeSelectors = true
files, err := fs.Glob(testsFs, "*/*.test")
require.NoError(t, err)
@@ -219,7 +224,7 @@ func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) }
//go:embed testdata
var testsFs embed.FS
-func raise(line int, format string, v ...interface{}) error {
+func raise(line int, format string, v ...any) error {
return &parser.ParseErr{
LineOffset: line,
Err: fmt.Errorf(format, v...),
@@ -314,6 +319,57 @@ func validateExpectedCmds(cmd *evalCmd) error {
return nil
}
+// Given an expected range vector definition, parse the line and return the start & end times and the step duration.
+// i.e. parse a line such as "expect range vector from 10s to 1m step 10s".
+// The from and to are parsed as durations and their values added to epoch(0) to form a time.Time.
+// The step is parsed as a duration and returned as a time.Duration.
+func (t *test) parseExpectRangeVector(line string) (*time.Time, *time.Time, *time.Duration, error) {
+ parts := patExpectRange.FindStringSubmatch(line)
+ if len(parts) != 4 {
+ return nil, nil, nil, fmt.Errorf("invalid range vector definition %q", line)
+ }
+
+ from := parts[1]
+ to := parts[2]
+ step := parts[3]
+
+ parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ start := testStartTime.Add(time.Duration(*parsedFrom))
+ end := testStartTime.Add(time.Duration(*parsedTo))
+ stepDuration := time.Duration(*parsedStep)
+
+ return &start, &end, &stepDuration, nil
+}
+
+// parseDurations parses the given from, to and step strings to Durations.
+// Additionally, a check is performed to ensure that to is not before from.
+func (*test) parseDurations(from, to, step string) (*model.Duration, *model.Duration, *model.Duration, error) {
+ parsedFrom, err := model.ParseDuration(from)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid start timestamp definition %q: %w", from, err)
+ }
+
+ parsedTo, err := model.ParseDuration(to)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid end timestamp definition %q: %w", to, err)
+ }
+
+ if parsedTo < parsedFrom {
+ return nil, nil, nil, fmt.Errorf("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
+ }
+
+ parsedStep, err := model.ParseDuration(step)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid step definition %q: %w", step, err)
+ }
+
+ return &parsedFrom, &parsedTo, &parsedStep, nil
+}
+
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
instantParts := patEvalInstant.FindStringSubmatch(lines[i])
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
@@ -355,10 +411,11 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
}
var cmd *evalCmd
+ var offset model.Duration
if isInstant {
at := instantParts[2]
- offset, err := model.ParseDuration(at)
+ offset, err = model.ParseDuration(at)
if err != nil {
return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
}
@@ -369,26 +426,12 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
to := rangeParts[3]
step := rangeParts[4]
- parsedFrom, err := model.ParseDuration(from)
- if err != nil {
- return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
- }
-
- parsedTo, err := model.ParseDuration(to)
- if err != nil {
- return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
- }
-
- if parsedTo < parsedFrom {
- return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
- }
-
- parsedStep, err := model.ParseDuration(step)
+ parsedFrom, parsedTo, parsedStep, err := t.parseDurations(from, to, step)
if err != nil {
- return i, nil, formatErr("invalid step definition %q: %s", step, err)
+ return i, nil, formatErr(err.Error())
}
- cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
+ cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(*parsedFrom)), testStartTime.Add(time.Duration(*parsedTo)), time.Duration(*parsedStep), i+1)
}
switch mod {
@@ -404,6 +447,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
cmd.info = true
}
+ var expectRangeVector bool
+
for j := 1; i+1 < len(lines); j++ {
i++
defLine := lines[i]
@@ -426,6 +471,32 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
break
}
+ if strings.HasPrefix(defLine, rangeVectorPrefix) {
+ start, end, step, err := t.parseExpectRangeVector(defLine)
+ if err != nil {
+ return i, nil, formatErr("%w", err)
+ }
+
+ expectRangeVector = true
+ cmd.start = *start
+ cmd.end = *end
+ cmd.step = *step
+ cmd.eval = *end
+ cmd.excludeFromRangeQuery = true
+
+ continue
+ }
+
+ if strings.HasPrefix(defLine, expectStringPrefix) {
+ expectString, err := parseAsStringLiteral(defLine)
+ if err != nil {
+ return i, nil, formatErr("%w", err)
+ }
+ cmd.expectedString = expectString
+ cmd.excludeFromRangeQuery = true
+ continue
+ }
+
// This would still allow a metric named 'expect' if it is written as 'expect{}'.
if strings.Split(defLine, " ")[0] == "expect" {
annoType, expectedAnno, err := parseExpect(defLine)
@@ -450,15 +521,35 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err
}
- // Currently, we are not expecting any matrices.
- if len(vals) > 1 && isInstant {
- return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
+ // Only allow a range vector for an instant query where we have defined the expected range vector timestamps.
+ if len(vals) > 1 && isInstant && !expectRangeVector {
+ return i, nil, formatErr("expecting multiple values in instant evaluation not allowed. Consider using the 'expect range vector' directive to enable a range vector result for an instant query")
}
cmd.expectMetric(j, metric, vals...)
}
return i, cmd, nil
}
+// parseAsStringLiteral returns the expected string from an expect string expression.
+// It is valid for the line to match the expect string prefix exactly, and an empty string is returned.
+func parseAsStringLiteral(line string) (string, error) {
+ if line == expectStringPrefix {
+ return "", errors.New("expected string literal not valid - a quoted string literal is required")
+ }
+
+ str := strings.TrimPrefix(line, expectStringPrefix+" ")
+ if len(str) == 0 {
+ return "", errors.New("expected string literal not valid - a quoted string literal is required")
+ }
+
+ str, err := strconv.Unquote(str)
+ if err != nil {
+ return "", errors.New("expected string literal not valid - check that the string is correctly quoted")
+ }
+
+ return str, nil
+}
+
// getLines returns trimmed lines after removing the comments.
func getLines(input string) []string {
lines := strings.Split(input, "\n")
@@ -532,7 +623,7 @@ func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
}
}
-func (cmd loadCmd) String() string {
+func (loadCmd) String() string {
return "load"
}
@@ -692,6 +783,7 @@ type evalCmd struct {
end time.Time
step time.Duration
line int
+ eval time.Time
isRange bool // if false, instant query
fail, warn, ordered, info bool
@@ -703,6 +795,12 @@ type evalCmd struct {
metrics map[uint64]labels.Labels
expectScalar bool
expected map[uint64]entry
+
+ // expectedString holds the expected string-literal result; it is set instead of expected.
+ expectedString string
+
+ // If true and this is an instant query, the command is not additionally tested as a range query.
+ excludeFromRangeQuery bool
}
func (ev *evalCmd) isOrdered() bool {
@@ -772,6 +870,7 @@ func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
+ eval: start,
line: line,
metrics: map[uint64]labels.Labels{},
@@ -795,7 +894,7 @@ func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line
}
}
-func (ev *evalCmd) String() string {
+func (*evalCmd) String() string {
return "eval"
}
@@ -1016,7 +1115,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
}
-
+ case promql.String:
+ if ev.expectedString != val.V {
+ return fmt.Errorf("expected string \"%v\" but got \"%v\"", ev.expectedString, val.V)
+ }
default:
panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
}
@@ -1037,7 +1139,7 @@ func compareNativeHistogram(exp, cur *histogram.FloatHistogram) bool {
}
if exp.UsesCustomBuckets() {
- if !histogram.FloatBucketsMatch(exp.CustomValues, cur.CustomValues) {
+ if !histogram.CustomBucketBoundsMatch(exp.CustomValues, cur.CustomValues) {
return false
}
}
@@ -1195,7 +1297,7 @@ func HistogramTestExpression(h *histogram.FloatHistogram) string {
// clearCmd is a command that wipes the test's storage state.
type clearCmd struct{}
-func (cmd clearCmd) String() string {
+func (clearCmd) String() string {
return "clear"
}
@@ -1354,11 +1456,12 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
}
func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
- queries, err := atModifierTestCases(cmd.expr, cmd.start)
+ queries, err := atModifierTestCases(cmd.expr, cmd.eval)
if err != nil {
return err
}
- queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
+ queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.eval}}, queries...)
+
for _, iq := range queries {
if err := t.runInstantQuery(iq, cmd, engine); err != nil {
return err
@@ -1395,6 +1498,12 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
+ // This query has been explicitly excluded from range query testing,
+ // e.g. because its result is not an instant vector or scalar.
+ if cmd.excludeFromRangeQuery {
+ return nil
+ }
+
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
@@ -1527,7 +1636,7 @@ func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
func (ll *LazyLoader) parse(input string) error {
lines := getLines(input)
// Accepts only 'load' command.
- for i := 0; i < len(lines); i++ {
+ for i := range lines {
l := lines[i]
if len(l) == 0 {
continue
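
Since parseAsStringLiteral above defers to strconv.Unquote, the accepted quoting forms are exactly Go's string-literal rules; a standalone sketch:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, s := range []string{`"Foo"`, "`Foo`", `Foo`} {
		v, err := strconv.Unquote(s)
		// Double and back quotes unquote cleanly; a bare word errors out,
		// which is why a line like `expect string Foo` fails to parse.
		fmt.Printf("%-8s -> %q, err=%v\n", s, v, err)
	}
}
```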
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/extended_vectors.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/extended_vectors.test
new file mode 100644
index 00000000000..8e116b1ac58
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/extended_vectors.test
@@ -0,0 +1,414 @@
+# Reference from PROM-52: Complete dataset
+
+load 15s
+ metric 1+1x4 9+1x4
+
+eval instant at 5s increase(metric[1m])
+
+eval instant at 20s increase(metric[1m])
+ {} 1.833333333
+
+eval instant at 35s increase(metric[1m])
+ {} 2.833333333
+
+eval instant at 50s increase(metric[1m])
+ {} 4
+
+eval instant at 65s increase(metric[1m])
+ {} 4
+
+eval instant at 80s increase(metric[1m])
+ {} 8
+
+eval instant at 95s increase(metric[1m])
+ {} 8
+
+eval instant at 110s increase(metric[1m])
+ {} 8
+
+eval instant at 125s increase(metric[1m])
+ {} 4
+
+eval instant at 5s increase(metric[1m] anchored)
+ {} 0
+
+eval instant at 20s increase(metric[1m] anchored)
+ {} 1
+
+eval instant at 35s increase(metric[1m] anchored)
+ {} 2
+
+eval instant at 50s increase(metric[1m] anchored)
+ {} 3
+
+eval instant at 65s increase(metric[1m] anchored)
+ {} 4
+
+eval instant at 80s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 95s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 110s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 125s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 5s increase(metric[1m] smoothed)
+ {} 0.333333333
+
+eval instant at 20s increase(metric[1m] smoothed)
+ {} 1.333333333
+
+eval instant at 35s increase(metric[1m] smoothed)
+ {} 2.333333333
+
+eval instant at 50s increase(metric[1m] smoothed)
+ {} 3.333333333
+
+eval instant at 65s increase(metric[1m] smoothed)
+ {} 5
+
+eval instant at 80s increase(metric[1m] smoothed)
+ {} 7
+
+eval instant at 95s increase(metric[1m] smoothed)
+ {} 7
+
+eval instant at 110s increase(metric[1m] smoothed)
+ {} 7
+
+eval instant at 125s increase(metric[1m] smoothed)
+ {} 6
+
+# Reference from PROM-52: Partial dataset
+
+clear
+load 15s
+ metric 1+1x2 _ _ 9+1x4
+
+eval instant at 5s increase(metric[1m])
+
+eval instant at 20s increase(metric[1m])
+ {} 1.833333333
+
+eval instant at 35s increase(metric[1m])
+ {} 2.833333333
+
+eval instant at 50s increase(metric[1m])
+ {} 3.166666666
+
+eval instant at 65s increase(metric[1m])
+ {} 2.166666666
+
+eval instant at 80s increase(metric[1m])
+ {} 8
+
+eval instant at 95s increase(metric[1m])
+ {} 1.833333333
+
+eval instant at 110s increase(metric[1m])
+ {} 2.833333333
+
+eval instant at 125s increase(metric[1m])
+ {} 4
+
+eval instant at 5s increase(metric[1m] anchored)
+ {} 0
+
+eval instant at 20s increase(metric[1m] anchored)
+ {} 1
+
+eval instant at 35s increase(metric[1m] anchored)
+ {} 2
+
+eval instant at 50s increase(metric[1m] anchored)
+ {} 2
+
+eval instant at 65s increase(metric[1m] anchored)
+ {} 2
+
+eval instant at 80s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 95s increase(metric[1m] anchored)
+ {} 7
+
+eval instant at 110s increase(metric[1m] anchored)
+ {} 8
+
+eval instant at 125s increase(metric[1m] anchored)
+ {} 9
+
+eval instant at 5s increase(metric[1m] smoothed)
+ {} 0.333333333
+
+eval instant at 20s increase(metric[1m] smoothed)
+ {} 1.333333333
+
+eval instant at 35s increase(metric[1m] smoothed)
+ {} 2.666666666
+
+eval instant at 50s increase(metric[1m] smoothed)
+ {} 4.666666666
+
+eval instant at 65s increase(metric[1m] smoothed)
+ {} 6.333333333
+
+eval instant at 80s increase(metric[1m] smoothed)
+ {} 7
+
+eval instant at 95s increase(metric[1m] smoothed)
+ {} 6.666666666
+
+eval instant at 110s increase(metric[1m] smoothed)
+ {} 5.666666666
+
+eval instant at 125s increase(metric[1m] smoothed)
+ {} 4.666666666
+
+# Test that interval is left-open.
+
+clear
+load 1m
+ metric 1 2 _ 4 5
+
+eval instant at 2m increase(metric[1m] smoothed)
+ {} 1
+
+eval instant at 2m increase(metric[1m] anchored)
+
+# Basic test with counter resets
+
+clear
+load 1m
+ metric{id="1"} 1+1x4 1+1x4
+ metric{id="2"} 3 2+2x9
+ metric{id="3"} 5+3x2 3+3x6
+
+eval instant at 1m30s increase(metric[1m])
+
+eval instant at 1m30s increase(metric[1m] smoothed)
+ {id="1"} 1
+ {id="2"} 2
+ {id="3"} 3
+
+eval instant at 1m30s increase(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 2
+ {id="3"} 3
+
+eval instant at 1m30s delta(metric[1m])
+
+eval instant at 1m30s delta(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} -1
+ {id="3"} 3
+
+eval instant at 3m0s delta(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 2
+ {id="3"} -8
+
+eval instant at 3m30s delta(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 2
+ {id="3"} -8
+
+eval instant at 6m increase(metric[5m])
+ {id="1"} 5
+ {id="2"} 10
+ {id="3"} 15
+
+eval instant at 6m15s increase(metric[5m] smoothed)
+ {id="1"} 5
+ {id="2"} 10
+ {id="3"} 15
+
+eval instant at 6m increase(metric[5m] smoothed)
+ {id="1"} 5
+ {id="2"} 10
+ {id="3"} 15
+
+eval instant at 5m increase(metric[5m] anchored)
+ {id="1"} 5
+ {id="2"} 10
+ {id="3"} 15
+
+eval instant at 15m increase(metric[5m] anchored)
+
+clear
+load 1m
+ metric{id="1"} 11 -1 100 0
+ metric{id="2"} 0 0 100 0 0 11 -1
+
+eval instant at 5m30s delta(metric[5m] smoothed)
+ {id="1"} -5
+ {id="2"} 5
+
+eval instant at 5m45s delta(metric[5m] smoothed)
+ {id="1"} -2
+ {id="2"} 2
+
+clear
+load 1m
+ metric{id="1"} 1+1x10
+ metric{id="2"} 1 1+1x10
+ metric{id="3"} 99-1x10
+ metric{id="4"} 99 99-1x10
+
+eval instant at 5m changes(metric[5m])
+ {id="1"} 4
+ {id="2"} 4
+ {id="3"} 4
+ {id="4"} 4
+
+eval instant at 5m30s changes(metric[5m])
+ {id="1"} 4
+ {id="2"} 4
+ {id="3"} 4
+ {id="4"} 4
+
+
+eval instant at 5m0s changes(metric[5m] anchored)
+ {id="1"} 5
+ {id="2"} 4
+ {id="3"} 5
+ {id="4"} 4
+
+eval instant at 6m changes(metric[5m] anchored)
+ {id="1"} 5
+ {id="2"} 5
+ {id="3"} 5
+ {id="4"} 5
+
+eval instant at 5m30s changes(metric[5m] anchored)
+ {id="1"} 5
+ {id="2"} 4
+ {id="3"} 5
+ {id="4"} 4
+
+eval instant at 5m30s resets(metric[5m])
+ {id="1"} 0
+ {id="2"} 0
+ {id="3"} 4
+ {id="4"} 4
+
+eval instant at 5m30s resets(metric[5m] anchored)
+ {id="1"} 0
+ {id="2"} 0
+ {id="3"} 5
+ {id="4"} 4
+
+clear
+load 1m
+ metric{id="1"} 2 _ 1 _ _ _ _ _ 0
+ metric{id="2"} 99-1x10
+
+eval instant at 2m changes(metric[1m])
+ {id="1"} 0
+ {id="2"} 0
+
+eval instant at 3m changes(metric[1m])
+ {id="2"} 0
+
+eval instant at 2m changes(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 1
+
+eval instant at 3m changes(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 1
+
+eval instant at 8m changes(metric[1m] anchored)
+ {id="1"} 0
+ {id="2"} 1
+
+eval instant at 8m changes(metric[1m1ms] anchored)
+ {id="1"} 1
+ {id="2"} 2
+
+eval instant at 2m resets(metric[1m])
+ {id="1"} 0
+ {id="2"} 0
+
+eval instant at 3m resets(metric[1m])
+ {id="2"} 0
+
+eval instant at 2m resets(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 1
+
+eval instant at 3m resets(metric[1m] anchored)
+ {id="1"} 1
+ {id="2"} 1
+
+eval instant at 8m resets(metric[1m] anchored)
+ {id="1"} 0
+ {id="2"} 1
+
+eval instant at 8m resets(metric[1m1ms] anchored)
+ {id="1"} 1
+ {id="2"} 2
+
+clear
+load 1m
+ metric 9 8 5 4
+
+eval instant at 2m15s increase(metric[2m] smoothed)
+ {} 12
+
+clear
+eval instant at 1m deriv(foo[3m] smoothed)
+ expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with deriv
+
+eval instant at 1m resets(foo[3m] smoothed)
+ expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with resets
+
+eval instant at 1m changes(foo[3m] smoothed)
+ expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with changes
+
+eval instant at 1m max_over_time(foo[3m] smoothed)
+ expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with max_over_time
+
+eval instant at 1m predict_linear(foo[3m] smoothed, 4)
+ expect fail msg: smoothed modifier can only be used with: delta, increase, rate - not with predict_linear
+
+eval instant at 1m deriv(foo[3m] anchored)
+ expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with deriv
+
+eval instant at 1m resets(foo[3m] anchored)
+
+eval instant at 1m changes(foo[3m] anchored)
+
+eval instant at 1m max_over_time(foo[3m] anchored)
+ expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with max_over_time
+
+eval instant at 1m predict_linear(foo[3m] anchored, 4)
+ expect fail msg: anchored modifier can only be used with: changes, delta, increase, rate, resets - not with predict_linear
+
+clear
+load 10s
+ metric 1+1x10
+ withreset 1+1x4 1+1x5
+ notregular 0 5 100 2 8
+
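+# On instant selectors, smoothed linearly interpolates between neighbouring samples
+# (2.5 at 15s lies halfway between 2 and 3), uses the raw value beyond the last
+# sample (11 at 105s), and interpolates straight across counter resets (3 at 45s
+# lies halfway between 5 and 1).
+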
+eval instant at 10s metric smoothed
+ metric 2
+
+eval instant at 15s metric smoothed
+ metric 2.5
+
+eval instant at 5s metric smoothed
+ metric 1.5
+
+eval instant at 105s metric smoothed
+ metric 11
+
+eval instant at 45s withreset smoothed
+ withreset 3
+
+eval instant at 30s notregular smoothed
+ notregular 2
\ No newline at end of file
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test
index b1eda909f83..ba3df76ff61 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test
@@ -254,15 +254,15 @@ eval instant at 20m irate(http_requests_histogram{path="/b"}[6m])
expect no_warn
eval instant at 20m irate(http_requests_histogram{path="/c"}[20m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a counter: "http_requests_histogram"
{path="/c"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}
eval instant at 20m irate(http_requests_histogram{path="/d"}[20m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a counter: "http_requests_histogram"
{path="/d"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}
eval instant at 20m irate(http_requests_histogram{path="/e"}[20m])
- expect warn
+ expect warn msg: PromQL warning: encountered a mix of histograms and floats for metric name "http_requests_histogram"
eval instant at 20m irate(http_requests_histogram{path="/f"}[20m])
expect no_warn
@@ -270,7 +270,7 @@ eval instant at 20m irate(http_requests_histogram{path="/f"}[20m])
eval instant at 20m irate(http_requests_histogram{path="/g"}[20m])
expect no_warn
- {path="/g"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}}
+ {path="/g"} {{schema:-53 counter_reset_hint:gauge}}
clear
@@ -293,12 +293,12 @@ eval instant at 20m delta(http_requests_gauge[20m])
# delta emits warn annotation for non-gauge histogram types.
eval instant at 20m delta(http_requests_counter[20m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a gauge: "http_requests_counter"
{path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}}
# delta emits warn annotation for mix of histogram and floats.
eval instant at 20m delta(http_requests_mix[20m])
- expect warn
+ expect warn msg: PromQL warning: encountered a mix of histograms and floats for metric name "http_requests_mix"
#empty
clear
@@ -314,7 +314,7 @@ load 5m
http_requests_histogram{path="/d"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{sum:2 count:2}}
http_requests_histogram{path="/e"} 0 1 2 {{sum:1 count:2 counter_reset_hint:gauge}}
http_requests_histogram{path="/f"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}}
- http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}}
+ http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[1 10] buckets:[1] counter_reset_hint:gauge}}
eval instant at 20m idelta(http_requests[20m])
expect no_warn
@@ -337,21 +337,23 @@ eval instant at 20m idelta(http_requests_histogram{path="/b"}[6m])
expect no_warn
eval instant at 20m idelta(http_requests_histogram{path="/c"}[20m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a gauge: "http_requests_histogram"
{path="/c"} {{sum:1 count:1 counter_reset_hint:gauge}}
eval instant at 20m idelta(http_requests_histogram{path="/d"}[20m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a gauge: "http_requests_histogram"
{path="/d"} {{sum:1 count:1 counter_reset_hint:gauge}}
eval instant at 20m idelta(http_requests_histogram{path="/e"}[20m])
- expect warn
+ expect warn msg: PromQL warning: encountered a mix of histograms and floats for metric name "http_requests_histogram"
eval instant at 20m idelta(http_requests_histogram{path="/f"}[20m])
- expect warn
+ expect warn msg: PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "http_requests_histogram"
eval instant at 20m idelta(http_requests_histogram{path="/g"}[20m])
- expect warn
+ expect no_warn
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction
+ {path="/g"} {{schema:-53 custom_values:[1] counter_reset_hint:gauge buckets:[-1]}}
clear
@@ -1320,6 +1322,28 @@ eval instant at 95s ts_of_last_over_time(metric_histogram{type="only_histogram"}
eval instant at 95s ts_of_last_over_time(metric_histogram{type="mix"}[90s])
{type="mix"} 50.265
+# Tests for ts_of_first_over_time
+clear
+load 10s53ms
+ metric _ _ 1 2 3 _ _
+ metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x4
+ metric_histogram{type="mix"} _ 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} 1
+
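+# With 10s53ms spacing, metric's first sample lands at 2 * 10.053s = 20.106s. At
+# t=95s the lookup window is (5s, 95s], which excludes samples at t=0, so the
+# histogram series' first in-range timestamps are 10.053s.
+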
+eval instant at 90s ts_of_first_over_time(metric[90s])
+ {} 20.106
+
+eval instant at 95s ts_of_first_over_time(metric[90s])
+ {} 20.106
+
+eval instant at 15s ts_of_first_over_time(metric[90s])
+ #empty
+
+eval instant at 95s ts_of_first_over_time(metric_histogram{type="only_histogram"}[90s])
+ {type="only_histogram"} 10.053
+
+eval instant at 95s ts_of_first_over_time(metric_histogram{type="mix"}[90s])
+ {type="mix"} 10.053
+
# Tests for quantile_over_time
clear
@@ -1520,7 +1544,19 @@ load 10s
data{type="some_nan3"} NaN 0 1
data{type="only_nan"} NaN NaN NaN
data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} {{schema:0 sum:3 count:4}}
- data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}}
+ data_histogram{type="mix_samples_hlast"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}}
+ data_sparse{type="sparse_numbers"} _ 5 2 _ 4 _
+ data_empty{type="empty_series"} _ _ _ _ _ _ _ _ _ _ _ _ _
+
+# Workaround for https://github.com/prometheus/prometheus/issues/17025, which causes histogram samples
+# before float samples in a load directive to be silently dropped as (incorrectly) out-of-order.
+# By splitting the vector across two loads, a commit is forced in between so the
+# ordering will be handled correctly.
+load 10s
+ data_histogram{type="mix_samples_hfirst"} {{schema:0 sum:1 count:2}} {{schema:0 sum:9 count:3}}
+
+load 10s
+ data_histogram{type="mix_samples_hfirst"} _ _ 4 1
eval instant at 1m min_over_time(data[2m])
expect no_info
@@ -1534,9 +1570,13 @@ eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m])
expect no_info
#empty
-eval instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m])
+eval instant at 1m min_over_time(data_histogram{type=~"mix_samples.*"}[2m])
expect info
- {type="mix_samples"} 0
+ {type="mix_samples_hlast"} 0
+ {type="mix_samples_hfirst"} 1
+
+eval instant at 1m min_over_time(data_sparse[2m])
+ {type="sparse_numbers"} 2
eval instant at 1m max_over_time(data[2m])
expect no_info
@@ -1550,11 +1590,15 @@ eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m])
expect no_info
#empty
-eval instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m])
+eval instant at 1m max_over_time(data_histogram{type=~"mix_samples.*"}[2m])
expect info
- {type="mix_samples"} 1
+ {type="mix_samples_hlast"} 1
+ {type="mix_samples_hfirst"} 4
+
+eval instant at 1m max_over_time(data_sparse[2m])
+ {type="sparse_numbers"} 5
-eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m])
+eval instant at 1m last_over_time({__name__=~"data(_histogram|_sparse|_empty)?"}[2m])
expect no_info
data{type="numbers"} 3
data{type="some_nan"} NaN
@@ -1562,9 +1606,23 @@ eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m])
data{type="some_nan3"} 1
data{type="only_nan"} NaN
data_histogram{type="only_histogram"} {{schema:0 sum:3 count:4}}
- data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}}
+ data_histogram{type="mix_samples_hlast"} {{schema:0 sum:2 count:3}}
+ data_histogram{type="mix_samples_hfirst"} 1
+ data_sparse{type="sparse_numbers"} 4
+
+eval instant at 1m first_over_time({__name__=~"data(_histogram|_sparse|_empty)?"}[2m])
+ expect no_info
+ data{type="numbers"} 2
+ data{type="some_nan"} 2
+ data{type="some_nan2"} 2
+ data{type="some_nan3"} NaN
+ data{type="only_nan"} NaN
+ data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}}
+ data_histogram{type="mix_samples_hlast"} 0
+ data_histogram{type="mix_samples_hfirst"} {{schema:0 sum:1 count:2}}
+ data_sparse{type="sparse_numbers"} 5
-eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m])
+eval instant at 1m count_over_time({__name__=~"data(_histogram|_sparse|_empty)?"}[2m])
expect no_info
{type="numbers"} 3
{type="some_nan"} 3
@@ -1572,7 +1630,9 @@ eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m])
{type="some_nan3"} 3
{type="only_nan"} 3
{type="only_histogram"} 3
- {type="mix_samples"} 4
+ {type="mix_samples_hlast"} 4
+ {type="mix_samples_hfirst"} 4
+ {type="sparse_numbers"} 3
clear
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test
index 84a467a3145..436390ee412 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test
@@ -158,6 +158,383 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
+# Positive buckets, lower falls in the first bucket.
+load_with_nhcb 5m
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="1"} 1+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="2"} 3+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="3"} 6+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 1]: contributes 1.0 observation (full bucket).
+# - Bucket [1, 2]: contributes (1.5-1)/(2-1) * (3-1) = 0.5 * 2 = 1.0 observations.
+# Total: (1.0 + 1.0) / 100.0 = 0.02
+
+eval instant at 50m histogram_fraction(0, 1.5, positive_buckets_lower_falls_in_the_first_bucket_bucket)
+ expect no_warn
+ {} 0.02
+
+eval instant at 50m histogram_fraction(0, 1.5, positive_buckets_lower_falls_in_the_first_bucket)
+ expect no_warn
+ {} 0.02
+
+# Negative buckets, lower falls in the first bucket.
+load_with_nhcb 5m
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-3"} 10+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-2"} 12+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-1"} 15+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-Inf, -3]: contributes zero observations (no interpolation with infinite width bucket).
+# - Bucket [-3, -2]: contributes 12-10 = 2.0 observations (full bucket).
+# Total: 2.0 / 100.0 = 0.02
+
+eval instant at 50m histogram_fraction(-4, -2, negative_buckets_lower_falls_in_the_first_bucket_bucket)
+ expect no_warn
+ {} 0.02
+
+eval instant at 50m histogram_fraction(-4, -2, negative_buckets_lower_falls_in_the_first_bucket)
+ expect no_warn
+ {} 0.02
+
+# Lower is -Inf.
+load_with_nhcb 5m
+ lower_is_negative_Inf_bucket{le="-3"} 10+0x10
+ lower_is_negative_Inf_bucket{le="-2"} 12+0x10
+ lower_is_negative_Inf_bucket{le="-1"} 15+0x10
+ lower_is_negative_Inf_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-Inf, -3]: contributes 10.0 observations (full bucket).
+# - Bucket [-3, -2]: contributes 12-10 = 2.0 observations (full bucket).
+# - Bucket [-2, -1]: contributes (-1.5-(-2))/(-1-(-2)) * (15-12) = 0.5 * 3 = 1.5 observations.
+# Total: (10.0 + 2.0 + 1.5) / 100.0 = 0.135
+
+eval instant at 50m histogram_fraction(-Inf, -1.5, lower_is_negative_Inf_bucket)
+ expect no_warn
+ {} 0.135
+
+eval instant at 50m histogram_fraction(-Inf, -1.5, lower_is_negative_Inf)
+ expect no_warn
+ {} 0.135
+
+# Lower is -Inf and upper is +Inf (positive buckets).
+load_with_nhcb 5m
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="1"} 1+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="2"} 3+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="3"} 6+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="+Inf"} 100+0x10
+
+# Range [-Inf, +Inf] captures all observations.
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket)
+ expect no_warn
+ {} 1.0
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets_)
+ expect no_warn
+ {} 1.0
+
+# Lower is -Inf and upper is +Inf (negative buckets).
+load_with_nhcb 5m
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-3"} 10+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-2"} 12+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-1"} 15+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="+Inf"} 100+0x10
+
+# Range [-Inf, +Inf] captures all observations.
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket)
+ expect no_warn
+ {} 1.0
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets_)
+ expect no_warn
+ {} 1.0
+
+# Lower and upper fall in last bucket (positive buckets).
+load_with_nhcb 5m
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="1"} 1+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="2"} 3+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="3"} 6+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="+Inf"} 100+0x10
+
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(4, 5, lower_and_upper_fall_in_last_bucket__positive_buckets__bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(4, 5, lower_and_upper_fall_in_last_bucket__positive_buckets_)
+ expect no_warn
+ {} 0.0
+
+# Lower and upper fall in last bucket (negative buckets).
+load_with_nhcb 5m
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-3"} 10+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-2"} 12+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-1"} 15+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-1, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(0, 1, lower_and_upper_fall_in_last_bucket__negative_buckets__bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(0, 1, lower_and_upper_fall_in_last_bucket__negative_buckets_)
+ expect no_warn
+ {} 0.0
+
+# Upper falls in last bucket.
+load_with_nhcb 5m
+ upper_falls_in_last_bucket_bucket{le="1"} 1+0x10
+ upper_falls_in_last_bucket_bucket{le="2"} 3+0x10
+ upper_falls_in_last_bucket_bucket{le="3"} 6+0x10
+ upper_falls_in_last_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 3.0 / 100.0 = 0.03
+
+eval instant at 50m histogram_fraction(2, 5, upper_falls_in_last_bucket_bucket)
+ expect no_warn
+ {} 0.03
+
+eval instant at 50m histogram_fraction(2, 5, upper_falls_in_last_bucket)
+ expect no_warn
+ {} 0.03
+
+# Upper is +Inf.
+load_with_nhcb 5m
+ upper_is_positive_Inf_bucket{le="1"} 1+0x10
+ upper_is_positive_Inf_bucket{le="2"} 3+0x10
+ upper_is_positive_Inf_bucket{le="3"} 6+0x10
+ upper_is_positive_Inf_bucket{le="+Inf"} 100+0x10
+
+# All observations in +Inf bucket: 100-6 = 94.0 observations.
+# Total: 94.0 / 100.0 = 0.94
+
+eval instant at 50m histogram_fraction(400, +Inf, upper_is_positive_Inf_bucket)
+ expect no_warn
+ {} 0.94
+
+eval instant at 50m histogram_fraction(400, +Inf, upper_is_positive_Inf)
+ expect no_warn
+ {} 0.94
+
+# Lower equals upper.
+load_with_nhcb 5m
+ lower_equals_upper_bucket{le="1"} 1+0x10
+ lower_equals_upper_bucket{le="2"} 3+0x10
+ lower_equals_upper_bucket{le="3"} 6+0x10
+ lower_equals_upper_bucket{le="+Inf"} 100+0x10
+
+# No observations can be captured in a zero-width range.
+
+eval instant at 50m histogram_fraction(2, 2, lower_equals_upper_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(2, 2, lower_equals_upper)
+ expect no_warn
+ {} 0.0
+
+# Lower greater than upper.
+load_with_nhcb 5m
+ lower_greater_than_upper_bucket{le="1"} 1+0x10
+ lower_greater_than_upper_bucket{le="2"} 3+0x10
+ lower_greater_than_upper_bucket{le="3"} 6+0x10
+ lower_greater_than_upper_bucket{le="+Inf"} 100+0x10
+
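+# An inverted range (lower > upper) captures no observations, so the fraction is 0.
+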
+eval instant at 50m histogram_fraction(3, 2, lower_greater_than_upper_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(3, 2, lower_greater_than_upper)
+ expect no_warn
+ {} 0.0
+
+# Single bucket.
+load_with_nhcb 5m
+ single_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(0, 1, single_bucket_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(0, 1, single_bucket)
+ expect no_warn
+ {} 0.0
+
+# All zero counts.
+load_with_nhcb 5m
+ all_zero_counts_bucket{le="1"} 0+0x10
+ all_zero_counts_bucket{le="2"} 0+0x10
+ all_zero_counts_bucket{le="3"} 0+0x10
+ all_zero_counts_bucket{le="+Inf"} 0+0x10
+
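+# With a total count of 0, the fraction is 0/0, which is NaN.
+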
+eval instant at 50m histogram_fraction(0, 5, all_zero_counts_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(0, 5, all_zero_counts)
+ expect no_warn
+ {} NaN
+
+# Lower exactly on bucket boundary.
+load_with_nhcb 5m
+ lower_exactly_on_bucket_boundary_bucket{le="1"} 1+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="2"} 3+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="3"} 6+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 3.0 / 100.0 = 0.03
+
+eval instant at 50m histogram_fraction(2, 3.5, lower_exactly_on_bucket_boundary_bucket)
+ expect no_warn
+ {} 0.03
+
+eval instant at 50m histogram_fraction(2, 3.5, lower_exactly_on_bucket_boundary)
+ expect no_warn
+ {} 0.03
+
+# Upper exactly on bucket boundary.
+load_with_nhcb 5m
+ upper_exactly_on_bucket_boundary_bucket{le="1"} 1+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="2"} 3+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="3"} 6+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 1]: (1.0-0.5)/(1.0-0.0) * 1.0 = 0.5 * 1.0 = 0.5 observations.
+# - Bucket [1, 2]: 3-1 = 2.0 observations (full bucket).
+# Total: (0.5 + 2.0) / 100.0 = 0.025
+
+eval instant at 50m histogram_fraction(0.5, 2, upper_exactly_on_bucket_boundary_bucket)
+ expect no_warn
+ {} 0.025
+
+eval instant at 50m histogram_fraction(0.5, 2, upper_exactly_on_bucket_boundary)
+ expect no_warn
+ {} 0.025
+
+# Both bounds exactly on bucket boundaries.
+load_with_nhcb 5m
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="1"} 1+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="2"} 3+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="3"} 6+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [1, 2]: 3-1 = 2.0 observations (full bucket).
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# Total: (2.0 + 3.0) / 100.0 = 0.05
+
+eval instant at 50m histogram_fraction(1, 3, both_bounds_exactly_on_bucket_boundaries_bucket)
+ expect no_warn
+ {} 0.05
+
+eval instant at 50m histogram_fraction(1, 3, both_bounds_exactly_on_bucket_boundaries)
+ expect no_warn
+ {} 0.05
+
+# Fractional bucket bounds.
+load_with_nhcb 5m
+ fractional_bucket_bounds_bucket{le="0.5"} 2.5+0x10
+ fractional_bucket_bounds_bucket{le="1"} 7.5+0x10
+ fractional_bucket_bounds_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 0.5]: (0.5-0.1)/(0.5-0.0) * 2.5 = 0.8 * 2.5 = 2.0 observations.
+# - Bucket [0.5, 1.0]: (0.75-0.5)/(1.0-0.5) * (7.5-2.5) = 0.5 * 5.0 = 2.5 observations.
+# Total: (2.0 + 2.5) / 100.0 = 0.045
+
+eval instant at 50m histogram_fraction(0.1, 0.75, fractional_bucket_bounds_bucket)
+ expect no_warn
+ {} 0.045
+
+eval instant at 50m histogram_fraction(0.1, 0.75, fractional_bucket_bounds)
+ expect no_warn
+ {} 0.045
+
+# Range crosses zero.
+load_with_nhcb 5m
+ range_crosses_zero_bucket{le="-2"} 5+0x10
+ range_crosses_zero_bucket{le="-1"} 10+0x10
+ range_crosses_zero_bucket{le="0"} 15+0x10
+ range_crosses_zero_bucket{le="1"} 20+0x10
+ range_crosses_zero_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-1, 0]: 15-10 = 5.0 observations (full bucket).
+# - Bucket [0, 1]: 20-15 = 5.0 observations (full bucket).
+# Total: (5.0 + 5.0) / 100.0 = 0.1
+
+eval instant at 50m histogram_fraction(-1, 1, range_crosses_zero_bucket)
+ expect no_warn
+ {} 0.1
+
+eval instant at 50m histogram_fraction(-1, 1, range_crosses_zero)
+ expect no_warn
+ {} 0.1
+
+# Lower is NaN.
+load_with_nhcb 5m
+ lower_is_NaN_bucket{le="1"} 1+0x10
+ lower_is_NaN_bucket{le="+Inf"} 100+0x10
+
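+# A NaN bound makes the requested range undefined, so the fraction is NaN.
+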
+eval instant at 50m histogram_fraction(NaN, 1, lower_is_NaN_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(NaN, 1, lower_is_NaN)
+ expect no_warn
+ {} NaN
+
+# Upper is NaN.
+load_with_nhcb 5m
+ upper_is_NaN_bucket{le="1"} 1+0x10
+ upper_is_NaN_bucket{le="+Inf"} 100+0x10
+
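+# Likewise, a NaN upper bound yields NaN.
+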
+eval instant at 50m histogram_fraction(0, NaN, upper_is_NaN_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(0, NaN, upper_is_NaN)
+ expect no_warn
+ {} NaN
+
+# Range entirely below all buckets.
+load_with_nhcb 5m
+ range_entirely_below_all_buckets_bucket{le="1"} 1+0x10
+ range_entirely_below_all_buckets_bucket{le="2"} 3+0x10
+ range_entirely_below_all_buckets_bucket{le="+Inf"} 10+0x10
+
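+# The range [-10, -5] lies entirely below the lowest bucket, so it captures no
+# observations.
+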
+eval instant at 50m histogram_fraction(-10, -5, range_entirely_below_all_buckets_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(-10, -5, range_entirely_below_all_buckets)
+ expect no_warn
+ {} 0.0
+
+# Range entirely above all buckets.
+load_with_nhcb 5m
+ range_entirely_above_all_buckets_bucket{le="1"} 1+0x10
+ range_entirely_above_all_buckets_bucket{le="2"} 3+0x10
+ range_entirely_above_all_buckets_bucket{le="+Inf"} 10+0x10
+
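+# The range [5, 10] falls inside the infinite-width (2, +Inf] bucket, which
+# contributes zero observations (no interpolation with infinite width bucket).
+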
+eval instant at 50m histogram_fraction(5, 10, range_entirely_above_all_buckets_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(5, 10, range_entirely_above_all_buckets)
+ expect no_warn
+ {} 0.0
+
+
# In the classic histogram, we can access the corresponding bucket (if
# it exists) and divide by the count to get the same result.
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/info.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/info.test
new file mode 100644
index 00000000000..891e0eaa535
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/info.test
@@ -0,0 +1,152 @@
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
+ metric_with_overlapping_label{instance="a", job="1", label="value", data="base"} 0 1 2
+ target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 1
+ build_info{instance="a", job="1", build_data="build"} 1 1 1
+
+# Include one info metric data label.
+eval range from 0m to 10m step 5m info(metric, {data=~".+"})
+ metric{data="info", instance="a", job="1", label="value"} 0 1 2
+
+# Include all info metric data labels.
+eval range from 0m to 10m step 5m info(metric)
+ metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Try including all info metric data labels, but with non-matching identifying labels.
+eval range from 0m to 10m step 5m info(metric_not_matching_target_info)
+ metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
+
+# Try including a certain info metric data label with a non-matching matcher not accepting empty labels.
+# The metric is ignored because there is a data label matcher not matching empty
+# labels and there are no matching info series.
+eval range from 0m to 10m step 5m info(metric, {non_existent=~".+"})
+
+# Include a certain info metric data label together with a non-matching matcher accepting empty labels.
+# Since the non_existent matcher matches empty labels, it's simply ignored when there's no match.
+# XXX: This case has to include a matcher not matching empty labels, due to the PromQL limitation
+# that vector selectors have to contain at least one matcher not accepting empty labels.
+# We might need a construct other than vector selectors to get around this limitation.
+eval range from 0m to 10m step 5m info(metric, {data=~".+", non_existent=~".*"})
+ metric{data="info", instance="a", job="1", label="value"} 0 1 2
+
+# Info series data labels overlapping with those of base series are ignored.
+eval range from 0m to 10m step 5m info(metric_with_overlapping_label)
+ metric_with_overlapping_label{data="base", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Include data labels from target_info specifically.
+eval range from 0m to 10m step 5m info(metric, {__name__="target_info"})
+ metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
+
+# Try to include all data labels from a non-existent info metric.
+eval range from 0m to 10m step 5m info(metric, {__name__="non_existent"})
+ metric{instance="a", job="1", label="value"} 0 1 2
+
+# Try to include a certain data label from a non-existent info metric.
+eval range from 0m to 10m step 5m info(metric, {__name__="non_existent", data=~".+"})
+
+# Include data labels from build_info.
+eval range from 0m to 10m step 5m info(metric, {__name__="build_info"})
+ metric{instance="a", job="1", label="value", build_data="build"} 0 1 2
+
+# Include data labels from build_info and target_info.
+eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info"})
+ metric{instance="a", job="1", label="value", build_data="build", data="info", another_data="another info"} 0 1 2
+
+# Info metrics themselves are ignored when it comes to enriching with info metric data labels.
+eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info", build_data=~".+"})
+ build_info{instance="a", job="1", build_data="build"} 1 1 1
+
+clear
+
+# Overlapping target_info series.
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 _
+ target_info{instance="a", job="1", data="updated info", another_data="another info"} _ _ 1
+
+# Conflicting info series are resolved through picking the latest sample.
+eval range from 0m to 10m step 5m info(metric)
+ metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 _
+ metric{data="updated info", instance="a", job="1", label="value", another_data="another info"} _ _ 2
+
+clear
+
+# Non-overlapping target_info series.
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ target_info{instance="a", job="1", data="info"} 1 1 stale
+ target_info{instance="a", job="1", data="updated info"} _ _ 1
+
+# Include info metric data labels from a metric whose data labels change over time.
+eval range from 0m to 10m step 5m info(metric)
+ metric{data="info", instance="a", job="1", label="value"} 0 1 _
+ metric{data="updated info", instance="a", job="1", label="value"} _ _ 2
+
+clear
+
+# The info series selector matches histogram series, but info metrics should be of float type.
+load 5m
+ metric{instance="a", job="1", label="value"} 0 1 2
+ histogram{instance="a", job="1"} {{schema:1 sum:3 count:22 buckets:[5 10 7]}}
+
+eval_fail range from 0m to 10m step 5m info(metric, {__name__="histogram"})
+
+clear
+
+# Series with skipped scrape.
+load 1m
+ metric{instance="a", job="1", label="value"} 0 _ 2 3 4
+ target_info{instance="a", job="1", data="info"} 1 _ 1 1 1
+
+# Lookback also works for the info series.
+eval range from 1m to 4m step 1m info(metric)
+ metric{data="info", instance="a", job="1", label="value"} 0 2 3 4
+
+# The @ operator also works with info.
+# Note that we pick a timestamp that is missing a sample; lookback should pick the previous sample.
+eval range from 1m to 4m step 1m info(metric @ 60)
+ metric{data="info", instance="a", job="1", label="value"} 0 0 0 0
+
+# The offset operator also works with info.
+eval range from 1m to 4m step 1m info(metric offset 1m)
+ metric{data="info", instance="a", job="1", label="value"} 0 0 2 3
+
+clear
+
+# info_metric churn:
+
+load 1m
+ data_metric{instance="a", job="work"} 10 20 30
+ data_metric{instance="b", job="work"} 11 21 31
+ info_metric{instance="b", job="work", state="stopped"} 1 1 _
+ info_metric{instance="b", job="work", state="running"} _ _ 1
+ info_metric{instance="a", job="work", state="running"} 1 1 1
+
+eval range from 0 to 2m step 1m info(data_metric, {__name__="info_metric"})
+ data_metric{instance="a", job="work", state="running"} 10 20 30
+ data_metric{instance="b", job="work", state="stopped"} 11 21 _
+ data_metric{instance="b", job="work", state="running"} _ _ 31
+
+clear
+
+# data_metric churn:
+
+load 1m
+ data_metric{instance="a", job="work"} 10 20 stale
+ data_metric{instance="b", job="work"} 11 21 31
+ data_metric{instance="a", job="work", label="new"} _ _ 30
+ info_metric{instance="b", job="work", state="stopped"} 1 1 1
+ info_metric{instance="a", job="work", state="running"} 1 1 1
+
+eval range from 0 to 2m step 1m info(data_metric, {__name__="info_metric"})
+ data_metric{instance="a", job="work", state="running"} 10 20 _
+ data_metric{instance="b", job="work", state="stopped"} 11 21 31
+ data_metric{instance="a", job="work", state="running", label="new"} _ _ 30
+
+eval range from 0 to 2m step 1m info({job="work"}, {__name__="info_metric"})
+ data_metric{instance="a", job="work", state="running"} 10 20 _
+ data_metric{instance="b", job="work", state="stopped"} 11 21 31
+ data_metric{instance="a", job="work", state="running", label="new"} _ _ 30
+ info_metric{instance="b", job="work", state="stopped"} 1 1 1
+ info_metric{instance="a", job="work", state="running"} 1 1 1
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/literals.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/literals.test
index 0d866384295..5d62ac9a8ca 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/literals.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/literals.test
@@ -57,3 +57,18 @@ eval instant at 50m 0 / 0
eval instant at 50m 1 % 0
NaN
+
+eval instant at 50m ("Foo")
+ expect string `Foo`
+
+eval instant at 50m "Foo"
+ expect string "Foo"
+
+eval instant at 50m " Foo "
+ expect string " Foo "
+
+eval instant at 50m ("")
+ expect string ""
+
+eval instant at 50m ""
+ expect string ""
\ No newline at end of file
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test
index 48cdb9ba4e9..3682021ba93 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test
@@ -43,6 +43,10 @@ eval instant at 10m rate(metric_total{env="1"}[10m])
eval instant at 10m last_over_time(metric_total{env="1"}[10m])
metric_total{env="1"} 120
+# Does not drop __name__ for first_over_time function.
+eval instant at 10m first_over_time(metric_total{env="1"}[10m])
+ metric_total{env="1"} 60
+
# Drops name for other _over_time functions.
eval instant at 10m max_over_time(metric_total{env="1"}[10m])
{env="1"} 120
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test
index e38e003b3f4..fd4b1f41783 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test
@@ -101,54 +101,56 @@ clear
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
load 5m
- incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10
+ incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10
eval instant at 5m histogram_count(incr_histogram)
- {} 5
+ {} 5
eval instant at 5m histogram_sum(incr_histogram)
- {} 6
+ {} 6
eval instant at 5m histogram_avg(incr_histogram)
- {} 1.2
+ {} 1.2
# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, incr_histogram)
- {} 0.6
+ {} 0.6
# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, incr_histogram)
- {} 1.414213562373095
+ {} 1.414213562373095
eval instant at 50m incr_histogram
- {__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}
+ {__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}
eval instant at 50m histogram_count(incr_histogram)
- {} 14
+ {} 14
eval instant at 50m histogram_sum(incr_histogram)
- {} 24
+ {} 24
eval instant at 50m histogram_avg(incr_histogram)
{} 1.7142857142857142
# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
eval instant at 50m histogram_fraction(1, 2, incr_histogram)
- {} 0.8571428571428571
+ {} 0.8571428571428571
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, incr_histogram)
- {} 1.414213562373095
+ {} 1.414213562373095
# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[10m])
+ expect no_warn
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
# Calculate the 50th percentile of observations over the last 10m.
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
- {} 1.414213562373095
+ expect no_warn
+ {} 1.414213562373095
clear
@@ -291,9 +293,11 @@ load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
eval instant at 5m rate(histogram_rate[45s])
+ expect no_warn
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
+ expect no_warn
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
clear
@@ -1044,13 +1048,16 @@ load 5m
reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}}
eval instant at 10m increase(reset_in_bucket[15m])
+ expect no_warn
{} {{count:9 sum:10.5 buckets:[1.5 3 4.5]}}
# The following two test the "fast path" where only sum and count is decoded.
eval instant at 10m histogram_count(increase(reset_in_bucket[15m]))
+ expect no_warn
{} 9
eval instant at 10m histogram_sum(increase(reset_in_bucket[15m]))
+ expect no_warn
{} 10.5
clear
@@ -1076,12 +1083,12 @@ load 30s
# Test the case where we only have two points for rate
eval instant at 30s rate(some_metric[1m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a counter: "some_metric"
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
# Test the case where we have more than two points for rate
eval instant at 1m rate(some_metric[1m30s])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a counter: "some_metric"
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
clear
@@ -1092,24 +1099,26 @@ load 30s
# Start and end with exponential, with custom in the middle.
eval instant at 1m rate(some_metric[1m30s])
- expect warn
+ expect warn msg: PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "some_metric"
# Should produce no results.
# Start and end with custom, with exponential in the middle.
eval instant at 1m30s rate(some_metric[1m30s])
- expect warn
+ expect warn msg: PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "some_metric"
# Should produce no results.
# Start with custom, end with exponential. Return the exponential histogram divided by 48.
# (The 1st sample is the NHCB with count:1. It is mostly ignored with the exception of the
# count, which means the rate calculation extrapolates until the count hits 0.)
eval instant at 1m rate(some_metric[1m])
+ expect no_warn
{} {{count:0.08333333333333333 sum:0.10416666666666666 counter_reset_hint:gauge buckets:[0.020833333333333332 0.041666666666666664 0.020833333333333332]}}
# Start with exponential, end with custom. Return the custom buckets histogram divided by 30.
# (With the 2nd sample having a count of 1, the extrapolation to zero lands exactly at the
# left boundary of the range, so no extrapolation limitation needed.)
eval instant at 30s rate(some_metric[1m])
+ expect no_warn
{} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}}
clear
@@ -1121,41 +1130,50 @@ load 1m
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
# However native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
+ expect no_warn
{} {{schema:0 sum:0 count:0}}
# Zero buckets mean no observations, thus the denominator in the average is 0
# leading to 0/0, which is NaN.
eval instant at 5m histogram_avg(rate(const_histogram[5m]))
+ expect no_warn
{} NaN
# Zero buckets mean no observations, so count is 0.
eval instant at 5m histogram_count(rate(const_histogram[5m]))
+ expect no_warn
{} 0.0
# Zero buckets mean no observations and empty histogram has a sum of 0 by definition.
eval instant at 5m histogram_sum(rate(const_histogram[5m]))
+ expect no_warn
{} 0.0
# Zero buckets mean no observations, thus the denominator in the fraction is 0,
# leading to 0/0, which is NaN.
eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m]))
+ expect no_warn
{} NaN
# Workaround to calculate the observation count corresponding to NaN fraction.
eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m]))
+ expect no_warn
{} 0.0
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m]))
+ expect no_warn
{} NaN
# Zero buckets mean no observations, so there is no standard deviation.
eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
+ expect no_warn
{} NaN
# Zero buckets mean no observations, so there is no standard variance.
eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
+ expect no_warn
{} NaN
clear
@@ -1180,24 +1198,21 @@ eval range from 0 to 12m step 6m avg(metric)
clear
-# Test incompatible custom bucket schemas.
+# Test mismatched custom bucket boundaries.
load 6m
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
- metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+ metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}}
+ metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
-# T=0: incompatible, should be ignored and emit a warning
-# T=6: compatible
-# T=12: incompatible followed by compatible, should be ignored and emit a warning
eval range from 0 to 12m step 6m sum(metric)
- expect warn
- {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _
+ expect no_warn
+ {} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}} {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}}
eval range from 0 to 12m step 6m avg(metric)
- expect warn
- {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
+ expect no_warn
+ {} {{schema:-53 count:1 sum:1 custom_values:[10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}}
-# Test incompatible schemas with additional aggregation operators
+# Test mismatched boundaries with additional aggregation operators
eval range from 0 to 12m step 6m count(metric)
{} 2 2 3
@@ -1209,32 +1224,34 @@ eval range from 0 to 12m step 6m count(limitk(1, metric))
eval range from 0 to 12m step 6m limitk(3, metric)
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
- metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+ metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}}
+ metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
eval range from 0 to 12m step 6m limit_ratio(1, metric)
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
- metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+ metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}}
+ metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[2 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
-# Test incompatible schemas with and/or
+# Test mismatched schemas with and/or
eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"}
metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"}
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _
+ metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}} _ _
-# Test incompatible schemas with arithmetic binary operators
+# Test mismatched boundaries with arithmetic binary operators
eval range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"}
- expect warn
+ expect info msg:PromQL info: mismatched custom buckets were reconciled during addition
+ {} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}} _ {{schema:-53 count:2 sum:2 custom_values:[5] buckets:[2]}}
eval range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"}
- expect warn
+ expect info msg:PromQL info: mismatched custom buckets were reconciled during subtraction
+ {} {{schema:-53 custom_values:[10] counter_reset_hint:gauge}} _ {{schema:-53 custom_values:[5] counter_reset_hint:gauge}}
clear
-# Test incompatible schemas with comparison binary operators
+# Test mismatched boundaries with comparison binary operators
load 6m
metric1 {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
@@ -1253,16 +1270,20 @@ eval range from 0 to 6m step 6m metric2 > metric2
clear
load 6m
- nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+ nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# If evaluating at 12m, the first two NHCBs have the same custom values
# while the 3rd one has different ones.
eval instant at 12m sum_over_time(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during aggregation
+ {} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}}
eval instant at 12m avg_over_time(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during aggregation
+ {} {{schema:-53 count:1 sum:1 custom_values:[5] counter_reset_hint:gauge buckets:[1]}}
eval instant at 12m last_over_time(nhcb_metric[13m])
expect no_warn
@@ -1281,28 +1302,31 @@ eval instant at 12m changes(nhcb_metric[13m])
{} 1
eval instant at 12m delta(nhcb_metric[13m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a gauge: "nhcb_metric"
+ {} {{schema:-53 custom_values:[5]}}
eval instant at 12m increase(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ {} {{schema:-53 custom_values:[5]}}
eval instant at 12m rate(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ {} {{schema:-53 custom_values:[5] }}
eval instant at 12m resets(nhcb_metric[13m])
expect no_warn
- {} 1
+ {} 0
# Now doing the same again, but at 18m, where the first NHCB has
-# different custom_values compared to the other two. This now
-# works with no warning for increase() and rate(). No change
-# otherwise.
+# different custom_values compared to the other two.
eval instant at 18m sum_over_time(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ {} {{schema:-53 count:3 sum:3 custom_values:[5] buckets:[3]}}
eval instant at 18m avg_over_time(nhcb_metric[13m])
- expect warn
+ expect no_warn
+ {} {{schema:-53 count:1 sum:1 custom_values:[5] buckets:[1]}}
eval instant at 18m last_over_time(nhcb_metric[13m])
expect no_warn
@@ -1321,19 +1345,21 @@ eval instant at 18m changes(nhcb_metric[13m])
{} 1
eval instant at 18m delta(nhcb_metric[13m])
- expect warn
+ expect warn msg: PromQL warning: this native histogram metric is not a gauge: "nhcb_metric"
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction
+ {} {{schema:-53 custom_values:[5]}}
eval instant at 18m increase(nhcb_metric[13m])
expect no_warn
- {} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}}
+ {} {{schema:-53 custom_values:[5]}}
eval instant at 18m rate(nhcb_metric[13m])
expect no_warn
- {} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}}
+ {} {{schema:-53 custom_values:[5]}}
eval instant at 18m resets(nhcb_metric[13m])
expect no_warn
- {} 1
+ {} 0
clear
@@ -1348,14 +1374,15 @@ load 1m
metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
- metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
+ metric{group="mismatched-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
+ metric{group="mismatched-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[10] buckets:[1]}}
eval instant at 0 sum by (group) (metric)
expect warn
{group="just-floats"} 5
{group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}
+ {group="mismatched-custom-histograms"} {{schema:-53 count:2 sum:2 custom_values:[10] buckets:[2]}}
clear
@@ -1485,6 +1512,7 @@ load 1m
# Note that the 2nd bucket has an exaggerated increase of 2479.939393939394 (although
# it has a value of only 2475 at the end of the range).
eval instant at 55m increase(metric[90m])
+ expect no_warn
{type="histogram"} {{count:2490 sum:50.303030303030305 counter_reset_hint:gauge buckets:[10.06060606060606 2479.939393939394]}}
{type="counter"} 2490
@@ -1492,15 +1520,17 @@ eval instant at 55m increase(metric[90m])
# The 2nd bucket again has an exaggerated increase, but it is less obvious because of the
# right-side extrapolation.
eval instant at 54m30s increase(metric[90m])
+ expect no_warn
{type="histogram"} {{count:2512.9166666666665 sum:50.76599326599326 counter_reset_hint:gauge buckets:[10.153198653198652 2502.7634680134674]}}
{type="counter"} 2512.9166666666665
-
+
# End of range coincides with sample. Zero point of count is reached outside of (i.e. before) the range.
# This means no change of extrapolation is required for the histogram count (and neither for the float counter),
# however, the 2nd bucket's extrapolation will reach zero within the range. The overestimation is visible
# easily here because the last sample in the range coincides with the boundary, where the 2nd bucket has
# a value of 2475 but has increased by 2476.2045454545455 according to the returned result.
eval instant at 55m increase(metric[55m15s])
+ expect no_warn
{type="histogram"} {{count:2486.25 sum:50.227272727272734 counter_reset_hint:gauge buckets:[10.045454545454547 2476.2045454545455]}}
{type="counter"} 2486.25
@@ -1508,18 +1538,337 @@ eval instant at 55m increase(metric[55m15s])
# This means no change of extrapolation is required for the histogram count (and neither for the float counter),
# however, the 2nd bucket's extrapolation will reach zero within the range.
eval instant at 54m30s increase(metric[54m45s])
+ expect no_warn
{type="histogram"} {{count:2509.375 sum:50.69444444444444 counter_reset_hint:gauge buckets:[10.13888888888889 2499.236111111111]}}
{type="counter"} 2509.375
# Try the same, but now extract just the histogram count via `histogram_count`.
eval instant at 55m histogram_count(increase(metric[90m]))
+ expect no_warn
{type="histogram"} 2490
eval instant at 54m30s histogram_count(increase(metric[90m]))
+ expect no_warn
{type="histogram"} 2512.9166666666665
eval instant at 55m histogram_count(increase(metric[55m15s]))
+ expect no_warn
{type="histogram"} 2486.25
eval instant at 54m30s histogram_count(increase(metric[54m45s]))
+ expect no_warn
{type="histogram"} 2509.375
+
+clear
+
+# Test counter reset hint adjustment in subtraction and aggregation, including _over_time.
+load 5m
+ metric{id="1"} {{schema:0 sum:4 count:4 buckets:[1 2 1]}}x10
+ metric{id="2"} {{schema:0 sum:4 count:4 buckets:[1 2 1]}}x10
+
+# Unary minus turns counters into gauges.
+eval instant at 5m -metric
+ expect no_warn
+ expect no_info
+ {id="1"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}}
+ {id="2"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}}
+
+# Subtraction results in gauges, even if the result is not negative.
+eval instant at 5m metric - 0.5 * metric
+ expect no_warn
+ expect no_info
+ {id="1"} {{count:2 sum:2 counter_reset_hint:gauge buckets:[0.5 1 0.5]}}
+ {id="2"} {{count:2 sum:2 counter_reset_hint:gauge buckets:[0.5 1 0.5]}}
+
+# Subtraction results in gauges, now with an actually negative result.
+eval instant at 5m metric - 2 * metric
+ expect no_warn
+ expect no_info
+ {id="1"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}}
+ {id="2"} {{count:-4 sum:-4 counter_reset_hint:gauge buckets:[-1 -2 -1]}}
+
+# sum and avg of counters yield a counter.
+eval instant at 5m sum(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:8 sum:8 counter_reset_hint:not_reset buckets:[2 4 2]}}
+
+eval instant at 5m avg(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:4 sum:4 counter_reset_hint:not_reset buckets:[1 2 1]}}
+
+clear
+
+# Note that with all the series below, we never get counter_reset_hint:reset
+# as a result because of https://github.com/prometheus/prometheus/issues/15346.
+# Therefore, all the tests only look at the hints gauge, not_reset, and unknown.
+load 1m
+ metric{type="gauge"} {{sum:4 count:4 counter_reset_hint:gauge buckets:[1 2 1]}}+{{sum:2 count:3 counter_reset_hint:gauge buckets:[1 1 1]}}x10
+ metric{type="counter"} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x10
+ metric{type="counter_with_reset"} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5 {{sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5
+ mixed {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x4 {{sum:4 count:4 counter_reset_hint:gauge buckets:[1 2 1]}} {{sum:6 count:5 buckets:[2 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x4 {{sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:3 buckets:[1 1 1]}}x5
+
+# Mix of gauge and not_reset results in gauge.
+eval instant at 3m sum(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:41 sum:34 counter_reset_hint:gauge buckets:[14 15 12]}}
+
+eval instant at 3m avg(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:13.666666666666668 sum:11.333333333333334 counter_reset_hint:gauge buckets:[4.666666666666667 5 4]}}
+
+eval instant at 5m sum_over_time(mixed[3m])
+ expect no_warn
+ expect no_info
+ {} {{count:35 sum:30 counter_reset_hint:gauge buckets:[12 13 10]}}
+
+eval instant at 5m avg_over_time(mixed[3m])
+ expect no_warn
+ expect no_info
+ {} {{count:11.666666666666666 sum:10 counter_reset_hint:gauge buckets:[4 4.333333333333334 3.333333333333333]}}
+
+# Mix of gauge, not_reset, and unknown results in gauge.
+eval instant at 6m sum(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:49 sum:38 counter_reset_hint:gauge buckets:[16 18 15]}}
+
+eval instant at 6m avg(metric)
+ expect no_warn
+ expect no_info
+ {} {{count:16.333333333333332 sum:12.666666666666666 counter_reset_hint:gauge buckets:[5.333333333333334 6 5]}}
+
+eval instant at 14m sum_over_time(mixed[10m])
+ expect no_warn
+ expect no_info
+ {} {{count:93 sum:82 counter_reset_hint:gauge buckets:[31 36 26]}}
+
+eval instant at 14m avg_over_time(mixed[10m])
+ expect no_warn
+ expect no_info
+ {} {{count:9.3 sum:8.2 counter_reset_hint:gauge buckets:[3.1 3.6 2.6]}}
+
+# Only not_reset results in not_reset.
+eval instant at 3m sum(metric{type=~"counter.*"})
+ expect no_warn
+ expect no_info
+ {} {{count:28 sum:24 counter_reset_hint:not_reset buckets:[10 10 8]}}
+
+eval instant at 3m avg(metric{type=~"counter.*"})
+ expect no_warn
+ expect no_info
+ {} {{count:14 sum:12 counter_reset_hint:not_reset buckets:[5 5 4]}}
+
+eval instant at 3m sum_over_time(mixed[3m])
+ expect no_warn
+ expect no_info
+ {} {{count:33 sum:30 counter_reset_hint:not_reset buckets:[12 12 9]}}
+
+eval instant at 3m avg_over_time(mixed[3m])
+ expect no_warn
+ expect no_info
+ {} {{count:11 sum:10 counter_reset_hint:not_reset buckets:[4 4 3]}}
+
+# Mix of not_reset and unknown results in unknown.
+eval instant at 6m sum(metric{type=~"counter.*"})
+ expect no_warn
+ expect no_info
+ {} {{count:27 sum:22 counter_reset_hint:unknown buckets:[9 10 8]}}
+
+eval instant at 6m avg(metric{type=~"counter.*"})
+ expect no_warn
+ expect no_info
+ {} {{count:13.5 sum:11 counter_reset_hint:unknown buckets:[4.5 5 4]}}
+
+eval instant at 15m sum_over_time(mixed[10m])
+ expect no_warn
+ expect no_info
+ {} {{count:105 sum:90 counter_reset_hint:unknown buckets:[35 40 30]}}
+
+eval instant at 15m avg_over_time(mixed[10m])
+ expect no_warn
+ expect no_info
+ {} {{count:10.5 sum:9 counter_reset_hint:unknown buckets:[3.5 4 3]}}
+
+# To finally test the warning about direct counter reset collisions, we can
+# utilize the HistogramStatsIterator (by calling histogram_count()). This
+# special iterator does counter reset detection on the fly and therefore
+# is able to create the counter reset hint "reset", which we can then mix
+# with the "not_reset" hint in the test and provoke the warning.
+eval instant at 6m histogram_count(sum(metric))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 49
+
+eval instant at 6m histogram_count(avg(metric))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 16.333333333333332
+
+eval instant at 14m histogram_count(sum_over_time(mixed[10m]))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 93
+
+eval instant at 14m histogram_count(avg_over_time(mixed[10m]))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 9.3
+
+# In the following two tests, the first sample has hint "not_reset"
+# and the second has "reset". This tests if the conflict is detected
+# between the first two samples, too.
+eval instant at 11m histogram_count(sum_over_time(mixed[2m]))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 21
+
+eval instant at 11m histogram_count(avg_over_time(mixed[2m]))
+ expect warn msg:PromQL warning: conflicting counter resets during histogram aggregation
+ expect no_info
+ {} 10.5
+
+# Test histogram_quantile annotations.
+load 1m
+ nonmonotonic_bucket{le="0.1"} 0+2x10
+ nonmonotonic_bucket{le="1"} 0+1x10
+ nonmonotonic_bucket{le="10"} 0+5x10
+ nonmonotonic_bucket{le="100"} 0+4x10
+ nonmonotonic_bucket{le="1000"} 0+9x10
+ nonmonotonic_bucket{le="+Inf"} 0+8x10
+ myHistogram1{abe="0.1"} 0+2x10
+ myHistogram2{le="Hello World"} 0+2x10
+ mixedHistogram{le="0.1"} 0+2x10
+ mixedHistogram{le="1"} 0+3x10
+ mixedHistogram{} {{schema:0 count:10 sum:50 buckets:[1 2 3]}}
+
+eval instant at 1m histogram_quantile(0.5, nonmonotonic_bucket)
+ expect info msg: PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "nonmonotonic_bucket"
+ {} 8.5
+
+eval instant at 1m histogram_quantile(0.5, myHistogram1)
+ expect warn msg: PromQL warning: bucket label "le" is missing or has a malformed value of "" for metric name "myHistogram1"
+
+eval instant at 1m histogram_quantile(0.5, myHistogram2)
+ expect warn msg: PromQL warning: bucket label "le" is missing or has a malformed value of "Hello World" for metric name "myHistogram2"
+
+eval instant at 1m histogram_quantile(0.5, mixedHistogram)
+ expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram"
+
+clear
+
+# A counter reset that happens only in a bucket. Sub-queries still need
+# to detect it via explicit counter reset detection. This test also runs
+# it with histogram_count in the expression to make sure that the
+# HistogramStatsIterator is not used. (The latter fails to detect counter
+# resets correctly because Seek is used with sub-queries. And the explicit
+# counter reset detection done with sub-queries can no longer access the
+# buckets if the HistogramStatsIterator is used.)
+load 1m
+ h{} {{schema:0 count:1 sum:10 buckets:[1]}}+{{}}x20 {{schema:0 count:1 sum:10 buckets:[0 1]}}+{{}}x20
+
+# Both evals below should yield the same value for the count.
+eval instant at 41m histogram_count(increase(h[40m:9m]))
+ {} 1.4814814814814814
+
+eval instant at 41m increase(h[40m:9m])
+ {} {{count:1.4814814814814814 sum:14.814814814814813 counter_reset_hint:gauge offset:1 buckets:[1.4814814814814814]}}
+
+clear
+
+load 1m
+ reset{timing="late"} {{schema:0 sum:1 count:0 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:3 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}}
+ reset{timing="early"} {{schema:0 sum:1 count:3 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:2 buckets:[1 1 1]}} {{schema:0 sum:1 count:3 buckets:[1 1 1]}}
+
+# Trigger an annotation about conflicting counter resets by going through the
+# HistogramStatsIterator, which creates counter reset hints on the fly.
+eval instant at 5m histogram_count(sum_over_time(reset{timing="late"}[5m]))
+ expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation
+ {timing="late"} 7
+
+eval instant at 5m histogram_count(sum(reset))
+ expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation
+ {} 5
+
+eval instant at 5m histogram_count(avg(reset))
+ expect warn msg: PromQL warning: conflicting counter resets during histogram aggregation
+ {} 2.5
+
+# No annotation with the right timing.
+eval instant at 30s histogram_count(sum(reset))
+ expect no_warn
+ {} 3
+
+eval instant at 30s histogram_count(avg(reset))
+ expect no_warn
+ {} 1.5
+
+# Ensure that the annotation does not happen with rate.
+eval instant at 5m histogram_count(rate(reset{timing="late"}[5m]))
+ expect no_warn
+ {timing="late"} 0.0175
+
+clear
+
+# Test edge cases of HistogramStatsIterator detection.
+# We access the same series multiple times within the same expression,
+# once with and once without HistogramStatsIterator. The results here
+# at least prove that we do not use HistogramStatsIterator where we
+# should not.
+load 1m
+ histogram {{schema:0 count:10 sum:50 counter_reset_hint:gauge buckets:[1 2 3 4]}}
+
+eval instant at 1m histogram_count(histogram unless histogram_quantile(0.5, histogram) < 3)
+ {} 10
+
+eval instant at 1m histogram_quantile(0.5, histogram unless histogram_count(histogram) == 0)
+ {} 3.1748021039363987
+
+clear
+
+# Regression test for:
+# https://github.com/prometheus/prometheus/issues/14172
+# https://github.com/prometheus/prometheus/issues/15177
+load 1m
+ mixed_metric1 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}}
+ mixed_metric2 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}}
+
+# The order of float samples vs. native histograms is preserved.
+eval range from 0 to 8m step 1m mixed_metric1
+ mixed_metric1{} 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} 4 5 {{schema:0 sum:18 count:10 buckets:[3 4 3]}} {{schema:0 sum:18 count:10 buckets:[3 4 3]}}
+
+eval range from 0 to 5m step 1m mixed_metric2
+ mixed_metric2 1 2 3 {{count:4 sum:5 buckets:[1 2 1]}} {{count:6 sum:8 buckets:[1 4 1]}} {{count:6 sum:8 buckets:[1 4 1]}}
+
+clear
+
+# Test native histograms with custom buckets, reconciling mismatched bounds.
+load 1m
+ nhcb_add_buckets {{schema:-53 sum:55 count:15 custom_values:[2 4 6] buckets:[1 2 5 7]}} {{schema:-53 sum:555 count:450 custom_values:[1 2 3 4 5 6 7 8] buckets:[10 20 30 40 50 60 70 80 90]}}
+
+eval instant at 1m irate(nhcb_add_buckets[2m]) * 60
+ expect no_warn
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction
+ {} {{schema:-53 sum:500 count:435 custom_values:[2 4 6] buckets:[29 68 105 233]}}
+
+load 1m
+ nhcb_remove_buckets {{schema:-53 sum:55 count:45 custom_values:[1 2 3 4 5 6 7 8] buckets:[1 2 3 4 5 6 7 8 9]}} {{schema:-53 sum:5560 count:1000 custom_values:[3 5 7] buckets:[100 200 300 400]}}
+
+eval instant at 1m irate(nhcb_remove_buckets[2m]) * 60
+ expect no_warn
+ expect info msg: PromQL info: mismatched custom buckets were reconciled during subtraction
+ {} {{schema:-53 sum:5505 count:955 custom_values:[3 5 7] buckets:[94 191 287 383]}}
+
+clear
+
+# Test native histograms with custom buckets, reconciling mismatched bounds, with a counter reset in one bucket.
+load 1m
+ nhcb_add_bucket {{schema:-53 sum:55 count:15 custom_values:[2 4 6] buckets:[1 2 5 7]}} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8]}}
+
+eval instant at 1m irate(nhcb_add_bucket[2m]) * 60
+ expect no_warn
+ expect no_info
+ {} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}}
diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test
index 3bfe2ce4cb3..35a2f1b27e9 100644
--- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test
+++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test
@@ -71,3 +71,37 @@ eval range from 0 to 2m step 1m requests * 2
{job="1", __address__="bar"} 200 200 200
clear
+
+load 10s
+ some_metric{env="a"} 1+1x5
+ some_metric{env="b"} 2+2x5
+
+# Return a range vector. Note the use of the "expect range vector" directive, which defines the expected range.
+eval instant at 1m some_metric[1m]
+ expect range vector from 10s to 1m step 10s
+ some_metric{env="a"} 2 3 4 5 6
+ some_metric{env="b"} 4 6 8 10 12
+
+clear
+
+load 1m
+ some_metric{env="1"} 0+1x4
+ some_metric{env="2"} 0+2x4
+ some_metric{env="3"} {{count:0}}+{{count:1}}x4
+ some_metric_with_stale_marker 0 1 stale 3
+
+eval instant at 2m some_metric[2m]
+ expect range vector from 1m to 2m step 60s
+ some_metric{env="1"} 1 2
+ some_metric{env="2"} 2 4
+ some_metric{env="3"} {{count:1 counter_reset_hint:not_reset}} {{count:2 counter_reset_hint:not_reset}}
+
+eval instant at 3m some_metric_with_stale_marker[3m]
+ expect range vector from 1m to 3m step 60s
+ some_metric_with_stale_marker{} 1 _ 3
+
+eval instant at 1m some_nonexistent_metric[1m]
+ expect range vector from 10s to 1m step 10s
+
+eval instant at 10m some_metric[1m]
+ expect range vector from 9m10s to 10m step 1m
\ No newline at end of file
diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go
index 14549741078..78df925c519 100644
--- a/vendor/github.com/prometheus/prometheus/promql/quantile.go
+++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go
@@ -406,6 +406,18 @@ func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram, metric
// consistent with the linear interpolation known from classic
// histograms. It is also used for the zero bucket.
interpolateLinearly := func(v float64) float64 {
+ // Note: `v` is a finite value.
+ // For buckets with infinite bounds, we cannot interpolate meaningfully.
+ // For +Inf upper bound, interpolation returns the cumulative count of the previous bucket
+ // as the second term in the interpolation formula yields 0 (finite/Inf).
+ // In other words, no observations from the last bucket are considered in the fraction calculation.
+ // For -Inf lower bound, however, the second term would be (v-(-Inf))/(b.Upper-(-Inf)) = Inf/Inf = NaN.
+ // To achieve the same effect of no contribution as the +Inf bucket, handle the -Inf case by returning
+ // the cumulative count at the first bucket (which equals the bucket's count).
+ // In both cases, we effectively skip interpolation within the infinite-width bucket.
+ if b.Lower == math.Inf(-1) {
+ return b.Count
+ }
return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower)
}
@@ -531,14 +543,34 @@ func BucketFraction(lower, upper float64, buckets Buckets) float64 {
rank, lowerRank, upperRank float64
lowerSet, upperSet bool
)
+
+ // If the upper bound of the first bucket is greater than 0, we assume
+ // we are dealing with positive buckets only and lowerBound for the
+ // first bucket is set to 0; otherwise it is set to -Inf.
+ lowerBound := 0.0
+ if buckets[0].UpperBound <= 0 {
+ lowerBound = math.Inf(-1)
+ }
+
for i, b := range buckets {
- lowerBound := math.Inf(-1)
if i > 0 {
lowerBound = buckets[i-1].UpperBound
}
upperBound := b.UpperBound
interpolateLinearly := func(v float64) float64 {
+ // Note: `v` is a finite value.
+ // For buckets with infinite bounds, we cannot interpolate meaningfully.
+ // For +Inf upper bound, interpolation returns the cumulative count of the previous bucket
+ // as the second term in the interpolation formula yields 0 (finite/Inf).
+ // In other words, no observations from the last bucket are considered in the fraction calculation.
+ // For -Inf lower bound, however, the second term would be (v-(-Inf))/(upperBound-(-Inf)) = Inf/Inf = NaN.
+ // To achieve the same effect of no contribution as the +Inf bucket, handle the -Inf case by returning
+ // the cumulative count at the first bucket.
+ // In both cases, we effectively skip interpolation within the infinite-width bucket.
+ if lowerBound == math.Inf(-1) {
+ return b.Count
+ }
return rank + (b.Count-rank)*(v-lowerBound)/(upperBound-lowerBound)
}
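To make the edge-case handling in the two hunks above concrete, here is a standalone sketch (a hypothetical `interpolate` helper, not part of this diff) of the interpolation rule: within a finite bucket the cumulative count grows linearly, a +Inf upper bound contributes nothing because the second term collapses to zero, and a -Inf lower bound short-circuits to the bucket's cumulative count.

```go
package main

import (
	"fmt"
	"math"
)

// interpolate mirrors the first hunk's formula: rank is the cumulative
// count before the bucket, count is the bucket's own count, and v is a
// finite value inside (lower, upper].
func interpolate(v, lower, upper, count, rank float64) float64 {
	if math.IsInf(lower, -1) {
		// Infinite-width bucket: skip interpolation and report the
		// cumulative count at the bucket as a whole.
		return count
	}
	// For upper == +Inf the second term is finite/Inf == 0, so the
	// result degrades gracefully to rank.
	return rank + count*(v-lower)/(upper-lower)
}

func main() {
	fmt.Println(interpolate(1.5, 1, 2, 10, 5))            // 10: halfway through a finite bucket
	fmt.Println(interpolate(3, math.Inf(-1), 4, 7, 0))    // 7: -Inf lower bound short-circuits
	fmt.Println(interpolate(100, 10, math.Inf(1), 3, 20)) // 20: +Inf upper bound contributes nothing
}
```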
diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go
index c0a70b66d77..5923223aa0d 100644
--- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go
+++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go
@@ -195,7 +195,7 @@ func newJSONEntry(query string, logger *slog.Logger) []byte {
}
func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) {
- for i := 0; i < maxConcurrent; i++ {
+ for i := range maxConcurrent {
tracker.getNextIndex <- 1 + (i * entrySize)
}
}
diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go
index 2e387117e51..b909085b177 100644
--- a/vendor/github.com/prometheus/prometheus/promql/value.go
+++ b/vendor/github.com/prometheus/prometheus/promql/value.go
@@ -45,7 +45,7 @@ func (s String) String() string {
}
func (s String) MarshalJSON() ([]byte, error) {
- return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V})
+ return json.Marshal([...]any{float64(s.T) / 1000, s.V})
}
// Scalar is a data point that's explicitly not associated with a metric.
@@ -61,7 +61,7 @@ func (s Scalar) String() string {
func (s Scalar) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(s.V, 'f', -1, 64)
- return json.Marshal([...]interface{}{float64(s.T) / 1000, v})
+ return json.Marshal([...]any{float64(s.T) / 1000, v})
}
// Series is a stream of data points belonging to a metric.
@@ -111,7 +111,7 @@ func (p FPoint) String() string {
// timestamp.
func (p FPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.F, 'f', -1, 64)
- return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
+ return json.Marshal([...]any{float64(p.T) / 1000, v})
}
// HPoint represents a single histogram data point for a given timestamp.
@@ -136,9 +136,9 @@ func (p HPoint) String() string {
// timestamp.
func (p HPoint) MarshalJSON() ([]byte, error) {
h := struct {
- Count string `json:"count"`
- Sum string `json:"sum"`
- Buckets [][]interface{} `json:"buckets,omitempty"`
+ Count string `json:"count"`
+ Sum string `json:"sum"`
+ Buckets [][]any `json:"buckets,omitempty"`
}{
Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64),
Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64),
@@ -161,7 +161,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
boundaries = 0 // Inclusive only on upper end AKA left open.
}
}
- bucketToMarshal := []interface{}{
+ bucketToMarshal := []any{
boundaries,
strconv.FormatFloat(bucket.Lower, 'f', -1, 64),
strconv.FormatFloat(bucket.Upper, 'f', -1, 64),
@@ -169,7 +169,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
}
h.Buckets = append(h.Buckets, bucketToMarshal)
}
- return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
+ return json.Marshal([...]any{float64(p.T) / 1000, h})
}
// size returns the size of the HPoint compared to the size of an FPoint.
@@ -471,7 +471,7 @@ func (ssi *storageSeriesIterator) At() (t int64, v float64) {
return ssi.currT, ssi.currF
}
-func (ssi *storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+func (*storageSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
panic(errors.New("storageSeriesIterator: AtHistogram not supported"))
}
@@ -535,7 +535,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
}
}
-func (ssi *storageSeriesIterator) Err() error {
+func (*storageSeriesIterator) Err() error {
return nil
}
diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go
index 77d53395e09..b0151d7cb38 100644
--- a/vendor/github.com/prometheus/prometheus/rules/alerting.go
+++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go
@@ -25,7 +25,7 @@ import (
"github.com/prometheus/common/model"
"go.uber.org/atomic"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
@@ -50,8 +50,10 @@ const (
type AlertState int
const (
+ // StateUnknown is the state of an alert that has not yet been evaluated.
+ StateUnknown AlertState = iota
// StateInactive is the state of an alert that is neither firing nor pending.
- StateInactive AlertState = iota
+ StateInactive
// StatePending is the state of an alert that has been active for less than
// the configured threshold duration.
StatePending
@@ -62,6 +64,8 @@ const (
func (s AlertState) String() string {
switch s {
+ case StateUnknown:
+ return "unknown"
case StateInactive:
return "inactive"
case StatePending:
@@ -530,10 +534,14 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
}
// State returns the maximum state of alert instances for this rule.
-// StateFiring > StatePending > StateInactive.
+// StateFiring > StatePending > StateInactive > StateUnknown.
func (r *AlertingRule) State() AlertState {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
+ // Check whether the rule has been evaluated yet.
+ if r.evaluationTimestamp.Load().IsZero() {
+ return StateUnknown
+ }
maxState := StateInactive
for _, a := range r.active {
@@ -596,10 +604,7 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay
if alert.needsSending(ts, resendDelay) {
alert.LastSentAt = ts
// Allow for two Eval or Alertmanager send failures.
- delta := resendDelay
- if interval > resendDelay {
- delta = interval
- }
+ delta := max(interval, resendDelay)
alert.ValidUntil = ts.Add(4 * delta)
anew := *alert
// The notifier re-uses the labels slice, hence make a copy.
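A tiny worked sketch of the simplified resend rule above: `ValidUntil` is four times the larger of the evaluation interval and the configured resend delay past the send time, which tolerates two missed Evals or Alertmanager send failures. Times and values here are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	interval := time.Minute         // rule group evaluation interval
	resendDelay := 30 * time.Second // configured resend delay
	delta := max(interval, resendDelay) // the new one-liner: 1m
	fmt.Println(ts.Add(4 * delta))      // 2024-01-01 00:04:00 +0000 UTC
}
```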
diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go
index ed727ff9838..8cedcd40d16 100644
--- a/vendor/github.com/prometheus/prometheus/rules/group.go
+++ b/vendor/github.com/prometheus/prometheus/rules/group.go
@@ -17,6 +17,7 @@ import (
"context"
"errors"
"log/slog"
+ "maps"
"math"
"slices"
"strings"
@@ -215,7 +216,7 @@ func (g *Group) run(ctx context.Context) {
return
}
- ctx = promql.NewOriginContext(ctx, map[string]interface{}{
+ ctx = promql.NewOriginContext(ctx, map[string]any{
"ruleGroup": map[string]string{
"file": g.File(),
"name": g.Name(),
@@ -482,9 +483,7 @@ func (g *Group) CopyState(from *Group) {
continue
}
- for fp, a := range far.active {
- ar.active[fp] = a
- }
+ maps.Copy(ar.active, far.active)
}
// Handle deleted and unmatched duplicate rules.
@@ -790,7 +789,7 @@ func (g *Group) RestoreForState(ts time.Time) {
// While not technically the same number of series we expect, it's as good of an approximation as any.
seriesByLabels := make(map[string]storage.Series, alertRule.ActiveAlertsCount())
for sset.Next() {
- seriesByLabels[sset.At().Labels().DropMetricName().String()] = sset.At()
+ seriesByLabels[sset.At().Labels().DropReserved(func(n string) bool { return n == labels.MetricName }).String()] = sset.At()
}
// No results for this alert rule.
@@ -1092,13 +1091,12 @@ func (m dependencyMap) isIndependent(r Rule) bool {
// buildDependencyMap builds a data-structure which contains the relationships between rules within a group.
//
-// Alert rules, by definition, cannot have any dependents - but they can have dependencies. Any recording rule on whose
-// output an Alert rule depends will not be able to run concurrently.
+// Both alerting and recording rules can have dependents and dependencies. An alerting rule can have dependents
+// if another rule within the group queries the ALERTS or ALERTS_FOR_STATE meta-metrics.
//
// There is a class of rule expressions which are considered "indeterminate", because either relationships cannot be
// inferred, or concurrent evaluation of rules depending on these series would produce undefined/unexpected behaviour:
// - wildcard queriers like {cluster="prod1"} which would match every series with that label selector
-// - any "meta" series (series produced by Prometheus itself) like ALERTS, ALERTS_FOR_STATE
//
// Rules which are independent can run concurrently with no side-effects.
func buildDependencyMap(rules []Rule) dependencyMap {
@@ -1138,22 +1136,43 @@ func buildDependencyMap(rules []Rule) dependencyMap {
return nil
}
- // Rules which depend on "meta-metrics" like ALERTS and ALERTS_FOR_STATE will have undefined behaviour
- // if they run concurrently.
- if nameMatcher.Matches(alertMetricName) || nameMatcher.Matches(alertForStateMetricName) {
- indeterminate = true
- return nil
+ // Check if the vector selector is querying "meta-metrics" like ALERTS and ALERTS_FOR_STATE and, if so,
+ // find out the "alertname" label matcher (it could be missing).
+ nameMatchesAlerts := nameMatcher.Matches(alertMetricName) || nameMatcher.Matches(alertForStateMetricName)
+ var alertsNameMatcher *labels.Matcher
+ if nameMatchesAlerts {
+ for _, m := range n.LabelMatchers {
+ if m.Name == labels.AlertName {
+ alertsNameMatcher = m
+ break
+ }
+ }
}
- // Find rules which depend on the output of this rule.
+ // Find the other rules that this rule depends on.
for _, other := range rules {
+ // Rules are defined in order in a rule group. Once we find our rule we can stop searching,
+ // because by specification subsequent rules can't be considered dependencies of this rule,
+ // given they are defined later in the group. Subsequent rules can still query this rule,
+ // but they're just not strict dependencies to honor.
if other == rule {
- continue
+ break
}
otherName := other.Name()
+
+ // If this rule vector selector matches the other rule name, then it's a dependency.
if nameMatcher.Matches(otherName) {
dependencies[other] = append(dependencies[other], rule)
+ continue
+ }
+
+ // If this rule vector selector is querying the alerts meta-metrics and the other rule
+ // is an alerting rule, then we check if the "alertname" matches. If it does, then it's a dependency.
+ if _, otherIsAlertingRule := other.(*AlertingRule); nameMatchesAlerts && otherIsAlertingRule {
+ if alertsNameMatcher == nil || alertsNameMatcher.Matches(otherName) {
+ dependencies[other] = append(dependencies[other], rule)
+ }
}
}
}
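The new dependency edge is easiest to see in a rule group. In this hypothetical example (all names invented), the recording rule queries `ALERTS` with a matching `alertname`, so it now becomes a dependent of the alerting rule defined earlier in the same group, and the two will not be evaluated concurrently:

```yaml
groups:
  - name: example
    rules:
      - alert: HighErrorRate
        expr: job:errors:rate5m > 0.05
      - record: alerts:high_error_rate:firing
        # Depends on HighErrorRate via the ALERTS meta-metric.
        expr: count(ALERTS{alertname="HighErrorRate", alertstate="firing"})
```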
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go
index 7cbe3ce15ab..d2fb0a77974 100644
--- a/vendor/github.com/prometheus/prometheus/rules/manager.go
+++ b/vendor/github.com/prometheus/prometheus/rules/manager.go
@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"log/slog"
+ maps0 "maps"
"net/url"
"path/filepath"
"slices"
@@ -26,6 +27,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"golang.org/x/sync/semaphore"
@@ -107,6 +109,7 @@ type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
+ NameValidationScheme model.ValidationScheme
ExternalURL *url.URL
QueryFunc QueryFunc
NotifyFunc NotifyFunc
@@ -135,6 +138,17 @@ type ManagerOptions struct {
// NewManager returns an implementation of Manager, ready to be started
// by calling the Run method.
func NewManager(o *ManagerOptions) *Manager {
+ switch o.NameValidationScheme {
+ case model.UTF8Validation, model.LegacyValidation:
+ case model.UnsetValidation:
+ o.NameValidationScheme = model.UTF8Validation
+ default:
+ panic(fmt.Errorf("unrecognized name validation scheme: %s", o.NameValidationScheme))
+ }
+ if o.Context == nil {
+ o.Context = context.Background()
+ }
+
if o.Metrics == nil {
o.Metrics = NewGroupMetrics(o.Registerer)
}
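A hedged usage sketch of the new defaulting behaviour (construction only; the remaining options are elided): leaving `NameValidationScheme` unset now falls back to UTF-8 validation, while an unrecognized value panics at startup.

```go
package main

import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/rules"
)

func main() {
	// Explicitly pin the legacy scheme; omit the field (UnsetValidation)
	// to get model.UTF8Validation by default.
	_ = rules.NewManager(&rules.ManagerOptions{
		NameValidationScheme: model.LegacyValidation,
	})
}
```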
@@ -289,7 +303,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
type GroupLoader interface {
- Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error)
+ Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error)
Parse(query string) (parser.Expr, error)
}
@@ -297,8 +311,8 @@ type GroupLoader interface {
// and parser.ParseExpr.
type FileLoader struct{}
-func (FileLoader) Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) {
- return rulefmt.ParseFile(identifier, ignoreUnknownFields)
+func (FileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) {
+ return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme)
}
func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
@@ -312,7 +326,7 @@ func (m *Manager) LoadGroups(
shouldRestore := !m.restored || m.restoreNewRuleGroups
for _, fn := range filenames {
- rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields)
+ rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields, m.opts.NameValidationScheme)
if errs != nil {
return nil, errs
}
@@ -465,7 +479,7 @@ type RuleDependencyController interface {
type ruleDependencyController struct{}
// AnalyseRules implements RuleDependencyController.
-func (c ruleDependencyController) AnalyseRules(rules []Rule) {
+func (ruleDependencyController) AnalyseRules(rules []Rule) {
depMap := buildDependencyMap(rules)
if depMap == nil {
@@ -509,11 +523,11 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle
}
}
-func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
+func (c *concurrentRuleEvalController) Allow(context.Context, *Group, Rule) bool {
return c.sema.TryAcquire(1)
}
-func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
+func (*concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
// Using the rule dependency controller information (rules being identified as having no dependencies or no dependants),
// we can safely run the following concurrent groups:
// 1. Concurrently, all rules that have no dependencies
@@ -549,7 +563,7 @@ func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context,
return order
}
-func (c *concurrentRuleEvalController) Done(_ context.Context) {
+func (c *concurrentRuleEvalController) Done(context.Context) {
c.sema.Release(1)
}
@@ -558,31 +572,29 @@ var _ RuleConcurrencyController = &sequentialRuleEvalController{}
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}
-func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
+func (sequentialRuleEvalController) Allow(context.Context, *Group, Rule) bool {
return false
}
-func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, _ *Group) []ConcurrentRules {
+func (sequentialRuleEvalController) SplitGroupIntoBatches(context.Context, *Group) []ConcurrentRules {
return nil
}
-func (c sequentialRuleEvalController) Done(_ context.Context) {}
+func (sequentialRuleEvalController) Done(context.Context) {}
// FromMaps returns new sorted Labels from the given maps, overriding each other in order.
func FromMaps(maps ...map[string]string) labels.Labels {
mLables := make(map[string]string)
for _, m := range maps {
- for k, v := range m {
- mLables[k] = v
- }
+ maps0.Copy(mLables, m)
}
return labels.FromMap(mLables)
}
// ParseFiles parses the rule files corresponding to glob patterns.
-func ParseFiles(patterns []string) error {
+func ParseFiles(patterns []string, nameValidationScheme model.ValidationScheme) error {
files := map[string]string{}
for _, pat := range patterns {
fns, err := filepath.Glob(pat)
@@ -602,7 +614,7 @@ func ParseFiles(patterns []string) error {
}
}
for fn, pat := range files {
- _, errs := rulefmt.ParseFile(fn, false)
+ _, errs := rulefmt.ParseFile(fn, false, nameValidationScheme)
if len(errs) > 0 {
return fmt.Errorf("parse rules from file %q (pattern: %q): %w", fn, pat, errors.Join(errs...))
}
diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go
index 3b6db210af1..2da6885f5b9 100644
--- a/vendor/github.com/prometheus/prometheus/rules/recording.go
+++ b/vendor/github.com/prometheus/prometheus/rules/recording.go
@@ -22,7 +22,7 @@ import (
"time"
"go.uber.org/atomic"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
diff --git a/vendor/github.com/prometheus/prometheus/schema/labels.go b/vendor/github.com/prometheus/prometheus/schema/labels.go
index c68121322b8..6df74451711 100644
--- a/vendor/github.com/prometheus/prometheus/schema/labels.go
+++ b/vendor/github.com/prometheus/prometheus/schema/labels.go
@@ -139,18 +139,22 @@ func (m Metadata) SetToLabels(b *labels.Builder) {
b.Set(metricUnit, m.Unit)
}
-// IgnoreOverriddenMetadataLabelsScratchBuilder is a wrapper over labels scratch builder
+// NewIgnoreOverriddenMetadataLabelScratchBuilder creates IgnoreOverriddenMetadataLabelScratchBuilder.
+func (m Metadata) NewIgnoreOverriddenMetadataLabelScratchBuilder(b *labels.ScratchBuilder) *IgnoreOverriddenMetadataLabelScratchBuilder {
+ return &IgnoreOverriddenMetadataLabelScratchBuilder{ScratchBuilder: b, overwrite: m}
+}
+
+// IgnoreOverriddenMetadataLabelScratchBuilder is a wrapper over labels.ScratchBuilder
// that ignores label additions that would collide with non-empty Overwrite Metadata fields.
-type IgnoreOverriddenMetadataLabelsScratchBuilder struct {
+type IgnoreOverriddenMetadataLabelScratchBuilder struct {
*labels.ScratchBuilder
-
- Overwrite Metadata
+ overwrite Metadata
}
// Add a name/value pair, unless it would collide with the non-empty Overwrite Metadata
// field. Note if you Add the same name twice you will get a duplicate label, which is invalid.
-func (b IgnoreOverriddenMetadataLabelsScratchBuilder) Add(name, value string) {
- if !b.Overwrite.IsEmptyFor(name) {
+func (b IgnoreOverriddenMetadataLabelScratchBuilder) Add(name, value string) {
+ if !b.overwrite.IsEmptyFor(name) {
return
}
b.ScratchBuilder.Add(name, value)
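A self-contained sketch of the wrapper pattern above, with hypothetical stand-in types (`metadata`, `builder`): `Add` is intercepted and becomes a no-op whenever the metadata already pins a value for that label name.

```go
package main

import "fmt"

// metadata stands in for the Overwrite metadata: here it can only pin
// the (assumed) "__unit__" label.
type metadata struct{ unit string }

func (m metadata) isEmptyFor(name string) bool {
	return name != "__unit__" || m.unit == ""
}

type builder struct{ pairs [][2]string }

func (b *builder) Add(name, value string) { b.pairs = append(b.pairs, [2]string{name, value}) }

type ignoreOverridden struct {
	*builder
	overwrite metadata
}

func (b ignoreOverridden) Add(name, value string) {
	if !b.overwrite.isEmptyFor(name) {
		return // would collide with pinned metadata; ignore
	}
	b.builder.Add(name, value)
}

func main() {
	b := ignoreOverridden{builder: &builder{}, overwrite: metadata{unit: "seconds"}}
	b.Add("__unit__", "bytes") // dropped: unit already pinned
	b.Add("job", "api")        // kept
	fmt.Println(b.pairs)       // [[job api]]
}
```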
diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go
index c2da4558588..7389f24b523 100644
--- a/vendor/github.com/prometheus/prometheus/scrape/manager.go
+++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go
@@ -62,7 +62,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
graceShut: make(chan struct{}),
triggerReload: make(chan struct{}, 1),
metrics: sm,
- buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }),
+ buffers: pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
}
m.metrics.setTargetMetadataCacheGatherer(m)
@@ -86,8 +86,6 @@ type Options struct {
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool
- // Option to enable the ingestion of native histograms.
- EnableNativeHistogramsIngestion bool
// EnableTypeAndUnitLabels
EnableTypeAndUnitLabels bool
@@ -184,11 +182,6 @@ func (m *Manager) reload() {
m.logger.Error("error reloading target set", "err", "invalid config id:"+setName)
continue
}
- if scrapeConfig.ConvertClassicHistogramsToNHCBEnabled() && m.opts.EnableCreatedTimestampZeroIngestion {
- // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137
- m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137")
- continue
- }
m.metrics.targetScrapePools.Inc()
sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
if err != nil {
@@ -404,3 +397,34 @@ func (m *Manager) TargetsDroppedCounts() map[string]int {
}
return counts
}
+
+func (m *Manager) ScrapePoolConfig(scrapePool string) (*config.ScrapeConfig, error) {
+ m.mtxScrape.Lock()
+ defer m.mtxScrape.Unlock()
+
+ sp, ok := m.scrapePools[scrapePool]
+ if !ok {
+ return nil, fmt.Errorf("scrape pool %q not found", scrapePool)
+ }
+
+ return sp.config, nil
+}
+
+// DisableEndOfRunStalenessMarkers disables the end-of-run staleness markers for the provided targets in the given
+// targetSet. When end-of-run staleness markers are disabled for a target, no staleness markers will be written for
+// its series when the target goes away.
+func (m *Manager) DisableEndOfRunStalenessMarkers(targetSet string, targets []*Target) {
+ // This avoids mutex lock contention.
+ if len(targets) == 0 {
+ return
+ }
+
+ // Only hold the lock while looking up the scrape pool.
+ m.mtxScrape.Lock()
+ sp, ok := m.scrapePools[targetSet]
+ m.mtxScrape.Unlock()
+
+ if ok {
+ sp.disableEndOfRunStalenessMarkers(targets)
+ }
+}
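A minimal sketch of the lock-scoping pattern used above: take the mutex only to resolve the shared map entry, then operate on the entry outside the critical section (types here are hypothetical stand-ins).

```go
package main

import (
	"fmt"
	"sync"
)

type pool struct{ name string }

func (p *pool) disable() { fmt.Println("disabled markers for", p.name) }

type manager struct {
	mtx   sync.Mutex
	pools map[string]*pool
}

func (m *manager) disableFor(set string) {
	m.mtx.Lock()
	p, ok := m.pools[set]
	m.mtx.Unlock() // release before the potentially slow call
	if ok {
		p.disable()
	}
}

func main() {
	m := &manager{pools: map[string]*pool{"prod": {name: "prod"}}}
	m.disableFor("prod")
}
```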
diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go
index 84e00af600b..09652d04849 100644
--- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go
+++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go
@@ -41,6 +41,7 @@ import (
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
+ "go.uber.org/atomic"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -54,6 +55,7 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/logging"
+ "github.com/prometheus/prometheus/util/namevalidationutil"
"github.com/prometheus/prometheus/util/pool"
)
@@ -130,6 +132,7 @@ type scrapeLoopOptions struct {
trackTimestampsStaleness bool
interval time.Duration
timeout time.Duration
+ scrapeNativeHist bool
alwaysScrapeClassicHist bool
convertClassicHistToNHCB bool
fallbackScrapeProtocol string
@@ -154,6 +157,9 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
return nil, err
}
+ if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil {
+ return nil, errors.New("newScrapePool: MetricNameValidationScheme must be set in scrape configuration")
+ }
var escapingScheme model.EscapingScheme
escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme)
if err != nil {
@@ -208,7 +214,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.timeout,
opts.alwaysScrapeClassicHist,
opts.convertClassicHistToNHCB,
- options.EnableNativeHistogramsIngestion,
+ cfg.ScrapeNativeHistogramsEnabled(),
options.EnableCreatedTimestampZeroIngestion,
options.EnableTypeAndUnitLabels,
options.ExtraMetrics,
@@ -326,6 +332,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
sp.config = cfg
oldClient := sp.client
sp.client = client
+ if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil {
+ return errors.New("scrapePool.reload: MetricNameValidationScheme must be set in scrape configuration")
+ }
sp.validationScheme = cfg.MetricNameValidationScheme
var escapingScheme model.EscapingScheme
escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme)
@@ -364,6 +373,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
+ scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)
@@ -408,6 +418,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
interval: targetInterval,
timeout: targetTimeout,
fallbackScrapeProtocol: fallbackScrapeProtocol,
+ scrapeNativeHist: scrapeNativeHist,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
})
@@ -474,7 +485,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
for _, t := range targets {
// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
nonEmpty := false
- t.LabelsRange(func(_ labels.Label) { nonEmpty = true })
+ t.LabelsRange(func(labels.Label) { nonEmpty = true })
switch {
case nonEmpty:
all = append(all, t)
@@ -520,6 +531,7 @@ func (sp *scrapePool) sync(targets []*Target) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
+ scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)
@@ -557,6 +569,7 @@ func (sp *scrapePool) sync(targets []*Target) {
mrc: mrc,
interval: interval,
timeout: timeout,
+ scrapeNativeHist: scrapeNativeHist,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
fallbackScrapeProtocol: fallbackScrapeProtocol,
@@ -629,6 +642,16 @@ func (sp *scrapePool) refreshTargetLimitErr() error {
return nil
}
+func (sp *scrapePool) disableEndOfRunStalenessMarkers(targets []*Target) {
+ sp.mtx.Lock()
+ defer sp.mtx.Unlock()
+ for i := range targets {
+ if l, ok := sp.loops[targets[i].hash()]; ok {
+ l.disableEndOfRunStalenessMarkers()
+ }
+ }
+}
+
func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
if limits == nil {
return nil
@@ -923,16 +946,16 @@ type scrapeLoop struct {
labelLimits *labelLimits
interval time.Duration
timeout time.Duration
- alwaysScrapeClassicHist bool
- convertClassicHistToNHCB bool
validationScheme model.ValidationScheme
escapingScheme model.EscapingScheme
+
+ alwaysScrapeClassicHist bool
+ convertClassicHistToNHCB bool
+ enableCTZeroIngestion bool
+ enableTypeAndUnitLabels bool
fallbackScrapeProtocol string
- // Feature flagged options.
- enableNativeHistogramIngestion bool
- enableCTZeroIngestion bool
- enableTypeAndUnitLabels bool
+ enableNativeHistogramScraping bool
appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@@ -945,7 +968,7 @@ type scrapeLoop struct {
cancel func()
stopped chan struct{}
- disabledEndOfRunStalenessMarkers bool
+ disabledEndOfRunStalenessMarkers atomic.Bool
reportExtraMetrics bool
appendMetadataToWAL bool
@@ -976,8 +999,8 @@ type scrapeCache struct {
// seriesCur and seriesPrev store the labels of series that were seen
// in the current and previous scrape.
// We hold two maps and swap them out to save allocations.
- seriesCur map[uint64]labels.Labels
- seriesPrev map[uint64]labels.Labels
+ seriesCur map[uint64]*cacheEntry
+ seriesPrev map[uint64]*cacheEntry
// TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to
// avoid locking (using metadata API can block scraping).
@@ -1004,8 +1027,8 @@ func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
return &scrapeCache{
series: map[string]*cacheEntry{},
droppedSeries: map[string]*uint64{},
- seriesCur: map[uint64]labels.Labels{},
- seriesPrev: map[uint64]labels.Labels{},
+ seriesCur: map[uint64]*cacheEntry{},
+ seriesPrev: map[uint64]*cacheEntry{},
metadata: map[string]*metaEntry{},
metrics: metrics,
}
@@ -1074,11 +1097,13 @@ func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
return e, true, alreadyScraped
}
-func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
+func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) (ce *cacheEntry) {
if ref == 0 {
- return
+ return nil
}
- c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
+ ce = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
+ c.series[string(met)] = ce
+ return ce
}
func (c *scrapeCache) addDropped(met []byte) {
@@ -1094,14 +1119,14 @@ func (c *scrapeCache) getDropped(met []byte) bool {
return ok
}
-func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) {
- c.seriesCur[hash] = lset
+func (c *scrapeCache) trackStaleness(hash uint64, ce *cacheEntry) {
+ c.seriesCur[hash] = ce
}
-func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
- for h, lset := range c.seriesPrev {
+func (c *scrapeCache) forEachStale(f func(storage.SeriesRef, labels.Labels) bool) {
+ for h, ce := range c.seriesPrev {
if _, ok := c.seriesCur[h]; !ok {
- if !f(lset) {
+ if !f(ce.ref, ce.lset) {
break
}
}
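A minimal sketch (hypothetical types, simplified to names only) of the two-map swap the scrape cache uses: series present in the previous scrape but absent from the current one are reported as stale, then the maps are swapped and the current map is cleared for reuse, avoiding re-allocation.

```go
package main

import "fmt"

type entry struct{ name string }

type cache struct {
	prev, cur map[uint64]*entry
}

func (c *cache) track(hash uint64, e *entry) { c.cur[hash] = e }

// forEachStale visits entries seen last scrape but not this one.
func (c *cache) forEachStale(f func(*entry) bool) {
	for h, e := range c.prev {
		if _, ok := c.cur[h]; !ok {
			if !f(e) {
				break
			}
		}
	}
}

// iterDone swaps the maps and clears the new current map.
func (c *cache) iterDone() {
	c.prev, c.cur = c.cur, c.prev
	clear(c.cur)
}

func main() {
	c := &cache{prev: map[uint64]*entry{}, cur: map[uint64]*entry{}}
	c.track(1, &entry{"up"})
	c.iterDone()
	c.track(2, &entry{"down"}) // "up" not seen this scrape
	c.forEachStale(func(e *entry) bool { fmt.Println("stale:", e.name); return true })
}
```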
@@ -1238,7 +1263,7 @@ func newScrapeLoop(ctx context.Context,
timeout time.Duration,
alwaysScrapeClassicHist bool,
convertClassicHistToNHCB bool,
- enableNativeHistogramIngestion bool,
+ enableNativeHistogramScraping bool,
enableCTZeroIngestion bool,
enableTypeAndUnitLabels bool,
reportExtraMetrics bool,
@@ -1255,7 +1280,7 @@ func newScrapeLoop(ctx context.Context,
l = promslog.NewNopLogger()
}
if buffers == nil {
- buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
+ buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
}
if cache == nil {
cache = newScrapeCache(metrics)
@@ -1273,39 +1298,39 @@ func newScrapeLoop(ctx context.Context,
}
sl := &scrapeLoop{
- scraper: sc,
- buffers: buffers,
- cache: cache,
- appender: appender,
- symbolTable: symbolTable,
- sampleMutator: sampleMutator,
- reportSampleMutator: reportSampleMutator,
- stopped: make(chan struct{}),
- offsetSeed: offsetSeed,
- l: l,
- parentCtx: ctx,
- appenderCtx: appenderCtx,
- honorTimestamps: honorTimestamps,
- trackTimestampsStaleness: trackTimestampsStaleness,
- enableCompression: enableCompression,
- sampleLimit: sampleLimit,
- bucketLimit: bucketLimit,
- maxSchema: maxSchema,
- labelLimits: labelLimits,
- interval: interval,
- timeout: timeout,
- alwaysScrapeClassicHist: alwaysScrapeClassicHist,
- convertClassicHistToNHCB: convertClassicHistToNHCB,
- enableNativeHistogramIngestion: enableNativeHistogramIngestion,
- enableCTZeroIngestion: enableCTZeroIngestion,
- enableTypeAndUnitLabels: enableTypeAndUnitLabels,
- reportExtraMetrics: reportExtraMetrics,
- appendMetadataToWAL: appendMetadataToWAL,
- metrics: metrics,
- skipOffsetting: skipOffsetting,
- validationScheme: validationScheme,
- escapingScheme: escapingScheme,
- fallbackScrapeProtocol: fallbackScrapeProtocol,
+ scraper: sc,
+ buffers: buffers,
+ cache: cache,
+ appender: appender,
+ symbolTable: symbolTable,
+ sampleMutator: sampleMutator,
+ reportSampleMutator: reportSampleMutator,
+ stopped: make(chan struct{}),
+ offsetSeed: offsetSeed,
+ l: l,
+ parentCtx: ctx,
+ appenderCtx: appenderCtx,
+ honorTimestamps: honorTimestamps,
+ trackTimestampsStaleness: trackTimestampsStaleness,
+ enableCompression: enableCompression,
+ sampleLimit: sampleLimit,
+ bucketLimit: bucketLimit,
+ maxSchema: maxSchema,
+ labelLimits: labelLimits,
+ interval: interval,
+ timeout: timeout,
+ alwaysScrapeClassicHist: alwaysScrapeClassicHist,
+ convertClassicHistToNHCB: convertClassicHistToNHCB,
+ enableCTZeroIngestion: enableCTZeroIngestion,
+ enableTypeAndUnitLabels: enableTypeAndUnitLabels,
+ fallbackScrapeProtocol: fallbackScrapeProtocol,
+ enableNativeHistogramScraping: enableNativeHistogramScraping,
+ reportExtraMetrics: reportExtraMetrics,
+ appendMetadataToWAL: appendMetadataToWAL,
+ metrics: metrics,
+ skipOffsetting: skipOffsetting,
+ validationScheme: validationScheme,
+ escapingScheme: escapingScheme,
}
sl.ctx, sl.cancel = context.WithCancel(ctx)
@@ -1383,7 +1408,7 @@ mainLoop:
close(sl.stopped)
- if !sl.disabledEndOfRunStalenessMarkers {
+ if !sl.disabledEndOfRunStalenessMarkers.Load() {
sl.endOfRunStaleness(last, ticker, sl.interval)
}
}
@@ -1549,6 +1574,11 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
case <-time.After(interval / 10):
}
+ // Check if end-of-run staleness markers have been disabled while we were waiting.
+ if sl.disabledEndOfRunStalenessMarkers.Load() {
+ return
+ }
+
// Call sl.append again with an empty scrape to trigger stale markers.
// If the target has since been recreated and scraped, the
// stale markers will be out of order and ignored.
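A minimal sketch of the race the switch to `atomic.Bool` closes (using the same go.uber.org/atomic package the file now imports): another goroutine can disable end-of-run staleness while the loop is parked in the wait above, so the flag is loaded again after the wait before any markers are written.

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"go.uber.org/atomic"
)

func main() {
	var disabled atomic.Bool
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(10 * time.Millisecond) // stand-in for the interval/10 wait
		if disabled.Load() {              // re-check after waiting
			fmt.Println("staleness markers skipped")
			return
		}
		fmt.Println("staleness markers written")
	}()
	disabled.Store(true) // e.g. target handed over elsewhere
	wg.Wait()
}
```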
@@ -1583,7 +1613,7 @@ func (sl *scrapeLoop) stop() {
}
func (sl *scrapeLoop) disableEndOfRunStalenessMarkers() {
- sl.disabledEndOfRunStalenessMarkers = true
+ sl.disabledEndOfRunStalenessMarkers.Store(true)
}
func (sl *scrapeLoop) getCache() *scrapeCache {
@@ -1599,10 +1629,10 @@ type appendErrors struct {
// Update the stale markers.
func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) {
- sl.cache.forEachStale(func(lset labels.Labels) bool {
+ sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool {
// Series no longer exposed, mark it stale.
app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
- _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
+ _, err = app.Append(ref, lset, defTime, math.Float64frombits(value.StaleNaN))
app.SetOptions(nil)
switch {
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
@@ -1612,7 +1642,7 @@ func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (e
}
return err == nil
})
- return
+ return err
}
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
@@ -1622,10 +1652,17 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
// Empty scrape. Just update the stale makers and swap the cache (but don't flush it).
err = sl.updateStaleMarkers(app, defTime)
sl.cache.iterDone(false)
- return
+ return total, added, seriesAdded, err
}
- p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.enableTypeAndUnitLabels, sl.symbolTable)
+ p, err := textparse.New(b, contentType, sl.symbolTable, textparse.ParserOptions{
+ EnableTypeAndUnitLabels: sl.enableTypeAndUnitLabels,
+ IgnoreNativeHistograms: !sl.enableNativeHistogramScraping,
+ ConvertClassicHistogramsToNHCB: sl.convertClassicHistToNHCB,
+ KeepClassicOnClassicAndNativeHistograms: sl.alwaysScrapeClassicHist,
+ OpenMetricsSkipCTSeries: sl.enableCTZeroIngestion,
+ FallbackContentType: sl.fallbackScrapeProtocol,
+ })
if p == nil {
sl.l.Error(
"Failed to determine correct type of scrape target.",
@@ -1633,10 +1670,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
"fallback_media_type", sl.fallbackScrapeProtocol,
"err", err,
)
- return
- }
- if sl.convertClassicHistToNHCB {
- p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist)
+ return total, added, seriesAdded, err
}
if err != nil {
sl.l.Debug(
@@ -1650,8 +1684,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
appErrs = appendErrors{}
sampleLimitErr error
bucketLimitErr error
- lset labels.Labels // escapes to heap so hoisted out of loop
- e exemplar.Exemplar // escapes to heap so hoisted out of loop
+ lset labels.Labels // Escapes to heap so hoisted out of loop.
+ e exemplar.Exemplar // Escapes to heap so hoisted out of loop.
lastMeta *metaEntry
lastMFName []byte
)
@@ -1721,7 +1755,7 @@ loop:
t = *parsedTimestamp
}
- if sl.cache.getDropped(met) || isHistogram && !sl.enableNativeHistogramIngestion {
+ if sl.cache.getDropped(met) {
continue
}
ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met)
@@ -1799,7 +1833,7 @@ loop:
if err == nil {
if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
- sl.cache.trackStaleness(ce.hash, ce.lset)
+ sl.cache.trackStaleness(ce.hash, ce)
}
}
@@ -1811,12 +1845,17 @@ loop:
break loop
}
- if !seriesCached {
- if parsedTimestamp == nil || sl.trackTimestampsStaleness {
+ // If the series wasn't cached (it is new, not seen on the previous scrape) we need to add it to the scrape cache.
+ // But we only do this for series that were appended to the TSDB without errors.
+ // If a series was new but we didn't append it due to sample_limit or other errors, then we don't need
+ // it in the scrape cache, because we don't need to emit StaleNaNs for it when it disappears.
+ if !seriesCached && sampleAdded {
+ ce = sl.cache.addRef(met, ref, lset, hash)
+ if ce != nil && (parsedTimestamp == nil || sl.trackTimestampsStaleness) {
// Bypass staleness logic if there is an explicit timestamp.
- sl.cache.trackStaleness(hash, lset)
+ // But make sure we only do this if we have a cache entry (ce) for our series.
+ sl.cache.trackStaleness(hash, ce)
}
- sl.cache.addRef(met, ref, lset, hash)
if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
seriesAdded++
}
@@ -1913,7 +1952,7 @@ loop:
if err == nil {
err = sl.updateStaleMarkers(app, defTime)
}
- return
+ return total, added, seriesAdded, err
}
func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool {
@@ -2111,32 +2150,32 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim
b := labels.NewBuilderWithSymbolTable(sl.symbolTable)
if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil {
- return
+ return err
}
if sl.reportExtraMetrics {
if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil {
- return
+ return err
}
}
- return
+ return err
}
func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) {
@@ -2146,32 +2185,32 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
b := labels.NewBuilder(labels.EmptyLabels())
if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil {
- return
+ return err
}
if sl.reportExtraMetrics {
if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil {
- return
+ return err
}
if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil {
- return
+ return err
}
}
- return
+ return err
}
func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error {
diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go
index 30b47976a34..563fe33f82f 100644
--- a/vendor/github.com/prometheus/prometheus/scrape/target.go
+++ b/vendor/github.com/prometheus/prometheus/scrape/target.go
@@ -190,9 +190,9 @@ func (t *Target) LabelsRange(f func(l labels.Label)) {
// DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels(lb *labels.Builder) labels.Labels {
- t.mtx.Lock()
+ t.mtx.RLock()
cfg, tLabels, tgLabels := t.scrapeConfig, t.tLabels, t.tgLabels
- t.mtx.Unlock()
+ t.mtx.RUnlock()
PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels)
return lb.Labels()
}
@@ -208,9 +208,9 @@ func (t *Target) SetScrapeConfig(scrapeConfig *config.ScrapeConfig, tLabels, tgL
// URL returns a copy of the target's URL.
func (t *Target) URL() *url.URL {
- t.mtx.Lock()
+ t.mtx.RLock()
configParams := t.scrapeConfig.Params
- t.mtx.Unlock()
+ t.mtx.RUnlock()
params := url.Values{}
for k, v := range configParams {
@@ -332,7 +332,9 @@ type limitAppender struct {
}
func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- if !value.IsStaleNaN(v) {
+ // Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
+ // This ensures that if a series is already in TSDB then we always write the marker.
+ if ref == 0 || !value.IsStaleNaN(v) {
app.i++
if app.i > app.limit {
return 0, errSampleLimit
@@ -345,6 +347,22 @@ func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t in
return ref, nil
}
+func (app *limitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+ // Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
+ // This ensures that if a series is already in the TSDB, we always write the marker.
+ if ref == 0 || (h != nil && !value.IsStaleNaN(h.Sum)) || (fh != nil && !value.IsStaleNaN(fh.Sum)) {
+ app.i++
+ if app.i > app.limit {
+ return 0, errSampleLimit
+ }
+ }
+ ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
+ if err != nil {
+ return 0, err
+ }
+ return ref, nil
+}
+
type timeLimitAppender struct {
storage.Appender
@@ -412,12 +430,12 @@ type maxSchemaAppender struct {
func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
- if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
+ if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
h = h.ReduceResolution(app.maxSchema)
}
}
if fh != nil {
- if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
+ if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema {
fh = fh.ReduceResolution(app.maxSchema)
}
}
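The limitAppender changes above exempt staleness markers from sample_limit accounting, but only when the series is already known to the TSDB (non-zero ref), so markers for tracked series are always written. A self-contained sketch of that predicate; the stale-NaN bit pattern is a local copy of what value.StaleNaN is upstream, and the plain uint64 ref stands in for storage.SeriesRef:

package main

import (
	"fmt"
	"math"
)

// Local copy of the staleness-marker bit pattern (value.StaleNaN in
// Prometheus); an assumption of this sketch.
const staleNaN = 0x7ff0000000000002

func isStaleNaN(v float64) bool {
	return math.Float64bits(v) == staleNaN
}

// countsTowardLimit reproduces the gating logic from the patched
// limitAppender.Append: only a staleness marker for an already-known
// series (non-zero ref) bypasses the limit accounting.
func countsTowardLimit(ref uint64, v float64) bool {
	return ref == 0 || !isStaleNaN(v)
}

func main() {
	marker := math.Float64frombits(staleNaN)
	fmt.Println(countsTowardLimit(0, marker))  // true: unknown series still counts
	fmt.Println(countsTowardLimit(42, marker)) // false: marker for a known series bypasses the limit
	fmt.Println(countsTowardLimit(42, 1.5))    // true: regular samples always count
}
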
diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go
index e847c10e61a..bc27948fd07 100644
--- a/vendor/github.com/prometheus/prometheus/storage/buffer.go
+++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go
@@ -175,15 +175,15 @@ func (s fSample) F() float64 {
return s.f
}
-func (s fSample) H() *histogram.Histogram {
+func (fSample) H() *histogram.Histogram {
panic("H() called for fSample")
}
-func (s fSample) FH() *histogram.FloatHistogram {
+func (fSample) FH() *histogram.FloatHistogram {
panic("FH() called for fSample")
}
-func (s fSample) Type() chunkenc.ValueType {
+func (fSample) Type() chunkenc.ValueType {
return chunkenc.ValFloat
}
@@ -200,7 +200,7 @@ func (s hSample) T() int64 {
return s.t
}
-func (s hSample) F() float64 {
+func (hSample) F() float64 {
panic("F() called for hSample")
}
@@ -212,7 +212,7 @@ func (s hSample) FH() *histogram.FloatHistogram {
return s.h.ToFloat(nil)
}
-func (s hSample) Type() chunkenc.ValueType {
+func (hSample) Type() chunkenc.ValueType {
return chunkenc.ValHistogram
}
@@ -229,11 +229,11 @@ func (s fhSample) T() int64 {
return s.t
}
-func (s fhSample) F() float64 {
+func (fhSample) F() float64 {
panic("F() called for fhSample")
}
-func (s fhSample) H() *histogram.Histogram {
+func (fhSample) H() *histogram.Histogram {
panic("H() called for fhSample")
}
@@ -241,7 +241,7 @@ func (s fhSample) FH() *histogram.FloatHistogram {
return s.fh
}
-func (s fhSample) Type() chunkenc.ValueType {
+func (fhSample) Type() chunkenc.ValueType {
return chunkenc.ValFloatHistogram
}
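The buffer.go hunks only drop receiver names that the method bodies never read; behavior is identical, the signature just stops naming an unused variable. A toy sketch of the convention:

package main

import "fmt"

type fSample struct {
	t int64
	f float64
}

// The receiver is unused, so it is left unnamed.
func (fSample) Type() string { return "float" }

// The receiver is used, so it keeps a name.
func (s fSample) F() float64 { return s.f }

func main() {
	s := fSample{t: 1000, f: 3.14}
	fmt.Println(s.Type(), s.F()) // float 3.14
}
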
diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go
index 4d076788a7c..f99edb473a0 100644
--- a/vendor/github.com/prometheus/prometheus/storage/fanout.go
+++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go
@@ -253,7 +253,7 @@ func (f *fanoutAppender) Commit() (err error) {
}
}
}
- return
+ return err
}
func (f *fanoutAppender) Rollback() (err error) {
diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go
index 636473d07c7..9d7e5d93a6b 100644
--- a/vendor/github.com/prometheus/prometheus/storage/interface.go
+++ b/vendor/github.com/prometheus/prometheus/storage/interface.go
@@ -125,15 +125,15 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}
-func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
-func (q *MockQuerier) Close() error {
+func (*MockQuerier) Close() error {
return nil
}
@@ -389,7 +389,7 @@ type SeriesSet interface {
Next() bool
// At returns full series. Returned series should be iterable even after Next is called.
At() Series
- // The error that iteration as failed with.
+ // The error that iteration has failed with.
// When an error occurs, set cannot continue to iterate.
Err() error
// A collection of warnings for the whole set.
@@ -408,10 +408,10 @@ type testSeriesSet struct {
series Series
}
-func (s testSeriesSet) Next() bool { return true }
-func (s testSeriesSet) At() Series { return s.series }
-func (s testSeriesSet) Err() error { return nil }
-func (s testSeriesSet) Warnings() annotations.Annotations { return nil }
+func (testSeriesSet) Next() bool { return true }
+func (s testSeriesSet) At() Series { return s.series }
+func (testSeriesSet) Err() error { return nil }
+func (testSeriesSet) Warnings() annotations.Annotations { return nil }
// TestSeriesSet returns a mock series set.
func TestSeriesSet(series Series) SeriesSet {
@@ -422,10 +422,10 @@ type errSeriesSet struct {
err error
}
-func (s errSeriesSet) Next() bool { return false }
-func (s errSeriesSet) At() Series { return nil }
-func (s errSeriesSet) Err() error { return s.err }
-func (s errSeriesSet) Warnings() annotations.Annotations { return nil }
+func (errSeriesSet) Next() bool { return false }
+func (errSeriesSet) At() Series { return nil }
+func (s errSeriesSet) Err() error { return s.err }
+func (errSeriesSet) Warnings() annotations.Annotations { return nil }
// ErrSeriesSet returns a series set that wraps an error.
func ErrSeriesSet(err error) SeriesSet {
@@ -443,10 +443,10 @@ type errChunkSeriesSet struct {
err error
}
-func (s errChunkSeriesSet) Next() bool { return false }
-func (s errChunkSeriesSet) At() ChunkSeries { return nil }
-func (s errChunkSeriesSet) Err() error { return s.err }
-func (s errChunkSeriesSet) Warnings() annotations.Annotations { return nil }
+func (errChunkSeriesSet) Next() bool { return false }
+func (errChunkSeriesSet) At() ChunkSeries { return nil }
+func (s errChunkSeriesSet) Err() error { return s.err }
+func (errChunkSeriesSet) Warnings() annotations.Annotations { return nil }
// ErrChunkSeriesSet returns a chunk series set that wraps an error.
func ErrChunkSeriesSet(err error) ChunkSeriesSet {
diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go
index 9b3bcee5807..f8ba1ab76a9 100644
--- a/vendor/github.com/prometheus/prometheus/storage/merge.go
+++ b/vendor/github.com/prometheus/prometheus/storage/merge.go
@@ -233,10 +233,7 @@ func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *Label
}
func mergeStrings(a, b []string) []string {
- maxl := len(a)
- if len(b) > len(a) {
- maxl = len(b)
- }
+ maxl := max(len(b), len(a))
res := make([]string, 0, maxl*10/9)
for len(a) > 0 && len(b) > 0 {
@@ -440,11 +437,11 @@ func (h genericSeriesSetHeap) Less(i, j int) bool {
return labels.Compare(a, b) < 0
}
-func (h *genericSeriesSetHeap) Push(x interface{}) {
+func (h *genericSeriesSetHeap) Push(x any) {
*h = append(*h, x.(genericSeriesSet))
}
-func (h *genericSeriesSetHeap) Pop() interface{} {
+func (h *genericSeriesSetHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
@@ -698,11 +695,11 @@ func (h samplesIteratorHeap) Less(i, j int) bool {
return h[i].AtT() < h[j].AtT()
}
-func (h *samplesIteratorHeap) Push(x interface{}) {
+func (h *samplesIteratorHeap) Push(x any) {
*h = append(*h, x.(chunkenc.Iterator))
}
-func (h *samplesIteratorHeap) Pop() interface{} {
+func (h *samplesIteratorHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
@@ -846,11 +843,11 @@ func (h chunkIteratorHeap) Less(i, j int) bool {
return at.MinTime < bt.MinTime
}
-func (h *chunkIteratorHeap) Push(x interface{}) {
+func (h *chunkIteratorHeap) Push(x any) {
*h = append(*h, x.(chunks.Iterator))
}
-func (h *chunkIteratorHeap) Pop() interface{} {
+func (h *chunkIteratorHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
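The merge.go hunks modernize the code with newer Go idioms (the any alias, Go 1.18+; the max builtin, Go 1.21+) without changing behavior. A standalone sketch; the merge loop below is a plausible reconstruction of the remainder of mergeStrings, which the hunk does not show:

package main

import "fmt"

// mergeStrings merges two sorted, deduplicated string slices,
// preallocating off the longer input as in the patched version.
func mergeStrings(a, b []string) []string {
	maxl := max(len(b), len(a))
	res := make([]string, 0, maxl*10/9) // slight headroom, matching the patch
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0] == b[0]:
			res = append(res, a[0])
			a, b = a[1:], b[1:]
		case a[0] < b[0]:
			res = append(res, a[0])
			a = a[1:]
		default:
			res = append(res, b[0])
			b = b[1:]
		}
	}
	res = append(res, a...)
	return append(res, b...)
}

func main() {
	fmt.Println(mergeStrings([]string{"a", "c"}, []string{"b", "c", "d"})) // [a b c d]
}
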
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
index 1b577a56bc8..ea2a816d942 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go
@@ -43,12 +43,32 @@ const (
IngestionPublicAudience = "https://monitor.azure.com//.default"
)
+const (
+ // DefaultWorkloadIdentityTokenPath is the default path where the Azure Workload Identity
+ // webhook mounts the service account token in Azure environments.
+ DefaultWorkloadIdentityTokenPath = "/var/run/secrets/azure/tokens/azure-identity-token"
+)
+
// ManagedIdentityConfig is used to store managed identity config values.
type ManagedIdentityConfig struct {
// ClientID is the clientId of the managed identity that is being used to authenticate.
ClientID string `yaml:"client_id,omitempty"`
}
+// WorkloadIdentityConfig is used to store workload identity config values.
+type WorkloadIdentityConfig struct {
+ // ClientID is the clientId of the Microsoft Entra application or user-assigned managed identity.
+ ClientID string `yaml:"client_id,omitempty"`
+
+ // TenantID is the tenantId of the Microsoft Entra application or user-assigned managed identity.
+ // This should match the tenant ID where your application or managed identity is registered.
+ TenantID string `yaml:"tenant_id,omitempty"`
+
+ // TokenFilePath is the path to the token file provided by the Kubernetes service account projected volume.
+ // If not specified, it defaults to DefaultWorkloadIdentityTokenPath.
+ TokenFilePath string `yaml:"token_file_path,omitempty"`
+}
+
// OAuthConfig is used to store azure oauth config values.
type OAuthConfig struct {
// ClientID is the clientId of the azure active directory application that is being used to authenticate.
@@ -72,6 +92,9 @@ type AzureADConfig struct { //nolint:revive // exported.
// ManagedIdentity is the managed identity that is being used to authenticate.
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"`
+ // WorkloadIdentity is the workload identity that is being used to authenticate.
+ WorkloadIdentity *WorkloadIdentityConfig `yaml:"workload_identity,omitempty"`
+
// OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"`
@@ -111,20 +134,25 @@ func (c *AzureADConfig) Validate() error {
return errors.New("must provide a cloud in the Azure AD config")
}
- if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
- return errors.New("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
+ authenticators := 0
+ if c.ManagedIdentity != nil {
+ authenticators++
}
-
- if c.ManagedIdentity != nil && c.OAuth != nil {
- return errors.New("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
+ if c.WorkloadIdentity != nil {
+ authenticators++
}
-
- if c.ManagedIdentity != nil && c.SDK != nil {
- return errors.New("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
+ if c.OAuth != nil {
+ authenticators++
+ }
+ if c.SDK != nil {
+ authenticators++
}
- if c.OAuth != nil && c.SDK != nil {
- return errors.New("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
+ if authenticators == 0 {
+ return errors.New("must provide an Azure Managed Identity, Azure Workload Identity, Azure OAuth or Azure SDK in the Azure AD config")
+ }
+ if authenticators > 1 {
+ return errors.New("cannot provide multiple authentication methods in the Azure AD config")
}
if c.ManagedIdentity != nil {
@@ -136,6 +164,26 @@ func (c *AzureADConfig) Validate() error {
}
}
+ if c.WorkloadIdentity != nil {
+ if c.WorkloadIdentity.ClientID == "" {
+ return errors.New("must provide an Azure Workload Identity client_id in the Azure AD config")
+ }
+ if c.WorkloadIdentity.TenantID == "" {
+ return errors.New("must provide an Azure Workload Identity tenant_id in the Azure AD config")
+ }
+
+ if _, err := uuid.Parse(c.WorkloadIdentity.ClientID); err != nil {
+ return errors.New("the provided Azure Workload Identity client_id is invalid")
+ }
+ if _, err := uuid.Parse(c.WorkloadIdentity.TenantID); err != nil {
+ return errors.New("the provided Azure Workload Identity tenant_id is invalid")
+ }
+
+ if c.WorkloadIdentity.TokenFilePath == "" {
+ c.WorkloadIdentity.TokenFilePath = DefaultWorkloadIdentityTokenPath
+ }
+ }
+
if c.OAuth != nil {
if c.OAuth.ClientID == "" {
return errors.New("must provide an Azure OAuth client_id in the Azure AD config")
@@ -147,24 +195,18 @@ func (c *AzureADConfig) Validate() error {
return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config")
}
- var err error
- _, err = uuid.Parse(c.OAuth.ClientID)
- if err != nil {
+ if _, err := uuid.Parse(c.OAuth.ClientID); err != nil {
return errors.New("the provided Azure OAuth client_id is invalid")
}
- _, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
- if err != nil {
+ if _, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID); err != nil {
return errors.New("the provided Azure OAuth tenant_id is invalid")
}
}
if c.SDK != nil {
- var err error
-
if c.SDK.TenantID != "" {
- _, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
- if err != nil {
- return errors.New("the provided Azure OAuth tenant_id is invalid")
+ if _, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID); err != nil {
+ return errors.New("the provided Azure SDK tenant_id is invalid")
}
}
}
@@ -173,7 +215,7 @@ func (c *AzureADConfig) Validate() error {
}
// UnmarshalYAML unmarshal the Azure AD config yaml.
-func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *AzureADConfig) UnmarshalYAML(unmarshal func(any) error) error {
type plain AzureADConfig
*c = AzureADConfig{}
if err := unmarshal((*plain)(c)); err != nil {
@@ -217,7 +259,7 @@ func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
return rt.next.RoundTrip(req)
}
-// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application.
+// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity, Workload Identity and Azure AD application.
func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
var cred azcore.TokenCredential
var err error
@@ -239,6 +281,18 @@ func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
}
}
+ if cfg.WorkloadIdentity != nil {
+ workloadIdentityConfig := &WorkloadIdentityConfig{
+ ClientID: cfg.WorkloadIdentity.ClientID,
+ TenantID: cfg.WorkloadIdentity.TenantID,
+ TokenFilePath: cfg.WorkloadIdentity.TokenFilePath,
+ }
+ cred, err = newWorkloadIdentityTokenCredential(clientOpts, workloadIdentityConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
if cfg.OAuth != nil {
oAuthConfig := &OAuthConfig{
ClientID: cfg.OAuth.ClientID,
@@ -276,6 +330,21 @@ func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managed
return azidentity.NewManagedIdentityCredential(opts)
}
+// newWorkloadIdentityTokenCredential returns new Microsoft Entra Workload Identity token credential.
+//
+// For detailed setup instructions, see:
+// https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/prometheus-metrics-enable-workload-identity
+func newWorkloadIdentityTokenCredential(clientOpts *azcore.ClientOptions, workloadIdentityConfig *WorkloadIdentityConfig) (azcore.TokenCredential, error) {
+ opts := &azidentity.WorkloadIdentityCredentialOptions{
+ ClientOptions: *clientOpts,
+ ClientID: workloadIdentityConfig.ClientID,
+ TenantID: workloadIdentityConfig.TenantID,
+ TokenFilePath: workloadIdentityConfig.TokenFilePath,
+ }
+
+ return azidentity.NewWorkloadIdentityCredential(opts)
+}
+
// newOAuthTokenCredential returns new OAuth token credential.
func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) {
opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts}
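With Workload Identity added, the azuread.go validation above counts configured authenticators instead of enumerating pairwise conflicts, so exactly one method must be set and future additions need no new error cases. A sketch of that counting logic over a stripped-down stand-in for AzureADConfig:

package main

import (
	"errors"
	"fmt"
)

// azureADConfig is a toy stand-in; upstream the fields are typed
// config structs rather than bare any values.
type azureADConfig struct {
	ManagedIdentity, WorkloadIdentity, OAuth, SDK any
}

func validateAuth(c azureADConfig) error {
	n := 0
	for _, m := range []any{c.ManagedIdentity, c.WorkloadIdentity, c.OAuth, c.SDK} {
		if m != nil {
			n++
		}
	}
	switch {
	case n == 0:
		return errors.New("must provide an Azure Managed Identity, Azure Workload Identity, Azure OAuth or Azure SDK in the Azure AD config")
	case n > 1:
		return errors.New("cannot provide multiple authentication methods in the Azure AD config")
	}
	return nil
}

func main() {
	fmt.Println(validateAuth(azureADConfig{}))                                   // none set: error
	fmt.Println(validateAuth(azureADConfig{OAuth: struct{}{}, SDK: struct{}{}})) // two set: error
	fmt.Println(validateAuth(azureADConfig{WorkloadIdentity: struct{}{}}))       // exactly one: <nil>
}
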
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go
index 68891f659e6..c535ea3425a 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go
@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net/http"
"net/http/httptrace"
"strconv"
@@ -27,6 +28,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
+ remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -37,12 +39,10 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
- "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote/azuread"
"github.com/prometheus/prometheus/storage/remote/googleiam"
- "github.com/prometheus/prometheus/util/compression"
)
const (
@@ -58,9 +58,9 @@ var (
// UserAgent represents Prometheus version to use for user agent header.
UserAgent = version.PrometheusUserAgent()
- remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
- config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
- config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
+ remoteWriteContentTypeHeaders = map[remoteapi.WriteMessageType]string{
+ remoteapi.WriteV1MessageType: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
+ remoteapi.WriteV2MessageType: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
}
AcceptedResponseTypes = []prompb.ReadRequest_ResponseType{
@@ -112,36 +112,39 @@ type Client struct {
Client *http.Client
timeout time.Duration
- retryOnRateLimit bool
- chunkedReadLimit uint64
+ retryOnRateLimit bool
+ chunkedReadLimit uint64
+ acceptedResponseTypes []prompb.ReadRequest_ResponseType
readQueries prometheus.Gauge
readQueriesTotal *prometheus.CounterVec
readQueriesDuration prometheus.ObserverVec
- writeProtoMsg config.RemoteWriteProtoMsg
- writeCompression compression.Type // Not exposed by ClientConfig for now.
+ writeProtoMsg remoteapi.WriteMessageType
+ writeCompression remoteapi.Compression
}
// ClientConfig configures a client.
type ClientConfig struct {
- URL *config_util.URL
- Timeout model.Duration
- HTTPClientConfig config_util.HTTPClientConfig
- SigV4Config *sigv4.SigV4Config
- AzureADConfig *azuread.AzureADConfig
- GoogleIAMConfig *googleiam.Config
- Headers map[string]string
- RetryOnRateLimit bool
- WriteProtoMsg config.RemoteWriteProtoMsg
- ChunkedReadLimit uint64
- RoundRobinDNS bool
+ URL *config_util.URL
+ Timeout model.Duration
+ HTTPClientConfig config_util.HTTPClientConfig
+ SigV4Config *sigv4.SigV4Config
+ AzureADConfig *azuread.AzureADConfig
+ GoogleIAMConfig *googleiam.Config
+ Headers map[string]string
+ RetryOnRateLimit bool
+ WriteProtoMsg remoteapi.WriteMessageType
+ ChunkedReadLimit uint64
+ RoundRobinDNS bool
+ AcceptedResponseTypes []prompb.ReadRequest_ResponseType
}
// ReadClient will request the STREAMED_XOR_CHUNKS method of remote read but can
// also fall back to the SAMPLES method if necessary.
type ReadClient interface {
Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error)
+ ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error)
}
// NewReadClient creates a new client for remote read.
@@ -157,15 +160,22 @@ func NewReadClient(name string, conf *ClientConfig, optFuncs ...config_util.HTTP
}
httpClient.Transport = otelhttp.NewTransport(t)
+ // Set accepted response types; default to the existing behavior if not specified.
+ acceptedResponseTypes := conf.AcceptedResponseTypes
+ if len(acceptedResponseTypes) == 0 {
+ acceptedResponseTypes = AcceptedResponseTypes
+ }
+
return &Client{
- remoteName: name,
- urlString: conf.URL.String(),
- Client: httpClient,
- timeout: time.Duration(conf.Timeout),
- chunkedReadLimit: conf.ChunkedReadLimit,
- readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()),
- readQueriesTotal: remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
- readQueriesDuration: remoteReadQueryDuration.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
+ remoteName: name,
+ urlString: conf.URL.String(),
+ Client: httpClient,
+ timeout: time.Duration(conf.Timeout),
+ chunkedReadLimit: conf.ChunkedReadLimit,
+ acceptedResponseTypes: acceptedResponseTypes,
+ readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()),
+ readQueriesTotal: remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
+ readQueriesDuration: remoteReadQueryDuration.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
}, nil
}
@@ -206,7 +216,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
}
}
- writeProtoMsg := config.RemoteWriteProtoMsgV1
+ writeProtoMsg := remoteapi.WriteV1MessageType
if conf.WriteProtoMsg != "" {
writeProtoMsg = conf.WriteProtoMsg
}
@@ -215,6 +225,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace {
return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans())
}))
+
return &Client{
remoteName: name,
urlString: conf.URL.String(),
@@ -222,7 +233,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
retryOnRateLimit: conf.RetryOnRateLimit,
timeout: time.Duration(conf.Timeout),
writeProtoMsg: writeProtoMsg,
- writeCompression: compression.Snappy,
+ writeCompression: remoteapi.SnappyBlockCompression,
}, nil
}
@@ -259,10 +270,10 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
return WriteResponseStats{}, err
}
- httpReq.Header.Add("Content-Encoding", c.writeCompression)
+ httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
httpReq.Header.Set("User-Agent", UserAgent)
- if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
+ if c.writeProtoMsg == remoteapi.WriteV1MessageType {
// Compatibility mode for 1.0.
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
} else {
@@ -337,27 +348,44 @@ func (c *Client) Endpoint() string {
return c.urlString
}
-// Read reads from a remote endpoint. The sortSeries parameter is only respected in the case of a sampled response;
+// Read reads from a remote endpoint. The sortSeries parameter is only respected in the case of a samples response;
// chunked responses arrive already sorted by the server.
func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
+ return c.ReadMultiple(ctx, []*prompb.Query{query}, sortSeries)
+}
+
+// ReadMultiple reads from a remote endpoint using multiple queries in a single request.
+// The sortSeries parameter is only respected in the case of a samples response;
+// chunked responses arrive already sorted by the server.
+// Returns a single SeriesSet with interleaved series from all queries.
+func (c *Client) ReadMultiple(ctx context.Context, queries []*prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
c.readQueries.Inc()
defer c.readQueries.Dec()
req := &prompb.ReadRequest{
- // TODO: Support batching multiple queries into one read request,
- // as the protobuf interface allows for it.
- Queries: []*prompb.Query{query},
- AcceptedResponseTypes: AcceptedResponseTypes,
+ Queries: queries,
+ AcceptedResponseTypes: c.acceptedResponseTypes,
}
+
+ httpResp, cancel, start, err := c.executeReadRequest(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.handleReadResponse(httpResp, req, queries, sortSeries, start, cancel)
+}
+
+// executeReadRequest creates and executes an HTTP request for reading data.
+func (c *Client) executeReadRequest(ctx context.Context, req *prompb.ReadRequest) (*http.Response, context.CancelFunc, time.Time, error) {
data, err := proto.Marshal(req)
if err != nil {
- return nil, fmt.Errorf("unable to marshal read request: %w", err)
+ return nil, nil, time.Time{}, fmt.Errorf("unable to marshal read request: %w", err)
}
compressed := snappy.Encode(nil, data)
httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(compressed))
if err != nil {
- return nil, fmt.Errorf("unable to create request: %w", err)
+ return nil, nil, time.Time{}, fmt.Errorf("unable to create request: %w", err)
}
httpReq.Header.Add("Content-Encoding", "snappy")
httpReq.Header.Add("Accept-Encoding", "snappy")
@@ -375,9 +403,14 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool)
httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
if err != nil {
cancel()
- return nil, fmt.Errorf("error sending request: %w", err)
+ return nil, nil, time.Time{}, fmt.Errorf("error sending request: %w", err)
}
+ return httpResp, cancel, start, nil
+}
+
+// handleReadResponse processes the HTTP response and returns a SeriesSet.
+func (c *Client) handleReadResponse(httpResp *http.Response, req *prompb.ReadRequest, queries []*prompb.Query, sortSeries bool, start time.Time, cancel context.CancelFunc) (storage.SeriesSet, error) {
if httpResp.StatusCode/100 != 2 {
// Make an attempt at getting an error message.
body, _ := io.ReadAll(httpResp.Body)
@@ -402,7 +435,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool)
c.readQueriesDuration.WithLabelValues("chunked").Observe(time.Since(start).Seconds())
s := NewChunkedReader(httpResp.Body, c.chunkedReadLimit, nil)
- return NewChunkedSeriesSet(s, httpResp.Body, query.StartTimestampMs, query.EndTimestampMs, func(err error) {
+ return c.handleChunkedResponseImpl(s, httpResp, queries, func(err error) {
code := strconv.Itoa(httpResp.StatusCode)
if !errors.Is(err, io.EOF) {
code = "aborted_stream"
@@ -418,7 +451,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool)
}
}
-func (c *Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) {
+func (*Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) {
compressed, err := io.ReadAll(httpResp.Body)
if err != nil {
return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
@@ -443,8 +476,60 @@ func (c *Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.R
return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
}
- // This client does not batch queries so there's always only 1 result.
- res := resp.Results[0]
+ return combineQueryResults(resp.Results, sortSeries)
+}
+
+// combineQueryResults combines multiple query results into a single SeriesSet,
+// handling both sorted and unsorted cases appropriately.
+func combineQueryResults(results []*prompb.QueryResult, sortSeries bool) (storage.SeriesSet, error) {
+ if len(results) == 0 {
+ return &concreteSeriesSet{series: nil, cur: 0}, nil
+ }
+
+ if len(results) == 1 {
+ return FromQueryResult(sortSeries, results[0]), nil
+ }
+
+ // Multiple queries case - combine all results
+ if sortSeries {
+ // When sorting is requested, use MergeSeriesSet which can efficiently merge sorted inputs
+ var allSeriesSets []storage.SeriesSet
+ for _, result := range results {
+ seriesSet := FromQueryResult(sortSeries, result)
+ if err := seriesSet.Err(); err != nil {
+ return nil, fmt.Errorf("error reading series from query result: %w", err)
+ }
+ allSeriesSets = append(allSeriesSets, seriesSet)
+ }
+ return storage.NewMergeSeriesSet(allSeriesSets, 0, storage.ChainedSeriesMerge), nil
+ }
+
+ // When sorting is not requested, just concatenate all series without using MergeSeriesSet
+ // since MergeSeriesSet requires sorted inputs
+ var allSeries []storage.Series
+ for _, result := range results {
+ seriesSet := FromQueryResult(sortSeries, result)
+ for seriesSet.Next() {
+ allSeries = append(allSeries, seriesSet.At())
+ }
+ if err := seriesSet.Err(); err != nil {
+ return nil, fmt.Errorf("error reading series from query result: %w", err)
+ }
+ }
+
+ return &concreteSeriesSet{series: allSeries, cur: 0}, nil
+}
+
+// handleChunkedResponseImpl handles chunked responses for both single and multiple queries.
+func (*Client) handleChunkedResponseImpl(s *ChunkedReader, httpResp *http.Response, queries []*prompb.Query, onClose func(error)) storage.SeriesSet {
+ // For multiple queries in a chunked response, we'll still use the existing infrastructure
+ // but we need to provide the timestamp range that covers all queries
+ var minStartTs, maxEndTs int64 = math.MaxInt64, math.MinInt64
+
+ for _, query := range queries {
+ minStartTs = min(minStartTs, query.StartTimestampMs)
+ maxEndTs = max(maxEndTs, query.EndTimestampMs)
+ }
- return FromQueryResult(sortSeries, res), nil
+ return NewChunkedSeriesSet(s, httpResp.Body, minStartTs, maxEndTs, onClose)
}
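ReadMultiple above batches several queries into one remote-read request; for chunked responses it collapses the per-query time ranges into a single [minStart, maxEnd] window before handing off to NewChunkedSeriesSet. A standalone sketch of that reduction (the query struct stands in for prompb.Query):

package main

import (
	"fmt"
	"math"
)

type query struct {
	startMs, endMs int64
}

// span computes one window covering all queries, the same min/max
// fold used by handleChunkedResponseImpl in the patch.
func span(queries []query) (minStart, maxEnd int64) {
	minStart, maxEnd = math.MaxInt64, math.MinInt64
	for _, q := range queries {
		minStart = min(minStart, q.startMs)
		maxEnd = max(maxEnd, q.endMs)
	}
	return minStart, maxEnd
}

func main() {
	fmt.Println(span([]query{{100, 200}, {50, 150}, {120, 300}})) // 50 300
}
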
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go
index 3dbf432bcfd..7e21909354e 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go
@@ -340,7 +340,7 @@ func (e errSeriesSet) Err() error {
return e.err
}
-func (e errSeriesSet) Warnings() annotations.Annotations { return nil }
+func (errSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeriesSet implements storage.SeriesSet.
type concreteSeriesSet struct {
@@ -357,11 +357,11 @@ func (c *concreteSeriesSet) At() storage.Series {
return c.series[c.cur-1]
}
-func (c *concreteSeriesSet) Err() error {
+func (*concreteSeriesSet) Err() error {
return nil
}
-func (c *concreteSeriesSet) Warnings() annotations.Annotations { return nil }
+func (*concreteSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeries implements storage.Series.
type concreteSeries struct {
@@ -388,6 +388,7 @@ type concreteSeriesIterator struct {
histogramsCur int
curValType chunkenc.ValueType
series *concreteSeries
+ err error
}
func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
@@ -404,10 +405,14 @@ func (c *concreteSeriesIterator) reset(series *concreteSeries) {
c.histogramsCur = -1
c.curValType = chunkenc.ValNone
c.series = series
+ c.err = nil
}
// Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
+ if c.err != nil {
+ return chunkenc.ValNone
+ }
if c.floatsCur == -1 {
c.floatsCur = 0
}
@@ -439,7 +444,7 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
c.curValType = chunkenc.ValFloat
} else {
- c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
+ c.curValType = chunkenc.ValHistogram
}
// When the timestamps do not overlap the cursor for the non-selected sample type has advanced too
// far; we decrement it back down here.
@@ -453,11 +458,26 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
case c.floatsCur < len(c.series.floats):
c.curValType = chunkenc.ValFloat
case c.histogramsCur < len(c.series.histograms):
- c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
+ c.curValType = chunkenc.ValHistogram
+ }
+ if c.curValType == chunkenc.ValHistogram {
+ h := &c.series.histograms[c.histogramsCur]
+ c.curValType = getHistogramValType(h)
+ c.err = validateHistogramSchema(h)
+ }
+ if c.err != nil {
+ c.curValType = chunkenc.ValNone
}
return c.curValType
}
+func validateHistogramSchema(h *prompb.Histogram) error {
+ if histogram.IsKnownSchema(h.Schema) {
+ return nil
+ }
+ return histogram.UnknownSchemaError(h.Schema)
+}
+
func getHistogramValType(h *prompb.Histogram) chunkenc.ValueType {
if h.IsFloatHistogram() {
return chunkenc.ValFloatHistogram
@@ -480,14 +500,28 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
panic("iterator is not on an integer histogram sample")
}
h := c.series.histograms[c.histogramsCur]
- return h.Timestamp, h.ToIntHistogram()
+ mh := h.ToIntHistogram()
+ if mh.Schema > histogram.ExponentialSchemaMax && mh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // sample is from a newer Prometheus version that supports higher
+ // resolution.
+ mh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ return h.Timestamp, mh
}
// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
fh := c.series.histograms[c.histogramsCur]
- return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
+ mfh := fh.ToFloatHistogram() // integer will be auto-converted.
+ if mfh.Schema > histogram.ExponentialSchemaMax && mfh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // sample is from a newer Prometheus version that supports higher
+ // resolution.
+ mfh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ return fh.Timestamp, mfh
}
panic("iterator is not on a histogram sample")
}
@@ -504,6 +538,9 @@ const noTS = int64(math.MaxInt64)
// Next implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
+ if c.err != nil {
+ return chunkenc.ValNone
+ }
peekFloatTS := noTS
if c.floatsCur+1 < len(c.series.floats) {
peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
@@ -532,12 +569,21 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
c.histogramsCur++
c.curValType = chunkenc.ValFloat
}
+
+ if c.curValType == chunkenc.ValHistogram {
+ h := &c.series.histograms[c.histogramsCur]
+ c.curValType = getHistogramValType(h)
+ c.err = validateHistogramSchema(h)
+ }
+ if c.err != nil {
+ c.curValType = chunkenc.ValNone
+ }
return c.curValType
}
// Err implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Err() error {
- return nil
+ return c.err
}
// chunkedSeriesSet implements storage.SeriesSet.
@@ -607,7 +653,7 @@ func (s *chunkedSeriesSet) Err() error {
return s.err
}
-func (s *chunkedSeriesSet) Warnings() annotations.Annotations {
+func (*chunkedSeriesSet) Warnings() annotations.Annotations {
return nil
}
@@ -766,10 +812,10 @@ func (it *chunkedSeriesIterator) Err() error {
// also making sure that there are no labels with duplicate names.
func validateLabelsAndMetricName(ls []prompb.Label) error {
for i, l := range ls {
- if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
+ if l.Name == labels.MetricName && !model.UTF8Validation.IsValidMetricName(l.Value) {
return fmt.Errorf("invalid metric name: %v", l.Value)
}
- if !model.LabelName(l.Name).IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(l.Name) {
return fmt.Errorf("invalid label name: %v", l.Name)
}
if !model.LabelValue(l.Value).IsValid() {
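The codec.go iterators above now surface unknown histogram schemas via Err() and, separately, downgrade histograms whose schema is above the supported exponential maximum but still inside the reserved range. A sketch of the clamping predicate; the bound values here are assumptions of this sketch (upstream they are histogram.ExponentialSchemaMax and histogram.ExponentialSchemaMaxReserved):

package main

import "fmt"

const (
	exponentialSchemaMax         = 8  // highest resolution this reader supports (assumed)
	exponentialSchemaMaxReserved = 52 // top of the reserved exponential range (assumed)
)

// needsReduction mirrors the new slow path: schemas beyond what we
// support, yet still reserved exponential schemas, get reduced down
// to exponentialSchemaMax via ReduceResolution.
func needsReduction(schema int32) bool {
	return schema > exponentialSchemaMax && schema <= exponentialSchemaMaxReserved
}

func main() {
	for _, s := range []int32{3, 8, 9, 52, 60} {
		fmt.Printf("schema %d -> reduce: %v\n", s, needsReduction(s))
	}
}
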
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go
index d7f376c96a8..b1f98038fc0 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go
@@ -37,7 +37,7 @@ type Watchable interface {
type noopScrapeManager struct{}
-func (noop *noopScrapeManager) Get() (*scrape.Manager, error) {
+func (*noopScrapeManager) Get() (*scrape.Manager, error) {
return nil, errors.New("scrape manager not ready")
}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
new file mode 100644
index 00000000000..1441aecb6d0
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/combined_appender.go
@@ -0,0 +1,238 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// TODO(krajorama): rename this package to otlpappender or similar, as it is
+// not specific to Prometheus remote write anymore.
+// Note otlptranslator is already used by prometheus/otlptranslator repo.
+package prometheusremotewrite
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+)
+
+// Metadata extends metadata.Metadata with the metric family name.
+// OTLP calculates the metric family name for all metrics and uses
+// it to generate summary and histogram series by adding the magic
+// suffixes. The metric family name is passed down to the appender
+// in case the storage needs it for metadata updates.
+// A known user is Mimir, which implements /api/v1/metadata and uses
+// Remote-Write 1.0 for this. This may be removed later if no longer
+// needed by any downstream project.
+type Metadata struct {
+ metadata.Metadata
+ MetricFamilyName string
+}
+
+// CombinedAppender is similar to storage.Appender, but combines updates to
+// metadata, created timestamps, exemplars and samples into a single call.
+type CombinedAppender interface {
+ // AppendSample appends a sample and related exemplars, metadata, and
+ // created timestamp to the storage.
+ AppendSample(ls labels.Labels, meta Metadata, ct, t int64, v float64, es []exemplar.Exemplar) error
+ // AppendHistogram appends a histogram and related exemplars, metadata, and
+ // created timestamp to the storage.
+ AppendHistogram(ls labels.Labels, meta Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error
+}
+
+// CombinedAppenderMetrics holds the metrics exposed by the
+// combinedAppender implementation.
+type CombinedAppenderMetrics struct {
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+}
+
+func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics {
+ return CombinedAppenderMetrics{
+ samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_appended_samples_without_metadata_total",
+ Help: "The total number of samples ingested from OTLP without corresponding metadata.",
+ }),
+ outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "prometheus",
+ Subsystem: "api",
+ Name: "otlp_out_of_order_exemplars_total",
+ Help: "The total number of received OTLP exemplars which were rejected because they were out of order.",
+ }),
+ }
+}
+
+// NewCombinedAppender creates a combined appender that sets start times and
+// updates metadata for each series only once, and appends samples and
+// exemplars for each call.
+func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestCTZeroSample bool, metrics CombinedAppenderMetrics) CombinedAppender {
+ return &combinedAppender{
+ app: app,
+ logger: logger,
+ ingestCTZeroSample: ingestCTZeroSample,
+ refs: make(map[uint64]seriesRef),
+ samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata,
+ outOfOrderExemplars: metrics.outOfOrderExemplars,
+ }
+}
+
+type seriesRef struct {
+ ref storage.SeriesRef
+ ct int64
+ ls labels.Labels
+ meta metadata.Metadata
+}
+
+type combinedAppender struct {
+ app storage.Appender
+ logger *slog.Logger
+ samplesAppendedWithoutMetadata prometheus.Counter
+ outOfOrderExemplars prometheus.Counter
+ ingestCTZeroSample bool
+ // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs.
+ // To detect hash collisions, it also stores the labels.
+ // There is no overflow/conflict list, the TSDB will handle that part.
+ refs map[uint64]seriesRef
+}
+
+func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, ct, t int64, v float64, es []exemplar.Exemplar) (err error) {
+ return b.appendFloatOrHistogram(ls, meta.Metadata, ct, t, v, nil, es)
+}
+
+func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, ct, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
+ if h == nil {
+ // Sanity check: we should never get here with a nil histogram.
+ b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String())
+ return errors.New("internal error, attempted to append nil histogram")
+ }
+ return b.appendFloatOrHistogram(ls, meta.Metadata, ct, t, 0, h, es)
+}
+
+func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, ct, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
+ hash := ls.Hash()
+ series, exists := b.refs[hash]
+ ref := series.ref
+ if exists && !labels.Equal(series.ls, ls) {
+ // Hash collision. The series reference we stored is pointing to a
+ // different series so we cannot use it, we need to reset the
+ // reference and cache.
+ // Note: we don't need to keep track of conflicts here,
+ // the TSDB will handle that part when we pass 0 reference.
+ exists = false
+ ref = 0
+ }
+ updateRefs := !exists || series.ct != ct
+ if updateRefs && ct != 0 && ct < t && b.ingestCTZeroSample {
+ var newRef storage.SeriesRef
+ if h != nil {
+ newRef, err = b.app.AppendHistogramCTZeroSample(ref, ls, t, ct, h, nil)
+ } else {
+ newRef, err = b.app.AppendCTZeroSample(ref, ls, t, ct)
+ }
+ if err != nil {
+ if !errors.Is(err, storage.ErrOutOfOrderCT) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ // Even for the first sample OOO is a common scenario because
+ // we can't tell if a CT was already ingested in a previous request.
+ // We ignore the error.
+ // ErrDuplicateSampleForTimestamp is also a common scenario because
+ // unknown start times in Opentelemetry are indicated by setting
+ // the start time to the same as the first sample time.
+ // https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
+ b.logger.Warn("Error when appending CT from OTLP", "err", err, "series", ls.String(), "created_timestamp", ct, "timestamp", t, "sample_type", sampleType(h))
+ }
+ } else {
+ // We only use the returned reference on success, as otherwise an
+ // error from the CT append could invalidate the series reference.
+ ref = newRef
+ }
+ }
+ {
+ var newRef storage.SeriesRef
+ if h != nil {
+ newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil)
+ } else {
+ newRef, err = b.app.Append(ref, ls, t, v)
+ }
+ if err != nil {
+ // Although Append does not currently return ErrDuplicateSampleForTimestamp there is
+ // a note indicating its inclusion in the future.
+ if errors.Is(err, storage.ErrOutOfOrderSample) ||
+ errors.Is(err, storage.ErrOutOfBounds) ||
+ errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
+ b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h))
+ }
+ } else {
+ // If the append was successful, we can use the returned reference.
+ ref = newRef
+ }
+ }
+
+ if ref == 0 {
+ // We cannot update metadata or add exemplars for a non-existent series.
+ return err
+ }
+
+ if !exists || series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit {
+ updateRefs = true
+ // If this is the first time we see this series, set the metadata.
+ _, err := b.app.UpdateMetadata(ref, ls, meta)
+ if err != nil {
+ b.samplesAppendedWithoutMetadata.Add(1)
+ b.logger.Warn("Error while updating metadata from OTLP", "err", err)
+ }
+ }
+
+ if updateRefs {
+ b.refs[hash] = seriesRef{
+ ref: ref,
+ ct: ct,
+ ls: ls,
+ meta: meta,
+ }
+ }
+
+ b.appendExemplars(ref, ls, es)
+
+ return err
+}
+
+func sampleType(h *histogram.Histogram) string {
+ if h == nil {
+ return "float"
+ }
+ return "histogram"
+}
+
+func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef {
+ var err error
+ for _, e := range es {
+ if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil {
+ switch {
+ case errors.Is(err, storage.ErrOutOfOrderExemplar):
+ b.outOfOrderExemplars.Add(1)
+ b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
+ default:
+ // Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
+ b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
+ }
+ }
+ }
+ return ref
+}
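The new combinedAppender keys its series-ref cache on the labels hash and verifies the stored label set before trusting an entry, so a hash collision degrades to a zero ref (letting the TSDB resolve the series) rather than writing into the wrong series. A trimmed sketch of that lookup, with a string stand-in for labels.Labels:

package main

import "fmt"

type cached struct {
	ref uint64
	ls  string // stand-in for labels.Labels
}

// lookup returns the cached series ref only when the full label set
// matches; otherwise it returns 0 and the storage layer assigns one.
func lookup(refs map[uint64]cached, hash uint64, ls string) uint64 {
	e, ok := refs[hash]
	if !ok || e.ls != ls {
		return 0
	}
	return e.ref
}

func main() {
	refs := map[uint64]cached{7: {ref: 101, ls: `{job="a"}`}}
	fmt.Println(lookup(refs, 7, `{job="a"}`)) // 101: cache hit
	fmt.Println(lookup(refs, 7, `{job="b"}`)) // 0: hash collision detected
	fmt.Println(lookup(refs, 9, `{job="a"}`)) // 0: not cached
}
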
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index 2056045f639..aa544338365 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -23,32 +23,31 @@ import (
"log"
"math"
"slices"
- "sort"
"strconv"
"strings"
"time"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
"github.com/prometheus/otlptranslator"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
- "github.com/prometheus/prometheus/prompb"
)
const (
- sumStr = "_sum"
- countStr = "_count"
- bucketStr = "_bucket"
- leStr = "le"
- quantileStr = "quantile"
- pInfStr = "+Inf"
- createdSuffix = "_created"
+ sumStr = "_sum"
+ countStr = "_count"
+ bucketStr = "_bucket"
+ leStr = "le"
+ quantileStr = "quantile"
+ pInfStr = "+Inf"
// maxExemplarRunes is the maximum number of UTF-8 exemplar characters
// according to the prometheus specification
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
@@ -57,146 +56,103 @@ const (
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2
traceIDKey = "trace_id"
spanIDKey = "span_id"
- infoType = "info"
targetMetricName = "target_info"
defaultLookbackDelta = 5 * time.Minute
)
-type bucketBoundsData struct {
- ts *prompb.TimeSeries
- bound float64
-}
-
-// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds.
-type byBucketBoundsData []bucketBoundsData
-
-func (m byBucketBoundsData) Len() int { return len(m) }
-func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
-func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-// ByLabelName enables the usage of sort.Sort() with a slice of labels.
-type ByLabelName []prompb.Label
-
-func (a ByLabelName) Len() int { return len(a) }
-func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
-func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// timeSeriesSignature returns a hashed label set signature.
-// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating
-// the signature.
-// The algorithm is the same as in Prometheus' labels.StableHash function.
-func timeSeriesSignature(labels []prompb.Label) uint64 {
- sort.Sort(ByLabelName(labels))
-
- // Use xxhash.Sum64(b) for fast path as it's faster.
- b := make([]byte, 0, 1024)
- for i, v := range labels {
- if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
- // If labels entry is 1KB+ do not allocate whole entry.
- h := xxhash.New()
- _, _ = h.Write(b)
- for _, v := range labels[i:] {
- _, _ = h.WriteString(v.Name)
- _, _ = h.Write(seps)
- _, _ = h.WriteString(v.Value)
- _, _ = h.Write(seps)
- }
- return h.Sum64()
- }
-
- b = append(b, v.Name...)
- b = append(b, seps[0])
- b = append(b, v.Value...)
- b = append(b, seps[0])
- }
- return xxhash.Sum64(b)
-}
-
-var seps = []byte{'\xff'}
-
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
-func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings,
- ignoreAttrs []string, logOnOverwrite bool, metadata prompb.MetricMetadata, extras ...string,
-) []prompb.Label {
+func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings,
+ ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string,
+) (labels.Labels, error) {
resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
- promotedAttrs := settings.PromoteResourceAttributes.promotedAttributes(resourceAttrs)
-
promoteScope := settings.PromoteScopeMetadata && scope.name != ""
- scopeLabelCount := 0
- if promoteScope {
- // Include name, version and schema URL.
- scopeLabelCount = scope.attributes.Len() + 3
- }
-
- // Calculate the maximum possible number of labels we could return so we can preallocate l.
- maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + scopeLabelCount + len(extras)/2
-
- if haveServiceName {
- maxLabelCount++
- }
- if haveInstanceID {
- maxLabelCount++
- }
- if settings.EnableTypeAndUnitLabels {
- maxLabelCount += 2
- }
// Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized.
- labels := make([]prompb.Label, 0, maxLabelCount)
+ c.scratchBuilder.Reset()
+
// XXX: Should we always drop service namespace/service name/service instance ID from the labels
// (as they get mapped to other Prometheus labels)?
attributes.Range(func(key string, value pcommon.Value) bool {
if !slices.Contains(ignoreAttrs, key) {
- labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
+ c.scratchBuilder.Add(key, value.AsString())
}
return true
})
- sort.Stable(ByLabelName(labels))
-
- // map ensures no duplicate label names.
- l := make(map[string]string, maxLabelCount)
- labelNamer := otlptranslator.LabelNamer{UTF8Allowed: settings.AllowUTF8}
- for _, label := range labels {
- finalKey := labelNamer.Build(label.Name)
- if existingValue, alreadyExists := l[finalKey]; alreadyExists {
- l[finalKey] = existingValue + ";" + label.Value
- } else {
- l[finalKey] = label.Value
+ c.scratchBuilder.Sort()
+ sortedLabels := c.scratchBuilder.Labels()
+
+ labelNamer := otlptranslator.LabelNamer{
+ UTF8Allowed: settings.AllowUTF8,
+ UnderscoreLabelSanitization: settings.LabelNameUnderscoreSanitization,
+ PreserveMultipleUnderscores: settings.LabelNamePreserveMultipleUnderscores,
+ }
+
+ if settings.AllowUTF8 {
+ // UTF8 is allowed, so conflicts aren't possible.
+ c.builder.Reset(sortedLabels)
+ } else {
+ // Now that we have sorted and filtered the labels, build the actual list
+ // of labels, and handle conflicts by appending values.
+ c.builder.Reset(labels.EmptyLabels())
+ var sortErr error
+ sortedLabels.Range(func(l labels.Label) {
+ if sortErr != nil {
+ return
+ }
+ finalKey, err := labelNamer.Build(l.Name)
+ if err != nil {
+ sortErr = err
+ return
+ }
+ if existingValue := c.builder.Get(finalKey); existingValue != "" {
+ c.builder.Set(finalKey, existingValue+";"+l.Value)
+ } else {
+ c.builder.Set(finalKey, l.Value)
+ }
+ })
+ if sortErr != nil {
+ return labels.EmptyLabels(), sortErr
}
}
- for _, lbl := range promotedAttrs {
- normalized := labelNamer.Build(lbl.Name)
- if _, exists := l[normalized]; !exists {
- l[normalized] = lbl.Value
- }
+ err := settings.PromoteResourceAttributes.addPromotedAttributes(c.builder, resourceAttrs, labelNamer)
+ if err != nil {
+ return labels.EmptyLabels(), err
}
if promoteScope {
+ var rangeErr error
scope.attributes.Range(func(k string, v pcommon.Value) bool {
- name := labelNamer.Build("otel_scope_" + k)
- l[name] = v.AsString()
+ name, err := labelNamer.Build("otel_scope_" + k)
+ if err != nil {
+ rangeErr = err
+ return false
+ }
+ c.builder.Set(name, v.AsString())
return true
})
+ if rangeErr != nil {
+ return labels.EmptyLabels(), rangeErr
+ }
// Scope Name, Version and Schema URL are added after attributes to ensure they are not overwritten by attributes.
- l["otel_scope_name"] = scope.name
- l["otel_scope_version"] = scope.version
- l["otel_scope_schema_url"] = scope.schemaURL
+ c.builder.Set("otel_scope_name", scope.name)
+ c.builder.Set("otel_scope_version", scope.version)
+ c.builder.Set("otel_scope_schema_url", scope.schemaURL)
}
if settings.EnableTypeAndUnitLabels {
unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8}
- if metadata.Type != prompb.MetricMetadata_UNKNOWN {
- l["__type__"] = strings.ToLower(metadata.Type.String())
+ if meta.Type != model.MetricTypeUnknown {
+ c.builder.Set(model.MetricTypeLabel, strings.ToLower(string(meta.Type)))
}
- if metadata.Unit != "" {
- l["__unit__"] = unitNamer.Build(metadata.Unit)
+ if meta.Unit != "" {
+ c.builder.Set(model.MetricUnitLabel, unitNamer.Build(meta.Unit))
}
}
@@ -206,19 +162,19 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s
if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
}
- l[model.JobLabel] = val
+ c.builder.Set(model.JobLabel, val)
}
// Map service.instance.id to instance.
if haveInstanceID {
- l[model.InstanceLabel] = instance.AsString()
+ c.builder.Set(model.InstanceLabel, instance.AsString())
}
for key, value := range settings.ExternalLabels {
// External labels have already been sanitized.
- if _, alreadyExists := l[key]; alreadyExists {
+ if existingValue := c.builder.Get(key); existingValue != "" {
// Skip external labels if they are overridden by metric attributes.
continue
}
- l[key] = value
+ c.builder.Set(key, value)
}
for i := 0; i < len(extras); i += 2 {
@@ -227,23 +183,21 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope s
}
name := extras[i]
- _, found := l[name]
- if found && logOnOverwrite {
+ if existingValue := c.builder.Get(name); existingValue != "" && logOnOverwrite {
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained.
if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" {
- name = labelNamer.Build(name)
+ var err error
+ name, err = labelNamer.Build(name)
+ if err != nil {
+ return labels.EmptyLabels(), err
+ }
}
- l[name] = extras[i+1]
+ c.builder.Set(name, extras[i+1])
}
- labels = labels[:0]
- for k, v := range l {
- labels = append(labels, prompb.Label{Name: k, Value: v})
- }
-
- return labels
+ return c.builder.Labels(), nil
}
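
With the map-based accumulation gone, `createAttributes` resolves label-name collisions by concatenating values instead of letting the last writer win. A minimal, self-contained sketch of that policy (the `normalize` helper below is a hypothetical stand-in for `otlptranslator.LabelNamer.Build`):

```go
package main

import (
	"fmt"
	"strings"
)

// normalize is a hypothetical stand-in for otlptranslator.LabelNamer.Build:
// it maps both "host.name" and "host_name" to the same Prometheus label name.
func normalize(name string) string {
	return strings.ReplaceAll(name, ".", "_")
}

func main() {
	attrs := [][2]string{{"host_name", "a"}, {"host.name", "b"}}

	merged := map[string]string{}
	for _, kv := range attrs {
		key := normalize(kv[0])
		if existing := merged[key]; existing != "" {
			merged[key] = existing + ";" + kv[1] // conflict: append, don't overwrite
		} else {
			merged[key] = kv[1]
		}
	}
	fmt.Println(merged) // map[host_name:a;b]
}
```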
func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporality, bool, error) {
@@ -269,7 +223,7 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope,
+ resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -278,41 +232,48 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
- baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata)
+ startTimestamp := convertTimeStamp(pt.StartTimestamp())
+ baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+ if err != nil {
+ return err
+ }
+
+ baseName := meta.MetricFamilyName
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
- sum := &prompb.Sample{
- Value: pt.Sum(),
- Timestamp: timestamp,
- }
+ val := pt.Sum()
if pt.Flags().NoRecordedValue() {
- sum.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
- sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels)
- c.addSample(sum, sumlabels)
+ sumlabels := c.addLabels(baseName+sumStr, baseLabels)
+ if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ return err
+ }
}
// treat count as a sample in an individual TimeSeries
- count := &prompb.Sample{
- Value: float64(pt.Count()),
- Timestamp: timestamp,
- }
+ val := float64(pt.Count())
if pt.Flags().NoRecordedValue() {
- count.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
- countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels)
- c.addSample(count, countlabels)
+ countlabels := c.addLabels(baseName+countStr, baseLabels)
+ if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ return err
+ }
+ exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
+ if err != nil {
+ return err
+ }
+ nextExemplarIdx := 0
// cumulative count for conversion to cumulative histogram
var cumulativeCount uint64
- var bucketBounds []bucketBoundsData
-
// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -321,116 +282,101 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
- bucket := &prompb.Sample{
- Value: float64(cumulativeCount),
- Timestamp: timestamp,
+
+ // Find exemplars that belong to this bucket. Both exemplars and
+ // buckets are sorted in ascending order.
+ var currentBucketExemplars []exemplar.Exemplar
+ for ; nextExemplarIdx < len(exemplars); nextExemplarIdx++ {
+ ex := exemplars[nextExemplarIdx]
+ if ex.Value > bound {
+ // This exemplar belongs in a higher bucket.
+ break
+ }
+ currentBucketExemplars = append(currentBucketExemplars, ex)
}
+ val := float64(cumulativeCount)
if pt.Flags().NoRecordedValue() {
- bucket.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
- labels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr)
- ts := c.addSample(bucket, labels)
-
- bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound})
+ labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
+ if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil {
+ return err
+ }
}
// add le=+Inf bucket
- infBucket := &prompb.Sample{
- Timestamp: timestamp,
- }
+ val = float64(pt.Count())
if pt.Flags().NoRecordedValue() {
- infBucket.Value = math.Float64frombits(value.StaleNaN)
- } else {
- infBucket.Value = float64(pt.Count())
+ val = math.Float64frombits(value.StaleNaN)
}
- infLabels := createLabels(metadata.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr)
- ts := c.addSample(infBucket, infLabels)
-
- bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)})
- if err := c.addExemplars(ctx, pt, bucketBounds); err != nil {
+ infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
+ if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil {
return err
}
-
- startTimestamp := pt.StartTimestamp()
- if settings.ExportCreatedMetric && startTimestamp != 0 {
- labels := createLabels(metadata.MetricFamilyName+createdSuffix, baseLabels)
- c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp())
- }
}
return nil
}
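
The bucket loop above assigns exemplars in a single forward scan, relying on the invariant stated in its comment that both exemplars and bucket bounds are sorted in ascending order. A runnable toy version of that partitioning, with illustrative values:

```go
package main

import "fmt"

// partition walks sorted exemplar values once, assigning each to the first
// bucket whose bound is >= the value; the remainder lands in le="+Inf".
func partition(values, bounds []float64) [][]float64 {
	out := make([][]float64, len(bounds)+1) // +1 for the +Inf bucket
	next := 0
	for i, bound := range bounds {
		for ; next < len(values); next++ {
			if values[next] > bound {
				break // belongs in a higher bucket
			}
			out[i] = append(out[i], values[next])
		}
	}
	out[len(bounds)] = values[next:]
	return out
}

func main() {
	fmt.Println(partition([]float64{0.1, 0.4, 2, 9}, []float64{0.5, 1, 5}))
	// Output: [[0.1 0.4] [] [2] [9]]
}
```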
-type exemplarType interface {
- pmetric.ExponentialHistogramDataPoint | pmetric.HistogramDataPoint | pmetric.NumberDataPoint
- Exemplars() pmetric.ExemplarSlice
-}
-
-func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, pt T) ([]prompb.Exemplar, error) {
- promExemplars := make([]prompb.Exemplar, 0, pt.Exemplars().Len())
- for i := 0; i < pt.Exemplars().Len(); i++ {
- if err := everyN.checkContext(ctx); err != nil {
+func (c *PrometheusConverter) getPromExemplars(ctx context.Context, exemplars pmetric.ExemplarSlice) ([]exemplar.Exemplar, error) {
+ if exemplars.Len() == 0 {
+ return nil, nil
+ }
+ outputExemplars := make([]exemplar.Exemplar, 0, exemplars.Len())
+ for i := 0; i < exemplars.Len(); i++ {
+ if err := c.everyN.checkContext(ctx); err != nil {
return nil, err
}
- exemplar := pt.Exemplars().At(i)
+ ex := exemplars.At(i)
exemplarRunes := 0
- promExemplar := prompb.Exemplar{
- Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()),
+ ts := timestamp.FromTime(ex.Timestamp().AsTime())
+ newExemplar := exemplar.Exemplar{
+ Ts: ts,
+ HasTs: ts != 0,
}
- switch exemplar.ValueType() {
+ c.scratchBuilder.Reset()
+ switch ex.ValueType() {
case pmetric.ExemplarValueTypeInt:
- promExemplar.Value = float64(exemplar.IntValue())
+ newExemplar.Value = float64(ex.IntValue())
case pmetric.ExemplarValueTypeDouble:
- promExemplar.Value = exemplar.DoubleValue()
+ newExemplar.Value = ex.DoubleValue()
default:
- return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType())
+ return nil, fmt.Errorf("unsupported exemplar value type: %v", ex.ValueType())
}
- if traceID := exemplar.TraceID(); !traceID.IsEmpty() {
+ if traceID := ex.TraceID(); !traceID.IsEmpty() {
val := hex.EncodeToString(traceID[:])
exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val)
- promLabel := prompb.Label{
- Name: traceIDKey,
- Value: val,
- }
- promExemplar.Labels = append(promExemplar.Labels, promLabel)
+ c.scratchBuilder.Add(traceIDKey, val)
}
- if spanID := exemplar.SpanID(); !spanID.IsEmpty() {
+ if spanID := ex.SpanID(); !spanID.IsEmpty() {
val := hex.EncodeToString(spanID[:])
exemplarRunes += utf8.RuneCountInString(spanIDKey) + utf8.RuneCountInString(val)
- promLabel := prompb.Label{
- Name: spanIDKey,
- Value: val,
- }
- promExemplar.Labels = append(promExemplar.Labels, promLabel)
+ c.scratchBuilder.Add(spanIDKey, val)
}
- attrs := exemplar.FilteredAttributes()
- labelsFromAttributes := make([]prompb.Label, 0, attrs.Len())
+ attrs := ex.FilteredAttributes()
attrs.Range(func(key string, value pcommon.Value) bool {
- val := value.AsString()
- exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val)
- promLabel := prompb.Label{
- Name: key,
- Value: val,
- }
-
- labelsFromAttributes = append(labelsFromAttributes, promLabel)
-
+ exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(value.AsString())
return true
})
+
+ // Only append filtered attributes if it does not cause exemplar
+ // labels to exceed the max number of runes.
if exemplarRunes <= maxExemplarRunes {
- // only append filtered attributes if it does not cause exemplar
- // labels to exceed the max number of runes
- promExemplar.Labels = append(promExemplar.Labels, labelsFromAttributes...)
+ attrs.Range(func(key string, value pcommon.Value) bool {
+ c.scratchBuilder.Add(key, value.AsString())
+ return true
+ })
}
-
- promExemplars = append(promExemplars, promExemplar)
+ c.scratchBuilder.Sort()
+ newExemplar.Labels = c.scratchBuilder.Labels()
+ outputExemplars = append(outputExemplars, newExemplar)
}
- return promExemplars, nil
+ return outputExemplars, nil
}
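
The rune accounting in `getPromExemplars` measures label names and values in runes rather than bytes, matching the OpenMetrics cap on exemplar labelsets (`maxExemplarRunes`, 128 in this package). A small standalone check of the same arithmetic:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// Illustrative exemplar labels; the real code also counts trace_id/span_id
	// that it adds itself before considering the filtered attributes.
	exemplarLabels := map[string]string{"trace_id": "abc123", "span_id": "def456"}

	runes := 0
	for k, v := range exemplarLabels {
		runes += utf8.RuneCountInString(k) + utf8.RuneCountInString(v)
	}
	fmt.Println(runes, runes <= 128) // 27 true: attributes fit the budget
}
```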
// findMinAndMaxTimestamps returns the minimum of minTimestamp and the earliest timestamp in metric and
@@ -479,7 +425,7 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p
}
func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
- settings Settings, metadata prompb.MetricMetadata, scope scope,
+ settings Settings, scope scope, meta Metadata,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -488,143 +434,72 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
- baseLabels := createAttributes(resource, pt.Attributes(), scope, settings, nil, false, metadata)
+ startTimestamp := convertTimeStamp(pt.StartTimestamp())
+ baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+ if err != nil {
+ return err
+ }
+
+ baseName := meta.MetricFamilyName
// treat sum as a sample in an individual TimeSeries
- sum := &prompb.Sample{
- Value: pt.Sum(),
- Timestamp: timestamp,
- }
+ val := pt.Sum()
if pt.Flags().NoRecordedValue() {
- sum.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
// sum and count of the summary should append suffix to baseName
- sumlabels := createLabels(metadata.MetricFamilyName+sumStr, baseLabels)
- c.addSample(sum, sumlabels)
+ sumlabels := c.addLabels(baseName+sumStr, baseLabels)
+ if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ return err
+ }
// treat count as a sample in an individual TimeSeries
- count := &prompb.Sample{
- Value: float64(pt.Count()),
- Timestamp: timestamp,
- }
+ val = float64(pt.Count())
if pt.Flags().NoRecordedValue() {
- count.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
+ }
+ countlabels := c.addLabels(baseName+countStr, baseLabels)
+ if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ return err
}
- countlabels := createLabels(metadata.MetricFamilyName+countStr, baseLabels)
- c.addSample(count, countlabels)
// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
- quantile := &prompb.Sample{
- Value: qt.Value(),
- Timestamp: timestamp,
- }
+ val = qt.Value()
if pt.Flags().NoRecordedValue() {
- quantile.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
- qtlabels := createLabels(metadata.MetricFamilyName, baseLabels, quantileStr, percentileStr)
- c.addSample(quantile, qtlabels)
- }
-
- startTimestamp := pt.StartTimestamp()
- if settings.ExportCreatedMetric && startTimestamp != 0 {
- createdLabels := createLabels(metadata.MetricFamilyName+createdSuffix, baseLabels)
- c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
+ qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr)
+ if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+ return err
+ }
}
}
return nil
}
-// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
+// addLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
// If extras are provided, corresponding label pairs are also added to the returned label set.
// If extras has an odd length, the last (unpaired) extra is ignored.
-func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label {
- extraLabelCount := len(extras) / 2
- labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
- copy(labels, baseLabels)
+func (c *PrometheusConverter) addLabels(name string, baseLabels labels.Labels, extras ...string) labels.Labels {
+ c.builder.Reset(baseLabels)
n := len(extras)
n -= n % 2
for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 {
- labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
- }
-
- labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
- return labels
-}
-
-// addTypeAndUnitLabels appends type and unit labels to the given labels slice.
-func addTypeAndUnitLabels(labels []prompb.Label, metadata prompb.MetricMetadata, settings Settings) []prompb.Label {
- unitNamer := otlptranslator.UnitNamer{UTF8Allowed: settings.AllowUTF8}
-
- labels = slices.DeleteFunc(labels, func(l prompb.Label) bool {
- return l.Name == "__type__" || l.Name == "__unit__"
- })
-
- labels = append(labels, prompb.Label{Name: "__type__", Value: strings.ToLower(metadata.Type.String())})
- labels = append(labels, prompb.Label{Name: "__unit__", Value: unitNamer.Build(metadata.Unit)})
-
- return labels
-}
-
-// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false.
-// Otherwise it creates a new one and returns that, and true.
-func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) {
- h := timeSeriesSignature(lbls)
- ts := c.unique[h]
- if ts != nil {
- if isSameMetric(ts, lbls) {
- // We already have this metric
- return ts, false
- }
-
- // Look for a matching conflict
- for _, cTS := range c.conflicts[h] {
- if isSameMetric(cTS, lbls) {
- // We already have this metric
- return cTS, false
- }
- }
-
- // New conflict
- ts = &prompb.TimeSeries{
- Labels: lbls,
- }
- c.conflicts[h] = append(c.conflicts[h], ts)
- return ts, true
- }
-
- // This metric is new
- ts = &prompb.TimeSeries{
- Labels: lbls,
- }
- c.unique[h] = ts
- return ts, true
-}
-
-// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist.
-// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp,
-// both converted to milliseconds.
-func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp, timestamp pcommon.Timestamp) {
- ts, created := c.getOrCreateTimeSeries(lbls)
- if created {
- ts.Samples = []prompb.Sample{
- {
- // convert ns to ms
- Value: float64(convertTimeStamp(startTimestamp)),
- Timestamp: convertTimeStamp(timestamp),
- },
- }
+ c.builder.Set(extras[extrasIdx], extras[extrasIdx+1])
}
+ c.builder.Set(model.MetricNameLabel, name)
+ return c.builder.Labels()
}
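
`addLabels` leans on the same single `labels.Builder` that `createAttributes` uses, so each data point costs a `Reset` instead of a fresh map and slice. A minimal sketch of that reuse pattern against the `prometheus/model/labels` API:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	builder := labels.NewBuilder(labels.EmptyLabels())
	base := labels.FromStrings("instance", "i-1", "job", "api")

	for _, name := range []string{"http_request_duration_seconds_sum", "http_request_duration_seconds_count"} {
		builder.Reset(base) // start each series from the shared base labelset
		builder.Set(model.MetricNameLabel, name)
		fmt.Println(builder.Labels())
	}
}
```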
// addResourceTargetInfo converts the resource to the target info metric.
-func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) {
+func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time) error {
if settings.DisableTargetInfo {
- return
+ return nil
}
attributes := resource.Attributes()
@@ -642,7 +517,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies
}
if nonIdentifyingAttrsCount == 0 {
// If we only have job + instance, then target_info isn't useful, so don't add it.
- return
+ return nil
}
name := targetMetricName
@@ -655,18 +530,28 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies
// Do not pass identifying attributes as ignoreAttrs below.
identifyingAttrs = nil
}
- labels := createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, prompb.MetricMetadata{}, model.MetricNameLabel, name)
+ meta := Metadata{
+ Metadata: metadata.Metadata{
+ Type: model.MetricTypeGauge,
+ Help: "Target metadata",
+ },
+ MetricFamilyName: name,
+ }
+ // TODO: should target info have the __type__ metadata label?
+ lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name)
+ if err != nil {
+ return err
+ }
haveIdentifier := false
- for _, l := range labels {
+ lbls.Range(func(l labels.Label) {
if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
haveIdentifier = true
- break
}
- }
+ })
if !haveIdentifier {
// We need at least one identifying label to generate target_info.
- return
+ return nil
}
// Generate target_info samples starting at earliestTimestamp and ending at latestTimestamp,
@@ -677,17 +562,41 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earlies
settings.LookbackDelta = defaultLookbackDelta
}
interval := settings.LookbackDelta / 2
- ts, _ := converter.getOrCreateTimeSeries(labels)
+
+ // Deduplicate target_info samples with the same labelset and timestamp across
+ // multiple resources in the same batch.
+ labelsHash := lbls.Hash()
+
+ var key targetInfoKey
for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) {
- ts.Samples = append(ts.Samples, prompb.Sample{
- Value: float64(1),
- Timestamp: timestamp.UnixMilli(),
- })
+ timestampMs := timestamp.UnixMilli()
+ key = targetInfoKey{
+ labelsHash: labelsHash,
+ timestamp: timestampMs,
+ }
+ if _, exists := c.seenTargetInfo[key]; exists {
+ // Skip duplicate.
+ continue
+ }
+
+ c.seenTargetInfo[key] = struct{}{}
+ if err := c.appender.AppendSample(lbls, meta, 0, timestampMs, float64(1), nil); err != nil {
+ return err
+ }
}
- ts.Samples = append(ts.Samples, prompb.Sample{
- Value: float64(1),
- Timestamp: latestTimestamp.UnixMilli(),
- })
+
+ // Append the final sample at latestTimestamp.
+ finalTimestampMs := latestTimestamp.UnixMilli()
+ key = targetInfoKey{
+ labelsHash: labelsHash,
+ timestamp: finalTimestampMs,
+ }
+ if _, exists := c.seenTargetInfo[key]; exists {
+ return nil
+ }
+
+ c.seenTargetInfo[key] = struct{}{}
+ return c.appender.AppendSample(lbls, meta, 0, finalTimestampMs, float64(1), nil)
}
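
The spacing above is `LookbackDelta / 2`, so every instant in `[earliestTimestamp, latestTimestamp]` falls within the lookback window of some `target_info` sample, and a final sample is pinned exactly at `latestTimestamp`. A runnable illustration with made-up timestamps:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	lookbackDelta := 5 * time.Minute
	interval := lookbackDelta / 2

	earliest := time.UnixMilli(0)
	latest := earliest.Add(7 * time.Minute)

	var stamps []int64
	for ts := earliest; ts.Before(latest); ts = ts.Add(interval) {
		stamps = append(stamps, ts.UnixMilli())
	}
	stamps = append(stamps, latest.UnixMilli()) // final sample at latest
	fmt.Println(stamps)                         // [0 150000 300000 420000]
}
```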
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
index 421526926e8..5606fa4d91a 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
@@ -27,7 +27,6 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/value"
- "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -36,7 +35,8 @@ const defaultZeroThreshold = 1e-128
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
// as native histogram samples.
func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope,
+ resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
+ scope scope, meta Metadata,
) (annotations.Annotations, error) {
var annots annotations.Annotations
for x := 0; x < dataPoints.Len(); x++ {
@@ -46,32 +46,36 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
pt := dataPoints.At(x)
- histogram, ws, err := exponentialToNativeHistogram(pt, temporality)
+ hp, ws, err := exponentialToNativeHistogram(pt, temporality)
annots.Merge(ws)
if err != nil {
return annots, err
}
- lbls := createAttributes(
+ lbls, err := c.createAttributes(
resource,
pt.Attributes(),
scope,
settings,
nil,
true,
- metadata,
+ meta,
model.MetricNameLabel,
- metadata.MetricFamilyName,
+ meta.MetricFamilyName,
)
-
- ts, _ := c.getOrCreateTimeSeries(lbls)
- ts.Histograms = append(ts.Histograms, histogram)
-
- exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt)
if err != nil {
return annots, err
}
- ts.Exemplars = append(ts.Exemplars, exemplars...)
+ ts := convertTimeStamp(pt.Timestamp())
+ ct := convertTimeStamp(pt.StartTimestamp())
+ exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
+ if err != nil {
+ return annots, err
+ }
+ // OTel exponential histograms are always Int Histograms.
+ if err = c.appender.AppendHistogram(lbls, meta, ct, ts, hp, exemplars); err != nil {
+ return annots, err
+ }
}
return annots, nil
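
For reference when reading the scale handling here: an OTel exponential bucket at index `i` and scale `s` has upper bound `2^((i+1) * 2^-s)`, so raising the scale by one halves the bucket width in log space. A standalone check of that formula:

```go
package main

import (
	"fmt"
	"math"
)

// upperBound is the upper boundary of exponential bucket index i at the
// given scale: 2^((i+1) * 2^-scale).
func upperBound(index int, scale int32) float64 {
	return math.Pow(2, float64(index+1)*math.Pow(2, -float64(scale)))
}

func main() {
	// At scale 0, buckets double: (1,2], (2,4], (4,8], ...
	fmt.Println(upperBound(0, 0), upperBound(1, 0), upperBound(2, 0)) // 2 4 8
	// One scale higher: bucket boundaries at powers of sqrt(2).
	fmt.Println(upperBound(0, 1)) // 1.4142...
}
```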
@@ -79,19 +83,19 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
// exponentialToNativeHistogram translates an OTel Exponential Histogram data point
// to a Prometheus Native Histogram.
-func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) {
+func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) {
var annots annotations.Annotations
scale := p.Scale()
- if scale < -4 {
- return prompb.Histogram{}, annots,
+ if scale < histogram.ExponentialSchemaMin {
+ return nil, annots,
fmt.Errorf("cannot convert exponential to native histogram."+
- " Scale must be >= -4, was %d", scale)
+ " Scale must be >= %d, was %d", histogram.ExponentialSchemaMin, scale)
}
var scaleDown int32
- if scale > 8 {
- scaleDown = scale - 8
- scale = 8
+ if scale > histogram.ExponentialSchemaMax {
+ scaleDown = scale - histogram.ExponentialSchemaMax
+ scale = histogram.ExponentialSchemaMax
}
pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true)
@@ -102,45 +106,40 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo
// Sending a sample that triggers counter reset but with ResetHint==NO
// would lead to Prometheus panic as it does not double check the hint.
// Thus we're explicitly saying UNKNOWN here, which is always safe.
- // TODO: using created time stamp should be accurate, but we
+ // TODO: using start timestamp should be accurate, but we
// need to know here if it was used for the detection.
// Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303
// Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232
- resetHint := prompb.Histogram_UNKNOWN
+ resetHint := histogram.UnknownCounterReset
if temporality == pmetric.AggregationTemporalityDelta {
// If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting.
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/).
// This might be changed to a different hint name as gauge type might be misleading for samples that should be
// summed over time.
- resetHint = prompb.Histogram_GAUGE
+ resetHint = histogram.GaugeType
}
-
- h := prompb.Histogram{
- ResetHint: resetHint,
- Schema: scale,
-
- ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: p.ZeroCount()},
+ h := &histogram.Histogram{
+ CounterResetHint: resetHint,
+ Schema: scale,
// TODO use zero_threshold, if set, see
// https://github.com/open-telemetry/opentelemetry-proto/pull/441
- ZeroThreshold: defaultZeroThreshold,
-
- PositiveSpans: pSpans,
- PositiveDeltas: pDeltas,
- NegativeSpans: nSpans,
- NegativeDeltas: nDeltas,
-
- Timestamp: convertTimeStamp(p.Timestamp()),
+ ZeroThreshold: defaultZeroThreshold,
+ ZeroCount: p.ZeroCount(),
+ PositiveSpans: pSpans,
+ PositiveBuckets: pDeltas,
+ NegativeSpans: nSpans,
+ NegativeBuckets: nDeltas,
}
if p.Flags().NoRecordedValue() {
h.Sum = math.Float64frombits(value.StaleNaN)
- h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN}
+ h.Count = value.StaleNaN
} else {
if p.HasSum() {
h.Sum = p.Sum()
}
- h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()}
+ h.Count = p.Count()
if p.Count() == 0 && h.Sum != 0 {
annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum))
}
@@ -165,13 +164,13 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint, tempo
//
// When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets,
// the bucket indexes are not scaled, and the indices are not adjusted by 1.
-func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]prompb.BucketSpan, []int64) {
+func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]histogram.Span, []int64) {
if len(bucketCounts) == 0 {
return nil, nil
}
var (
- spans []prompb.BucketSpan
+ spans []histogram.Span
deltas []int64
count int64
prevCount int64
@@ -194,12 +193,12 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
initialOffset = initialOffset>>scaleDown + 1
}
- spans = append(spans, prompb.BucketSpan{
+ spans = append(spans, histogram.Span{
Offset: initialOffset,
Length: 0,
})
- for i := 0; i < numBuckets; i++ {
+ for i := range numBuckets {
nextBucketIdx := (int32(i)+offset)>>scaleDown + 1
if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet.
count += int64(bucketCounts[i])
@@ -215,14 +214,14 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
// We have to create a new span, because we have found a gap
// of more than two buckets. The constant 2 is copied from the logic in
// https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296
- spans = append(spans, prompb.BucketSpan{
+ spans = append(spans, histogram.Span{
Offset: gap,
Length: 0,
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
- for j := int32(0); j < gap; j++ {
+ for range gap {
appendDelta(0)
}
}
@@ -237,14 +236,14 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
// We have to create a new span, because we have found a gap
// of more than two buckets. The constant 2 is copied from the logic in
// https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296
- spans = append(spans, prompb.BucketSpan{
+ spans = append(spans, histogram.Span{
Offset: gap,
Length: 0,
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
- for j := int32(0); j < gap; j++ {
+ for range gap {
appendDelta(0)
}
}
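
As a worked example of the downscaling index math in `convertBucketsLayout`: with `scaleDown = 2`, every `2^2 = 4` source buckets collapse into one target bucket via `(i+offset)>>scaleDown + 1`. A self-contained check with illustrative counts:

```go
package main

import "fmt"

func main() {
	const offset, scaleDown = int32(0), int32(2)
	counts := []uint64{1, 1, 1, 1, 2, 2, 2, 2}

	merged := map[int32]uint64{}
	for i, c := range counts {
		idx := (int32(i)+offset)>>scaleDown + 1 // same mapping as above
		merged[idx] += c
	}
	fmt.Println(merged) // map[1:4 2:8]
}
```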
@@ -254,7 +253,8 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
}
func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
- resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, temporality pmetric.AggregationTemporality, scope scope,
+ resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
+ scope scope, meta Metadata,
) (annotations.Annotations, error) {
var annots annotations.Annotations
@@ -265,38 +265,41 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
pt := dataPoints.At(x)
- histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality)
+ hp, ws, err := explicitHistogramToCustomBucketsHistogram(pt, temporality)
annots.Merge(ws)
if err != nil {
return annots, err
}
- lbls := createAttributes(
+ lbls, err := c.createAttributes(
resource,
pt.Attributes(),
scope,
settings,
nil,
true,
- metadata,
+ meta,
model.MetricNameLabel,
- metadata.MetricFamilyName,
+ meta.MetricFamilyName,
)
-
- ts, _ := c.getOrCreateTimeSeries(lbls)
- ts.Histograms = append(ts.Histograms, histogram)
-
- exemplars, err := getPromExemplars[pmetric.HistogramDataPoint](ctx, &c.everyN, pt)
if err != nil {
return annots, err
}
- ts.Exemplars = append(ts.Exemplars, exemplars...)
+ ts := convertTimeStamp(pt.Timestamp())
+ ct := convertTimeStamp(pt.StartTimestamp())
+ exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
+ if err != nil {
+ return annots, err
+ }
+ if err = c.appender.AppendHistogram(lbls, meta, ct, ts, hp, exemplars); err != nil {
+ return annots, err
+ }
}
return annots, nil
}
-func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (prompb.Histogram, annotations.Annotations, error) {
+func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, temporality pmetric.AggregationTemporality) (*histogram.Histogram, annotations.Annotations, error) {
var annots annotations.Annotations
buckets := p.BucketCounts().AsRaw()
@@ -309,27 +312,26 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem
// Sending a sample that triggers counter reset but with ResetHint==NO
// would lead to Prometheus panic as it does not double check the hint.
// Thus we're explicitly saying UNKNOWN here, which is always safe.
- // TODO: using created time stamp should be accurate, but we
+ // TODO: using start timestamp should be accurate, but we
// need to know here if it was used for the detection.
// Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303
// Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232
- resetHint := prompb.Histogram_UNKNOWN
+ resetHint := histogram.UnknownCounterReset
if temporality == pmetric.AggregationTemporalityDelta {
// If the histogram has delta temporality, set the reset hint to gauge to avoid unnecessary chunk cutting.
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/).
// This might be changed to a different hint name as gauge type might be misleading for samples that should be
// summed over time.
- resetHint = prompb.Histogram_GAUGE
+ resetHint = histogram.GaugeType
}
// TODO(carrieedwards): Add setting to limit maximum bucket count
- h := prompb.Histogram{
- ResetHint: resetHint,
- Schema: histogram.CustomBucketsSchema,
-
- PositiveSpans: positiveSpans,
- PositiveDeltas: positiveDeltas,
+ h := &histogram.Histogram{
+ CounterResetHint: resetHint,
+ Schema: histogram.CustomBucketsSchema,
+ PositiveSpans: positiveSpans,
+ PositiveBuckets: positiveDeltas,
// Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound
// of the last element in the explicit_bounds array.
// This is similar to the custom_values array in native histograms with custom buckets.
@@ -337,18 +339,16 @@ func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint, tem
// can be mapped directly to the custom_values array.
// See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469
CustomValues: p.ExplicitBounds().AsRaw(),
-
- Timestamp: convertTimeStamp(p.Timestamp()),
}
if p.Flags().NoRecordedValue() {
h.Sum = math.Float64frombits(value.StaleNaN)
- h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN}
+ h.Count = value.StaleNaN
} else {
if p.HasSum() {
h.Sum = p.Sum()
}
- h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()}
+ h.Count = p.Count()
if p.Count() == 0 && h.Sum != 0 {
annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum))
}
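
To make the custom-buckets layout concrete, below is a hand-built `histogram.Histogram` equivalent to an explicit histogram with bounds `[0.5, 1]` and bucket counts `[3, 1, 2]`. This is only an illustrative sketch against the `prometheus/model/histogram` types; the real conversion above also handles spans with gaps and stale markers:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           6,
		Sum:             4.2, // illustrative
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []int64{3, -2, 1}, // deltas encoding counts 3, 1, 2
		CustomValues:    []float64{0.5, 1}, // upper bounds; +Inf is implicit
	}
	fmt.Println(h)
}
```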
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
index 6488e1e7d11..5e575e61744 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"math"
- "sort"
"time"
"github.com/prometheus/otlptranslator"
@@ -30,7 +29,8 @@ import (
"go.uber.org/multierr"
"github.com/prometheus/prometheus/config"
- "github.com/prometheus/prometheus/prompb"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -43,7 +43,6 @@ type Settings struct {
Namespace string
ExternalLabels map[string]string
DisableTargetInfo bool
- ExportCreatedMetric bool
AddMetricSuffixes bool
AllowUTF8 bool
PromoteResourceAttributes *PromoteResourceAttributes
@@ -55,20 +54,35 @@ type Settings struct {
// PromoteScopeMetadata controls whether to promote OTel scope metadata to metric labels.
PromoteScopeMetadata bool
EnableTypeAndUnitLabels bool
+ // LabelNameUnderscoreSanitization controls whether label names starting with '_'
+ // are prefixed with 'key'. Reserved labels starting with `__` are not modified.
+ LabelNameUnderscoreSanitization bool
+ // LabelNamePreserveMultipleUnderscores preserves multiple consecutive
+ // underscores in label names when AllowUTF8 is false.
+ LabelNamePreserveMultipleUnderscores bool
}
// PrometheusConverter converts from OTel write format to Prometheus remote write format.
type PrometheusConverter struct {
- unique map[uint64]*prompb.TimeSeries
- conflicts map[uint64][]*prompb.TimeSeries
- everyN everyNTimes
- metadata []prompb.MetricMetadata
+ everyN everyNTimes
+ scratchBuilder labels.ScratchBuilder
+ builder *labels.Builder
+ appender CombinedAppender
+ // seenTargetInfo tracks target_info samples within a batch to prevent duplicates.
+ seenTargetInfo map[targetInfoKey]struct{}
}
-func NewPrometheusConverter() *PrometheusConverter {
+// targetInfoKey uniquely identifies a target_info sample by its labelset and timestamp.
+type targetInfoKey struct {
+ labelsHash uint64
+ timestamp int64
+}
+
+func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter {
return &PrometheusConverter{
- unique: map[uint64]*prompb.TimeSeries{},
- conflicts: map[uint64][]*prompb.TimeSeries{},
+ scratchBuilder: labels.NewScratchBuilder(0),
+ builder: labels.NewBuilder(labels.EmptyLabels()),
+ appender: appender,
}
}
@@ -121,7 +135,9 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
WithMetricSuffixes: settings.AddMetricSuffixes,
UTF8Allowed: settings.AllowUTF8,
}
+ unitNamer := otlptranslator.UnitNamer{}
c.everyN = everyNTimes{n: 128}
+ c.seenTargetInfo = make(map[targetInfoKey]struct{})
resourceMetricsSlice := md.ResourceMetrics()
numMetrics := 0
@@ -131,7 +147,6 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
numMetrics += scopeMetricsSlice.At(j).Metrics().Len()
}
}
- c.metadata = make([]prompb.MetricMetadata, 0, numMetrics)
for i := 0; i < resourceMetricsSlice.Len(); i++ {
resourceMetrics := resourceMetricsSlice.At(i)
@@ -150,7 +165,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
for k := 0; k < metricSlice.Len(); k++ {
if err := c.everyN.checkContext(ctx); err != nil {
errs = multierr.Append(errs, err)
- return
+ return annots, errs
}
metric := metricSlice.At(k)
@@ -165,19 +180,26 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
// Cumulative temporality is always valid.
// Delta temporality is also valid if AllowDeltaTemporality is true.
// All other temporality values are invalid.
- (temporality != pmetric.AggregationTemporalityCumulative &&
- (!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) {
+ //nolint:staticcheck // QF1001 Applying De Morgan’s law would make the conditions harder to read.
+ !(temporality == pmetric.AggregationTemporalityCumulative ||
+ (settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue
}
- metadata := prompb.MetricMetadata{
- Type: otelMetricTypeToPromMetricType(metric),
- MetricFamilyName: namer.Build(TranslatorMetricFromOtelMetric(metric)),
- Help: metric.Description(),
- Unit: metric.Unit(),
+ promName, err := namer.Build(TranslatorMetricFromOtelMetric(metric))
+ if err != nil {
+ errs = multierr.Append(errs, err)
+ continue
+ }
+ meta := Metadata{
+ Metadata: metadata.Metadata{
+ Type: otelMetricTypeToPromMetricType(metric),
+ Unit: unitNamer.Build(metric.Unit()),
+ Help: metric.Description(),
+ },
+ MetricFamilyName: promName,
}
- c.metadata = append(c.metadata, metadata)
// handle individual metrics based on type
//exhaustive:enforce
@@ -188,10 +210,10 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil {
+ if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
case pmetric.MetricTypeSum:
@@ -200,10 +222,10 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, metric, settings, metadata, scope); err != nil {
+ if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
case pmetric.MetricTypeHistogram:
@@ -214,20 +236,20 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
}
if settings.ConvertHistogramsToNHCB {
ws, err := c.addCustomBucketsHistogramDataPoints(
- ctx, dataPoints, resource, settings, metadata, temporality, scope,
+ ctx, dataPoints, resource, settings, temporality, scope, meta,
)
annots.Merge(ws)
if err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
} else {
- if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil {
+ if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
}
@@ -242,15 +264,15 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
dataPoints,
resource,
settings,
- metadata,
temporality,
scope,
+ meta,
)
annots.Merge(ws)
if err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
case pmetric.MetricTypeSummary:
@@ -259,10 +281,10 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
break
}
- if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, metadata, scope); err != nil {
+ if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
errs = multierr.Append(errs, err)
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return
+ return annots, errs
}
}
default:
@@ -273,69 +295,13 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) {
// We have at least one metric sample for this resource.
// Generate a corresponding target_info series.
- addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c)
- }
- }
-
- return annots, errs
-}
-
-func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
- if len(ts.Labels) != len(lbls) {
- return false
- }
- for i, l := range ts.Labels {
- if l.Name != ts.Labels[i].Name || l.Value != ts.Labels[i].Value {
- return false
- }
- }
- return true
-}
-
-// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value,
-// the exemplar is added to the bucket bound's time series, provided that the time series' has samples.
-func (c *PrometheusConverter) addExemplars(ctx context.Context, dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) error {
- if len(bucketBounds) == 0 {
- return nil
- }
-
- exemplars, err := getPromExemplars(ctx, &c.everyN, dataPoint)
- if err != nil {
- return err
- }
- if len(exemplars) == 0 {
- return nil
- }
-
- sort.Sort(byBucketBoundsData(bucketBounds))
- for _, exemplar := range exemplars {
- for _, bound := range bucketBounds {
- if err := c.everyN.checkContext(ctx); err != nil {
- return err
- }
- if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
- bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
- break
+ if err := c.addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime()); err != nil {
+ errs = multierr.Append(errs, err)
}
}
}
- return nil
-}
-
-// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
-// If there is no corresponding TimeSeries already, it's created.
-// The corresponding TimeSeries is returned.
-// If either lbls is nil/empty or sample is nil, nothing is done.
-func (c *PrometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
- if sample == nil || len(lbls) == 0 {
- // This shouldn't happen
- return nil
- }
-
- ts, _ := c.getOrCreateTimeSeries(lbls)
- ts.Samples = append(ts.Samples, *sample)
- return ts
+ return annots, errs
}
func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAttributes {
@@ -353,30 +319,42 @@ func NewPromoteResourceAttributes(otlpCfg config.OTLPConfig) *PromoteResourceAtt
}
}
-// promotedAttributes returns labels for promoted resourceAttributes.
-func (s *PromoteResourceAttributes) promotedAttributes(resourceAttributes pcommon.Map) []prompb.Label {
+// addPromotedAttributes adds labels for promoted resourceAttributes to the builder.
+func (s *PromoteResourceAttributes) addPromotedAttributes(builder *labels.Builder, resourceAttributes pcommon.Map, labelNamer otlptranslator.LabelNamer) error {
if s == nil {
return nil
}
- var promotedAttrs []prompb.Label
if s.promoteAll {
- promotedAttrs = make([]prompb.Label, 0, resourceAttributes.Len())
+ var err error
resourceAttributes.Range(func(name string, value pcommon.Value) bool {
if _, exists := s.attrs[name]; !exists {
- promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
- }
- return true
- })
- } else {
- promotedAttrs = make([]prompb.Label, 0, len(s.attrs))
- resourceAttributes.Range(func(name string, value pcommon.Value) bool {
- if _, exists := s.attrs[name]; exists {
- promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
+ var normalized string
+ normalized, err = labelNamer.Build(name)
+ if err != nil {
+ return false
+ }
+ if builder.Get(normalized) == "" {
+ builder.Set(normalized, value.AsString())
+ }
}
return true
})
+ return err
}
- sort.Stable(ByLabelName(promotedAttrs))
- return promotedAttrs
+ var err error
+ resourceAttributes.Range(func(name string, value pcommon.Value) bool {
+ if _, exists := s.attrs[name]; exists {
+ var normalized string
+ normalized, err = labelNamer.Build(name)
+ if err != nil {
+ return false
+ }
+ if builder.Get(normalized) == "" {
+ builder.Set(normalized, value.AsString())
+ }
+ }
+ return true
+ })
+ return err
}
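
The error plumbing through every `Range` callback above follows from `LabelNamer.Build` now returning `(string, error)` instead of a bare string. A minimal usage sketch against `github.com/prometheus/otlptranslator` (the input name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.LabelNamer{UTF8Allowed: false}
	name, err := namer.Build("service.instance.id")
	if err != nil {
		fmt.Println("cannot normalize label name:", err)
		return
	}
	fmt.Println(name) // service_instance_id
}
```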
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
index 68a28c0eca1..cdae9787365 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
@@ -25,11 +25,10 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"github.com/prometheus/prometheus/model/value"
- "github.com/prometheus/prometheus/prompb"
)
func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- resource pcommon.Resource, settings Settings, metadata prompb.MetricMetadata, scope scope,
+ resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -37,39 +36,42 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
}
pt := dataPoints.At(x)
- labels := createAttributes(
+ labels, err := c.createAttributes(
resource,
pt.Attributes(),
scope,
settings,
nil,
true,
- metadata,
+ meta,
model.MetricNameLabel,
- metadata.MetricFamilyName,
+ meta.MetricFamilyName,
)
- sample := &prompb.Sample{
- // convert ns to ms
- Timestamp: convertTimeStamp(pt.Timestamp()),
+ if err != nil {
+ return err
}
+ var val float64
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
- sample.Value = float64(pt.IntValue())
+ val = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
- sample.Value = pt.DoubleValue()
+ val = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
- sample.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
+ }
+ ts := convertTimeStamp(pt.Timestamp())
+ ct := convertTimeStamp(pt.StartTimestamp())
+ if err := c.appender.AppendSample(labels, meta, ct, ts, val, nil); err != nil {
+ return err
}
-
- c.addSample(sample, labels)
}
return nil
}
func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
- resource pcommon.Resource, metric pmetric.Metric, settings Settings, metadata prompb.MetricMetadata, scope scope,
+ resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
) error {
for x := 0; x < dataPoints.Len(); x++ {
if err := c.everyN.checkContext(ctx); err != nil {
@@ -77,56 +79,38 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
}
pt := dataPoints.At(x)
- lbls := createAttributes(
+ lbls, err := c.createAttributes(
resource,
pt.Attributes(),
scope,
settings,
nil,
true,
- metadata,
+ meta,
model.MetricNameLabel,
- metadata.MetricFamilyName,
+ meta.MetricFamilyName,
)
- sample := &prompb.Sample{
- // convert ns to ms
- Timestamp: convertTimeStamp(pt.Timestamp()),
+ if err != nil {
+ return err
}
+ var val float64
switch pt.ValueType() {
case pmetric.NumberDataPointValueTypeInt:
- sample.Value = float64(pt.IntValue())
+ val = float64(pt.IntValue())
case pmetric.NumberDataPointValueTypeDouble:
- sample.Value = pt.DoubleValue()
+ val = pt.DoubleValue()
}
if pt.Flags().NoRecordedValue() {
- sample.Value = math.Float64frombits(value.StaleNaN)
+ val = math.Float64frombits(value.StaleNaN)
}
-
- ts := c.addSample(sample, lbls)
- if ts != nil {
- exemplars, err := getPromExemplars[pmetric.NumberDataPoint](ctx, &c.everyN, pt)
- if err != nil {
- return err
- }
- ts.Exemplars = append(ts.Exemplars, exemplars...)
+ ts := convertTimeStamp(pt.Timestamp())
+ ct := convertTimeStamp(pt.StartTimestamp())
+ exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
+ if err != nil {
+ return err
}
-
- // add created time series if needed
- if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
- startTimestamp := pt.StartTimestamp()
- if startTimestamp == 0 {
- return nil
- }
-
- createdLabels := make([]prompb.Label, len(lbls))
- copy(createdLabels, lbls)
- for i, l := range createdLabels {
- if l.Name == model.MetricNameLabel {
- createdLabels[i].Value = metadata.MetricFamilyName + createdSuffix
- break
- }
- }
- c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
+ if err := c.appender.AppendSample(lbls, meta, ct, ts, val, exemplars); err != nil {
+ return err
}
}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
index 716a6cd6f93..49f96e00190 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
@@ -17,42 +17,41 @@
package prometheusremotewrite
import (
+ "github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pmetric"
-
- "github.com/prometheus/prometheus/prompb"
)
-func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType {
+func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) model.MetricType {
switch otelMetric.Type() {
case pmetric.MetricTypeGauge:
- return prompb.MetricMetadata_GAUGE
+ return model.MetricTypeGauge
case pmetric.MetricTypeSum:
- metricType := prompb.MetricMetadata_GAUGE
+ metricType := model.MetricTypeGauge
if otelMetric.Sum().IsMonotonic() {
- metricType = prompb.MetricMetadata_COUNTER
+ metricType = model.MetricTypeCounter
}
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
if otelMetric.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
- metricType = prompb.MetricMetadata_UNKNOWN
+ metricType = model.MetricTypeUnknown
}
return metricType
case pmetric.MetricTypeHistogram:
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
if otelMetric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
- return prompb.MetricMetadata_UNKNOWN
+ return model.MetricTypeUnknown
}
- return prompb.MetricMetadata_HISTOGRAM
+ return model.MetricTypeHistogram
case pmetric.MetricTypeSummary:
- return prompb.MetricMetadata_SUMMARY
+ return model.MetricTypeSummary
case pmetric.MetricTypeExponentialHistogram:
if otelMetric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityDelta {
// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
// We don't have a proper way to flag delta metrics yet, therefore marking the metric type as unknown for now.
- return prompb.MetricMetadata_UNKNOWN
+ return model.MetricTypeUnknown
}
- return prompb.MetricMetadata_HISTOGRAM
+ return model.MetricTypeHistogram
}
- return prompb.MetricMetadata_UNKNOWN
+ return model.MetricTypeUnknown
}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go
deleted file mode 100644
index abffbe61054..00000000000
--- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// Provenance-includes-location:
-// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
-
-package prometheusremotewrite
-
-import (
- "github.com/prometheus/prometheus/prompb"
-)
-
-// TimeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format.
-func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries {
- conflicts := 0
- for _, ts := range c.conflicts {
- conflicts += len(ts)
- }
- allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts)
- for _, ts := range c.unique {
- allTS = append(allTS, *ts)
- }
- for _, cTS := range c.conflicts {
- for _, ts := range cTS {
- allTS = append(allTS, *ts)
- }
- }
-
- return allTS
-}
-
-// Metadata returns a slice of the prompb.Metadata that were converted from OTel format.
-func (c *PrometheusConverter) Metadata() []prompb.MetricMetadata {
- return c.metadata
-}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go
index db602b8dc36..25d3a94b6ac 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/gogo/protobuf/proto"
+ remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
@@ -40,6 +41,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+ "github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
@@ -59,6 +61,7 @@ const (
reasonTooOld = "too_old"
reasonDroppedSeries = "dropped_series"
reasonUnintentionalDroppedSeries = "unintentionally_dropped_series"
+ reasonNHCBNotSupported = "nhcb_in_rw1_not_supported"
)
type queueManagerMetrics struct {
@@ -81,6 +84,7 @@ type queueManagerMetrics struct {
droppedHistogramsTotal *prometheus.CounterVec
enqueueRetriesTotal prometheus.Counter
sentBatchDuration prometheus.Histogram
+ highestTimestamp *maxTimestamp
highestSentTimestamp *maxTimestamp
pendingSamples prometheus.Gauge
pendingExemplars prometheus.Gauge
@@ -227,12 +231,21 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
})
+ m.highestTimestamp = &maxTimestamp{
+ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "queue_highest_timestamp_seconds",
+ Help: "Highest timestamp that was enqueued, in seconds since epoch. Initialized to 0 when no data has been received yet.",
+ ConstLabels: constLabels,
+ }),
+ }
m.highestSentTimestamp = &maxTimestamp{
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds",
- Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
+ Help: "Highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
ConstLabels: constLabels,
}),
}
@@ -337,6 +350,7 @@ func (m *queueManagerMetrics) register() {
m.droppedHistogramsTotal,
m.enqueueRetriesTotal,
m.sentBatchDuration,
+ m.highestTimestamp,
m.highestSentTimestamp,
m.pendingSamples,
m.pendingExemplars,
@@ -372,6 +386,7 @@ func (m *queueManagerMetrics) unregister() {
m.reg.Unregister(m.droppedHistogramsTotal)
m.reg.Unregister(m.enqueueRetriesTotal)
m.reg.Unregister(m.sentBatchDuration)
+ m.reg.Unregister(m.highestTimestamp)
m.reg.Unregister(m.highestSentTimestamp)
m.reg.Unregister(m.pendingSamples)
m.reg.Unregister(m.pendingExemplars)
@@ -407,20 +422,21 @@ type QueueManager struct {
reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled.
reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until.
- logger *slog.Logger
- flushDeadline time.Duration
- cfg config.QueueConfig
- mcfg config.MetadataConfig
- externalLabels []labels.Label
- relabelConfigs []*relabel.Config
- sendExemplars bool
- sendNativeHistograms bool
- watcher *wlog.Watcher
- metadataWatcher *MetadataWatcher
+ logger *slog.Logger
+ flushDeadline time.Duration
+ cfg config.QueueConfig
+ mcfg config.MetadataConfig
+ externalLabels []labels.Label
+ relabelConfigs []*relabel.Config
+ sendExemplars bool
+ sendNativeHistograms bool
+ enableTypeAndUnitLabels bool
+ watcher *wlog.Watcher
+ metadataWatcher *MetadataWatcher
clientMtx sync.RWMutex
storeClient WriteClient
- protoMsg config.RemoteWriteProtoMsg
+ protoMsg remoteapi.WriteMessageType
compr compression.Type
seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
@@ -468,7 +484,8 @@ func NewQueueManager(
sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
- protoMsg config.RemoteWriteProtoMsg,
+ enableTypeAndUnitLabels bool,
+ protoMsg remoteapi.WriteMessageType,
) *QueueManager {
if logger == nil {
logger = promslog.NewNopLogger()
@@ -482,15 +499,16 @@ func NewQueueManager(
logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint())
t := &QueueManager{
- logger: logger,
- flushDeadline: flushDeadline,
- cfg: cfg,
- mcfg: mCfg,
- externalLabels: extLabelsSlice,
- relabelConfigs: relabelConfigs,
- storeClient: client,
- sendExemplars: enableExemplarRemoteWrite,
- sendNativeHistograms: enableNativeHistogramRemoteWrite,
+ logger: logger,
+ flushDeadline: flushDeadline,
+ cfg: cfg,
+ mcfg: mCfg,
+ externalLabels: extLabelsSlice,
+ relabelConfigs: relabelConfigs,
+ storeClient: client,
+ sendExemplars: enableExemplarRemoteWrite,
+ sendNativeHistograms: enableNativeHistogramRemoteWrite,
+ enableTypeAndUnitLabels: enableTypeAndUnitLabels,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata),
@@ -515,7 +533,7 @@ func NewQueueManager(
compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use.
}
- walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1
+ walMetadata := t.protoMsg != remoteapi.WriteV1MessageType
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
@@ -523,7 +541,7 @@ func NewQueueManager(
// with the new approach, which stores metadata as WAL records and
// ships them alongside series. If both mechanisms are set, the new one
// takes precedence by implicitly disabling the older one.
- if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 {
+ if t.mcfg.Send && t.protoMsg != remoteapi.WriteV1MessageType {
logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request")
t.mcfg.Send = false
}
@@ -540,7 +558,7 @@ func NewQueueManager(
// This is only used for the metadata_config.send setting and 1.x Remote Write.
func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
// no op for any newer proto format, which will cache metadata sent to it from the WAL watcher.
- if t.protoMsg != config.RemoteWriteProtoMsgV1 {
+ if t.protoMsg != remoteapi.WriteV1MessageType {
return
}
@@ -557,11 +575,8 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr
pBuf := proto.NewBuffer(nil)
numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend)))
- for i := 0; i < numSends; i++ {
- last := (i + 1) * t.mcfg.MaxSamplesPerSend
- if last > len(metadata) {
- last = len(metadata)
- }
+ for i := range numSends {
+ last := min((i+1)*t.mcfg.MaxSamplesPerSend, len(metadata))
err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf)
if err != nil {
t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend)))
@@ -818,6 +833,12 @@ outer:
t.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
continue
}
+ if t.protoMsg == remoteapi.WriteV1MessageType && h.H != nil && h.H.Schema == histogram.CustomBucketsSchema {
+ // We cannot send native histograms with custom buckets (NHCB) via remote write v1.
+ t.metrics.droppedHistogramsTotal.WithLabelValues(reasonNHCBNotSupported).Inc()
+ t.logger.Warn("Dropped native histogram with custom buckets (NHCB) as remote write v1 does not support itB", "ref", h.Ref)
+ continue
+ }
t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[h.Ref]
if !ok {
@@ -873,6 +894,12 @@ outer:
t.metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
continue
}
+ if t.protoMsg == remoteapi.WriteV1MessageType && h.FH != nil && h.FH.Schema == histogram.CustomBucketsSchema {
+ // We cannot send native histograms with custom buckets (NHCB) via remote write v1.
+ t.metrics.droppedHistogramsTotal.WithLabelValues(reasonNHCBNotSupported).Inc()
+ t.logger.Warn("Dropped float native histogram with custom buckets (NHCB) as remote write v1 does not support itB", "ref", h.Ref)
+ continue
+ }
t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[h.Ref]
if !ok {
@@ -946,10 +973,10 @@ func (t *QueueManager) Stop() {
defer t.logger.Info("Remote storage stopped.")
close(t.quit)
- t.wg.Wait()
// Wait for all QueueManager routines to end before stopping shards, metadata watcher, and WAL watcher. This
// is to ensure we don't end up executing a reshard and shards.stop() at the same time, which
// causes a closed channel panic.
+ t.wg.Wait()
t.shards.stop()
t.watcher.Stop()
if t.mcfg.Send {
@@ -982,7 +1009,7 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
// StoreMetadata keeps track of known series' metadata for lookups when sending samples to remote.
func (t *QueueManager) StoreMetadata(meta []record.RefMetadata) {
- if t.protoMsg == config.RemoteWriteProtoMsgV1 {
+ if t.protoMsg == remoteapi.WriteV1MessageType {
return
}
@@ -1241,7 +1268,7 @@ func (s *shards) start(n int) {
s.qm.metrics.numShards.Set(float64(n))
newQueues := make([]*queue, n)
- for i := 0; i < n; i++ {
+ for i := range n {
newQueues[i] = newQueue(s.qm.cfg.MaxSamplesPerSend, s.qm.cfg.Capacity)
}
@@ -1259,7 +1286,7 @@ func (s *shards) start(n int) {
s.exemplarsDroppedOnHardShutdown.Store(0)
s.histogramsDroppedOnHardShutdown.Store(0)
s.metadataDroppedOnHardShutdown.Store(0)
- for i := 0; i < n; i++ {
+ for i := range n {
go s.runShard(hardShutdownCtx, i, newQueues[i])
}
}
@@ -1330,7 +1357,10 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
case tHistogram, tFloatHistogram:
s.qm.metrics.pendingHistograms.Inc()
s.enqueuedHistograms.Inc()
+ default:
+ return true
}
+ s.qm.metrics.highestTimestamp.Set(float64(data.timestamp / 1000))
return true
}
}
@@ -1440,9 +1470,15 @@ func (q *queue) ReturnForReuse(batch []timeSeries) {
// FlushAndShutdown stops the queue and flushes any samples. No appends can be
// made after this is called.
func (q *queue) FlushAndShutdown(done <-chan struct{}) {
+loop:
for q.tryEnqueueingBatch(done) {
- time.Sleep(time.Second)
+ select {
+ case <-done:
+ break loop
+ case <-time.After(time.Second):
+ }
}
+
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
q.batch = nil
@@ -1532,9 +1568,9 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
defer stop()
- sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, compr compression.Type, timer bool) {
+ sendBatch := func(batch []timeSeries, protoMsg remoteapi.WriteMessageType, compr compression.Type, timer bool) {
switch protoMsg {
- case config.RemoteWriteProtoMsgV1:
+ case remoteapi.WriteV1MessageType:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
if timer {
@@ -1542,9 +1578,12 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
}
_ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, encBuf, compr)
- case config.RemoteWriteProtoMsgV2:
- nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ case remoteapi.WriteV2MessageType:
+ nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, nUnexpectedMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms, s.qm.enableTypeAndUnitLabels)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ if nUnexpectedMetadata > 0 {
+ s.qm.logger.Warn("unexpected metadata sType in populateV2TimeSeries", "count", nUnexpectedMetadata)
+ }
_ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, encBuf, compr)
symbolTable.Reset()
}
@@ -1911,21 +1950,32 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2
return accumulatedStats, err
}
-func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {
- var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
+func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms, enableTypeAndUnitLabels bool) (int, int, int, int, int) {
+ var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, nUnexpectedMetadata int
for nPending, d := range batch {
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
- if d.metadata != nil {
+ switch {
+ case enableTypeAndUnitLabels:
+ m := schema.NewMetadataFromLabels(d.seriesLabels)
+ pendingData[nPending].Metadata.Type = writev2.FromMetadataType(m.Type)
+ pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(m.Unit)
+ pendingData[nPending].Metadata.HelpRef = 0 // Type and unit do not give us help.
+ // Use Help from d.metadata if available.
+ if d.metadata != nil {
+ pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
+ nPendingMetadata++
+ }
+ case d.metadata != nil:
pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
nPendingMetadata++
- } else {
+ default:
// Safeguard against sending garbage in case of not having metadata
// for whatever reason.
- pendingData[nPending].Metadata.Type = writev2.Metadata_METRIC_TYPE_UNSPECIFIED
- pendingData[nPending].Metadata.HelpRef = 0
+ pendingData[nPending].Metadata.Type = writev2.FromMetadataType(model.MetricTypeUnknown)
pendingData[nPending].Metadata.UnitRef = 0
+ pendingData[nPending].Metadata.HelpRef = 0
}
if sendExemplars {
@@ -1960,11 +2010,10 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries,
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromFloatHistogram(d.timestamp, d.floatHistogram))
nPendingHistograms++
case tMetadata:
- // TODO: log or return an error?
- // we shouldn't receive metadata type data here, it should already be inserted into the timeSeries
+ nUnexpectedMetadata++
}
}
- return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata
+ return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, nUnexpectedMetadata
}
func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt func(int) error, onRetry func()) error {
@@ -2026,11 +2075,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
onRetry()
t.logger.Warn("Failed to send batch, retrying", "err", err)
- backoff = sleepDuration * 2
-
- if backoff > t.cfg.MaxBackoff {
- backoff = t.cfg.MaxBackoff
- }
+ backoff = min(sleepDuration*2, t.cfg.MaxBackoff)
try++
}
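
The queue_manager.go hunks above also modernize the code for recent Go: counted `for i := 0; i < n; i++` loops become `for i := range n` (Go 1.22+), and the hand-rolled backoff clamp becomes the builtin `min` (Go 1.21+). A minimal sketch of the resulting capped exponential backoff, using hypothetical names rather than the upstream implementation:

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the sleep on every retry, clamped to maxBackoff,
// mirroring `backoff = min(sleepDuration*2, t.cfg.MaxBackoff)` above.
func nextBackoff(current, maxBackoff time.Duration) time.Duration {
	return min(current*2, maxBackoff) // builtin min, Go 1.21+
}

func main() {
	backoff := 30 * time.Millisecond
	for try := range 5 { // integer range loop, Go 1.22+
		fmt.Printf("try %d: sleeping %v\n", try, backoff)
		backoff = nextBackoff(backoff, 5*time.Second)
	}
}
```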
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go
index 881b5c28d12..e21d1538f50 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go
@@ -210,19 +210,19 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
}
// LabelValues implements storage.Querier and is a noop.
-func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
// LabelNames implements storage.Querier and is a noop.
-func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
// Close implements storage.Querier and is a noop.
-func (q *querier) Close() error {
+func (*querier) Close() error {
return nil
}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
index ba6d100bdff..648c91c9558 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go
@@ -25,7 +25,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
@@ -64,7 +64,7 @@ type Storage struct {
}
// NewStorage returns a remote.Storage.
-func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
+func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, enableTypeAndUnitLabels bool) *Storage {
if l == nil {
l = promslog.NewNopLogger()
}
@@ -76,7 +76,7 @@ func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeC
deduper: deduper,
localStartTimeCallback: stCallback,
}
- s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
+ s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, enableTypeAndUnitLabels)
return s
}
@@ -145,7 +145,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
}
// StartTime implements the Storage interface.
-func (s *Storage) StartTime() (int64, error) {
+func (*Storage) StartTime() (int64, error) {
return int64(model.Latest), nil
}
@@ -219,7 +219,7 @@ func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
}
// Used for hashing configs and diff'ing hashes in ApplyConfig.
-func toHash(data interface{}) (string, error) {
+func toHash(data any) (string, error) {
bytes, err := yaml.Marshal(data)
if err != nil {
return "", err
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go
index f5c998874b9..6bc02bd6fed 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go
@@ -39,19 +39,19 @@ var (
Namespace: namespace,
Subsystem: subsystem,
Name: "samples_in_total",
- Help: "Samples in to remote storage, compare to samples out for queue managers.",
+ Help: "Samples in to remote storage, compare to samples out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_samples_dropped_total",
})
exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_in_total",
- Help: "Exemplars in to remote storage, compare to exemplars out for queue managers.",
+ Help: "Exemplars in to remote storage, compare to exemplars out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_exemplars_dropped_total",
})
histogramsIn = promauto.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "histograms_in_total",
- Help: "HistogramSamples in to remote storage, compare to histograms out for queue managers.",
+ Help: "HistogramSamples in to remote storage, compare to histograms out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_histograms_dropped_total",
})
)
@@ -73,11 +73,12 @@ type WriteStorage struct {
quit chan struct{}
// For timestampTracker.
- highestTimestamp *maxTimestamp
+ highestTimestamp *maxTimestamp
+ enableTypeAndUnitLabels bool
}
// NewWriteStorage creates and runs a WriteStorage.
-func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
+func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, enableTypeAndUnitLabels bool) *WriteStorage {
if logger == nil {
logger = promslog.NewNopLogger()
}
@@ -98,9 +99,10 @@ func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string,
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
- Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
+ Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet. Deprecated, check prometheus_remote_storage_queue_highest_timestamp_seconds which is more accurate.",
}),
},
+ enableTypeAndUnitLabels: enableTypeAndUnitLabels,
}
if reg != nil {
reg.MustRegister(rws.highestTimestamp)
@@ -211,6 +213,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper,
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
+ rws.enableTypeAndUnitLabels,
rwConf.ProtobufMessage,
)
// Keep track of which queues are new so we know which to start.
@@ -233,7 +236,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
}
// Appender implements storage.Storage.
-func (rws *WriteStorage) Appender(_ context.Context) storage.Appender {
+func (rws *WriteStorage) Appender(context.Context) storage.Appender {
return ×tampTracker{
writeStorage: rws,
highestRecvTimestamp: rws.highestTimestamp,
@@ -302,7 +305,7 @@ func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64
return 0, nil
}
-func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
+func (t *timestampTracker) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) {
t.exemplars++
return 0, nil
}
@@ -335,7 +338,7 @@ func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ la
return 0, nil
}
-func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
+func (*timestampTracker) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
return 0, nil
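
The deprecated `highest_timestamp_in_seconds` gauge here and the new `queue_highest_timestamp_seconds` gauge in queue_manager.go are both `maxTimestamp` values, whose defining property is that `Set` only moves the gauge forward. A rough sketch of that behavior (simplified; the real type lives elsewhere in this package):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

// maxTimestamp-style gauge: Set is monotonic, so concurrent shards can
// publish timestamps without ever moving the metric backwards.
type maxTimestamp struct {
	mtx   sync.Mutex
	value float64
	prometheus.Gauge
}

func (m *maxTimestamp) Set(v float64) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	if v > m.value {
		m.value = v
		m.Gauge.Set(v)
	}
}

func main() {
	g := &maxTimestamp{Gauge: prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_highest_timestamp_seconds"})}
	g.Set(100)
	g.Set(50) // ignored: older than the current maximum
	fmt.Println(g.value) // 100
}
```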
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
index 81e2681088a..f8296b4a807 100644
--- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go
@@ -20,11 +20,11 @@ import (
"io"
"log/slog"
"net/http"
- "strings"
"time"
"github.com/gogo/protobuf/proto"
deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
+ remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
@@ -41,9 +41,9 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+ "github.com/prometheus/prometheus/schema"
"github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
- "github.com/prometheus/prometheus/util/compression"
)
type writeHandler struct {
@@ -53,27 +53,22 @@ type writeHandler struct {
samplesWithInvalidLabelsTotal prometheus.Counter
samplesAppendedWithoutMetadata prometheus.Counter
- acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
-
- ingestCTZeroSample bool
+ ingestCTZeroSample bool
+ enableTypeAndUnitLabels bool
+ appendMetadata bool
}
const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests with
-// the given message in acceptedProtoMsgs and writes them to the provided appendable.
+// the given message types in acceptedMsgs and writes them to the provided appendable.
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
-func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg, ingestCTZeroSample bool) http.Handler {
- protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
- for _, acc := range acceptedProtoMsgs {
- protoMsgs[acc] = struct{}{}
- }
+func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedMsgs remoteapi.MessageTypes, ingestCTZeroSample, enableTypeAndUnitLabels, appendMetadata bool) http.Handler {
h := &writeHandler{
- logger: logger,
- appendable: appendable,
- acceptedProtoMsgs: protoMsgs,
+ logger: logger,
+ appendable: appendable,
samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
@@ -87,139 +82,72 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable
Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}),
- ingestCTZeroSample: ingestCTZeroSample,
+ ingestCTZeroSample: ingestCTZeroSample,
+ enableTypeAndUnitLabels: enableTypeAndUnitLabels,
+ appendMetadata: appendMetadata,
}
- return h
+ return remoteapi.NewWriteHandler(h, acceptedMsgs, remoteapi.WithWriteHandlerLogger(logger))
}
-func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
- contentType = strings.TrimSpace(contentType)
-
- parts := strings.Split(contentType, ";")
- if parts[0] != appProtoContentType {
- return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
- }
- // Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
- for _, p := range parts[1:] {
- pair := strings.Split(p, "=")
- if len(pair) != 2 {
- return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
- }
- if pair[0] == "proto" {
- ret := config.RemoteWriteProtoMsg(pair[1])
- if err := ret.Validate(); err != nil {
- return "", fmt.Errorf("got %v content type; %w", contentType, err)
- }
- return ret, nil
- }
- }
- // No "proto=" parameter, assuming v1.
- return config.RemoteWriteProtoMsgV1, nil
+// isHistogramValidationError checks if the error is a native histogram validation error.
+func isHistogramValidationError(err error) bool {
+ var e histogram.Error
+ return errors.As(err, &e)
}
-func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- contentType := r.Header.Get("Content-Type")
- if contentType == "" {
- // Don't break yolo 1.0 clients if not needed. This is similar to what we did
- // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
- // We could give http.StatusUnsupportedMediaType, but let's assume 1.0 message by default.
- contentType = appProtoContentType
- }
-
- msgType, err := h.parseProtoMsg(contentType)
- if err != nil {
- h.logger.Error("Error decoding remote write request", "err", err)
- http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
- return
- }
-
- if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
- err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
- for k := range h.acceptedProtoMsgs {
- ret = append(ret, string(k))
- }
- return ret
- }())
- h.logger.Error("Error decoding remote write request", "err", err)
- http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
- }
-
- enc := r.Header.Get("Content-Encoding")
- if enc == "" {
- // Don't break yolo 1.0 clients if not needed. This is similar to what we did
- // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
- // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default.
- } else if strings.ToLower(enc) != compression.Snappy {
- err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, compression.Snappy)
- h.logger.Error("Error decoding remote write request", "err", err)
- http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
- }
-
- // Read the request body.
+// Store implements the remoteapi.writeStorage interface.
+func (h *writeHandler) Store(r *http.Request, msgType remoteapi.WriteMessageType) (*remoteapi.WriteResponse, error) {
+ // Store receives the request with its body content already decompressed.
body, err := io.ReadAll(r.Body)
if err != nil {
- h.logger.Error("Error decoding remote write request", "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- decompressed, err := compression.Decode(compression.Snappy, body, nil)
- if err != nil {
- // TODO(bwplotka): Add more context to responded error?
- h.logger.Error("Error decompressing remote write request", "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ h.logger.Error("Error reading remote write request body", "err", err.Error())
+ return nil, err
}
- // Now we have a decompressed buffer we can unmarshal it.
-
- if msgType == config.RemoteWriteProtoMsgV1 {
+ wr := remoteapi.NewWriteResponse()
+ if msgType == remoteapi.WriteV1MessageType {
// PRW 1.0 flow has different proto message and no partial write handling.
var req prompb.WriteRequest
- if err := proto.Unmarshal(decompressed, &req); err != nil {
+ if err := proto.Unmarshal(body, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ wr.SetStatusCode(http.StatusBadRequest)
+ return wr, err
}
if err = h.write(r.Context(), &req); err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
// Indicated an out-of-order sample is a bad request to prevent retries.
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ wr.SetStatusCode(http.StatusBadRequest)
+ return wr, err
+ case isHistogramValidationError(err):
+ wr.SetStatusCode(http.StatusBadRequest)
+ return wr, err
default:
- h.logger.Error("Error while remote writing the v1 request", "err", err.Error())
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ wr.SetStatusCode(http.StatusInternalServerError)
+ return wr, err
}
}
- w.WriteHeader(http.StatusNoContent)
- return
+ return wr, nil
}
// Remote Write 2.x proto message handling.
var req writev2.Request
- if err := proto.Unmarshal(decompressed, &req); err != nil {
+ if err := proto.Unmarshal(body, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ wr.SetStatusCode(http.StatusBadRequest)
+ return wr, err
}
respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
-
- // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
- respStats.SetHeaders(w)
-
+ // Add the stats for the required X-Prometheus-Remote-Write-Written-* response headers.
+ wr.Add(respStats)
if err != nil {
- if errHTTPCode/5 == 100 { // 5xx
- h.logger.Error("Error while remote writing the v2 request", "err", err.Error())
- }
- http.Error(w, err.Error(), errHTTPCode)
- return
+ wr.SetStatusCode(errHTTPCode)
+ return wr, err
}
- w.WriteHeader(http.StatusNoContent)
+ return wr, nil
}
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
@@ -227,7 +155,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
samplesWithInvalidLabels := 0
samplesAppended := 0
- app := &timeLimitAppender{
+ app := &remoteWriteAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
@@ -341,13 +269,13 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
//
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have 5xx type of error, we immediately stop and rollback all appends.
-func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
- app := &timeLimitAppender{
+func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ remoteapi.WriteResponseStats, errHTTPCode int, _ error) {
+ app := &remoteWriteAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
- s := WriteResponseStats{}
+ s := remoteapi.WriteResponseStats{}
samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s)
if err != nil {
if errHTTPCode/5 == 100 {
@@ -356,14 +284,14 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ Wri
if rerr := app.Rollback(); rerr != nil {
h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr)
}
- return WriteResponseStats{}, errHTTPCode, err
+ return remoteapi.WriteResponseStats{}, errHTTPCode, err
}
// Non-retriable (e.g. bad request error case). Can be partially written.
commitErr := app.Commit()
if commitErr != nil {
// Bad requests does not matter as we have internal error (retryable).
- return WriteResponseStats{}, http.StatusInternalServerError, commitErr
+ return remoteapi.WriteResponseStats{}, http.StatusInternalServerError, commitErr
}
// Bad request error happened, but rest of data (if any) was written.
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
@@ -372,13 +300,13 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ Wri
// All good just commit.
if err := app.Commit(); err != nil {
- return WriteResponseStats{}, http.StatusInternalServerError, err
+ return remoteapi.WriteResponseStats{}, http.StatusInternalServerError, err
}
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return s, 0, nil
}
-func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
+func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *remoteapi.WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
var (
badRequestErrs []error
outOfOrderExemplarErrs, samplesWithInvalidLabels int
@@ -386,7 +314,24 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
b = labels.NewScratchBuilder(0)
)
for _, ts := range req.Timeseries {
- ls := ts.ToLabels(&b, req.Symbols)
+ ls, err := ts.ToLabels(&b, req.Symbols)
+ if err != nil {
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing labels for series %v: %w", ts.LabelsRefs, err))
+ samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
+ continue
+ }
+
+ m := ts.ToMetadata(req.Symbols)
+ if h.enableTypeAndUnitLabels && (m.Type != model.MetricTypeUnknown || m.Unit != "") {
+ slb := labels.NewScratchBuilder(ls.Len() + 2) // +2 for __type__ and __unit__
+ ls.Range(func(l labels.Label) {
+ slb.Add(l.Name, l.Value)
+ })
+ schema.Metadata{Type: m.Type, Unit: m.Unit}.AddToLabels(&slb)
+ slb.Sort()
+ ls = slb.Labels()
+ }
+
// Validate series labels early.
// NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
// specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
@@ -400,22 +345,26 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
continue
}
+ // Validate that the TimeSeries has at least one sample or histogram.
+ if len(ts.Samples) == 0 && len(ts.Histograms) == 0 {
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("TimeSeries must contain at least one sample or histogram for series %v", ls.String()))
+ continue
+ }
+
allSamplesSoFar := rs.AllSamples()
var ref storage.SeriesRef
-
- // Samples.
- if h.ingestCTZeroSample && len(ts.Samples) > 0 && ts.Samples[0].Timestamp != 0 && ts.CreatedTimestamp != 0 {
- // CT only needs to be ingested for the first sample, it will be considered
- // out of order for the rest.
- ref, err = app.AppendCTZeroSample(ref, ls, ts.Samples[0].Timestamp, ts.CreatedTimestamp)
- if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
- // Even for the first sample OOO is a common scenario because
- // we can't tell if a CT was already ingested in a previous request.
- // We ignore the error.
- h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp)
- }
- }
for _, s := range ts.Samples {
+ if h.ingestCTZeroSample && s.StartTimestamp != 0 && s.Timestamp != 0 {
+ ref, err = app.AppendCTZeroSample(ref, ls, s.Timestamp, s.StartTimestamp)
+ // We treat OOO errors specially, as they are a common scenario given that:
+ // * We can't tell if the ST was already ingested in a previous request.
+ // * We don't check if the ST changed within a stream of samples (we typically have one),
+ // as that is checked reliably in AppendCTZeroSample.
+ if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
+ h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", s.StartTimestamp, "timestamp", s.Timestamp)
+ }
+ }
+
ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
if err == nil {
rs.Samples++
@@ -436,15 +385,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
// Native Histograms.
for _, hp := range ts.Histograms {
- if h.ingestCTZeroSample && hp.Timestamp != 0 && ts.CreatedTimestamp != 0 {
- // Differently from samples, we need to handle CT for each histogram instead of just the first one.
- // This is because histograms and float histograms are stored separately, even if they have the same labels.
- ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp)
+ if h.ingestCTZeroSample && hp.StartTimestamp != 0 && hp.Timestamp != 0 {
+ ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, hp.StartTimestamp)
+ // We treat OOO errors specially, as they are a common scenario given that:
+ // * We can't tell if the ST was already ingested in a previous request.
+ // * We don't check if the ST changed within a stream of samples (we typically have one),
+ // as that is checked reliably in AppendHistogramCTZeroSample.
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
- // Even for the first sample OOO is a common scenario because
- // we can't tell if a CT was already ingested in a previous request.
- // We ignore the error.
- h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp)
+ h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", hp.StartTimestamp, "timestamp", hp.Timestamp)
}
}
if hp.IsFloatHistogram() {
@@ -467,12 +415,21 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
+ if isHistogramValidationError(err) {
+ h.logger.Error("Invalid histogram received", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
+ continue
+ }
return 0, http.StatusInternalServerError, err
}
// Exemplars.
for _, ep := range ts.Exemplars {
- e := ep.ToExemplar(&b, req.Symbols)
+ e, err := ep.ToExemplar(&b, req.Symbols)
+ if err != nil {
+ badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing exemplar for series %v: %w", ls.String(), err))
+ continue
+ }
ref, err = app.AppendExemplar(ref, ls, e)
if err == nil {
rs.Exemplars++
@@ -490,12 +447,15 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
}
- m := ts.ToMetadata(req.Symbols)
- if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
- h.logger.Debug("error while updating metadata from remote write", "err", err)
- // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information,
- // we don't report remote write error either. We increment metric instead.
- samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
+ // Only update metadata in the WAL if the metadata-wal-records feature is enabled.
+ // Without this feature, metadata is not persisted to the WAL.
+ if h.appendMetadata {
+ if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
+ h.logger.Debug("error while updating metadata from remote write", "err", err)
+ // Metadata is attached to each series, and since Prometheus does not reject samples without metadata,
+ // we don't report a remote write error either. We increment a metric instead.
+ samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
+ }
}
}
@@ -511,14 +471,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}
-// handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp.
-// It doesn't return errors in case of out of order CT.
-func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) {
+// handleHistogramZeroSample appends ST as a zero-value sample with st value as the sample timestamp.
+// It doesn't return errors in case of out of order ST.
+func (*writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, st int64) (storage.SeriesRef, error) {
var err error
if hist.IsFloatHistogram() {
- ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram())
+ ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, st, nil, hist.ToFloatHistogram())
} else {
- ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil)
+ ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, st, hist.ToIntHistogram(), nil)
}
return ref, err
}
@@ -535,25 +495,29 @@ type OTLPOptions struct {
LookbackDelta time.Duration
// Add type and unit labels to the metrics.
EnableTypeAndUnitLabels bool
+ // IngestCTZeroSample enables writing zero samples based on the start time
+ // of metrics.
+ IngestCTZeroSample bool
}
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
-func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
+func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
if opts.NativeDelta && opts.ConvertDelta {
// This should be validated when iterating through feature flags, so not expected to fail here.
panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
}
ex := &rwExporter{
- writeHandler: &writeHandler{
- logger: logger,
- appendable: appendable,
- },
+ logger: logger,
+ appendable: appendable,
config: configFunc,
allowDeltaTemporality: opts.NativeDelta,
lookbackDelta: opts.LookbackDelta,
+ ingestCTZeroSample: opts.IngestCTZeroSample,
enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
+ // Register metrics.
+ metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
}
wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
@@ -587,45 +551,55 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl
}
type rwExporter struct {
- *writeHandler
+ logger *slog.Logger
+ appendable storage.Appendable
config func() config.Config
allowDeltaTemporality bool
lookbackDelta time.Duration
+ ingestCTZeroSample bool
enableTypeAndUnitLabels bool
+
+ // Metrics.
+ metrics otlptranslator.CombinedAppenderMetrics
}
func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
otlpCfg := rw.config().OTLPConfig
-
- converter := otlptranslator.NewPrometheusConverter()
-
+ app := &remoteWriteAppender{
+ Appender: rw.appendable.Appender(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ }
+ combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestCTZeroSample, rw.metrics)
+ converter := otlptranslator.NewPrometheusConverter(combinedAppender)
annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
- AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
- AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
- PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
- KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
- ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
- PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
- AllowDeltaTemporality: rw.allowDeltaTemporality,
- LookbackDelta: rw.lookbackDelta,
- EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
+ AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
+ AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
+ PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
+ KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
+ ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
+ PromoteScopeMetadata: otlpCfg.PromoteScopeMetadata,
+ AllowDeltaTemporality: rw.allowDeltaTemporality,
+ LookbackDelta: rw.lookbackDelta,
+ EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels,
+ LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization,
+ LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores,
})
- if err != nil {
- rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)
- }
+
+ defer func() {
+ if err != nil {
+ _ = app.Rollback()
+ return
+ }
+ err = app.Commit()
+ }()
ws, _ := annots.AsStrings("", 0, 0)
if len(ws) > 0 {
rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
}
-
- err = rw.write(ctx, &prompb.WriteRequest{
- Timeseries: converter.TimeSeries(),
- Metadata: converter.Metadata(),
- })
return err
}
-func (rw *rwExporter) Capabilities() consumer.Capabilities {
+func (*rwExporter) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: false}
}
@@ -696,13 +670,13 @@ func hasDelta(md pmetric.Metrics) bool {
return false
}
-type timeLimitAppender struct {
+type remoteWriteAppender struct {
storage.Appender
maxTime int64
}
-func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
@@ -714,11 +688,18 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
return ref, nil
}
-func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
+ if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
+ h = h.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
+ fh = fh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh)
if err != nil {
return 0, err
@@ -726,7 +707,7 @@ func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.La
return ref, nil
}
-func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
if e.Ts > app.maxTime {
return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
}
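
When `enableTypeAndUnitLabels` is set, the v2 write path above enriches each incoming series with the reserved `__type__` and `__unit__` labels via the new `schema` package. A minimal sketch of that enrichment, reusing the same calls the handler makes (the metric type and unit here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/schema"
)

func main() {
	ls := labels.FromStrings("__name__", "http_request_duration_seconds", "job", "api")

	// Copy the incoming labels, add __type__/__unit__, then re-sort,
	// exactly as appendV2 does when enableTypeAndUnitLabels is set.
	b := labels.NewScratchBuilder(ls.Len() + 2) // +2 for __type__ and __unit__
	ls.Range(func(l labels.Label) { b.Add(l.Name, l.Value) })
	schema.Metadata{Type: model.MetricTypeHistogram, Unit: "seconds"}.AddToLabels(&b)
	b.Sort()
	fmt.Println(b.Labels())
}
```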
diff --git a/vendor/github.com/prometheus/prometheus/storage/series.go b/vendor/github.com/prometheus/prometheus/storage/series.go
index e61b2259370..2fff56785a4 100644
--- a/vendor/github.com/prometheus/prometheus/storage/series.go
+++ b/vendor/github.com/prometheus/prometheus/storage/series.go
@@ -65,7 +65,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sampl
if err != nil {
return &ChunkSeriesEntry{
Lset: lset,
- ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator {
+ ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
return errChunksIterator{err: err}
},
}
@@ -169,7 +169,7 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType {
return it.samples.Get(it.idx).Type()
}
-func (it *listSeriesIterator) Err() error { return nil }
+func (*listSeriesIterator) Err() error { return nil }
type listSeriesIteratorWithCopy struct {
*listSeriesIterator
@@ -223,7 +223,7 @@ func (it *listChunkSeriesIterator) Next() bool {
return it.idx < len(it.chks)
}
-func (it *listChunkSeriesIterator) Err() error { return nil }
+func (*listChunkSeriesIterator) Err() error { return nil }
type chunkSetToSeriesSet struct {
ChunkSeriesSet
@@ -432,9 +432,9 @@ type errChunksIterator struct {
err error
}
-func (e errChunksIterator) At() chunks.Meta { return chunks.Meta{} }
-func (e errChunksIterator) Next() bool { return false }
-func (e errChunksIterator) Err() error { return e.err }
+func (errChunksIterator) At() chunks.Meta { return chunks.Meta{} }
+func (errChunksIterator) Next() bool { return false }
+func (e errChunksIterator) Err() error { return e.err }
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
// Optionally it takes samples constructor, useful when you want to compare sample slices with different
diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go
index 87ca32b3469..ea7e93b18c1 100644
--- a/vendor/github.com/prometheus/prometheus/template/template.go
+++ b/vendor/github.com/prometheus/prometheus/template/template.go
@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
html_template "html/template"
+ "maps"
"math"
"net"
"net/url"
@@ -59,7 +60,7 @@ func init() {
// A version of vector that's easier to use from templates.
type sample struct {
Labels map[string]string
- Value interface{}
+ Value any
}
type queryResult []*sample
@@ -110,7 +111,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer
type Expander struct {
text string
name string
- data interface{}
+ data any
funcMap text_template.FuncMap
options []string
}
@@ -120,7 +121,7 @@ func NewTemplateExpander(
ctx context.Context,
text string,
name string,
- data interface{},
+ data any,
timestamp model.Time,
queryFunc QueryFunc,
externalURL *url.URL,
@@ -146,14 +147,14 @@ func NewTemplateExpander(
"label": func(label string, s *sample) string {
return s.Labels[label]
},
- "value": func(s *sample) interface{} {
+ "value": func(s *sample) any {
return s.Value
},
"strvalue": func(s *sample) string {
return s.Labels["__value__"]
},
- "args": func(args ...interface{}) map[string]interface{} {
- result := make(map[string]interface{})
+ "args": func(args ...any) map[string]any {
+ result := make(map[string]any)
for i, a := range args {
result[fmt.Sprintf("arg%d", i)] = a
}
@@ -199,7 +200,7 @@ func NewTemplateExpander(
}
return host
},
- "humanize": func(i interface{}) (string, error) {
+ "humanize": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
@@ -228,7 +229,7 @@ func NewTemplateExpander(
}
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
- "humanize1024": func(i interface{}) (string, error) {
+ "humanize1024": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
@@ -247,7 +248,7 @@ func NewTemplateExpander(
return fmt.Sprintf("%.4g%s", v, prefix), nil
},
"humanizeDuration": common_templates.HumanizeDuration,
- "humanizePercentage": func(i interface{}) (string, error) {
+ "humanizePercentage": func(i any) (string, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return "", err
@@ -255,7 +256,7 @@ func NewTemplateExpander(
return fmt.Sprintf("%.4g%%", v*100), nil
},
"humanizeTimestamp": common_templates.HumanizeTimestamp,
- "toTime": func(i interface{}) (*time.Time, error) {
+ "toTime": func(i any) (*time.Time, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
@@ -263,7 +264,7 @@ func NewTemplateExpander(
return floatToTime(v)
},
- "toDuration": func(i interface{}) (*time.Duration, error) {
+ "toDuration": func(i any) (*time.Duration, error) {
v, err := common_templates.ConvertToFloat(i)
if err != nil {
return nil, err
@@ -287,18 +288,19 @@ func NewTemplateExpander(
}
return float64(time.Duration(v)) / float64(time.Second), nil
},
+ "urlQueryEscape": url.QueryEscape,
},
options: options,
}
}
// AlertTemplateData returns the interface to be used in expanding the template.
-func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, smpl promql.Sample) interface{} {
+func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, smpl promql.Sample) any {
res := struct {
Labels map[string]string
ExternalLabels map[string]string
ExternalURL string
- Value interface{}
+ Value any
}{
Labels: labels,
ExternalLabels: externalLabels,
@@ -316,9 +318,7 @@ func AlertTemplateData(labels, externalLabels map[string]string, externalURL str
// Funcs adds the functions in fm to the Expander's function map.
// Existing functions will be overwritten in case of conflict.
func (te Expander) Funcs(fm text_template.FuncMap) {
- for k, v := range fm {
- te.funcMap[k] = v
- }
+ maps.Copy(te.funcMap, fm)
}
// Expand expands a template in text (non-HTML) mode.
@@ -369,7 +369,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr
tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
tmpl.Option(te.options...)
tmpl.Funcs(html_template.FuncMap{
- "tmpl": func(name string, data interface{}) (html_template.HTML, error) {
+ "tmpl": func(name string, data any) (html_template.HTML, error) {
var buffer bytes.Buffer
err := tmpl.ExecuteTemplate(&buffer, name, data)
return html_template.HTML(buffer.String()), err
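
template.go registers `url.QueryEscape` as a new `urlQueryEscape` template function. A minimal, self-contained sketch of how a template can use it to build a safe query-string link (the template text and data are illustrative):

```go
package main

import (
	"net/url"
	"os"
	"text/template"
)

func main() {
	// Stand-in for the expander's funcMap entry added by the diff.
	tmpl := template.Must(template.New("link").
		Funcs(template.FuncMap{"urlQueryEscape": url.QueryEscape}).
		Parse("https://prometheus.example/graph?g0.expr={{ urlQueryEscape .Expr }}\n"))
	_ = tmpl.Execute(os.Stdout, map[string]string{"Expr": `up{job="api"} == 0`})
}
```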
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
index 5eb8a649a9d..14137f12cca 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
@@ -71,7 +71,6 @@ func (w *BlockWriter) initHead() error {
opts := DefaultHeadOptions()
opts.ChunkRange = w.blockSize
opts.ChunkDirRoot = w.chunkDir
- opts.EnableNativeHistograms.Store(true)
h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
if err != nil {
return fmt.Errorf("tsdb.NewHead: %w", err)
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
index 7082f34c3f4..8cccb189fa8 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go
@@ -223,17 +223,17 @@ type mockSeriesIterator struct {
currIndex int
}
-func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone }
+func (*mockSeriesIterator) Seek(int64) ValueType { return ValNone }
func (it *mockSeriesIterator) At() (int64, float64) {
return it.timeStamps[it.currIndex], it.values[it.currIndex]
}
-func (it *mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+func (*mockSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
return math.MinInt64, nil
}
-func (it *mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+func (*mockSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
return math.MinInt64, nil
}
@@ -249,7 +249,7 @@ func (it *mockSeriesIterator) Next() ValueType {
return ValNone
}
-func (it *mockSeriesIterator) Err() error { return nil }
+func (*mockSeriesIterator) Err() error { return nil }
// NewNopIterator returns a new chunk iterator that does not hold any data.
func NewNopIterator() Iterator {
@@ -288,17 +288,17 @@ type pool struct {
func NewPool() Pool {
return &pool{
xor: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &XORChunk{b: bstream{}}
},
},
histogram: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &HistogramChunk{b: bstream{}}
},
},
floatHistogram: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &FloatHistogramChunk{b: bstream{}}
},
},
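
The `interface{}` to `any` change is a pure alias rewrite (Go 1.18+), so the three pools behave exactly as before. For reference, the same get/reset/put pattern in miniature (a standalone sketch, not the real chunk types):

```go
package main

import (
	"fmt"
	"sync"
)

type xorChunk struct{ b []byte }

func main() {
	pool := sync.Pool{
		// `any` is an alias for `interface{}`, so this is the identical
		// signature the pool had before the rewrite.
		New: func() any { return &xorChunk{b: make([]byte, 0, 128)} },
	}

	c := pool.Get().(*xorChunk) // reuse a pooled chunk, or call New
	c.b = append(c.b, 0xde, 0xad)
	c.b = c.b[:0] // reset before handing it back
	pool.Put(c)

	// cap is 128 whether we got the recycled chunk or a fresh one.
	fmt.Println(cap(pool.Get().(*xorChunk).b)) // 128
}
```
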
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
index 7f3b2a5968f..8002dd0d4e9 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
@@ -41,7 +41,7 @@ type FloatHistogramChunk struct {
// NewFloatHistogramChunk returns a new chunk with float histogram encoding.
func NewFloatHistogramChunk() *FloatHistogramChunk {
- b := make([]byte, 3, 128)
+ b := make([]byte, histogramHeaderSize, chunkAllocationSize)
return &FloatHistogramChunk{b: bstream{stream: b, count: 0}}
}
@@ -58,7 +58,7 @@ type xorValue struct {
}
// Encoding returns the encoding type.
-func (c *FloatHistogramChunk) Encoding() Encoding {
+func (*FloatHistogramChunk) Encoding() Encoding {
return EncFloatHistogram
}
@@ -72,25 +72,10 @@ func (c *FloatHistogramChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
-// Layout returns the histogram layout. Only call this on chunks that have at
-// least one sample.
-func (c *FloatHistogramChunk) Layout() (
- schema int32, zeroThreshold float64,
- negativeSpans, positiveSpans []histogram.Span,
- customValues []float64,
- err error,
-) {
- if c.NumSamples() == 0 {
- panic("FloatHistogramChunk.Layout() called on an empty chunk")
- }
- b := newBReader(c.Bytes()[2:])
- return readHistogramChunkLayout(&b)
-}
-
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *FloatHistogramChunk) GetCounterResetHeader() CounterResetHeader {
- return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
+ return CounterResetHeader(c.Bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
@@ -104,6 +89,9 @@ func (c *FloatHistogramChunk) Compact() {
// Appender implements the Chunk interface.
func (c *FloatHistogramChunk) Appender() (Appender, error) {
+ if len(c.b.stream) == histogramHeaderSize { // Avoid allocating an Iterator when chunk is empty.
+ return &FloatHistogramAppender{b: &c.b, t: math.MinInt64, sum: xorValue{leading: 0xff}, cnt: xorValue{leading: 0xff}, zCnt: xorValue{leading: 0xff}}, nil
+ }
it := c.iterator(nil)
// To get an appender, we must know the state it would have if we had
@@ -148,11 +136,6 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
nBuckets: nBuckets,
sum: it.sum,
}
- if it.numTotal == 0 {
- a.sum.leading = 0xff
- a.cnt.leading = 0xff
- a.zCnt.leading = 0xff
- }
return a, nil
}
@@ -170,14 +153,11 @@ func (c *FloatHistogramChunk) iterator(it Iterator) *floatHistogramIterator {
func newFloatHistogramIterator(b []byte) *floatHistogramIterator {
it := &floatHistogramIterator{
- br: newBReader(b),
+ br: newBReader(b[histogramHeaderSize:]),
numTotal: binary.BigEndian.Uint16(b),
t: math.MinInt64,
}
- // The first 3 bytes contain chunk headers.
- // We skip that for actual samples.
- _, _ = it.br.readBits(24)
- it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+ it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
return it
}
@@ -202,11 +182,11 @@ type FloatHistogramAppender struct {
}
func (a *FloatHistogramAppender) GetCounterResetHeader() CounterResetHeader {
- return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
+ return CounterResetHeader(a.b.bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
func (a *FloatHistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
- a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
+ a.b.bytes()[histogramFlagPos] = (a.b.bytes()[histogramFlagPos] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
func (a *FloatHistogramAppender) NumSamples() int {
@@ -215,7 +195,7 @@ func (a *FloatHistogramAppender) NumSamples() int {
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
-func (a *FloatHistogramAppender) Append(int64, float64) {
+func (*FloatHistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
@@ -250,59 +230,59 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
okToAppend, counterReset bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.CounterResetHint == histogram.CounterReset {
// Always honor the explicit counter reset hint.
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.Count < a.cnt.value {
// There has been a counter reset.
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
- if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+ if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
if h.ZeroCount < a.zCnt.value {
// There has been a counter reset since ZeroThreshold didn't change.
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
var ok bool
positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterReset = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterReset
}
// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
@@ -498,31 +478,31 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(a.sum.value) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
- if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
- return
+ if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
// appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
@@ -568,7 +548,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
if numPBuckets > 0 {
a.pBuckets = make([]xorValue, numPBuckets)
- for i := 0; i < numPBuckets; i++ {
+ for i := range numPBuckets {
a.pBuckets[i] = xorValue{
value: h.PositiveBuckets[i],
leading: 0xff,
@@ -579,7 +559,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa
}
if numNBuckets > 0 {
a.nBuckets = make([]xorValue, numNBuckets)
- for i := 0; i < numNBuckets; i++ {
+ for i := range numNBuckets {
a.nBuckets[i] = xorValue{
value: h.NegativeBuckets[i],
leading: 0xff,
@@ -682,13 +662,13 @@ func (a *FloatHistogramAppender) recode(
happ.appendFloatHistogram(tOld, hOld)
}
- happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
+ happ.setCounterResetHeader(CounterResetHeader(byts[histogramFlagPos] & CounterResetHeaderMask))
return hc, app
}
// recodeHistogram converts the current histogram (in-place) to accommodate an expansion of the set of
// (positive and/or negative) buckets used.
-func (a *FloatHistogramAppender) recodeHistogram(
+func (*FloatHistogramAppender) recodeHistogram(
fh *histogram.FloatHistogram,
pBackwardInter, nBackwardInter []Insert,
) {
@@ -702,7 +682,7 @@ func (a *FloatHistogramAppender) recodeHistogram(
}
}
-func (a *FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+func (*FloatHistogramAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float histogram chunk")
}
@@ -872,11 +852,11 @@ func (it *floatHistogramIterator) Seek(t int64) ValueType {
return ValFloatHistogram
}
-func (it *floatHistogramIterator) At() (int64, float64) {
+func (*floatHistogramIterator) At() (int64, float64) {
panic("cannot call floatHistogramIterator.At")
}
-func (it *floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+func (*floatHistogramIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
panic("cannot call floatHistogramIterator.AtHistogram")
}
@@ -886,7 +866,7 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
}
if fh == nil {
it.atFloatHistogramCalled = true
- return it.t, &histogram.FloatHistogram{
+ fh = &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt.value,
ZeroCount: it.zCnt.value,
@@ -899,6 +879,14 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
NegativeBuckets: it.nBuckets,
CustomValues: it.customValues,
}
+ if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ fh = fh.Copy()
+ fh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ return it.t, fh
}
fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@@ -923,6 +911,13 @@ func (it *floatHistogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
// Custom values are interned. The single copy is in this iterator.
fh.CustomValues = it.customValues
+ if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ fh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
return it.t, fh
}
@@ -937,11 +932,11 @@ func (it *floatHistogramIterator) Err() error {
func (it *floatHistogramIterator) Reset(b []byte) {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
- it.br = newBReader(b[3:])
+ it.br = newBReader(b[histogramHeaderSize:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
- it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+ it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
it.t, it.tDelta = 0, 0
it.cnt, it.zCnt, it.sum = xorValue{}, xorValue{}, xorValue{}
@@ -974,6 +969,12 @@ func (it *floatHistogramIterator) Next() ValueType {
it.err = err
return ValNone
}
+
+ if !histogram.IsKnownSchema(schema) {
+ it.err = histogram.UnknownSchemaError(schema)
+ return ValNone
+ }
+
it.schema = schema
it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans
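
The magic numbers 2 and 3 throughout this file are replaced by `histogramFlagPos` and `histogramHeaderSize`, defined in the histogram.go hunk below. The layout they name: a histogram chunk opens with a 2-byte big-endian sample count, then one flag byte whose top two bits carry the counter-reset header. A self-contained sketch of reading both header fields:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	histogramFlagPos       = 2
	histogramHeaderSize    = 3
	counterResetHeaderMask = byte(0b11000000)
)

func main() {
	// Synthetic header: 5 samples recorded, GaugeType (0b11) flag bits set.
	b := make([]byte, histogramHeaderSize, 128)
	binary.BigEndian.PutUint16(b, 5)
	b[histogramFlagPos] |= 0b11000000

	fmt.Println(binary.BigEndian.Uint16(b))                          // 5
	fmt.Println((b[histogramFlagPos] & counterResetHeaderMask) >> 6) // 3
	// Sample bits start at b[histogramHeaderSize:], which is why the
	// iterator can now slice the header off instead of readBits(24).
}
```
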
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
index 4ba0c467d82..cc1d771235d 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
@@ -42,7 +42,7 @@ type HistogramChunk struct {
// NewHistogramChunk returns a new chunk with histogram encoding of the given
// size.
func NewHistogramChunk() *HistogramChunk {
- b := make([]byte, 3, 128)
+ b := make([]byte, histogramHeaderSize, chunkAllocationSize)
return &HistogramChunk{b: bstream{stream: b, count: 0}}
}
@@ -51,7 +51,7 @@ func (c *HistogramChunk) Reset(stream []byte) {
}
// Encoding returns the encoding type.
-func (c *HistogramChunk) Encoding() Encoding {
+func (*HistogramChunk) Encoding() Encoding {
return EncHistogram
}
@@ -65,21 +65,6 @@ func (c *HistogramChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
-// Layout returns the histogram layout. Only call this on chunks that have at
-// least one sample.
-func (c *HistogramChunk) Layout() (
- schema int32, zeroThreshold float64,
- negativeSpans, positiveSpans []histogram.Span,
- customValues []float64,
- err error,
-) {
- if c.NumSamples() == 0 {
- panic("HistogramChunk.Layout() called on an empty chunk")
- }
- b := newBReader(c.Bytes()[2:])
- return readHistogramChunkLayout(&b)
-}
-
// CounterResetHeader defines the first 2 bits of the chunk header.
type CounterResetHeader byte
@@ -93,15 +78,18 @@ const (
// UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not.
// An explicit counter reset detection needs to happen during query time.
UnknownCounterReset CounterResetHeader = 0b00000000
+ // CounterResetHeaderMask is the mask to get the counter reset header bits.
+ CounterResetHeaderMask byte = 0b11000000
+ // histogramFlagPos is the position of the flag byte within the header at the start of the stream.
+ histogramFlagPos = 2
+ // Total header size.
+ histogramHeaderSize = 3
)
-// CounterResetHeaderMask is the mask to get the counter reset header bits.
-const CounterResetHeaderMask byte = 0b11000000
-
// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
- return CounterResetHeader(c.Bytes()[2] & CounterResetHeaderMask)
+ return CounterResetHeader(c.Bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
// Compact implements the Chunk interface.
@@ -115,6 +103,9 @@ func (c *HistogramChunk) Compact() {
// Appender implements the Chunk interface.
func (c *HistogramChunk) Appender() (Appender, error) {
+ if len(c.b.stream) == histogramHeaderSize { // Avoid allocating an Iterator when chunk is empty.
+ return &HistogramAppender{b: &c.b, t: math.MinInt64, leading: 0xff}, nil
+ }
it := c.iterator(nil)
// To get an appender, we must know the state it would have if we had
@@ -149,9 +140,6 @@ func (c *HistogramChunk) Appender() (Appender, error) {
leading: it.leading,
trailing: it.trailing,
}
- if it.numTotal == 0 {
- a.leading = 0xff
- }
return a, nil
}
@@ -165,14 +153,11 @@ func countSpans(spans []histogram.Span) int {
func newHistogramIterator(b []byte) *histogramIterator {
it := &histogramIterator{
- br: newBReader(b),
+ br: newBReader(b[histogramHeaderSize:]),
numTotal: binary.BigEndian.Uint16(b),
t: math.MinInt64,
}
- // The first 3 bytes contain chunk headers.
- // We skip that for actual samples.
- _, _ = it.br.readBits(24)
- it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+ it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
return it
}
@@ -221,11 +206,11 @@ type HistogramAppender struct {
}
func (a *HistogramAppender) GetCounterResetHeader() CounterResetHeader {
- return CounterResetHeader(a.b.bytes()[2] & CounterResetHeaderMask)
+ return CounterResetHeader(a.b.bytes()[histogramFlagPos] & CounterResetHeaderMask)
}
func (a *HistogramAppender) setCounterResetHeader(cr CounterResetHeader) {
- a.b.bytes()[2] = (a.b.bytes()[2] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
+ a.b.bytes()[histogramFlagPos] = (a.b.bytes()[histogramFlagPos] & (^CounterResetHeaderMask)) | (byte(cr) & CounterResetHeaderMask)
}
func (a *HistogramAppender) NumSamples() int {
@@ -234,7 +219,7 @@ func (a *HistogramAppender) NumSamples() int {
// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
-func (a *HistogramAppender) Append(int64, float64) {
+func (*HistogramAppender) Append(int64, float64) {
panic("appended a float sample to a histogram chunk")
}
@@ -275,29 +260,29 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
) {
counterResetHint = NotCounterReset
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.CounterResetHint == histogram.CounterReset {
// Always honor the explicit counter reset hint.
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
counterResetHint = UnknownCounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.Count < a.cnt {
// There has been a counter reset.
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
@@ -306,34 +291,34 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
// as long as https://github.com/prometheus/prometheus/issues/15346 is still open.
// TODO: consider adding the counter reset detection here once #15346 is fixed.
counterResetHint = UnknownCounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
- if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
+ if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
if h.ZeroCount < a.zCnt {
// There has been a counter reset since ZeroThreshold didn't change.
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
var ok bool
positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
if !ok {
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
if !ok {
counterResetHint = CounterReset
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, okToAppend, counterResetHint
}
// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
@@ -530,31 +515,31 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
okToAppend bool,
) {
if a.NumSamples() > 0 && a.GetCounterResetHeader() != GaugeType {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(h.Sum) {
// This is a stale sample whose buckets and spans don't matter.
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if value.IsStaleNaN(a.sum) {
// If the last sample was stale, then we can only accept stale
// samples in this chunk.
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
- if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.FloatBucketsMatch(h.CustomValues, a.customValues) {
- return
+ if histogram.IsCustomBucketsSchema(h.Schema) && !histogram.CustomBucketBoundsMatch(h.CustomValues, a.customValues) {
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
positiveInserts, backwardPositiveInserts, positiveSpans = expandSpansBothWays(a.pSpans, h.PositiveSpans)
negativeInserts, backwardNegativeInserts, negativeSpans = expandSpansBothWays(a.nSpans, h.NegativeSpans)
okToAppend = true
- return
+ return positiveInserts, negativeInserts, backwardPositiveInserts, backwardNegativeInserts, positiveSpans, negativeSpans, okToAppend
}
// appendHistogram appends a histogram to the chunk. The caller must ensure that
@@ -725,13 +710,13 @@ func (a *HistogramAppender) recode(
happ.appendHistogram(tOld, hOld)
}
- happ.setCounterResetHeader(CounterResetHeader(byts[2] & CounterResetHeaderMask))
+ happ.setCounterResetHeader(CounterResetHeader(byts[histogramFlagPos] & CounterResetHeaderMask))
return hc, app
}
// recodeHistogram converts the current histogram (in-place) to accommodate an
// expansion of the set of (positive and/or negative) buckets used.
-func (a *HistogramAppender) recodeHistogram(
+func (*HistogramAppender) recodeHistogram(
h *histogram.Histogram,
pBackwardInserts, nBackwardInserts []Insert,
) {
@@ -749,7 +734,7 @@ func (a *HistogramAppender) writeSumDelta(v float64) {
xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}
-func (a *HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+func (*HistogramAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a histogram chunk")
}
@@ -926,7 +911,7 @@ func (it *histogramIterator) Seek(t int64) ValueType {
return ValHistogram
}
-func (it *histogramIterator) At() (int64, float64) {
+func (*histogramIterator) At() (int64, float64) {
panic("cannot call histogramIterator.At")
}
@@ -936,7 +921,7 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
}
if h == nil {
it.atHistogramCalled = true
- return it.t, &histogram.Histogram{
+ h = &histogram.Histogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: it.cnt,
ZeroCount: it.zCnt,
@@ -949,6 +934,14 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
NegativeBuckets: it.nBuckets,
CustomValues: it.customValues,
}
+ if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ h = h.Copy()
+ h.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ return it.t, h
}
h.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@@ -973,6 +966,13 @@ func (it *histogramIterator) AtHistogram(h *histogram.Histogram) (int64, *histog
// Custom values are interned. The single copy is here in the iterator.
h.CustomValues = it.customValues
+ if h.Schema > histogram.ExponentialSchemaMax && h.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ h.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
return it.t, h
}
@@ -982,7 +982,7 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
}
if fh == nil {
it.atFloatHistogramCalled = true
- return it.t, &histogram.FloatHistogram{
+ fh = &histogram.FloatHistogram{
CounterResetHint: counterResetHint(it.counterResetHeader, it.numRead),
Count: float64(it.cnt),
ZeroCount: float64(it.zCnt),
@@ -995,6 +995,14 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
NegativeBuckets: it.nFloatBuckets,
CustomValues: it.customValues,
}
+ if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ fh = fh.Copy()
+ fh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+ return it.t, fh
}
fh.CounterResetHint = counterResetHint(it.counterResetHeader, it.numRead)
@@ -1027,6 +1035,13 @@ func (it *histogramIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int
// Custom values are interned. The single copy is here in the iterator.
fh.CustomValues = it.customValues
+ if fh.Schema > histogram.ExponentialSchemaMax && fh.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // chunk is from a newer Prometheus version that supports higher
+ // resolution.
+ fh.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
return it.t, fh
}
@@ -1041,11 +1056,11 @@ func (it *histogramIterator) Err() error {
func (it *histogramIterator) Reset(b []byte) {
// The first 3 bytes contain chunk headers.
// We skip that for actual samples.
- it.br = newBReader(b[3:])
+ it.br = newBReader(b[histogramHeaderSize:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
- it.counterResetHeader = CounterResetHeader(b[2] & CounterResetHeaderMask)
+ it.counterResetHeader = CounterResetHeader(b[histogramFlagPos] & CounterResetHeaderMask)
it.t, it.cnt, it.zCnt = 0, 0, 0
it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0
@@ -1092,6 +1107,12 @@ func (it *histogramIterator) Next() ValueType {
it.err = err
return ValNone
}
+
+ if !histogram.IsKnownSchema(schema) {
+ it.err = histogram.UnknownSchemaError(schema)
+ return ValNone
+ }
+
it.schema = schema
it.zThreshold = zeroThreshold
it.pSpans, it.nSpans = posSpans, negSpans
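
The new `ExponentialSchemaMaxReserved` branches let an older reader open chunks written by a newer Prometheus with higher-resolution exponential schemas: the decoded histogram is copied where needed and downscaled to the supported maximum. Dropping an exponential schema by one merges adjacent bucket pairs, which preserves total counts while halving resolution. A hedged sketch of that core idea only (the real `ReduceResolution` also rewrites spans and offsets):

```go
package main

import (
	"fmt"
	"math"
)

// mergePairs shows why downscaling by one schema step is count-preserving:
// two adjacent buckets at schema s cover one bucket at schema s-1.
func mergePairs(buckets []float64) []float64 {
	out := make([]float64, 0, (len(buckets)+1)/2)
	for i := 0; i < len(buckets); i += 2 {
		v := buckets[i]
		if i+1 < len(buckets) {
			v += buckets[i+1]
		}
		out = append(out, v)
	}
	return out
}

func main() {
	// Schema s uses bucket boundaries growing by 2^(2^-s).
	fmt.Println(math.Pow(2, math.Pow(2, -3)))         // ≈ 1.0905 for schema 3
	fmt.Println(mergePairs([]float64{1, 2, 3, 4, 5})) // [3 7 5], counts preserved
}
```
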
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
index 5ee783fd683..22bc4a6d3dc 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
@@ -40,33 +40,33 @@ func readHistogramChunkLayout(b *bstreamReader) (
) {
zeroThreshold, err = readZeroThreshold(b)
if err != nil {
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
v, err := readVarbitInt(b)
if err != nil {
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
schema = int32(v)
positiveSpans, err = readHistogramChunkLayoutSpans(b)
if err != nil {
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
negativeSpans, err = readHistogramChunkLayoutSpans(b)
if err != nil {
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
if histogram.IsCustomBucketsSchema(schema) {
customValues, err = readHistogramChunkLayoutCustomBounds(b)
if err != nil {
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
}
- return
+ return schema, zeroThreshold, positiveSpans, negativeSpans, customValues, err
}
func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) {
@@ -550,5 +550,5 @@ func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []h
addBucket(insertIdx)
consumeInsert()
}
- return
+ return mergedSpans
}
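
The `return mergedSpans` and the long `return schema, zeroThreshold, …` lines above (and their counterparts throughout this diff) replace naked returns in functions with named results; each exit point now states what it returns, with no behavior change. A minimal illustration:

```go
package main

import "fmt"

// With named results, a bare `return` and an explicit one compile to the
// same thing; the diff merely spells the values out at every exit.
func split(v int) (q, r int) {
	q, r = v/3, v%3
	return q, r // previously just `return`
}

func main() {
	fmt.Println(split(7)) // 2 1
}
```
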
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
index 574edec48b3..00ba027dda6 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
@@ -64,7 +64,7 @@ func putVarbitInt(b *bstream, val int64) {
// readVarbitInt reads an int64 encoded with putVarbitInt.
func readVarbitInt(b *bstreamReader) (int64, error) {
var d byte
- for i := 0; i < 8; i++ {
+ for range 8 {
d <<= 1
bit, err := b.readBitFast()
if err != nil {
@@ -169,7 +169,7 @@ func putVarbitUint(b *bstream, val uint64) {
// readVarbitUint reads a uint64 encoded with putVarbitUint.
func readVarbitUint(b *bstreamReader) (uint64, error) {
var d byte
- for i := 0; i < 8; i++ {
+ for range 8 {
d <<= 1
bit, err := b.readBitFast()
if err != nil {
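
`for range 8` is the Go 1.22 range-over-int form, a drop-in for the counted loop whenever the index is unused, as in these bit-reading loops. Standalone sketch:

```go
package main

import "fmt"

func main() {
	// Since Go 1.22, `for range n` runs the body n times; equivalent to
	// `for i := 0; i < n; i++` when i is never referenced.
	var d byte
	for range 8 {
		d = d<<1 | 1
	}
	fmt.Printf("%08b\n", d) // 11111111
}
```
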
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
index ac75a5994bb..29e21107055 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go
@@ -52,6 +52,8 @@ import (
)
const (
+ chunkHeaderSize = 2
+ chunkAllocationSize = 128
chunkCompactCapacityThreshold = 32
)
@@ -62,7 +64,7 @@ type XORChunk struct {
// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
- b := make([]byte, 2, 128)
+ b := make([]byte, chunkHeaderSize, chunkAllocationSize)
return &XORChunk{b: bstream{stream: b, count: 0}}
}
@@ -71,7 +73,7 @@ func (c *XORChunk) Reset(stream []byte) {
}
// Encoding returns the encoding type.
-func (c *XORChunk) Encoding() Encoding {
+func (*XORChunk) Encoding() Encoding {
return EncXOR
}
@@ -98,6 +100,9 @@ func (c *XORChunk) Compact() {
// It is not valid to call Appender() multiple times concurrently or to use multiple
// Appenders on the same chunk.
func (c *XORChunk) Appender() (Appender, error) {
+ if len(c.b.stream) == chunkHeaderSize { // Avoid allocating an Iterator when chunk is empty.
+ return &xorAppender{b: &c.b, t: math.MinInt64, leading: 0xff}, nil
+ }
it := c.iterator(nil)
// To get an appender we must know the state it would have if we had
@@ -117,9 +122,6 @@ func (c *XORChunk) Appender() (Appender, error) {
leading: it.leading,
trailing: it.trailing,
}
- if it.numTotal == 0 {
- a.leading = 0xff
- }
return a, nil
}
@@ -131,7 +133,7 @@ func (c *XORChunk) iterator(it Iterator) *xorIterator {
return &xorIterator{
// The first 2 bytes contain chunk headers.
// We skip that for actual samples.
- br: newBReader(c.b.bytes()[2:]),
+ br: newBReader(c.b.bytes()[chunkHeaderSize:]),
numTotal: binary.BigEndian.Uint16(c.b.bytes()),
t: math.MinInt64,
}
@@ -223,11 +225,11 @@ func (a *xorAppender) writeVDelta(v float64) {
xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
}
-func (a *xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
+func (*xorAppender) AppendHistogram(*HistogramAppender, int64, *histogram.Histogram, bool) (Chunk, bool, Appender, error) {
panic("appended a histogram sample to a float chunk")
}
-func (a *xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
+func (*xorAppender) AppendFloatHistogram(*FloatHistogramAppender, int64, *histogram.FloatHistogram, bool) (Chunk, bool, Appender, error) {
panic("appended a float histogram sample to a float chunk")
}
@@ -263,11 +265,11 @@ func (it *xorIterator) At() (int64, float64) {
return it.t, it.val
}
-func (it *xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
+func (*xorIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
panic("cannot call xorIterator.AtHistogram")
}
-func (it *xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
+func (*xorIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
panic("cannot call xorIterator.AtFloatHistogram")
}
@@ -282,7 +284,7 @@ func (it *xorIterator) Err() error {
func (it *xorIterator) Reset(b []byte) {
// The first 2 bytes contain chunk headers.
// We skip that for actual samples.
- it.br = newBReader(b[2:])
+ it.br = newBReader(b[chunkHeaderSize:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
@@ -330,7 +332,7 @@ func (it *xorIterator) Next() ValueType {
var d byte
// read delta-of-delta
- for i := 0; i < 4; i++ {
+ for range 4 {
d <<= 1
bit, err := it.br.readBitFast()
if err != nil {
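
The `Appender()` fast path is the substantive change in this file: for an empty chunk the appender state is fully known up front (`leading: 0xff` is the same sentinel the removed `it.numTotal == 0` branch used to set), so building and draining an iterator can be skipped. The emptiness test is cheap because a fresh chunk stream is exactly its zeroed header. Sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const chunkHeaderSize = 2 // big-endian sample count, as in the diff

func main() {
	// len(stream) == chunkHeaderSize implies NumSamples() == 0, which is
	// the test the new Appender() fast path uses.
	stream := make([]byte, chunkHeaderSize, 128)
	fmt.Println(len(stream) == chunkHeaderSize)       // true
	fmt.Println(binary.BigEndian.Uint16(stream) == 0) // true
}
```
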
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
index 034106238e3..8b8f5d0f812 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go
@@ -197,7 +197,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
// Used in tests to compare the content of chunks.
func ChunkMetasToSamples(chunks []Meta) (result []Sample) {
if len(chunks) == 0 {
- return
+ return result
}
for _, chunk := range chunks {
@@ -218,7 +218,7 @@ func ChunkMetasToSamples(chunks []Meta) (result []Sample) {
}
}
}
- return
+ return result
}
// Iterator iterates over the chunks of a single time series.
@@ -408,7 +408,7 @@ func (w *Writer) cut() error {
// Uncached IO is implemented using direct I/O for now.
wbuf, err = fileutil.NewDirectIOWriter(f, size)
} else {
- wbuf, err = fileutil.NewBufioWriterWithSeek(f, size)
+ wbuf, err = fileutil.NewBufioWriterWithSize(f, size)
}
if err != nil {
return err
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
index 876b42cb26a..41fce69c723 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go
@@ -172,7 +172,7 @@ func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool {
// bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,
// including all metadata before and after the chunk data.
// Head chunk format: https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/head_chunks.md#chunk
-func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
+func (*chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
// Headers.
bytes := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize
@@ -283,16 +283,16 @@ const (
OutOfOrderMask = uint8(0b10000000)
)
-func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+func (*ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
enc := uint8(sourceEncoding) | OutOfOrderMask
return chunkenc.Encoding(enc)
}
-func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
+func (*ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
return (uint8(e) & OutOfOrderMask) != 0
}
-func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+func (*ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
restored := uint8(sourceEncoding) & (^OutOfOrderMask)
return chunkenc.Encoding(restored)
}
@@ -1109,7 +1109,7 @@ type chunkBuffer struct {
func newChunkBuffer() *chunkBuffer {
cb := &chunkBuffer{}
- for i := 0; i < inBufferShards; i++ {
+ for i := range inBufferShards {
cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk)
}
return cb
@@ -1133,7 +1133,7 @@ func (cb *chunkBuffer) get(ref ChunkDiskMapperRef) chunkenc.Chunk {
}
func (cb *chunkBuffer) clear() {
- for i := 0; i < inBufferShards; i++ {
+ for i := range inBufferShards {
cb.inBufferChunksMtxs[i].Lock()
cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk)
cb.inBufferChunksMtxs[i].Unlock()
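
The dropped receiver names above mark methods that never use their receiver; the out-of-order mask logic itself is a single top-bit toggle on the encoding byte. The round trip, standalone:

```go
package main

import "fmt"

const outOfOrderMask = uint8(0b10000000)

func main() {
	enc := uint8(2) // some in-order chunk encoding
	ooo := enc | outOfOrderMask // ApplyOutOfOrderMask

	fmt.Println(ooo&outOfOrderMask != 0)    // true: IsOutOfOrderChunk
	fmt.Println(ooo&^outOfOrderMask == enc) // true: RemoveMasks restores it
}
```
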
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
index 7828fd08605..49e88d63205 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
@@ -42,7 +42,7 @@ import (
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
ranges := make([]int64, 0, steps)
curRange := minSize
- for i := 0; i < steps; i++ {
+ for range steps {
ranges = append(ranges, curRange)
curRange *= int64(stepSize)
}
@@ -178,7 +178,7 @@ type LeveledCompactorOptions struct {
type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder
-func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder {
+func DefaultPostingsDecoderFactory(*BlockMeta) index.PostingsDecoder {
return index.DecodePostingsRaw
}
@@ -562,6 +562,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
start := time.Now()
uid := ulid.MustNew(ulid.Now(), rand.Reader)
+ c.logger.Info("write block started", "mint", mint, "maxt", maxt, "ulid", uid)
meta := &BlockMeta{
ULID: uid,
@@ -596,7 +597,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
}
c.logger.Info(
- "write block",
+ "write block completed",
"mint", meta.MinTime,
"maxt", meta.MaxTime,
"ulid", meta.ULID,
@@ -761,7 +762,7 @@ type DefaultBlockPopulator struct{}
// PopulateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block.
// It expects the input blocks to be sorted by mint.
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
+func (DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) {
if len(blocks) == 0 {
return errors.New("cannot populate block from no readers")
}
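
For a concrete feel of `ExponentialBlockRanges` (only the loop form changed), here is the function run against Prometheus's default compaction parameters:

```go
package main

import (
	"fmt"
	"time"
)

// Same shape as the function above: steps ranges, each stepSize times the last.
func exponentialBlockRanges(minSize int64, steps, stepSize int) []int64 {
	ranges := make([]int64, 0, steps)
	curRange := minSize
	for range steps {
		ranges = append(ranges, curRange)
		curRange *= int64(stepSize)
	}
	return ranges
}

func main() {
	// Defaults (2h base, 3 steps, factor 5) give the familiar
	// 2h/10h/50h compaction levels, stored in milliseconds.
	for _, r := range exponentialBlockRanges(int64(2*time.Hour/time.Millisecond), 3, 5) {
		fmt.Println(time.Duration(r) * time.Millisecond)
	}
	// Output: 2h0m0s, 10h0m0s, 50h0m0s
}
```
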
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index 4d21d4dc12d..c57ae84c9c0 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -176,9 +176,6 @@ type Options struct {
// Disables isolation between reads and in-flight appends.
IsolationDisabled bool
- // EnableNativeHistograms enables the ingestion of native histograms.
- EnableNativeHistograms bool
-
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
// This can change during run-time, so this value from here should only be used
// while initialising.
@@ -267,6 +264,10 @@ type DB struct {
autoCompactMtx sync.Mutex
autoCompact bool
+ // retentionMtx protects access to retention configuration values that can
+ // be updated at runtime through config file changes.
+ retentionMtx sync.RWMutex
+
// Cancel a running compaction when a shutdown is initiated.
compactCancel context.CancelFunc
@@ -964,7 +965,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
headOpts.MaxExemplars.Store(opts.MaxExemplars)
headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
- headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
headOpts.EnableSharding = opts.EnableSharding
@@ -1157,6 +1157,20 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
oooTimeWindow := int64(0)
if conf.StorageConfig.TSDBConfig != nil {
oooTimeWindow = conf.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
+
+ // Update retention configuration if provided.
+ if conf.StorageConfig.TSDBConfig.Retention != nil {
+ db.retentionMtx.Lock()
+ if conf.StorageConfig.TSDBConfig.Retention.Time > 0 {
+ db.opts.RetentionDuration = int64(conf.StorageConfig.TSDBConfig.Retention.Time)
+ db.metrics.retentionDuration.Set((time.Duration(db.opts.RetentionDuration) * time.Millisecond).Seconds())
+ }
+ if conf.StorageConfig.TSDBConfig.Retention.Size > 0 {
+ db.opts.MaxBytes = int64(conf.StorageConfig.TSDBConfig.Retention.Size)
+ db.metrics.maxBytes.Set(float64(db.opts.MaxBytes))
+ }
+ db.retentionMtx.Unlock()
+ }
}
if oooTimeWindow < 0 {
oooTimeWindow = 0
@@ -1191,14 +1205,18 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
return nil
}
-// EnableNativeHistograms enables the native histogram feature.
-func (db *DB) EnableNativeHistograms() {
- db.head.EnableNativeHistograms()
+// getRetentionDuration returns the current retention duration in a thread-safe manner.
+func (db *DB) getRetentionDuration() int64 {
+ db.retentionMtx.RLock()
+ defer db.retentionMtx.RUnlock()
+ return db.opts.RetentionDuration
}
-// DisableNativeHistograms disables the native histogram feature.
-func (db *DB) DisableNativeHistograms() {
- db.head.DisableNativeHistograms()
+// getMaxBytes returns the current max bytes setting in a thread-safe manner.
+func (db *DB) getMaxBytes() int64 {
+ db.retentionMtx.RLock()
+ defer db.retentionMtx.RUnlock()
+ return db.opts.MaxBytes
}
// dbAppender wraps the DB's head appender and triggers compactions on commit
@@ -1420,6 +1438,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
// Each ULID in the result corresponds to a block in a unique time range.
// The db.cmtx mutex should be held before calling this method.
func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) {
+ db.logger.Info("out-of-order compaction started")
start := time.Now()
blockSize := oooHead.ChunkRange()
@@ -1747,15 +1766,16 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
// set in the db options.
func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) {
// Time retention is disabled or no blocks to work with.
- if len(blocks) == 0 || db.opts.RetentionDuration == 0 {
- return
+ retentionDuration := db.getRetentionDuration()
+ if len(blocks) == 0 || retentionDuration == 0 {
+ return deletable
}
deletable = make(map[ulid.ULID]struct{})
for i, block := range blocks {
// The difference between the first block and this block is greater than or equal to
// the retention period so any blocks after that are added as deletable.
- if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= db.opts.RetentionDuration {
+ if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= retentionDuration {
for _, b := range blocks[i:] {
deletable[b.meta.ULID] = struct{}{}
}
@@ -1770,8 +1790,9 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
// set in the db options.
func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) {
// Size retention is disabled or no blocks to work with.
- if len(blocks) == 0 || db.opts.MaxBytes <= 0 {
- return
+ maxBytes := db.getMaxBytes()
+ if len(blocks) == 0 || maxBytes <= 0 {
+ return deletable
}
deletable = make(map[ulid.ULID]struct{})
@@ -1781,7 +1802,7 @@ func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
blocksSize := db.Head().Size()
for i, block := range blocks {
blocksSize += block.Size()
- if blocksSize > db.opts.MaxBytes {
+ if blocksSize > maxBytes {
// Add this and all following blocks for deletion.
for _, b := range blocks[i:] {
deletable[b.meta.ULID] = struct{}{}
@@ -1926,7 +1947,7 @@ func OverlappingBlocks(bm []BlockMeta) Overlaps {
return overlapGroups
}
-func (db *DB) String() string {
+func (*DB) String() string {
return "HEAD"
}
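
The new `retentionMtx` exists because `ApplyConfig` can now rewrite `RetentionDuration` and `MaxBytes` at runtime while the retention checks read them through `getRetentionDuration`/`getMaxBytes`. The locking pattern, reduced to a standalone sketch (field names mirror the diff; everything else is illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type db struct {
	retentionMtx      sync.RWMutex
	retentionDuration int64 // milliseconds
	maxBytes          int64
}

// applyConfig runs on config reload; writers take the exclusive lock.
func (d *db) applyConfig(retentionMs, maxBytes int64) {
	d.retentionMtx.Lock()
	defer d.retentionMtx.Unlock()
	if retentionMs > 0 {
		d.retentionDuration = retentionMs
	}
	if maxBytes > 0 {
		d.maxBytes = maxBytes
	}
}

// getRetentionDuration is what BeyondTimeRetention calls; readers share the lock.
func (d *db) getRetentionDuration() int64 {
	d.retentionMtx.RLock()
	defer d.retentionMtx.RUnlock()
	return d.retentionDuration
}

func main() {
	d := &db{retentionDuration: 15 * 24 * 3600 * 1000}
	d.applyConfig(30*24*3600*1000, 0)
	fmt.Println(d.getRetentionDuration()) // 2592000000
}
```
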
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go
index 7b5ac26cf1a..cdbcd5cde69 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go
@@ -140,11 +140,11 @@ func (ce *CircularExemplarStorage) Appender() *CircularExemplarStorage {
return ce
}
-func (ce *CircularExemplarStorage) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) {
+func (ce *CircularExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
return ce, nil
}
-func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQuerier, error) {
+func (ce *CircularExemplarStorage) Querier(context.Context) (storage.ExemplarQuerier, error) {
return ce, nil
}
@@ -308,7 +308,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
var buf [1024]byte
- for i := int64(0); i < count; i++ {
+ for i := range count {
idx := (startIndex + i) % int64(len(oldBuffer))
if oldBuffer[idx].ref != nil {
ce.migrate(&oldBuffer[idx], buf[:])
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
index 1672a92d4c9..ad039d2231c 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/dir.go
@@ -22,6 +22,10 @@ func DirSize(dir string) (int64, error) {
var size int64
err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
if err != nil {
+ // Ignore missing files that may have been deleted during the walk.
+ if os.IsNotExist(err) {
+ return nil
+ }
return err
}
if !info.IsDir() {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_force.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_force.go
index e2f811b9f2a..bb65403911f 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_force.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_force.go
@@ -23,6 +23,6 @@ func NewDirectIOWriter(f *os.File, size int) (BufWriter, error) {
return newDirectIOWriter(f, size)
}
-func NewBufioWriterWithSeek(f *os.File, size int) (BufWriter, error) {
+func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return NewDirectIOWriter(f, size)
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_linux.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_linux.go
index 7406cc1594b..a1d5f9577db 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_linux.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_linux.go
@@ -20,7 +20,7 @@ import (
"os"
)
-func NewBufioWriterWithSeek(f *os.File, size int) (BufWriter, error) {
+func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return &writer{bufio.NewWriterSize(f, size)}, nil
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go
index fb0b28fcc3c..a03782fe428 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/direct_io_unsupported.go
@@ -20,10 +20,10 @@ import (
"os"
)
-func NewBufioWriterWithSeek(f *os.File, size int) (BufWriter, error) {
+func NewBufioWriterWithSize(f *os.File, size int) (BufWriter, error) {
return &writer{bufio.NewWriterSize(f, size)}, nil
}
-func NewDirectIOWriter(_ *os.File, _ int) (BufWriter, error) {
+func NewDirectIOWriter(*os.File, int) (BufWriter, error) {
return nil, errDirectIOUnsupported
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index 574305a287b..2c71977b1a9 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -86,12 +86,14 @@ type Head struct {
exemplarMetrics *ExemplarMetrics
exemplars ExemplarStorage
logger *slog.Logger
- appendPool zeropool.Pool[[]record.RefSample]
+ refSeriesPool zeropool.Pool[[]record.RefSeries]
+ floatsPool zeropool.Pool[[]record.RefSample]
exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef]
histogramsPool zeropool.Pool[[]record.RefHistogramSample]
floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample]
metadataPool zeropool.Pool[[]record.RefMetadata]
seriesPool zeropool.Pool[[]*memSeries]
+ typeMapPool zeropool.Pool[map[chunks.HeadSeriesRef]sampleType]
bytesPool zeropool.Pool[[]byte]
memChunkPool sync.Pool
@@ -110,7 +112,7 @@ type Head struct {
series *stripeSeries
walExpiriesMtx sync.Mutex
- walExpiries map[chunks.HeadSeriesRef]int // Series no longer in the head, and what WAL segment they must be kept until.
+ walExpiries map[chunks.HeadSeriesRef]int64 // Series no longer in the head, and what time they must be kept until.
// TODO(codesome): Extend MemPostings to return only OOOPostings, Set OOOStatus, ... Like an additional map of ooo postings.
postings *index.MemPostings // Postings lists for terms.
@@ -159,9 +161,6 @@ type HeadOptions struct {
OutOfOrderTimeWindow atomic.Int64
OutOfOrderCapMax atomic.Int64
- // EnableNativeHistograms enables the ingestion of native histograms.
- EnableNativeHistograms atomic.Bool
-
ChunkRange int64
// ChunkDirRoot is the parent directory of the chunks directory.
ChunkDirRoot string
@@ -268,7 +267,7 @@ func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *H
logger: l,
opts: opts,
memChunkPool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &memChunk{}
},
},
@@ -337,7 +336,7 @@ func (h *Head) resetInMemoryState() error {
h.exemplars = es
h.postings = index.NewUnorderedMemPostings()
h.tombstones = tombstones.NewMemTombstones()
- h.walExpiries = map[chunks.HeadSeriesRef]int{}
+ h.walExpiries = map[chunks.HeadSeriesRef]int64{}
h.chunkRange.Store(h.opts.ChunkRange)
h.minTime.Store(math.MaxInt64)
h.maxTime.Store(math.MinInt64)
@@ -704,8 +703,7 @@ func (h *Head) Init(minValidTime int64) error {
snapshotLoaded = true
chunkSnapshotLoadDuration = time.Since(start)
h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String())
- }
- if err != nil {
+ } else {
snapIdx, snapOffset = -1, 0
refSeries = make(map[chunks.HeadSeriesRef]*memSeries)
@@ -791,7 +789,7 @@ func (h *Head) Init(minValidTime int64) error {
// A corrupted checkpoint is a hard error for now and requires user
// intervention. There's likely little data that can be recovered anyway.
- if err := h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks, endAt); err != nil {
+ if err := h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks); err != nil {
return fmt.Errorf("backfill checkpoint: %w", err)
}
h.updateWALReplayStatusRead(startFrom)
@@ -825,7 +823,7 @@ func (h *Head) Init(minValidTime int64) error {
if err != nil {
return fmt.Errorf("segment reader (offset=%d): %w", offset, err)
}
- err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks, endAt)
+ err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks)
if err := sr.Close(); err != nil {
h.logger.Warn("Error while closing the wal segments reader", "err", err)
}
@@ -1049,16 +1047,6 @@ func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wlog.WL) {
h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow)
}
-// EnableNativeHistograms enables the native histogram feature.
-func (h *Head) EnableNativeHistograms() {
- h.opts.EnableNativeHistograms.Store(true)
-}
-
-// DisableNativeHistograms disables the native histogram feature.
-func (h *Head) DisableNativeHistograms() {
- h.opts.EnableNativeHistograms.Store(false)
-}
-
// PostingsCardinalityStats returns highest cardinality stats by label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
cacheKey := statsByLabelName + ";" + strconv.Itoa(limit)
@@ -1282,7 +1270,7 @@ func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64)
return false, false, 0
}
-func (h *Head) getWALExpiry(id chunks.HeadSeriesRef) (int, bool) {
+func (h *Head) getWALExpiry(id chunks.HeadSeriesRef) (int64, bool) {
h.walExpiriesMtx.Lock()
defer h.walExpiriesMtx.Unlock()
@@ -1290,24 +1278,27 @@ func (h *Head) getWALExpiry(id chunks.HeadSeriesRef) (int, bool) {
return keepUntil, ok
}
-func (h *Head) setWALExpiry(id chunks.HeadSeriesRef, keepUntil int) {
+// updateWALExpiry updates the WAL expiry for a series, keeping the higher of the current value and keepUntil.
+func (h *Head) updateWALExpiry(id chunks.HeadSeriesRef, keepUntil int64) {
h.walExpiriesMtx.Lock()
defer h.walExpiriesMtx.Unlock()
- h.walExpiries[id] = keepUntil
+ h.walExpiries[id] = max(keepUntil, h.walExpiries[id])
}
-// keepSeriesInWALCheckpoint is used to determine whether a series record should be kept in the checkpoint
-// last is the last WAL segment that was considered for checkpointing.
-func (h *Head) keepSeriesInWALCheckpoint(id chunks.HeadSeriesRef, last int) bool {
- // Keep the record if the series exists in the head.
- if h.series.getByID(id) != nil {
- return true
- }
+// keepSeriesInWALCheckpointFn returns a function that is used to determine whether a series record should be kept in the checkpoint.
+// mint is the time before which data in the WAL is being truncated.
+func (h *Head) keepSeriesInWALCheckpointFn(mint int64) func(id chunks.HeadSeriesRef) bool {
+ return func(id chunks.HeadSeriesRef) bool {
+ // Keep the record if the series exists in the head.
+ if h.series.getByID(id) != nil {
+ return true
+ }
- // Keep the record if the series has an expiry set.
- keepUntil, ok := h.getWALExpiry(id)
- return ok && keepUntil > last
+ // Keep the record if the series has an expiry set.
+ keepUntil, ok := h.getWALExpiry(id)
+ return ok && keepUntil >= mint
+ }
}
// truncateWAL removes old data before mint from the WAL.
@@ -1344,7 +1335,7 @@ func (h *Head) truncateWAL(mint int64) error {
}
h.metrics.checkpointCreationTotal.Inc()
- if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, h.keepSeriesInWALCheckpoint, mint); err != nil {
+ if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, h.keepSeriesInWALCheckpointFn(mint), mint); err != nil {
h.metrics.checkpointCreationFail.Inc()
var cerr *chunks.CorruptionErr
if errors.As(err, &cerr) {
@@ -1359,11 +1350,10 @@ func (h *Head) truncateWAL(mint int64) error {
h.logger.Error("truncating segments failed", "err", err)
}
- // The checkpoint is written and segments before it is truncated, so stop
- // tracking expired series.
+ // The checkpoint is written and data before mint is truncated, so stop tracking expired series.
h.walExpiriesMtx.Lock()
- for ref, segment := range h.walExpiries {
- if segment <= last {
+ for ref, keepUntil := range h.walExpiries {
+ if keepUntil < mint {
delete(h.walExpiries, ref)
}
}
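
Note on the hunks above: walExpiries changes its unit from WAL segment indexes to sample timestamps, so an entry now survives checkpointing while keepUntil >= mint and is pruned once truncation passes it. A minimal standalone model of that pruning rule, with hypothetical refs and timestamps (not taken from the patch):

package main

import "fmt"

type headSeriesRef uint64

// pruneExpiries drops every expiry entry whose keepUntil timestamp lies
// before mint, mirroring the cleanup loop in truncateWAL above.
func pruneExpiries(walExpiries map[headSeriesRef]int64, mint int64) {
	for ref, keepUntil := range walExpiries {
		if keepUntil < mint {
			delete(walExpiries, ref)
		}
	}
}

func main() {
	expiries := map[headSeriesRef]int64{1: 100, 2: 250}
	pruneExpiries(expiries, 200)
	fmt.Println(expiries) // map[2:250] -- ref 1 expired before mint.
}
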
@@ -1414,6 +1404,7 @@ func (h *Head) truncateOOO(lastWBLFile int, newMinOOOMmapRef chunks.ChunkDiskMap
// truncateSeriesAndChunkDiskMapper is a helper function for truncateMemory and truncateOOO.
// It runs GC on the Head and truncates the ChunkDiskMapper accordingly.
func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error {
+ h.logger.Info("Head GC started", "caller", caller)
start := time.Now()
headMaxt := h.MaxTime()
actualMint, minOOOTime, minMmapFile := h.gc()
@@ -1633,16 +1624,13 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
h.tombstones.TruncateBefore(mint)
if h.wal != nil {
- _, last, _ := wlog.Segments(h.wal.Dir())
h.walExpiriesMtx.Lock()
- // Keep series records until we're past segment 'last'
- // because the WAL will still have samples records with
- // this ref ID. If we didn't keep these series records then
- // on start up when we replay the WAL, or any other code
- // that reads the WAL, wouldn't be able to use those
- // samples since we would have no labels for that ref ID.
+ // Samples for deleted series are likely still in the WAL, so flag that the deleted series records should be kept during
+ // WAL checkpointing while the WAL contains data through actualInOrderMint.
+ // If we didn't keep these series records, then on startup, WAL replay (or any other code that reads the WAL)
+ // wouldn't be able to use those samples, since we would have no labels for that ref ID.
for ref := range deleted {
- h.walExpiries[chunks.HeadSeriesRef(ref)] = last
+ h.walExpiries[chunks.HeadSeriesRef(ref)] = actualInOrderMint
}
h.walExpiriesMtx.Unlock()
}
@@ -1745,37 +1733,36 @@ func (h *Head) Close() error {
// String returns a human-readable representation of the TSDB head. It's important to
// keep this function in order to avoid the struct dump when the head is stringified in
// errors or logs.
-func (h *Head) String() string {
+func (*Head) String() string {
return "head"
}
func (h *Head) getOrCreate(hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) {
- // Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create
- // a new series on every sample inserted via Add(), which causes allocations
- // and makes our series IDs rather random and harder to compress in postings.
s := h.series.getByHash(hash, lset)
if s != nil {
return s, false, nil
}
- // Optimistically assume that we are the first one to create the series.
- id := chunks.HeadSeriesRef(h.lastSeriesID.Inc())
-
- return h.getOrCreateWithID(id, hash, lset, pendingCommit)
+ return h.getOrCreateWithOptionalID(0, hash, lset, pendingCommit)
}
-func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) {
- s, created, err := h.series.getOrSet(hash, lset, func() *memSeries {
- shardHash := uint64(0)
- if h.opts.EnableSharding {
- shardHash = labels.StableHash(lset)
- }
+// If id is zero, one will be allocated.
+func (h *Head) getOrCreateWithOptionalID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels, pendingCommit bool) (*memSeries, bool, error) {
+ if preCreationErr := h.series.seriesLifecycleCallback.PreCreation(lset); preCreationErr != nil {
+ return nil, false, preCreationErr
+ }
+ if id == 0 {
+ // Note this id is wasted in the case where a concurrent operation creates the same series first.
+ id = chunks.HeadSeriesRef(h.lastSeriesID.Inc())
+ }
- return newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled, pendingCommit)
- })
- if err != nil {
- return nil, false, err
+ shardHash := uint64(0)
+ if h.opts.EnableSharding {
+ shardHash = labels.StableHash(lset)
}
+ optimisticallyCreatedSeries := newMemSeries(lset, id, shardHash, h.opts.IsolationDisabled, pendingCommit)
+
+ s, created := h.series.setUnlessAlreadySet(hash, lset, optimisticallyCreatedSeries)
if !created {
return s, false, nil
}
@@ -2073,43 +2060,23 @@ func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries {
return series
}
-func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries func() *memSeries) (*memSeries, bool, error) {
- // PreCreation is called here to avoid calling it inside the lock.
- // It is not necessary to call it just before creating a series,
- // rather it gives a 'hint' whether to create a series or not.
- preCreationErr := s.seriesLifecycleCallback.PreCreation(lset)
-
- // Create the series, unless the PreCreation() callback as failed.
- // If failed, we'll not allow to create a new series anyway.
- var series *memSeries
- if preCreationErr == nil {
- series = createSeries()
- }
-
+func (s *stripeSeries) setUnlessAlreadySet(hash uint64, lset labels.Labels, series *memSeries) (*memSeries, bool) {
i := hash & uint64(s.size-1)
s.locks[i].Lock()
-
if prev := s.hashes[i].get(hash, lset); prev != nil {
s.locks[i].Unlock()
- return prev, false, nil
- }
- if preCreationErr == nil {
- s.hashes[i].set(hash, series)
+ return prev, false
}
+ s.hashes[i].set(hash, series)
s.locks[i].Unlock()
- if preCreationErr != nil {
- // The callback prevented creation of series.
- return nil, false, preCreationErr
- }
-
i = uint64(series.ref) & uint64(s.size-1)
s.locks[i].Lock()
s.series[i][series.ref] = series
s.locks[i].Unlock()
- return series, true, nil
+ return series, true
}
func (s *stripeSeries) postCreation(lset labels.Labels) {
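
The refactor above splits the old getOrSet into an optimistic create followed by setUnlessAlreadySet: the caller builds the memSeries (and possibly burns a series ID) before taking the stripe lock, and the insert simply reports whether an existing entry won the race. A rough, generic sketch of the same pattern, assuming a plain map and mutex in place of the real stripeSeries:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type series struct{ id uint64 }

type store struct {
	mu     sync.Mutex
	byName map[string]*series
	lastID atomic.Uint64
}

// setUnlessAlreadySet inserts s under name unless another goroutine got
// there first, in which case the existing series is returned instead.
func (st *store) setUnlessAlreadySet(name string, s *series) (*series, bool) {
	st.mu.Lock()
	defer st.mu.Unlock()
	if prev, ok := st.byName[name]; ok {
		return prev, false // The optimistically allocated ID in s is wasted.
	}
	st.byName[name] = s
	return s, true
}

func main() {
	st := &store{byName: map[string]*series{}}
	// Allocate an ID before the lock, as getOrCreateWithOptionalID does.
	s := &series{id: st.lastID.Add(1)}
	got, created := st.setUnlessAlreadySet("up", s)
	fmt.Println(got.id, created) // 1 true
}
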
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
index fa44f752f25..8740d2f5ad5 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go
@@ -148,7 +148,7 @@ func (a *initAppender) Rollback() error {
}
// Appender returns a new Appender on the database.
-func (h *Head) Appender(_ context.Context) storage.Appender {
+func (h *Head) Appender(context.Context) storage.Appender {
h.metrics.activeAppenders.Inc()
// The head cache might not have a starting point yet. The init appender
@@ -164,13 +164,6 @@ func (h *Head) Appender(_ context.Context) storage.Appender {
func (h *Head) appender() *headAppender {
minValidTime := h.appendableMinValidTime()
appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
-
- // Allocate the exemplars buffer only if exemplars are enabled.
- var exemplarsBuf []exemplarWithSeriesRef
- if h.opts.EnableExemplarStorage {
- exemplarsBuf = h.getExemplarBuffer()
- }
-
return &headAppender{
head: h,
minValidTime: minValidTime,
@@ -178,12 +171,9 @@ func (h *Head) appender() *headAppender {
maxt: math.MinInt64,
headMaxt: h.MaxTime(),
oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
- samples: h.getAppendBuffer(),
- sampleSeries: h.getSeriesBuffer(),
- exemplars: exemplarsBuf,
- histograms: h.getHistogramBuffer(),
- floatHistograms: h.getFloatHistogramBuffer(),
- metadata: h.getMetadataBuffer(),
+ seriesRefs: h.getRefSeriesBuffer(),
+ series: h.getSeriesBuffer(),
+ typesInBatch: h.getTypeMap(),
appendID: appendID,
cleanupAppendIDsBelow: cleanupAppendIDsBelow,
}
@@ -213,16 +203,28 @@ func (h *Head) AppendableMinValidTime() (int64, bool) {
return h.appendableMinValidTime(), true
}
-func (h *Head) getAppendBuffer() []record.RefSample {
- b := h.appendPool.Get()
+func (h *Head) getRefSeriesBuffer() []record.RefSeries {
+ b := h.refSeriesPool.Get()
+ if b == nil {
+ return make([]record.RefSeries, 0, 512)
+ }
+ return b
+}
+
+func (h *Head) putRefSeriesBuffer(b []record.RefSeries) {
+ h.refSeriesPool.Put(b[:0])
+}
+
+func (h *Head) getFloatBuffer() []record.RefSample {
+ b := h.floatsPool.Get()
if b == nil {
return make([]record.RefSample, 0, 512)
}
return b
}
-func (h *Head) putAppendBuffer(b []record.RefSample) {
- h.appendPool.Put(b[:0])
+func (h *Head) putFloatBuffer(b []record.RefSample) {
+ h.floatsPool.Put(b[:0])
}
func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
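
The renamed getters and putters above all follow the same pool discipline: take a slice from the pool or allocate one with a fixed starting capacity, and return it truncated to length zero so the backing array can be reused. A minimal sketch of that discipline using sync.Pool (the real code uses the Head's own pool fields, so this is only an illustration):

package main

import (
	"fmt"
	"sync"
)

var floatsPool sync.Pool

// getFloatBuffer hands out a reusable slice with capacity for 512 entries.
func getFloatBuffer() []float64 {
	if b, ok := floatsPool.Get().([]float64); ok {
		return b
	}
	return make([]float64, 0, 512)
}

// putFloatBuffer returns the slice truncated to zero length, keeping the
// backing array for the next caller.
func putFloatBuffer(b []float64) {
	floatsPool.Put(b[:0])
}

func main() {
	b := getFloatBuffer()
	b = append(b, 1.5, 2.5)
	putFloatBuffer(b)
	fmt.Println(len(getFloatBuffer()), cap(getFloatBuffer())) // 0 512
}
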
@@ -295,6 +297,19 @@ func (h *Head) putSeriesBuffer(b []*memSeries) {
h.seriesPool.Put(b[:0])
}
+func (h *Head) getTypeMap() map[chunks.HeadSeriesRef]sampleType {
+ b := h.typeMapPool.Get()
+ if b == nil {
+ return make(map[chunks.HeadSeriesRef]sampleType)
+ }
+ return b
+}
+
+func (h *Head) putTypeMap(b map[chunks.HeadSeriesRef]sampleType) {
+ clear(b)
+ h.typeMapPool.Put(b)
+}
+
func (h *Head) getBytesBuffer() []byte {
b := h.bytesPool.Get()
if b == nil {
@@ -312,17 +327,30 @@ type exemplarWithSeriesRef struct {
exemplar exemplar.Exemplar
}
-type headAppender struct {
- head *Head
- minValidTime int64 // No samples below this timestamp are allowed.
- mint, maxt int64
- headMaxt int64 // We track it here to not take the lock for every sample appended.
- oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
+// sampleType describes sample types we need to distinguish for append batching.
+// We need separate types for everything that goes into a different WAL record
+// type or into a different chunk encoding.
+type sampleType byte
- seriesRefs []record.RefSeries // New series records held by this appender.
- series []*memSeries // New series held by this appender (using corresponding slices indexes from seriesRefs)
- samples []record.RefSample // New float samples held by this appender.
- sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+const (
+ stNone sampleType = iota // To mark that the sample type does not matter.
+ stFloat // All simple floats (counters, gauges, untyped). Goes to `floats`.
+ stHistogram // Native integer histograms with a standard exponential schema. Goes to `histograms`.
+ stCustomBucketHistogram // Native integer histograms with custom bucket boundaries. Goes to `histograms`.
+ stFloatHistogram // Native float histograms. Goes to `floatHistograms`.
+ stCustomBucketFloatHistogram // Native float histograms with custom bucket boundaries. Goes to `floatHistograms`.
+)
+
+// appendBatch is used to partition all the appended data into batches that are
+// "type clean", i.e. every series receives only samples of one type within the
+// batch. Types in this regard are defined by the sampleType enum above.
+// TODO(beorn7): The same concept could be extended to make sure every series in
+// the batch has at most one metadata record. This is currently not implemented
+// because it is unclear if it is needed at all. (Maybe we will remove metadata
+// records altogether, see issue #15911.)
+type appendBatch struct {
+ floats []record.RefSample // New float samples held by this appender.
+ floatSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
histograms []record.RefHistogramSample // New histogram samples held by this appender.
histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender.
@@ -330,6 +358,42 @@ type headAppender struct {
metadata []record.RefMetadata // New metadata held by this appender.
metadataSeries []*memSeries // Series corresponding to the metadata held by this appender.
exemplars []exemplarWithSeriesRef // New exemplars held by this appender.
+}
+
+// close returns all the slices to the pools in Head and nils them.
+func (b *appendBatch) close(h *Head) {
+ h.putFloatBuffer(b.floats)
+ b.floats = nil
+ h.putSeriesBuffer(b.floatSeries)
+ b.floatSeries = nil
+ h.putHistogramBuffer(b.histograms)
+ b.histograms = nil
+ h.putSeriesBuffer(b.histogramSeries)
+ b.histogramSeries = nil
+ h.putFloatHistogramBuffer(b.floatHistograms)
+ b.floatHistograms = nil
+ h.putSeriesBuffer(b.floatHistogramSeries)
+ b.floatHistogramSeries = nil
+ h.putMetadataBuffer(b.metadata)
+ b.metadata = nil
+ h.putSeriesBuffer(b.metadataSeries)
+ b.metadataSeries = nil
+ h.putExemplarBuffer(b.exemplars)
+ b.exemplars = nil
+}
+
+type headAppender struct {
+ head *Head
+ minValidTime int64 // No samples below this timestamp are allowed.
+ mint, maxt int64
+ headMaxt int64 // We track it here to not take the lock for every sample appended.
+ oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
+
+ seriesRefs []record.RefSeries // New series records held by this appender.
+ series []*memSeries // New series held by this appender (using corresponding slice indices from seriesRefs)
+ batches []*appendBatch // Holds all the other data to append. (In regular cases, there should be only one of these.)
+
+ typesInBatch map[chunks.HeadSeriesRef]sampleType // Which (one) sample type each series holds in the most recent batch.
appendID, cleanupAppendIDsBelow uint64
closed bool
@@ -357,21 +421,27 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
}
}
- s.Lock()
if value.IsStaleNaN(v) {
- // TODO(krajorama): reorganize Commit() to handle samples in append order
- // not floats first and then histograms. Then we could do this conversion
- // in commit. This code should move into Commit().
- switch {
- case s.lastHistogramValue != nil:
- s.Unlock()
+ // If we have added a sample before with this same appender, we
+ // can check the previously used type and turn a stale float
+ // sample into a stale histogram sample or stale float histogram
+ // sample as appropriate. This prevents an unnecessary creation
+ // of a new batch. However, since other appenders might append
+ // to the same series concurrently, this is not perfect but just
+ // an optimization for the more likely case.
+ switch a.typesInBatch[s.ref] {
+ case stHistogram, stCustomBucketHistogram:
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
- case s.lastFloatHistogramValue != nil:
- s.Unlock()
+ case stFloatHistogram, stCustomBucketFloatHistogram:
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
}
+ // Note that a series reference not yet in the map will come out
+ // as stNone, but since we do not handle that case separately,
+ // we do not need to check for the difference between "unknown
+ // series" and "known series with stNone".
}
+ s.Lock()
defer s.Unlock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
@@ -403,12 +473,13 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
a.maxt = t
}
- a.samples = append(a.samples, record.RefSample{
+ b := a.getCurrentBatch(stFloat, s.ref)
+ b.floats = append(b.floats, record.RefSample{
Ref: s.ref,
T: t,
V: v,
})
- a.sampleSeries = append(a.sampleSeries, s)
+ b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil
}
@@ -448,8 +519,9 @@ func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Lab
if ct > a.maxt {
a.maxt = ct
}
- a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
- a.sampleSeries = append(a.sampleSeries, s)
+ b := a.getCurrentBatch(stFloat, s.ref)
+ b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
+ b.floatSeries = append(b.floatSeries, s)
return storage.SeriesRef(s.ref), nil
}
@@ -476,6 +548,86 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo
return s, created, nil
}
+// getCurrentBatch returns the current batch if it fits the provided sampleType
+// for the provided series. Otherwise, it adds a new batch and returns it.
+func (a *headAppender) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
+ h := a.head
+
+ newBatch := func() *appendBatch {
+ b := appendBatch{
+ floats: h.getFloatBuffer(),
+ floatSeries: h.getSeriesBuffer(),
+ histograms: h.getHistogramBuffer(),
+ histogramSeries: h.getSeriesBuffer(),
+ floatHistograms: h.getFloatHistogramBuffer(),
+ floatHistogramSeries: h.getSeriesBuffer(),
+ metadata: h.getMetadataBuffer(),
+ metadataSeries: h.getSeriesBuffer(),
+ }
+
+ // Allocate the exemplars buffer only if exemplars are enabled.
+ if h.opts.EnableExemplarStorage {
+ b.exemplars = h.getExemplarBuffer()
+ }
+ clear(a.typesInBatch)
+ switch st {
+ case stHistogram, stFloatHistogram, stCustomBucketHistogram, stCustomBucketFloatHistogram:
+ // We only record histogram sample types in the map.
+ // Floats are implicit.
+ a.typesInBatch[s] = st
+ }
+ a.batches = append(a.batches, &b)
+ return &b
+ }
+
+ // First batch ever. Create it.
+ if len(a.batches) == 0 {
+ return newBatch()
+ }
+
+ // TODO(beorn7): If we ever see that the a.typesInBatch map grows so
+ // large that it matters for total memory consumption, we could limit
+ // the batch size here, i.e. cut a new batch even without a type change.
+ // Something like:
+ // if len(a.typesInBatch) > limit {
+ // return newBatch()
+ // }
+
+ lastBatch := a.batches[len(a.batches)-1]
+ if st == stNone {
+ // Type doesn't matter, last batch will always do.
+ return lastBatch
+ }
+ prevST, ok := a.typesInBatch[s]
+ switch {
+ case prevST == st:
+ // An old series of some histogram type with the same type being appended.
+ // Continue the batch.
+ return lastBatch
+ case !ok && st == stFloat:
+ // A new float series, or an old float series that gets floats appended.
+ // Note that we do not track stFloat in typesInBatch.
+ // Continue the batch.
+ return lastBatch
+ case st == stFloat:
+ // A float being appended to a histogram series.
+ // Start a new batch.
+ return newBatch()
+ case !ok:
+ // A new series of some histogram type, or some histogram type
+ // being appended to an old float series. Even in the latter
+ // case, we don't need to start a new batch because histograms
+ // after floats are fine.
+ // Add new sample type to the map and continue batch.
+ a.typesInBatch[s] = st
+ return lastBatch
+ default:
+ // One histogram type changed to another.
+ // Start a new batch.
+ return newBatch()
+ }
+}
+
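
getCurrentBatch cuts a new batch only when a series switches between incompatible sample types; everything else continues the current batch, and floats are never tracked in the map. A compact standalone model of the decision rules (custom-bucket variants omitted, series keyed by hypothetical names rather than refs):

package main

import "fmt"

type sampleType byte

const (
	stNone sampleType = iota
	stFloat
	stHistogram
	stFloatHistogram
)

// batcher models getCurrentBatch's rule: floats are implicit (not tracked),
// histogram types are tracked per series, and a type change cuts a new batch.
type batcher struct {
	types   map[string]sampleType
	batches int
}

func (b *batcher) add(series string, st sampleType) int {
	if b.batches == 0 {
		b.batches = 1
		b.types = map[string]sampleType{}
	}
	prev, ok := b.types[series]
	switch {
	case st == stNone || prev == st || (!ok && st == stFloat):
		// Continue the current batch.
	case !ok && st != stFloat:
		b.types[series] = st // Histogram after floats is fine; track it.
	default:
		// Float after histogram, or one histogram type after another: new batch.
		b.batches++
		b.types = map[string]sampleType{}
		if st != stFloat {
			b.types[series] = st
		}
	}
	return b.batches
}

func main() {
	var b batcher
	fmt.Println(b.add("a", stFloat))          // 1
	fmt.Println(b.add("a", stHistogram))      // 1: histogram after float continues.
	fmt.Println(b.add("a", stFloat))          // 2: float after histogram cuts a batch.
	fmt.Println(b.add("a", stFloatHistogram)) // 2: new type tracked in fresh batch.
}
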
// appendable checks whether the given sample is valid for appending to the series.
// If the sample is valid and in-order, it returns false with no error.
// If the sample belongs to the out-of-order chunk, it returns true with no error.
@@ -638,16 +790,13 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
return 0, err
}
- a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})
+ b := a.getCurrentBatch(stNone, chunks.HeadSeriesRef(ref))
+ b.exemplars = append(b.exemplars, exemplarWithSeriesRef{ref, e})
return storage.SeriesRef(s.ref), nil
}
func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
- if !a.head.opts.EnableNativeHistograms.Load() {
- return 0, storage.ErrNativeHistogramsDisabled
- }
-
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
@@ -667,11 +816,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
}
- var created bool
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
- s, created, err = a.getOrCreate(lset)
+ s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
@@ -680,14 +828,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
switch {
case h != nil:
s.Lock()
-
- // TODO(krajorama): reorganize Commit() to handle samples in append order
- // not floats first and then histograms. Then we would not need to do this.
- // This whole "if" should be removed.
- if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
- s.lastHistogramValue = &histogram.Histogram{}
- }
-
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)
@@ -707,22 +847,19 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
return 0, err
}
- a.histograms = append(a.histograms, record.RefHistogramSample{
+ st := stHistogram
+ if h.UsesCustomBuckets() {
+ st = stCustomBucketHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref,
T: t,
H: h,
})
- a.histogramSeries = append(a.histogramSeries, s)
+ b.histogramSeries = append(b.histogramSeries, s)
case fh != nil:
s.Lock()
-
- // TODO(krajorama): reorganize Commit() to handle samples in append order
- // not floats first and then histograms. Then we would not need to do this.
- // This whole "if" should be removed.
- if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
- s.lastFloatHistogramValue = &histogram.FloatHistogram{}
- }
-
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)
@@ -742,12 +879,17 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
return 0, err
}
- a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
+ st := stFloatHistogram
+ if fh.UsesCustomBuckets() {
+ st = stCustomBucketFloatHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: t,
FH: fh,
})
- a.floatHistogramSeries = append(a.floatHistogramSeries, s)
+ b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
if t < a.mint {
@@ -761,19 +903,14 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
- if !a.head.opts.EnableNativeHistograms.Load() {
- return 0, storage.ErrNativeHistogramsDisabled
- }
-
if ct >= t {
return 0, storage.ErrCTNewerThanSample
}
- var created bool
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
var err error
- s, created, err = a.getOrCreate(lset)
+ s, _, err = a.getOrCreate(lset)
if err != nil {
return 0, err
}
@@ -781,16 +918,15 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
switch {
case h != nil:
- zeroHistogram := &histogram.Histogram{}
- s.Lock()
-
- // TODO(krajorama): reorganize Commit() to handle samples in append order
- // not floats first and then histograms. Then we would not need to do this.
- // This whole "if" should be removed.
- if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
- s.lastHistogramValue = zeroHistogram
+ zeroHistogram := &histogram.Histogram{
+ // The CTZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ CustomValues: h.CustomValues,
}
-
+ s.Lock()
// For CTZeroSamples OOO is not allowed.
// We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow)
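
The created-timestamp zero sample is now built as an explicit counter reset that copies the incoming histogram's layout, so appending it does not force a chunk cut merely because the schema differs. A standalone sketch of that construction, using the prometheus/model histogram types as in the patch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// ctZeroHistogram builds the zero-valued sample inserted at the created
// timestamp: a counter reset that replicates the layout fields of the real
// sample so the zero sample fits the same chunk.
func ctZeroHistogram(h *histogram.Histogram) *histogram.Histogram {
	return &histogram.Histogram{
		CounterResetHint: histogram.CounterReset,
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		CustomValues:     h.CustomValues,
	}
}

func main() {
	real := &histogram.Histogram{Schema: 3, ZeroThreshold: 1e-128, Count: 42}
	zero := ctZeroHistogram(real)
	fmt.Println(zero.Schema, zero.Count) // 3 0
}
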
@@ -812,23 +948,27 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
s.pendingCommit = true
s.Unlock()
- a.histograms = append(a.histograms, record.RefHistogramSample{
+ st := stHistogram
+ if h.UsesCustomBuckets() {
+ st = stCustomBucketHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.histograms = append(b.histograms, record.RefHistogramSample{
Ref: s.ref,
T: ct,
H: zeroHistogram,
})
- a.histogramSeries = append(a.histogramSeries, s)
+ b.histogramSeries = append(b.histogramSeries, s)
case fh != nil:
- zeroFloatHistogram := &histogram.FloatHistogram{}
- s.Lock()
-
- // TODO(krajorama): reorganize Commit() to handle samples in append order
- // not floats first and then histograms. Then we would not need to do this.
- // This whole "if" should be removed.
- if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil {
- s.lastFloatHistogramValue = zeroFloatHistogram
+ zeroFloatHistogram := &histogram.FloatHistogram{
+ // The CTZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: fh.Schema,
+ ZeroThreshold: fh.ZeroThreshold,
+ CustomValues: fh.CustomValues,
}
-
+ s.Lock()
// We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow) // OOO is not allowed for CTZeroSamples.
if err != nil {
@@ -849,12 +989,17 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
s.pendingCommit = true
s.Unlock()
- a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
+ st := stFloatHistogram
+ if fh.UsesCustomBuckets() {
+ st = stCustomBucketFloatHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: ct,
FH: zeroFloatHistogram,
})
- a.floatHistogramSeries = append(a.floatHistogramSeries, s)
+ b.floatHistogramSeries = append(b.floatHistogramSeries, s)
}
if ct > a.maxt {
@@ -883,13 +1028,14 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
s.Unlock()
if hasNewMetadata {
- a.metadata = append(a.metadata, record.RefMetadata{
+ b := a.getCurrentBatch(stNone, s.ref)
+ b.metadata = append(b.metadata, record.RefMetadata{
Ref: s.ref,
Type: record.GetMetricType(meta.Type),
Unit: meta.Unit,
Help: meta.Help,
})
- a.metadataSeries = append(a.metadataSeries, s)
+ b.metadataSeries = append(b.metadataSeries, s)
}
return ref, nil
@@ -926,66 +1072,70 @@ func (a *headAppender) log() error {
return fmt.Errorf("log series: %w", err)
}
}
- if len(a.metadata) > 0 {
- rec = enc.Metadata(a.metadata, buf)
- buf = rec[:0]
+ for _, b := range a.batches {
+ if len(b.metadata) > 0 {
+ rec = enc.Metadata(b.metadata, buf)
+ buf = rec[:0]
- if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log metadata: %w", err)
- }
- }
- if len(a.samples) > 0 {
- rec = enc.Samples(a.samples, buf)
- buf = rec[:0]
-
- if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log samples: %w", err)
- }
- }
- if len(a.histograms) > 0 {
- var customBucketsHistograms []record.RefHistogramSample
- rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf)
- buf = rec[:0]
- if len(rec) > 0 {
if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log histograms: %w", err)
+ return fmt.Errorf("log metadata: %w", err)
}
}
+ // It's important to do (float) Samples before histogram samples
+ // to end up with the correct order.
+ if len(b.floats) > 0 {
+ rec = enc.Samples(b.floats, buf)
+ buf = rec[:0]
- if len(customBucketsHistograms) > 0 {
- rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf)
if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log custom buckets histograms: %w", err)
+ return fmt.Errorf("log samples: %w", err)
}
}
- }
- if len(a.floatHistograms) > 0 {
- var customBucketsFloatHistograms []record.RefFloatHistogramSample
- rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf)
- buf = rec[:0]
- if len(rec) > 0 {
- if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log float histograms: %w", err)
+ if len(b.histograms) > 0 {
+ var customBucketsHistograms []record.RefHistogramSample
+ rec, customBucketsHistograms = enc.HistogramSamples(b.histograms, buf)
+ buf = rec[:0]
+ if len(rec) > 0 {
+ if err := a.head.wal.Log(rec); err != nil {
+ return fmt.Errorf("log histograms: %w", err)
+ }
+ }
+
+ if len(customBucketsHistograms) > 0 {
+ rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf)
+ if err := a.head.wal.Log(rec); err != nil {
+ return fmt.Errorf("log custom buckets histograms: %w", err)
+ }
}
}
+ if len(b.floatHistograms) > 0 {
+ var customBucketsFloatHistograms []record.RefFloatHistogramSample
+ rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(b.floatHistograms, buf)
+ buf = rec[:0]
+ if len(rec) > 0 {
+ if err := a.head.wal.Log(rec); err != nil {
+ return fmt.Errorf("log float histograms: %w", err)
+ }
+ }
- if len(customBucketsFloatHistograms) > 0 {
- rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf)
- if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log custom buckets float histograms: %w", err)
+ if len(customBucketsFloatHistograms) > 0 {
+ rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf)
+ if err := a.head.wal.Log(rec); err != nil {
+ return fmt.Errorf("log custom buckets float histograms: %w", err)
+ }
}
}
- }
- // Exemplars should be logged after samples (float/native histogram/etc),
- // otherwise it might happen that we send the exemplars in a remote write
- // batch before the samples, which in turn means the exemplar is rejected
- // for missing series, since series are created due to samples.
- if len(a.exemplars) > 0 {
- rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf)
- buf = rec[:0]
+ // Exemplars should be logged after samples (float/native histogram/etc),
+ // otherwise it might happen that we send the exemplars in a remote write
+ // batch before the samples, which in turn means the exemplar is rejected
+ // for missing series, since series are created due to samples.
+ if len(b.exemplars) > 0 {
+ rec = enc.Exemplars(exemplarsForEncoding(b.exemplars), buf)
+ buf = rec[:0]
- if err := a.head.wal.Log(rec); err != nil {
- return fmt.Errorf("log exemplars: %w", err)
+ if err := a.head.wal.Log(rec); err != nil {
+ return fmt.Errorf("log exemplars: %w", err)
+ }
}
}
return nil
@@ -1034,10 +1184,10 @@ type appenderCommitContext struct {
enc record.Encoder
}
-// commitExemplars adds all exemplars from headAppender to the head's exemplar storage.
-func (a *headAppender) commitExemplars() {
+// commitExemplars adds all exemplars from the provided batch to the head's exemplar storage.
+func (a *headAppender) commitExemplars(b *appendBatch) {
// No errors logging to WAL, so pass the exemplars along to the in memory storage.
- for _, e := range a.exemplars {
+ for _, e := range b.exemplars {
s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
if s == nil {
// This is very unlikely to happen, but we have seen it in the wild.
@@ -1141,9 +1291,9 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
}
}
-// commitSamples processes and commits the samples in the headAppender to the series.
-// It handles both in-order and out-of-order samples, updating the appenderCommitContext
-// with the results of the append operations.
+// commitFloats processes and commits the samples in the provided batch to the
+// series. It handles both in-order and out-of-order samples, updating the
+// appenderCommitContext with the results of the append operations.
//
// The function iterates over the samples in the headAppender and attempts to append each sample
// to its corresponding series. It handles various error cases such as out-of-order samples,
@@ -1160,14 +1310,74 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
// operations on the series after appending the samples.
//
// There are also specific functions to commit histograms and float histograms.
-func (a *headAppender) commitSamples(acc *appenderCommitContext) {
+func (a *headAppender) commitFloats(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
- for i, s := range a.samples {
- series = a.sampleSeries[i]
+ for i, s := range b.floats {
+ series = b.floatSeries[i]
series.Lock()
+ if value.IsStaleNaN(s.V) {
+ // If a float staleness marker had been appended for a
+ // series that got a histogram or float histogram
+ // appended before via this same appender, it would not
+ // show up here because we had already converted it. We
+ // end up here for one of two reasons: (1) This is the very
+ // first sample for this series appended via this
+ // appender. (2) A float sample was appended to this
+ // series before via this same appender.
+ //
+ // In either case, we need to check the previous sample
+ // in the memSeries to append the appropriately typed
+ // staleness marker. This is obviously so in case (1).
+ // In case (2), we would usually expect a float sample
+ // as the previous sample, but there might be concurrent
+ // appends that have added a histogram sample in the
+ // meantime. (This will probably lead to OOO shenanigans
+ // anyway, but that's a different story.)
+ //
+ // If the last sample in the memSeries is indeed a
+ // float, we don't have to do anything special here and
+ // just go on with the normal commit for a float sample.
+ // However, if the last sample in the memSeries is a
+ // histogram or float histogram, we have to convert the
+ // staleness marker to a histogram (or float histogram,
+ // respectively), and just add it at the end of the
+ // histograms (or float histograms) in the same batch,
+ // to be committed later in commitHistograms (or
+ // commitFloatHistograms). The latter is fine because we
+ // know there is no other histogram (or float histogram)
+ // sample for this same series in this same batch
+ // (because any such sample would have triggered a new
+ // batch).
+ switch {
+ case series.lastHistogramValue != nil:
+ b.histograms = append(b.histograms, record.RefHistogramSample{
+ Ref: series.ref,
+ T: s.T,
+ H: &histogram.Histogram{Sum: s.V},
+ })
+ b.histogramSeries = append(b.histogramSeries, series)
+ // This sample was counted as a float but is now a histogram.
+ acc.floatsAppended--
+ acc.histogramsAppended++
+ series.Unlock()
+ continue
+ case series.lastFloatHistogramValue != nil:
+ b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{
+ Ref: series.ref,
+ T: s.T,
+ FH: &histogram.FloatHistogram{Sum: s.V},
+ })
+ b.floatHistogramSeries = append(b.floatHistogramSeries, series)
+ // This sample was counted as a float but is now a float histogram.
+ acc.floatsAppended--
+ acc.histogramsAppended++
+ series.Unlock()
+ continue
+ }
+ }
oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil {
handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected)
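
commitFloats resolves leftover float staleness markers against the series' last committed type: if the memSeries last held a histogram (or float histogram), the marker is rewritten as a histogram whose Sum carries the stale NaN and queued onto the same batch's histogram slices. A small sketch of that conversion check, assuming the value and histogram packages used in the patch:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/value"
)

// asHistogramStaleMarker turns a float staleness marker into a histogram
// staleness marker when the series' previous sample was a histogram.
func asHistogramStaleMarker(v float64, lastWasHistogram bool) (*histogram.Histogram, bool) {
	if !value.IsStaleNaN(v) || !lastWasHistogram {
		return nil, false
	}
	// The stale NaN travels in Sum, exactly as in commitFloats above.
	return &histogram.Histogram{Sum: v}, true
}

func main() {
	staleNaN := math.Float64frombits(value.StaleNaN)
	h, converted := asHistogramStaleMarker(staleNaN, true)
	fmt.Println(converted, value.IsStaleNaN(h.Sum)) // true true
}
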
@@ -1255,15 +1465,24 @@ func (a *headAppender) commitSamples(acc *appenderCommitContext) {
}
}
-// For details on the commitHistograms function, see the commitSamples docs.
-func (a *headAppender) commitHistograms(acc *appenderCommitContext) {
+// For details on the commitHistograms function, see the commitFloats docs.
+func (a *headAppender) commitHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
- for i, s := range a.histograms {
- series = a.histogramSeries[i]
+ for i, s := range b.histograms {
+ series = b.histogramSeries[i]
series.Lock()
+ // At this point, we could encounter a histogram staleness
+ // marker that ought to be a float staleness marker or a
+ // float histogram staleness marker. This can only happen with
+ // concurrent appenders appending to the same series _and_ doing
+ // so in a mixed-type scenario. This case is expected to be very
+ // rare, so we do not bother here to convert the staleness
+ // marker. The worst case is that we need to cut a new chunk
+ // just for the staleness marker.
+
oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil {
handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)
@@ -1355,15 +1574,24 @@ func (a *headAppender) commitHistograms(acc *appenderCommitContext) {
}
}
-// For details on the commitFloatHistograms function, see the commitSamples docs.
-func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) {
+// For details on the commitFloatHistograms function, see the commitFloats docs.
+func (a *headAppender) commitFloatHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
- for i, s := range a.floatHistograms {
- series = a.floatHistogramSeries[i]
+ for i, s := range b.floatHistograms {
+ series = b.floatHistogramSeries[i]
series.Lock()
+ // At this point, we could encounter a float histogram staleness
+ // marker that ought to be a float staleness marker or an
+ // integer histogram staleness marker. This can only happen with
+ // concurrent appenders appending to the same series _and_ doing
+ // so in a mixed-type scenario. This case is expected to be very
+ // rare, so we do not bother here to convert the staleness
+ // marker. The worst case is that we need to cut a new chunk
+ // just for the staleness marker.
+
oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow)
if err != nil {
handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected)
@@ -1455,14 +1683,14 @@ func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) {
}
}
-// commitMetadata commits the metadata for each series in the headAppender.
+// commitMetadata commits the metadata for each series in the provided batch.
// It iterates over the metadata slice and updates the corresponding series
// with the new metadata information. The series is locked during the update
// to ensure thread safety.
-func (a *headAppender) commitMetadata() {
+func commitMetadata(b *appendBatch) {
var series *memSeries
- for i, m := range a.metadata {
- series = a.metadataSeries[i]
+ for i, m := range b.metadata {
+ series = b.metadataSeries[i]
series.Lock()
series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help}
series.Unlock()
@@ -1483,75 +1711,88 @@ func (a *headAppender) Commit() (err error) {
if a.closed {
return ErrAppenderClosed
}
- defer func() { a.closed = true }()
+
+ h := a.head
+
+ defer func() {
+ if a.closed {
+ // Don't double-close in case Rollback() was called.
+ return
+ }
+ h.putRefSeriesBuffer(a.seriesRefs)
+ h.putSeriesBuffer(a.series)
+ h.putTypeMap(a.typesInBatch)
+ a.closed = true
+ }()
if err := a.log(); err != nil {
_ = a.Rollback() // Most likely the same error will happen again.
return fmt.Errorf("write to WAL: %w", err)
}
- if a.head.writeNotified != nil {
- a.head.writeNotified.Notify()
+ if h.writeNotified != nil {
+ h.writeNotified.Notify()
}
- a.commitExemplars()
-
- defer a.head.metrics.activeAppenders.Dec()
- defer a.head.putAppendBuffer(a.samples)
- defer a.head.putSeriesBuffer(a.sampleSeries)
- defer a.head.putExemplarBuffer(a.exemplars)
- defer a.head.putHistogramBuffer(a.histograms)
- defer a.head.putFloatHistogramBuffer(a.floatHistograms)
- defer a.head.putMetadataBuffer(a.metadata)
- defer a.head.iso.closeAppend(a.appendID)
-
acc := &appenderCommitContext{
- floatsAppended: len(a.samples),
- histogramsAppended: len(a.histograms) + len(a.floatHistograms),
- inOrderMint: math.MaxInt64,
- inOrderMaxt: math.MinInt64,
- oooMinT: math.MaxInt64,
- oooMaxT: math.MinInt64,
- oooCapMax: a.head.opts.OutOfOrderCapMax.Load(),
+ inOrderMint: math.MaxInt64,
+ inOrderMaxt: math.MinInt64,
+ oooMinT: math.MaxInt64,
+ oooMaxT: math.MinInt64,
+ oooCapMax: h.opts.OutOfOrderCapMax.Load(),
appendChunkOpts: chunkOpts{
- chunkDiskMapper: a.head.chunkDiskMapper,
- chunkRange: a.head.chunkRange.Load(),
- samplesPerChunk: a.head.opts.SamplesPerChunk,
+ chunkDiskMapper: h.chunkDiskMapper,
+ chunkRange: h.chunkRange.Load(),
+ samplesPerChunk: h.opts.SamplesPerChunk,
},
}
+ for _, b := range a.batches {
+ acc.floatsAppended += len(b.floats)
+ acc.histogramsAppended += len(b.histograms) + len(b.floatHistograms)
+ a.commitExemplars(b)
+ defer b.close(h)
+ }
+ defer h.metrics.activeAppenders.Dec()
+ defer h.iso.closeAppend(a.appendID)
+
defer func() {
for i := range acc.oooRecords {
- a.head.putBytesBuffer(acc.oooRecords[i][:0])
+ h.putBytesBuffer(acc.oooRecords[i][:0])
}
}()
- a.commitSamples(acc)
- a.commitHistograms(acc)
- a.commitFloatHistograms(acc)
- a.commitMetadata()
+ for _, b := range a.batches {
+ // Do not change the order of these calls. We depend on it for
+ // correct commit order of samples and for the staleness marker
+ // handling.
+ a.commitFloats(b, acc)
+ a.commitHistograms(b, acc)
+ a.commitFloatHistograms(b, acc)
+ commitMetadata(b)
+ }
// Unmark all series as pending commit after all samples have been committed.
a.unmarkCreatedSeriesAsPendingCommit()
- a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
- a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
- a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected))
- a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected))
- a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended))
- a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended))
- a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted))
- a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted))
- a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt)
- a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT)
+ h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected))
+ h.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected))
+ h.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected))
+ h.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected))
+ h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended))
+ h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended))
+ h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted))
+ h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted))
+ h.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt)
+ h.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT)
acc.collectOOORecords(a)
- if a.head.wbl != nil {
- if err := a.head.wbl.Log(acc.oooRecords...); err != nil {
+ if h.wbl != nil {
+ if err := h.wbl.Log(acc.oooRecords...); err != nil {
// TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging
// until we have found what samples become OOO. We can try having a metric for this failure.
// Returning the error here is not correct because we have already put the samples into the memory,
// hence the append/insert was a success.
- a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err)
+ h.logger.Error("Failed to log out of order samples into the WAL", "err", err)
}
}
return nil
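
With batching in place, a single Commit walks the batches in creation order and, inside each batch, commits floats before histograms before float histograms before metadata. A hedged usage sketch of the storage.Appender surface this preserves (head construction elided; names and values are illustrative, not from the patch):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendMixed drives any storage.Appender through a mixed-type sequence
// that now lands in two internal batches: a float, then a histogram for the
// same series (still batch one), then a float again (cuts batch two).
func appendMixed(app storage.Appender) error {
	lset := labels.FromStrings("__name__", "demo")
	if _, err := app.Append(0, lset, 1000, 1.0); err != nil {
		return err
	}
	if _, err := app.AppendHistogram(0, lset, 2000, &histogram.Histogram{Count: 1, Sum: 1}, nil); err != nil {
		return err
	}
	if _, err := app.Append(0, lset, 3000, 2.0); err != nil {
		return err
	}
	return app.Commit() // Batches are committed in order, then returned to the pools.
}

func main() {
	fmt.Println("pass appendMixed a *tsdb.Head appender to exercise batching")
	_ = appendMixed
}
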
@@ -1966,7 +2207,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count int) {
if s.headChunks == nil || s.headChunks.prev == nil {
// There is none or only one head chunk, so nothing to m-map here.
- return
+ return count
}
// Write chunks starting from the oldest one and stop before we get to current s.headChunks.
@@ -2001,37 +2242,43 @@ func (a *headAppender) Rollback() (err error) {
if a.closed {
return ErrAppenderClosed
}
- defer func() { a.closed = true }()
- defer a.head.metrics.activeAppenders.Dec()
- defer a.head.iso.closeAppend(a.appendID)
- defer a.head.putSeriesBuffer(a.sampleSeries)
- defer a.unmarkCreatedSeriesAsPendingCommit()
+ h := a.head
+ defer func() {
+ a.unmarkCreatedSeriesAsPendingCommit()
+ h.iso.closeAppend(a.appendID)
+ h.metrics.activeAppenders.Dec()
+ a.closed = true
+ h.putRefSeriesBuffer(a.seriesRefs)
+ h.putSeriesBuffer(a.series)
+ h.putTypeMap(a.typesInBatch)
+ }()
var series *memSeries
- for i := range a.samples {
- series = a.sampleSeries[i]
- series.Lock()
- series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
- series.pendingCommit = false
- series.Unlock()
- }
- for i := range a.histograms {
- series = a.histogramSeries[i]
- series.Lock()
- series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
- series.pendingCommit = false
- series.Unlock()
- }
- a.head.putAppendBuffer(a.samples)
- a.head.putExemplarBuffer(a.exemplars)
- a.head.putHistogramBuffer(a.histograms)
- a.head.putFloatHistogramBuffer(a.floatHistograms)
- a.head.putMetadataBuffer(a.metadata)
- a.samples = nil
- a.exemplars = nil
- a.histograms = nil
- a.metadata = nil
-
+ for _, b := range a.batches {
+ for i := range b.floats {
+ series = b.floatSeries[i]
+ series.Lock()
+ series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+ series.pendingCommit = false
+ series.Unlock()
+ }
+ for i := range b.histograms {
+ series = b.histogramSeries[i]
+ series.Lock()
+ series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+ series.pendingCommit = false
+ series.Unlock()
+ }
+ for i := range b.floatHistograms {
+ series = b.floatHistogramSeries[i]
+ series.Lock()
+ series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+ series.pendingCommit = false
+ series.Unlock()
+ }
+ b.close(h)
+ }
+ a.batches = a.batches[:0]
// Series are created in the head memory regardless of rollback. Thus we have
// to log them to the WAL in any case.
return a.log()
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go
index 45bb2285f00..7e1eea8b055 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go
@@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels {
}
// RebuildSymbolTable is a no-op when not using dedupelabels.
-func (h *Head) RebuildSymbolTable(_ *slog.Logger) *labels.SymbolTable {
+func (*Head) RebuildSymbolTable(*slog.Logger) *labels.SymbolTable {
return nil
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
index b653b5dc14c..8485d654357 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go
@@ -49,7 +49,7 @@ type headIndexReader struct {
mint, maxt int64
}
-func (h *headIndexReader) Close() error {
+func (*headIndexReader) Close() error {
return nil
}
@@ -560,7 +560,7 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *
// Iterate over the appendIDs, find the first one that the isolation state says not
// to return.
it := s.txs.iterator()
- for index := 0; index < appendIDsToConsider; index++ {
+ for index := range appendIDsToConsider {
appendID := it.At()
if appendID <= isoState.maxAppendID { // Easy check first.
if _, ok := isoState.incompleteAppends[appendID]; !ok {
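
A mechanical modernization repeated throughout these files: counted loops become Go 1.22 range-over-int, which iterates 0..n-1, and loops that ignore the index drop the variable entirely. A tiny demonstration:

package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("index", i) // 0, 1, 2
	}
	// When the index is unused, as in the snapshot encoder below:
	for range 3 {
		fmt.Println("tick")
	}
}
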
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go
index 3e0dadb5268..3c9aa7980e6 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go
@@ -76,7 +76,7 @@ func counterAddNonZero(v *prometheus.CounterVec, value float64, lvs ...string) {
}
}
-func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk, lastSegment int) (err error) {
+func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
// Track number of missing series records that were referenced by other records.
unknownSeriesRefs := &seriesRefSet{refs: make(map[chunks.HeadSeriesRef]struct{}), mtx: sync.Mutex{}}
// Track number of different records that referenced a series we don't know about
@@ -99,7 +99,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
- decoded = make(chan interface{}, 10)
+ decoded = make(chan any, 10)
decodeErr, seriesCreationErr error
)
@@ -107,7 +107,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// For CorruptionErr ensure to terminate all workers before exiting.
_, ok := err.(*wlog.CorruptionErr)
if ok || seriesCreationErr != nil {
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
@@ -116,7 +116,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
}()
wg.Add(concurrency)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].setup()
go func(wp *walSubsetProcessor) {
@@ -155,7 +155,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
go func() {
defer close(decoded)
var err error
- dec := record.NewDecoder(syms)
+ dec := record.NewDecoder(syms, h.logger)
for r.Next() {
switch dec.Type(r.Record()) {
case record.Series:
@@ -255,7 +255,7 @@ Outer:
switch v := d.(type) {
case []record.RefSeries:
for _, walSeries := range v {
- mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false)
+ mSeries, created, err := h.getOrCreateWithOptionalID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels, false)
if err != nil {
seriesCreationErr = err
break Outer
@@ -266,8 +266,6 @@ Outer:
}
if !created {
multiRef[walSeries.Ref] = mSeries.ref
- // Set the WAL expiry for the duplicate series, so it is kept in subsequent WAL checkpoints.
- h.setWALExpiry(walSeries.Ref, lastSegment)
}
idx := uint64(mSeries.ref) % uint64(concurrency)
@@ -283,7 +281,7 @@ Outer:
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
@@ -293,12 +291,14 @@ Outer:
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
+ // This is a sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
+ h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
@@ -314,6 +314,8 @@ Outer:
continue
}
if r, ok := multiRef[chunks.HeadSeriesRef(s.Ref)]; ok {
+ // This is a tombstone for a duplicate series, so we need to keep the series record at least until this record's timestamp.
+ h.updateWALExpiry(chunks.HeadSeriesRef(s.Ref), itv.Maxt)
s.Ref = storage.SeriesRef(r)
}
if m := h.series.getByID(chunks.HeadSeriesRef(s.Ref)); m == nil {
@@ -331,6 +333,8 @@ Outer:
continue
}
if r, ok := multiRef[e.Ref]; ok {
+ // This is an exemplar for a duplicate series, so we need to keep the series record at least until this record's timestamp.
+ h.updateWALExpiry(e.Ref, e.T)
e.Ref = r
}
exemplarsInput <- e
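
During WAL replay, records that still point at a duplicate series ref are remapped via multiRef, and each sighting pushes the duplicate's expiry forward to that record's timestamp, so the series record survives checkpoints for exactly as long as the WAL references it. A compact model of that bookkeeping (hypothetical refs, reusing the keep-the-maximum rule from updateWALExpiry):

package main

import "fmt"

type ref uint64

// replayState tracks duplicate-series remappings and how long each
// duplicate's series record must survive WAL checkpointing.
type replayState struct {
	multiRef    map[ref]ref
	walExpiries map[ref]int64
}

// remap resolves a record's ref and, for duplicates, extends the expiry to
// the record's timestamp, keeping the higher of the old and new values.
func (st *replayState) remap(r ref, t int64) ref {
	if canonical, ok := st.multiRef[r]; ok {
		st.walExpiries[r] = max(t, st.walExpiries[r])
		return canonical
	}
	return r
}

func main() {
	st := &replayState{
		multiRef:    map[ref]ref{7: 3}, // ref 7 duplicates series 3.
		walExpiries: map[ref]int64{},
	}
	fmt.Println(st.remap(7, 100), st.walExpiries[7]) // 3 100
	fmt.Println(st.remap(7, 50), st.walExpiries[7])  // 3 100 -- never lowered.
}
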
@@ -345,7 +349,7 @@ Outer:
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@@ -355,12 +359,14 @@ Outer:
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
+ // This is a histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
+ h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@@ -378,7 +384,7 @@ Outer:
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@@ -388,12 +394,14 @@ Outer:
continue // Before minValidTime: discard.
}
if r, ok := multiRef[sam.Ref]; ok {
+ // This is a float histogram sample for a duplicate series, so we need to keep the series record at least until this record's timestamp.
+ h.updateWALExpiry(sam.Ref, sam.T)
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@@ -437,7 +445,7 @@ Outer:
}
// Signal termination to each worker and wait for it to close its output channel.
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].closeAndDrain()
}
close(exemplarsInput)
@@ -540,7 +548,7 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
mSeries.nextAt = 0
mSeries.headChunks = nil
mSeries.app = nil
- return
+ return overlapped
}
type walSubsetProcessor struct {
@@ -727,7 +735,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
- decodedCh = make(chan interface{}, 10)
+ decodedCh = make(chan any, 10)
decodeErr error
)
@@ -737,7 +745,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
_, ok := err.(*wlog.CorruptionErr)
if ok {
err = &errLoadWbl{err: err}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
@@ -745,7 +753,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
}()
wg.Add(concurrency)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].setup()
go func(wp *wblSubsetProcessor) {
@@ -759,7 +767,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
go func() {
defer close(decodedCh)
- dec := record.NewDecoder(syms)
+ dec := record.NewDecoder(syms, h.logger)
for r.Next() {
var err error
rec := r.Record()
@@ -830,7 +838,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
@@ -842,7 +850,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(shards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
@@ -883,7 +891,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@@ -895,7 +903,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@@ -912,7 +920,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// of unused memory.
for len(samples) > 0 {
m := min(len(samples), 5000)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@@ -924,7 +932,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@@ -944,7 +952,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
}
// Signal termination to each worker and wait for it to close its output channel.
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
processors[i].closeAndDrain()
}
wg.Wait()
@@ -1155,7 +1163,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
switch enc {
case chunkenc.EncXOR:
// Backwards compatibility for old sampleBuf which had last 4 samples.
- for i := 0; i < 3; i++ {
+ for range 3 {
buf.PutBE64int64(0)
buf.PutBEFloat64(0)
}
@@ -1186,7 +1194,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
_ = dec.Be64int64() // Was chunkRange but now unused.
if dec.Uvarint() == 0 {
- return
+ return csr, err
}
csr.mc = &memChunk{}
@@ -1208,7 +1216,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
switch enc {
case chunkenc.EncXOR:
// Backwards-compatibility for old sampleBuf which had last 4 samples.
- for i := 0; i < 3; i++ {
+ for range 3 {
_ = dec.Be64int64()
_ = dec.Be64Float64()
}
@@ -1227,7 +1235,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
err = fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
- return
+ return csr, err
}
func encodeTombstonesToSnapshotRecord(tr tombstones.Reader) ([]byte, error) {
@@ -1321,7 +1329,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
)
// Add all series to the snapshot.
stripeSize := h.series.size
- for i := 0; i < stripeSize; i++ {
+ for i := range stripeSize {
h.series.locks[i].RLock()
for _, s := range h.series.series[i] {
@@ -1435,12 +1443,10 @@ func (h *Head) performChunkSnapshot() error {
startTime := time.Now()
stats, err := h.ChunkSnapshot()
elapsed := time.Since(startTime)
- if err == nil {
- h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir)
- }
if err != nil {
return fmt.Errorf("chunk snapshot: %w", err)
}
+ h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir)
return nil
}
@@ -1459,7 +1465,7 @@ func LastChunkSnapshot(dir string) (string, int, int, error) {
}
maxIdx, maxOffset := -1, -1
maxFileName := ""
- for i := 0; i < len(files); i++ {
+ for i := range files {
fi := files[i]
if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
@@ -1566,11 +1572,11 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
refSeries map[chunks.HeadSeriesRef]*memSeries
exemplarBuf []record.RefExemplar
syms = labels.NewSymbolTable() // New table for the whole snapshot.
- dec = record.NewDecoder(syms)
+ dec = record.NewDecoder(syms, h.logger)
)
wg.Add(concurrency)
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
go func(idx int, rc <-chan chunkSnapshotRecord) {
defer wg.Done()
defer func() {
@@ -1584,7 +1590,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
localRefSeries := shardedRefSeries[idx]
for csr := range rc {
- series, _, err := h.getOrCreateWithID(csr.ref, csr.lset.Hash(), csr.lset, false)
+ series, _, err := h.getOrCreateWithOptionalID(csr.ref, csr.lset.Hash(), csr.lset, false)
if err != nil {
errChan <- err
return
@@ -1670,9 +1676,7 @@ Outer:
refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries)
for _, shard := range shardedRefSeries {
- for k, v := range shard {
- refSeries[k] = v
- }
+ maps.Copy(refSeries, shard)
}
}
@@ -1739,9 +1743,7 @@ Outer:
// We had no exemplar record, so we have to build the map here.
refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries)
for _, shard := range shardedRefSeries {
- for k, v := range shard {
- refSeries[k] = v
- }
+ maps.Copy(refSeries, shard)
}
}
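
Two Go standard-library idioms recur throughout the head.go hunks above: `for i := range n` (Go 1.22+) iterates 0..n-1 without a three-clause loop, and maps.Copy (Go 1.21+) replaces hand-written key/value merge loops. A minimal standalone sketch of both, for illustration only and not part of the patch:

package main

import (
	"fmt"
	"maps"
)

func main() {
	concurrency := 4

	// Go 1.22+: ranging over an int yields 0, 1, ..., concurrency-1,
	// replacing `for i := 0; i < concurrency; i++`.
	for i := range concurrency {
		fmt.Println("worker", i)
	}

	// maps.Copy inserts every key/value pair of src into dst,
	// replacing the hand-written copy loop over shard maps.
	dst := map[string]int{"a": 1}
	src := map[string]int{"b": 2, "c": 3}
	maps.Copy(dst, src)
	fmt.Println(dst) // map[a:1 b:2 c:3]
}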
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
index edcb92a719a..28eacd7c007 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go
@@ -15,7 +15,6 @@ package index
import (
"bufio"
- "bytes"
"context"
"encoding/binary"
"fmt"
@@ -142,8 +141,7 @@ type Writer struct {
lastSymbol string
symbolCache map[string]uint32 // From symbol to index in table.
- labelIndexes []labelIndexHashEntry // Label index offsets.
- labelNames map[string]uint64 // Label names, and their usage.
+ labelNames map[string]uint64 // Label names, and their usage.
// Hold last series to validate that clients insert new series in order.
lastSeries labels.Labels
@@ -393,9 +391,6 @@ func (w *Writer) ensureStage(s indexWriterStage) error {
if err := w.writePostingsToTmpFiles(); err != nil {
return err
}
- if err := w.writeLabelIndices(); err != nil {
- return err
- }
w.toc.Postings = w.f.pos
if err := w.writePostings(); err != nil {
@@ -403,9 +398,6 @@ func (w *Writer) ensureStage(s indexWriterStage) error {
}
w.toc.LabelIndicesTable = w.f.pos
- if err := w.writeLabelIndexesOffsetTable(); err != nil {
- return err
- }
w.toc.PostingsTable = w.f.pos
if err := w.writePostingsOffsetTable(); err != nil {
@@ -592,147 +584,6 @@ func (w *Writer) finishSymbols() error {
return nil
}
-func (w *Writer) writeLabelIndices() error {
- if err := w.fPO.Flush(); err != nil {
- return err
- }
-
- // Find all the label values in the tmp posting offset table.
- f, err := fileutil.OpenMmapFile(w.fPO.name)
- if err != nil {
- return err
- }
- defer f.Close()
-
- d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
- cnt := w.cntPO
- current := []byte{}
- values := []uint32{}
- for d.Err() == nil && cnt > 0 {
- cnt--
- d.Uvarint() // Keycount.
- name := d.UvarintBytes() // Label name.
- value := d.UvarintBytes() // Label value.
- d.Uvarint64() // Offset.
- if len(name) == 0 {
- continue // All index is ignored.
- }
-
- if !bytes.Equal(name, current) && len(values) > 0 {
- // We've reached a new label name.
- if err := w.writeLabelIndex(string(current), values); err != nil {
- return err
- }
- values = values[:0]
- }
- current = name
- sid, ok := w.symbolCache[string(value)]
- if !ok {
- return fmt.Errorf("symbol entry for %q does not exist", string(value))
- }
- values = append(values, sid)
- }
- if d.Err() != nil {
- return d.Err()
- }
-
- // Handle the last label.
- if len(values) > 0 {
- if err := w.writeLabelIndex(string(current), values); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (w *Writer) writeLabelIndex(name string, values []uint32) error {
- // Align beginning to 4 bytes for more efficient index list scans.
- if err := w.addPadding(4); err != nil {
- return err
- }
-
- w.labelIndexes = append(w.labelIndexes, labelIndexHashEntry{
- keys: []string{name},
- offset: w.f.pos,
- })
-
- startPos := w.f.pos
- // Leave 4 bytes of space for the length, which will be calculated later.
- if err := w.write([]byte("alen")); err != nil {
- return err
- }
- w.crc32.Reset()
-
- w.buf1.Reset()
- w.buf1.PutBE32int(1) // Number of names.
- w.buf1.PutBE32int(len(values))
- w.buf1.WriteToHash(w.crc32)
- if err := w.write(w.buf1.Get()); err != nil {
- return err
- }
-
- for _, v := range values {
- w.buf1.Reset()
- w.buf1.PutBE32(v)
- w.buf1.WriteToHash(w.crc32)
- if err := w.write(w.buf1.Get()); err != nil {
- return err
- }
- }
-
- // Write out the length.
- w.buf1.Reset()
- l := w.f.pos - startPos - 4
- if l > math.MaxUint32 {
- return fmt.Errorf("label index size exceeds 4 bytes: %d", l)
- }
- w.buf1.PutBE32int(int(l))
- if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
- return err
- }
-
- w.buf1.Reset()
- w.buf1.PutHashSum(w.crc32)
- return w.write(w.buf1.Get())
-}
-
-// writeLabelIndexesOffsetTable writes the label indices offset table.
-func (w *Writer) writeLabelIndexesOffsetTable() error {
- startPos := w.f.pos
- // Leave 4 bytes of space for the length, which will be calculated later.
- if err := w.write([]byte("alen")); err != nil {
- return err
- }
- w.crc32.Reset()
-
- w.buf1.Reset()
- w.buf1.PutBE32int(len(w.labelIndexes))
- w.buf1.WriteToHash(w.crc32)
- if err := w.write(w.buf1.Get()); err != nil {
- return err
- }
-
- for _, e := range w.labelIndexes {
- w.buf1.Reset()
- w.buf1.PutUvarint(len(e.keys))
- for _, k := range e.keys {
- w.buf1.PutUvarintStr(k)
- }
- w.buf1.PutUvarint64(e.offset)
- w.buf1.WriteToHash(w.crc32)
- if err := w.write(w.buf1.Get()); err != nil {
- return err
- }
- }
-
- // Write out the length.
- err := w.writeLengthAndHash(startPos)
- if err != nil {
- return fmt.Errorf("label indexes offset table length/crc32 write error: %w", err)
- }
- return nil
-}
-
// writePostingsOffsetTable writes the postings offset table.
func (w *Writer) writePostingsOffsetTable() error {
// Ensure everything is in the temporary file.
@@ -919,7 +770,7 @@ func (w *Writer) writePostingsToTmpFiles() error {
// See if label names we want are in the series.
numLabels := d.Uvarint()
- for i := 0; i < numLabels; i++ {
+ for range numLabels {
lno := uint32(d.Uvarint())
lvo := uint32(d.Uvarint())
@@ -1049,11 +900,6 @@ func (w *Writer) writePostings() error {
return nil
}
-type labelIndexHashEntry struct {
- keys []string
- offset uint64
-}
-
func (w *Writer) Close() error {
// Even if this fails, we need to close all the files.
ensureErr := w.ensureStage(idxStageDone)
@@ -1845,7 +1691,7 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, ma
// SortedPostings returns the given postings list reordered so that the backing series
// are sorted.
-func (r *Reader) SortedPostings(p Postings) Postings {
+func (*Reader) SortedPostings(p Postings) Postings {
return p
}
@@ -1920,7 +1766,7 @@ func (s *stringListIter) Next() bool {
return true
}
func (s stringListIter) At() string { return s.cur }
-func (s stringListIter) Err() error { return nil }
+func (stringListIter) Err() error { return nil }
// Decoder provides decoding methods for the v1 and v2 index file format.
//
@@ -1946,12 +1792,12 @@ func DecodePostingsRaw(d encoding.Decbuf) (int, Postings, error) {
// LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series.
// They are returned in the same order they're stored, which should be sorted lexicographically.
-func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) {
+func (*Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) {
d := encoding.Decbuf{B: b}
k := d.Uvarint()
offsets := make([]uint32, k)
- for i := 0; i < k; i++ {
+ for i := range k {
offsets[i] = uint32(d.Uvarint())
_ = d.Uvarint() // skip the label value
@@ -1968,7 +1814,7 @@ func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) (
d := encoding.Decbuf{B: b}
k := d.Uvarint()
- for i := 0; i < k; i++ {
+ for range k {
lno := uint32(d.Uvarint())
lvo := uint32(d.Uvarint())
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
index 75e3c2c1487..d5a17c3daac 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
@@ -47,7 +47,7 @@ const ensureOrderBatchSize = 1024
// ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder().
var ensureOrderBatchPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
x := make([][]storage.SeriesRef, 0, ensureOrderBatchSize)
return &x // Return pointer type as preferred by Pool.
},
@@ -564,10 +564,10 @@ type errPostings struct {
err error
}
-func (e errPostings) Next() bool { return false }
-func (e errPostings) Seek(storage.SeriesRef) bool { return false }
-func (e errPostings) At() storage.SeriesRef { return 0 }
-func (e errPostings) Err() error { return e.err }
+func (errPostings) Next() bool { return false }
+func (errPostings) Seek(storage.SeriesRef) bool { return false }
+func (errPostings) At() storage.SeriesRef { return 0 }
+func (e errPostings) Err() error { return e.err }
var emptyPostings = errPostings{}
@@ -607,53 +607,54 @@ func Intersect(its ...Postings) Postings {
}
type intersectPostings struct {
- arr []Postings
- cur storage.SeriesRef
+ postings []Postings // These are the postings we will be intersecting.
+ current storage.SeriesRef // The current intersection, if Seek() or Next() has returned true.
}
func newIntersectPostings(its ...Postings) *intersectPostings {
- return &intersectPostings{arr: its}
+ return &intersectPostings{postings: its}
}
func (it *intersectPostings) At() storage.SeriesRef {
- return it.cur
+ return it.current
}
-func (it *intersectPostings) doNext() bool {
-Loop:
+func (it *intersectPostings) Seek(target storage.SeriesRef) bool {
for {
- for _, p := range it.arr {
- if !p.Seek(it.cur) {
+ allEqual := true
+ for _, p := range it.postings {
+ if !p.Seek(target) {
return false
}
- if p.At() > it.cur {
- it.cur = p.At()
- continue Loop
+ if p.At() > target {
+ target = p.At()
+ allEqual = false
}
}
- return true
+
+ // If all p.At() values are equal, we found an intersection.
+ if allEqual {
+ it.current = target
+ return true
+ }
}
}
func (it *intersectPostings) Next() bool {
- for _, p := range it.arr {
+ target := it.current
+ for _, p := range it.postings {
if !p.Next() {
return false
}
- if p.At() > it.cur {
- it.cur = p.At()
+ if p.At() > target {
+ target = p.At()
}
}
- return it.doNext()
-}
-
-func (it *intersectPostings) Seek(id storage.SeriesRef) bool {
- it.cur = id
- return it.doNext()
+ return it.Seek(target)
}
func (it *intersectPostings) Err() error {
- for _, p := range it.arr {
+ for _, p := range it.postings {
if p.Err() != nil {
return p.Err()
}
@@ -861,7 +862,7 @@ func (it *ListPostings) Seek(x storage.SeriesRef) bool {
return false
}
-func (it *ListPostings) Err() error {
+func (*ListPostings) Err() error {
return nil
}
@@ -914,7 +915,7 @@ func (it *bigEndianPostings) Seek(x storage.SeriesRef) bool {
return false
}
-func (it *bigEndianPostings) Err() error {
+func (*bigEndianPostings) Err() error {
return nil
}
@@ -1022,14 +1023,14 @@ func (h postingsWithIndexHeap) Less(i, j int) bool {
func (h *postingsWithIndexHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
// Push implements heap.Interface.
-func (h *postingsWithIndexHeap) Push(x interface{}) {
+func (h *postingsWithIndexHeap) Push(x any) {
*h = append(*h, x.(postingsWithIndex))
}
// Pop implements heap.Interface and pops the last element, which is NOT the min element,
// so this doesn't return the same heap.Pop()
// Although this method is implemented for correctness, we don't expect it to be used, see popIndex() method for details.
-func (h *postingsWithIndexHeap) Pop() interface{} {
+func (h *postingsWithIndexHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
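
The intersectPostings rewrite above folds the old doNext loop into Seek: every posting list is advanced to a shared target, the target is raised whenever a list overshoots, and the loop repeats until all lists agree on one value. A standalone sketch of that fixed-point search over sorted int slices (names here are illustrative, not from the patch):

package main

import "fmt"

// seek advances idx within the sorted slice s until s[idx] >= target,
// returning the new index and whether such an element exists.
func seek(s []int, idx, target int) (int, bool) {
	for idx < len(s) && s[idx] < target {
		idx++
	}
	return idx, idx < len(s)
}

// intersect returns the values present in every sorted input slice,
// mirroring how intersectPostings.Seek raises the target until all
// iterators land on the same value.
func intersect(lists ...[]int) []int {
	idx := make([]int, len(lists))
	var out []int
	target := 0
	for {
		allEqual := true
		for i, l := range lists {
			j, ok := seek(l, idx[i], target)
			if !ok {
				return out // One list is exhausted: no more intersections.
			}
			idx[i] = j
			if l[j] > target {
				target = l[j] // Overshoot: raise the target and retry.
				allEqual = false
			}
		}
		if allEqual {
			out = append(out, target)
			target++ // Move past the match to find the next one.
		}
	}
}

func main() {
	fmt.Println(intersect([]int{1, 2, 5, 9}, []int{2, 5, 7, 9}, []int{5, 9, 11}))
	// Output: [5 9]
}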
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go
index 1035991e749..95d3cfa5ebb 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go
@@ -87,7 +87,7 @@ func newIsolation(disabled bool) *isolation {
appendsOpenList: appender,
readsOpen: isoState,
disabled: disabled,
- appendersPool: sync.Pool{New: func() interface{} { return &isolationAppender{} }},
+ appendersPool: sync.Pool{New: func() any { return &isolationAppender{} }},
}
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go
index a3d6b3567b3..b3f5e2b6752 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go
@@ -120,7 +120,7 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error
}
app, err = chunk.Appender()
if err != nil {
- return
+ return chks, err
}
}
switch encoding {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go
index ddc5376df05..af8f9b1f83f 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go
@@ -386,7 +386,7 @@ func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) {
return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil
}
-func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
+func (*OOOCompactionHead) Tombstones() (tombstones.Reader, error) {
return tombstones.NewMemTombstones(), nil
}
@@ -418,7 +418,7 @@ func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionH
}
}
-func (ch *OOOCompactionHead) Size() int64 { return 0 }
+func (*OOOCompactionHead) Size() int64 { return 0 }
func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint }
func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt }
func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange }
@@ -446,15 +446,15 @@ func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string,
return index.NewListPostings(ir.ch.postings), nil
}
-func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
+func (*OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
return index.ErrPostings(errors.New("not supported"))
}
-func (ir *OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings {
+func (*OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings {
return index.ErrPostings(errors.New("not supported"))
}
-func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings {
+func (*OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings {
// This will already be sorted from the Postings() call above.
return p
}
@@ -484,31 +484,31 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks)
}
-func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) {
+func (*OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) {
+func (*OOOCompactionHeadIndexReader) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, _ bool, _ ...*labels.Matcher) (index.Postings, error) {
+func (*OOOCompactionHeadIndexReader) PostingsForMatchers(context.Context, bool, ...*labels.Matcher) (index.Postings, error) {
return nil, errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
+func (*OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
+func (*OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
return "", errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) {
+func (*OOOCompactionHeadIndexReader) LabelNamesFor(context.Context, index.Postings) ([]string, error) {
return nil, errors.New("not implemented")
}
-func (ir *OOOCompactionHeadIndexReader) Close() error {
+func (*OOOCompactionHeadIndexReader) Close() error {
return nil
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
index 0943c760cd2..788991235f9 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go
@@ -588,7 +588,7 @@ func (b *blockBaseSeriesSet) Err() error {
return b.p.Err()
}
-func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
+func (*blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
// populateWithDelGenericSeriesIterator allows to iterate over given chunk
// metas. In each iteration it ensures that chunks are trimmed based on given
@@ -1266,4 +1266,4 @@ func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.
return cr.emptyChunk, nil, nil
}
-func (cr nopChunkReader) Close() error { return nil }
+func (nopChunkReader) Close() error { return nil }
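
The receiver renames above, e.g. (cr nopChunkReader) becoming (nopChunkReader), drop receiver names that the method body never reads, a common linter-driven cleanup. A tiny standalone sketch of the idiom:

package main

import "fmt"

type nopCloser struct{}

// The receiver is left unnamed because Close never uses it; this mirrors
// the unused-receiver cleanups applied across the hunks above.
func (nopCloser) Close() error { return nil }

func main() {
	var c nopCloser
	fmt.Println(c.Close()) // <nil>
}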
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go
index 692976cdf84..561810a3a54 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go
@@ -18,7 +18,9 @@ package record
import (
"errors"
"fmt"
+ "log/slog"
"math"
+ "unsafe"
"github.com/prometheus/common/model"
@@ -202,15 +204,18 @@ type RefMmapMarker struct {
// Decoder decodes series, sample, metadata and tombstone records.
type Decoder struct {
builder labels.ScratchBuilder
+ logger *slog.Logger
}
-func NewDecoder(_ *labels.SymbolTable) Decoder { // FIXME remove t
- return Decoder{builder: labels.NewScratchBuilder(0)}
+func NewDecoder(_ *labels.SymbolTable, logger *slog.Logger) Decoder { // FIXME remove t (or use scratch builder with symbols)
+ b := labels.NewScratchBuilder(0)
+ b.SetUnsafeAdd(true)
+ return Decoder{builder: b, logger: logger}
}
// Type returns the type of the record.
// Returns RecordUnknown if no valid record type is found.
-func (d *Decoder) Type(rec []byte) Type {
+func (*Decoder) Type(rec []byte) Type {
if len(rec) < 1 {
return Unknown
}
@@ -247,7 +252,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
}
// Metadata appends metadata in rec to the given slice.
-func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) {
+func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Metadata {
@@ -262,7 +267,7 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e
// We can skip the rest of the fields (if we encounter any), but we must decode them anyway
// so we can correctly align with the start with the next metadata record.
var unit, help string
- for i := 0; i < numFields; i++ {
+ for range numFields {
fieldName := dec.UvarintStr()
fieldValue := dec.UvarintStr()
switch fieldName {
@@ -289,20 +294,24 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e
return metadata, nil
}
+func yoloString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
// DecodeLabels decodes one set of labels from buf.
func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels {
d.builder.Reset()
nLabels := dec.Uvarint()
- for i := 0; i < nLabels; i++ {
+ for range nLabels {
lName := dec.UvarintBytes()
lValue := dec.UvarintBytes()
- d.builder.UnsafeAddBytes(lName, lValue)
+ d.builder.Add(yoloString(lName), yoloString(lValue))
}
return d.builder.Labels()
}
// Samples appends samples in rec to the given slice.
-func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
+func (*Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Samples {
@@ -341,7 +350,7 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error)
}
// Tombstones appends tombstones in rec to the given slice.
-func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
+func (*Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Tombstones {
@@ -405,7 +414,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp
return exemplars, nil
}
-func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) {
+func (*Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != MmapMarkers {
@@ -457,6 +466,18 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
}
DecodeHistogram(&dec, rh.H)
+
+ if !histogram.IsKnownSchema(rh.H.Schema) {
+ d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.H.Schema, "timestamp", rh.T)
+ continue
+ }
+ if rh.H.Schema > histogram.ExponentialSchemaMax && rh.H.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // record is from a newer Prometheus version that supports higher
+ // resolution.
+ rh.H.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
histograms = append(histograms, rh)
}
@@ -549,6 +570,18 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
}
DecodeFloatHistogram(&dec, rh.FH)
+
+ if !histogram.IsKnownSchema(rh.FH.Schema) {
+ d.logger.Warn("skipping histogram with unknown schema in WAL record", "schema", rh.FH.Schema, "timestamp", rh.T)
+ continue
+ }
+ if rh.FH.Schema > histogram.ExponentialSchemaMax && rh.FH.Schema <= histogram.ExponentialSchemaMaxReserved {
+ // This is a very slow path, but it should only happen if the
+ // record is from a newer Prometheus version that supports higher
+ // resolution.
+ rh.FH.ReduceResolution(histogram.ExponentialSchemaMax)
+ }
+
histograms = append(histograms, rh)
}
@@ -622,7 +655,7 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
type Encoder struct{}
// Series appends the encoded series to b and returns the resulting slice.
-func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
+func (*Encoder) Series(series []RefSeries, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Series))
@@ -634,7 +667,7 @@ func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
}
// Metadata appends the encoded metadata to b and returns the resulting slice.
-func (e *Encoder) Metadata(metadata []RefMetadata, b []byte) []byte {
+func (*Encoder) Metadata(metadata []RefMetadata, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Metadata))
@@ -665,7 +698,7 @@ func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) {
}
// Samples appends the encoded samples to b and returns the resulting slice.
-func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
+func (*Encoder) Samples(samples []RefSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Samples))
@@ -689,7 +722,7 @@ func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
}
// Tombstones appends the encoded tombstones to b and returns the resulting slice.
-func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
+func (*Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Tombstones))
@@ -716,7 +749,7 @@ func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
return buf.Get()
}
-func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) {
+func (*Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) {
// Store base timestamp and base reference number of first sample.
// All samples encode their timestamp and ref as delta to those.
first := exemplars[0]
@@ -732,7 +765,7 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi
}
}
-func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {
+func (*Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(MmapMarkers))
@@ -744,7 +777,7 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {
return buf.Get()
}
-func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) {
+func (*Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(HistogramSamples))
@@ -778,7 +811,7 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([
return buf.Get(), customBucketHistograms
}
-func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
+func (*Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(CustomBucketsHistogramSamples))
@@ -843,7 +876,7 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) {
}
}
-func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) {
+func (*Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(FloatHistogramSamples))
@@ -878,7 +911,7 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b
return buf.Get(), customBucketsFloatHistograms
}
-func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
+func (*Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(CustomBucketsFloatHistogramSamples))
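
Two decoder changes above are worth noting: yoloString reinterprets a byte slice as a string with no copy via unsafe.String, and histogram samples with reserved higher-resolution schemas are reduced to ExponentialSchemaMax while unknown schemas are skipped with a warning. A minimal sketch of the zero-copy conversion and the aliasing hazard its name warns about (illustration only):

package main

import (
	"fmt"
	"unsafe"
)

// yoloString reinterprets b as a string without copying, mirroring the
// decoder helper above. Caller contract: b must stay alive and unmodified
// for as long as the returned string is in use, since they share memory.
func yoloString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func main() {
	buf := []byte("up")
	s := yoloString(buf)
	fmt.Println(s) // up

	// Writing to the buffer is visible through the string. That is the
	// hazard the yolo name warns about: callers must not reuse the bytes
	// while any such string is still reachable.
	buf[0] = 'U'
	fmt.Println(s) // Up
}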
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
index 9df70d3c298..bda565eae40 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go
@@ -353,7 +353,7 @@ func (in Intervals) Add(n Interval) Intervals {
return append(in, n)
}
// Find min and max indexes of intervals that overlap with the new interval.
- // Intervals are closed [t1, t2] and t is discreet, so if neighbour intervals are 1 step difference
+ // Intervals are closed [t1, t2] and t is discrete, so if neighbour intervals are 1 step difference
// to the new one, we can merge those together.
mini := 0
if n.Mint != math.MinInt64 { // Avoid overflow.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go
index a923519ef77..64311a8c3b0 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go
@@ -20,7 +20,7 @@ import (
)
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
- for i := 0; i < n; i++ {
+ for i := range n {
h := GenerateTestHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
@@ -58,7 +58,7 @@ func GenerateTestHistogram(i int64) *histogram.Histogram {
}
func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) {
- for i := 0; i < n; i++ {
+ for i := range n {
h := GenerateTestCustomBucketsHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
@@ -83,7 +83,7 @@ func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram {
}
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
- for x := 0; x < n; x++ {
+ for x := range n {
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeHistogram(i))
}
@@ -97,7 +97,7 @@ func GenerateTestGaugeHistogram(i int64) *histogram.Histogram {
}
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
- for i := 0; i < n; i++ {
+ for i := range n {
h := GenerateTestFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
@@ -129,7 +129,7 @@ func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram {
}
func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) {
- for i := 0; i < n; i++ {
+ for i := range n {
h := GenerateTestCustomBucketsFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
@@ -154,7 +154,7 @@ func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram
}
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
- for x := 0; x < n; x++ {
+ for x := range n {
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeFloatHistogram(i))
}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
index 2c1b0c0534d..c26f3f1052c 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go
@@ -93,7 +93,7 @@ const CheckpointPrefix = "checkpoint."
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
-func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef, last int) bool, mint int64) (*CheckpointStats, error) {
+func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
stats := &CheckpointStats{}
var sgmReader io.ReadCloser
@@ -156,7 +156,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
exemplars []record.RefExemplar
metadata []record.RefMetadata
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
- dec = record.NewDecoder(st)
+ dec = record.NewDecoder(st, logger)
enc record.Encoder
buf []byte
recs [][]byte
@@ -181,7 +181,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
// Drop irrelevant series in place.
repl := series[:0]
for _, s := range series {
- if keep(s.Ref, to) {
+ if keep(s.Ref) {
repl = append(repl, s)
}
}
@@ -323,7 +323,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
// Only keep reference to the latest found metadata for each refID.
repl := 0
for _, m := range metadata {
- if keep(m.Ref, to) {
+ if keep(m.Ref) {
if _, ok := latestMetadataMap[m.Ref]; !ok {
repl++
}
@@ -410,7 +410,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
return nil, err
}
- for i := 0; i < len(files); i++ {
+ for i := range files {
fi := files[i]
if !strings.HasPrefix(fi.Name(), CheckpointPrefix) {
continue
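
Checkpoint's keep callback above loses its unused segment argument, so callers now pass a plain predicate over series refs. A hedged sketch of such a predicate under the new signature (liveSeries is hypothetical):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunks"
)

func main() {
	// Hypothetical set of series refs still held in memory.
	liveSeries := map[chunks.HeadSeriesRef]struct{}{1: {}, 7: {}}

	// Under the new signature the predicate sees only the series ref;
	// the `last int` segment argument was dropped.
	keep := func(id chunks.HeadSeriesRef) bool {
		_, ok := liveSeries[id]
		return ok
	}

	fmt.Println(keep(1), keep(2)) // true false
}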
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go
index 95bd554a76d..abb5ef97312 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go
@@ -494,7 +494,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
// Also used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
var (
- dec = record.NewDecoder(labels.NewSymbolTable()) // One table per WAL segment means it won't grow indefinitely.
+ dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // One table per WAL segment means it won't grow indefinitely.
series []record.RefSeries
samples []record.RefSample
samplesToSend []record.RefSample
@@ -563,7 +563,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendExemplars(exemplars)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
- // Skip if experimental "histograms over remote write" is not enabled.
+ // Skip if "native histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
@@ -591,7 +591,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
}
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
- // Skip if experimental "histograms over remote write" is not enabled.
+ // Skip if "native histograms over remote write" is not enabled.
if !w.sendHistograms {
break
}
@@ -647,7 +647,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
// Used with readCheckpoint - implements segmentReadFn.
func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error {
var (
- dec = record.NewDecoder(labels.NewSymbolTable()) // Needed for decoding; labels do not outlive this function.
+ dec = record.NewDecoder(labels.NewSymbolTable(), w.logger) // Needed for decoding; labels do not outlive this function.
series []record.RefSeries
)
for r.Next() && !isClosed(w.quit) {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go
index dec41ad2c72..176531c4788 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go
@@ -788,12 +788,12 @@ func (w *WL) LastSegmentAndOffset() (seg, offset int, err error) {
_, seg, err = Segments(w.Dir())
if err != nil {
- return
+ return seg, offset, err
}
offset = (w.donePages * pageSize) + w.page.alloc
- return
+ return seg, offset, err
}
// Truncate drops all segments before i.
diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go
index f8070ff3431..817f670b5e8 100644
--- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go
+++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go
@@ -16,6 +16,7 @@ package annotations
import (
"errors"
"fmt"
+ "maps"
"github.com/prometheus/common/model"
@@ -55,9 +56,7 @@ func (a *Annotations) Merge(aa Annotations) Annotations {
}
*a = Annotations{}
}
- for key, val := range aa {
- (*a)[key] = val
- }
+ maps.Copy((*a), aa)
return *a
}
@@ -108,7 +107,7 @@ func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warning
if infoSkipped > 0 {
infos = append(infos, fmt.Sprintf("%d more info annotations omitted", infoSkipped))
}
- return
+ return warnings, infos
}
// CountWarningsAndInfo counts and returns the number of warnings and infos in the
@@ -122,7 +121,7 @@ func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
countInfo++
}
}
- return
+ return countWarnings, countInfo
}
//nolint:staticcheck,revive // error-naming.
@@ -135,26 +134,27 @@ var (
PromQLInfo = errors.New("PromQL info")
PromQLWarning = errors.New("PromQL warning")
- InvalidRatioWarning = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning)
- InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
- BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
- MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
- MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
- NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
- NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
- MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
- IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning)
- IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning)
+ InvalidRatioWarning = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning)
+ InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
+ BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
+ MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)
+ MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms", PromQLWarning)
+ NativeHistogramNotCounterWarning = fmt.Errorf("%w: this native histogram metric is not a counter:", PromQLWarning)
+ NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning)
+ MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning)
+ IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning)
PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
PossibleNonCounterLabelInfo = fmt.Errorf("%w: metric might not be a counter, __type__ label is not set to %q or %q", PromQLInfo, model.MetricTypeCounter, model.MetricTypeHistogram)
- HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
+ HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile)", PromQLInfo)
IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo)
HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo)
HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo)
- NativeHistogramQuantileNaNResultInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is NaN for metric name", PromQLInfo)
- NativeHistogramQuantileNaNSkewInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is skewed higher for metric name", PromQLInfo)
- NativeHistogramFractionNaNsInfo = fmt.Errorf("%w: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name", PromQLInfo)
+ NativeHistogramQuantileNaNResultInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is NaN", PromQLInfo)
+ NativeHistogramQuantileNaNSkewInfo = fmt.Errorf("%w: input to histogram_quantile has NaN observations, result is skewed higher", PromQLInfo)
+ NativeHistogramFractionNaNsInfo = fmt.Errorf("%w: input to histogram_fraction has NaN observations, which are excluded from all fractions", PromQLInfo)
+ HistogramCounterResetCollisionWarning = fmt.Errorf("%w: conflicting counter resets during histogram", PromQLWarning)
+ MismatchedCustomBucketsHistogramsInfo = fmt.Errorf("%w: mismatched custom buckets were reconciled during", PromQLInfo)
)
type annoErr struct {
@@ -174,6 +174,13 @@ func (e annoErr) Unwrap() error {
return e.Err
}
+func maybeAddMetricName(anno error, metricName string) error {
+ if metricName == "" {
+ return anno
+ }
+ return fmt.Errorf("%w for metric name %q", anno, metricName)
+}
+
// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
// value, i.e. a float that is outside the range [0, 1] or NaN.
func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
@@ -195,9 +202,10 @@ func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
// NewBadBucketLabelWarning is used when there is an error parsing the bucket label
// of a classic histogram.
func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
+ anno := maybeAddMetricName(fmt.Errorf("%w of %q", BadBucketLabelWarning, label), metricName)
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName),
+ Err: anno,
}
}
@@ -225,7 +233,7 @@ func NewMixedFloatsHistogramsAggWarning(pos posrange.PositionRange) error {
func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName),
+ Err: maybeAddMetricName(MixedClassicNativeHistogramsWarning, metricName),
}
}
@@ -256,15 +264,6 @@ func NewMixedExponentialCustomHistogramsWarning(metricName string, pos posrange.
}
}
-// NewIncompatibleCustomBucketsHistogramsWarning is used when the queried series includes
-// custom buckets histograms with incompatible custom bounds.
-func NewIncompatibleCustomBucketsHistogramsWarning(metricName string, pos posrange.PositionRange) error {
- return annoErr{
- PositionRange: pos,
- Err: fmt.Errorf("%w %q", IncompatibleCustomBucketsHistogramsWarning, metricName),
- }
-}
-
// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
// have the suffixes _total, _sum, _count, or _bucket.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error {
@@ -288,7 +287,7 @@ func NewPossibleNonCounterLabelInfo(metricName, typeLabel string, pos posrange.P
func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName),
+ Err: maybeAddMetricName(HistogramQuantileForcedMonotonicityInfo, metricName),
}
}
@@ -331,20 +330,55 @@ func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.Pos
func NewNativeHistogramQuantileNaNResultInfo(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w %q", NativeHistogramQuantileNaNResultInfo, metricName),
+ Err: maybeAddMetricName(NativeHistogramQuantileNaNResultInfo, metricName),
}
}
func NewNativeHistogramQuantileNaNSkewInfo(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w %q", NativeHistogramQuantileNaNSkewInfo, metricName),
+ Err: maybeAddMetricName(NativeHistogramQuantileNaNSkewInfo, metricName),
}
}
func NewNativeHistogramFractionNaNsInfo(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
- Err: fmt.Errorf("%w %q", NativeHistogramFractionNaNsInfo, metricName),
+ Err: maybeAddMetricName(NativeHistogramFractionNaNsInfo, metricName),
+ }
+}
+
+type HistogramOperation string
+
+const (
+ HistogramAdd HistogramOperation = "addition"
+ HistogramSub HistogramOperation = "subtraction"
+ HistogramAgg HistogramOperation = "aggregation"
+)
+
+func (op HistogramOperation) String() string {
+ switch op {
+ case HistogramAdd, HistogramSub, HistogramAgg:
+ return string(op)
+ default:
+ return "unknown operation"
+ }
+}
+
+// NewHistogramCounterResetCollisionWarning is used when two counter histograms are added or subtracted where one has
+// a CounterReset hint and the other has NotCounterReset.
+func NewHistogramCounterResetCollisionWarning(pos posrange.PositionRange, operation HistogramOperation) error {
+ return annoErr{
+ PositionRange: pos,
+ Err: fmt.Errorf("%w %s", HistogramCounterResetCollisionWarning, operation.String()),
+ }
+}
+
+// NewMismatchedCustomBucketsHistogramsInfo is used when the queried series includes
+// custom buckets histograms with mismatched custom bounds that had to be reconciled.
+func NewMismatchedCustomBucketsHistogramsInfo(pos posrange.PositionRange, operation HistogramOperation) error {
+ return annoErr{
+ PositionRange: pos,
+ Err: fmt.Errorf("%w %s", MismatchedCustomBucketsHistogramsInfo, operation.String()),
}
}
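
maybeAddMetricName above appends the metric name only when one is known, keeping annotation messages terse otherwise while preserving error wrapping. A standalone sketch of the wrap-and-suffix pattern (the sentinel below is a stand-in, not the package's own variable):

package main

import (
	"errors"
	"fmt"
)

var promQLInfo = errors.New("PromQL info") // hypothetical stand-in sentinel

func maybeAddMetricName(anno error, metricName string) error {
	if metricName == "" {
		return anno
	}
	return fmt.Errorf("%w for metric name %q", anno, metricName)
}

func main() {
	base := fmt.Errorf("%w: result is NaN", promQLInfo)

	fmt.Println(maybeAddMetricName(base, ""))
	// PromQL info: result is NaN

	err := maybeAddMetricName(base, "http_request_duration_seconds")
	fmt.Println(err)
	// PromQL info: result is NaN for metric name "http_request_duration_seconds"

	// errors.Is still matches the sentinel through both wrapping layers.
	fmt.Println(errors.Is(err, promQLInfo)) // true
}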
diff --git a/vendor/github.com/prometheus/prometheus/util/compression/buffers.go b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go
index 765bc64c0ba..f510efc042f 100644
--- a/vendor/github.com/prometheus/prometheus/util/compression/buffers.go
+++ b/vendor/github.com/prometheus/prometheus/util/compression/buffers.go
@@ -75,11 +75,11 @@ func (b *concurrentEBuffer) zstdEncBuf() *zstd.Encoder {
// TODO(bwplotka): We could use pool, but putting it back into the pool needs to be
// on the caller side, so no pool for now.
-func (b *concurrentEBuffer) get() []byte {
+func (*concurrentEBuffer) get() []byte {
return nil
}
-func (b *concurrentEBuffer) set([]byte) {}
+func (*concurrentEBuffer) set([]byte) {}
type DecodeBuffer interface {
zstdDecBuf() *zstd.Decoder
@@ -135,8 +135,8 @@ func (b *concurrentDBuffer) zstdDecBuf() *zstd.Decoder {
return b.r
}
-func (b *concurrentDBuffer) get() []byte {
+func (*concurrentDBuffer) get() []byte {
return nil
}
-func (b *concurrentDBuffer) set([]byte) {}
+func (*concurrentDBuffer) set([]byte) {}
diff --git a/vendor/github.com/prometheus/prometheus/util/httputil/context.go b/vendor/github.com/prometheus/prometheus/util/httputil/context.go
index fddcfba9419..9b16428892b 100644
--- a/vendor/github.com/prometheus/prometheus/util/httputil/context.go
+++ b/vendor/github.com/prometheus/prometheus/util/httputil/context.go
@@ -41,7 +41,7 @@ func ContextFromRequest(ctx context.Context, r *http.Request) context.Context {
if v := ctx.Value(pathParam{}); v != nil {
path = v.(string)
}
- return promql.NewOriginContext(ctx, map[string]interface{}{
+ return promql.NewOriginContext(ctx, map[string]any{
"httpRequest": map[string]string{
"clientIP": ip,
"method": r.Method,
diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go
index 3f97b17f098..5e379442a24 100644
--- a/vendor/github.com/prometheus/prometheus/util/logging/file.go
+++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go
@@ -27,8 +27,8 @@ var _ slog.Handler = (*JSONFileLogger)(nil)
var _ io.Closer = (*JSONFileLogger)(nil)
-// JSONFileLogger represents a logger that writes JSON to a file. It implements
-// the slog.Handler interface, as well as the io.Closer interface.
+// JSONFileLogger represents a logger that writes JSON to a file.
+// It implements the promql.QueryLogger interface.
type JSONFileLogger struct {
handler slog.Handler
file *os.File
diff --git a/vendor/github.com/prometheus/prometheus/util/namevalidationutil/namevalidationutil.go b/vendor/github.com/prometheus/prometheus/util/namevalidationutil/namevalidationutil.go
new file mode 100644
index 00000000000..2e656b6a190
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/namevalidationutil/namevalidationutil.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namevalidationutil
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/prometheus/common/model"
+)
+
+// CheckNameValidationScheme returns an error iff nameValidationScheme is unset.
+func CheckNameValidationScheme(nameValidationScheme model.ValidationScheme) error {
+ switch nameValidationScheme {
+ case model.UTF8Validation, model.LegacyValidation:
+ case model.UnsetValidation:
+ return errors.New("unset nameValidationScheme")
+ default:
+ panic(fmt.Errorf("unhandled nameValidationScheme: %s", nameValidationScheme.String()))
+ }
+ return nil
+}
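
A short usage sketch for the new helper (standalone illustration, not part of the patch):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/util/namevalidationutil"
)

func main() {
	// A configured scheme passes.
	if err := namevalidationutil.CheckNameValidationScheme(model.UTF8Validation); err != nil {
		fmt.Println("unexpected:", err)
	}

	// The zero value is rejected, forcing callers to choose a scheme explicitly.
	if err := namevalidationutil.CheckNameValidationScheme(model.UnsetValidation); err != nil {
		fmt.Println(err) // unset nameValidationScheme
	}
}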
diff --git a/vendor/github.com/prometheus/prometheus/util/pool/pool.go b/vendor/github.com/prometheus/prometheus/util/pool/pool.go
index 2ee89718544..7d5a8e3abf9 100644
--- a/vendor/github.com/prometheus/prometheus/util/pool/pool.go
+++ b/vendor/github.com/prometheus/prometheus/util/pool/pool.go
@@ -24,12 +24,12 @@ type Pool struct {
buckets []sync.Pool
sizes []int
// make is the function used to create an empty slice when none exist yet.
- make func(int) interface{}
+ make func(int) any
}
// New returns a new Pool with size buckets for minSize to maxSize
// increasing by the given factor.
-func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) *Pool {
+func New(minSize, maxSize int, factor float64, makeFunc func(int) any) *Pool {
if minSize < 1 {
panic("invalid minimum pool size")
}
@@ -56,7 +56,7 @@ func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) *
}
// Get returns a new byte slices that fits the given size.
-func (p *Pool) Get(sz int) interface{} {
+func (p *Pool) Get(sz int) any {
for i, bktSize := range p.sizes {
if sz > bktSize {
continue
@@ -71,7 +71,7 @@ func (p *Pool) Get(sz int) interface{} {
}
// Put adds a slice to the right bucket in the pool.
-func (p *Pool) Put(s interface{}) {
+func (p *Pool) Put(s any) {
slice := reflect.ValueOf(s)
if slice.Kind() != reflect.Slice {
diff --git a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
index 0e0e1871c33..d8ec186f4c1 100644
--- a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
+++ b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
@@ -89,7 +89,7 @@ func (s stepStat) String() string {
// MarshalJSON implements json.Marshaler.
func (s stepStat) MarshalJSON() ([]byte, error) {
- return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V})
+ return json.Marshal([...]any{float64(s.T) / 1000, s.V})
}
// queryTimings with all query timers mapped to durations.
@@ -323,7 +323,7 @@ func NewQuerySamples(enablePerStepStats bool) *QuerySamples {
return &qs
}
-func (qs *QuerySamples) NewChild() *QuerySamples {
+func (*QuerySamples) NewChild() *QuerySamples {
return NewQuerySamples(false)
}
diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
index 95dcb6f694f..0a78421fd43 100644
--- a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
+++ b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go
@@ -129,7 +129,7 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
switch c := s[0]; {
case c == quote && (quote == '\'' || quote == '"'):
err = ErrSyntax
- return
+ return value, multibyte, tail, err
case c >= utf8.RuneSelf:
r, size := utf8.DecodeRuneInString(s)
return r, true, s[size:], nil
@@ -140,7 +140,7 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
// Hard case: c is backslash.
if len(s) <= 1 {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
c := s[1]
s = s[2:]
@@ -173,13 +173,13 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
var v rune
if len(s) < n {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
for j := 0; j < n; j++ {
x, ok := unhex(s[j])
if !ok {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
v = v<<4 | x
}
@@ -191,7 +191,7 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
}
if v > utf8.MaxRune {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
value = v
multibyte = true
@@ -199,20 +199,20 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
v := rune(c) - '0'
if len(s) < 2 {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
- for j := 0; j < 2; j++ { // One digit already; two more.
+ for j := range 2 { // One digit already; two more.
x := rune(s[j]) - '0'
if x < 0 || x > 7 {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
v = (v << 3) | x
}
s = s[2:]
if v > 255 {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
value = v
case '\\':
@@ -220,15 +220,15 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string,
case '\'', '"':
if c != quote {
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
value = rune(c)
default:
err = ErrSyntax
- return
+ return value, multibyte, tail, err
}
tail = s
- return
+ return value, multibyte, tail, err
}
// contains reports whether the string contains the byte c.
@@ -251,5 +251,5 @@ func unhex(b byte) (v rune, ok bool) {
case 'A' <= c && c <= 'F':
return c - 'A' + 10, true
}
- return
+ return v, ok
}
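The quote.go hunks above replace naked returns with explicit ones; behavior is identical because the named results still hold their zero values at each early exit. A side-by-side sketch of the pattern (not taken from the diff):

package quotesketch

// Before: a naked return silently hands back whatever the named results hold.
func unhexNaked(b byte) (v rune, ok bool) {
	if '0' <= b && b <= '9' {
		return rune(b - '0'), true
	}
	return // implicitly v = 0, ok = false
}

// After: the same zero values, spelled out at the return site.
func unhexExplicit(b byte) (v rune, ok bool) {
	if '0' <= b && b <= '9' {
		return rune(b - '0'), true
	}
	return v, ok
}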
diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
index e15d591e0c7..e0a6f39be29 100644
--- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
+++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
@@ -50,7 +50,6 @@ func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) {
opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.RetentionDuration = 0
- opts.EnableNativeHistograms = true
// Set OutOfOrderTimeWindow if provided, otherwise use default (0)
if len(outOfOrderTimeWindow) > 0 {
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/cmp.go b/vendor/github.com/prometheus/prometheus/util/testutil/cmp.go
index 24d39d514c1..3ea1f401681 100644
--- a/vendor/github.com/prometheus/prometheus/util/testutil/cmp.go
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/cmp.go
@@ -25,13 +25,13 @@ import (
// RequireEqual is a replacement for require.Equal using go-cmp adapted for
// Prometheus data structures, instead of DeepEqual.
-func RequireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) {
+func RequireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
t.Helper()
RequireEqualWithOptions(t, expected, actual, nil, msgAndArgs...)
}
// RequireEqualWithOptions works like RequireEqual but allows extra cmp.Options.
-func RequireEqualWithOptions(t testing.TB, expected, actual interface{}, extra []cmp.Option, msgAndArgs ...interface{}) {
+func RequireEqualWithOptions(t testing.TB, expected, actual any, extra []cmp.Option, msgAndArgs ...any) {
t.Helper()
options := append([]cmp.Option{cmp.Comparer(labels.Equal)}, extra...)
if cmp.Equal(expected, actual, options...) {
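A hypothetical test exercising the helpers with their new any-based signatures; cmpopts.EquateNaNs is an illustrative extra option, not something this diff introduces:

package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestLabelsEqual(t *testing.T) {
	want := labels.FromStrings("instance", "a:80", "job", "api")
	got := labels.FromStrings("job", "api", "instance", "a:80")

	// Compares with go-cmp plus labels.Equal rather than reflect.DeepEqual.
	testutil.RequireEqual(t, want, got)

	// Extra cmp.Options can be threaded through when needed.
	testutil.RequireEqualWithOptions(t, want, got, []cmp.Option{cmpopts.EquateNaNs()})
}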
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go
index ea4b0e3746b..3d2a09d637c 100644
--- a/vendor/github.com/prometheus/prometheus/util/testutil/context.go
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go
@@ -27,7 +27,7 @@ type MockContext struct {
}
// Deadline always reports that no deadline is set.
-func (c *MockContext) Deadline() (deadline time.Time, ok bool) {
+func (*MockContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
@@ -42,7 +42,7 @@ func (c *MockContext) Err() error {
}
// Value ignores the given key and always returns nil.
-func (c *MockContext) Value(interface{}) interface{} {
+func (*MockContext) Value(any) any {
return nil
}
diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go
index 38dabd1830d..176acb5dc1e 100644
--- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go
+++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go
@@ -72,12 +72,12 @@ type (
// the test flags, which we do not want in non-test binaries even if
// they make use of these utilities for some reason).
T interface {
- Errorf(format string, args ...interface{})
+ Errorf(format string, args ...any)
FailNow()
}
)
-func (c nilCloser) Close() {
+func (nilCloser) Close() {
}
func (c callbackCloser) Close() {
@@ -127,7 +127,7 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
tester: t,
}
- return
+ return handler
}
// DirHash returns a hash of all file attributes and their content within a directory.
diff --git a/vendor/github.com/prometheus/prometheus/util/zeropool/pool.go b/vendor/github.com/prometheus/prometheus/util/zeropool/pool.go
index 4f6deddfb1c..946ce020912 100644
--- a/vendor/github.com/prometheus/prometheus/util/zeropool/pool.go
+++ b/vendor/github.com/prometheus/prometheus/util/zeropool/pool.go
@@ -39,7 +39,7 @@ type Pool[T any] struct {
func New[T any](item func() T) Pool[T] {
return Pool[T]{
items: sync.Pool{
- New: func() interface{} {
+ New: func() any {
val := item()
return &val
},
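A sketch of the generic pool in use, assuming the package's usual Get/Put pair (only the New function is visible in this hunk):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/util/zeropool"
)

func main() {
	// New stores *T inside sync.Pool (now through `any`) so Put avoids an allocation.
	p := zeropool.New(func() []int64 { return make([]int64, 0, 128) })

	buf := p.Get()
	buf = append(buf, 1, 2, 3)
	fmt.Println(len(buf), cap(buf))

	p.Put(buf[:0]) // hand the slice back for reuse
}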
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
index a67c0aa525c..baddedd4951 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go
@@ -37,6 +37,7 @@ import (
"github.com/grafana/regexp"
jsoniter "github.com/json-iterator/go"
"github.com/munnerz/goautoneg"
+ remoteapi "github.com/prometheus/client_golang/exp/api/remote"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
@@ -44,6 +45,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
@@ -73,20 +75,41 @@ const (
checkContextEveryNIterations = 128
)
-type errorType string
+type errorNum int
+
+type errorType struct {
+ num errorNum
+ str string
+}
const (
- errorNone errorType = ""
- errorTimeout errorType = "timeout"
- errorCanceled errorType = "canceled"
- errorExec errorType = "execution"
- errorBadData errorType = "bad_data"
- errorInternal errorType = "internal"
- errorUnavailable errorType = "unavailable"
- errorNotFound errorType = "not_found"
- errorNotAcceptable errorType = "not_acceptable"
+ ErrorNone errorNum = iota
+ ErrorTimeout
+ ErrorCanceled
+ ErrorExec
+ ErrorBadData
+ ErrorInternal
+ ErrorUnavailable
+ ErrorNotFound
+ ErrorNotAcceptable
+)
+
+var (
+ errorNone = errorType{ErrorNone, ""}
+ errorTimeout = errorType{ErrorTimeout, "timeout"}
+ errorCanceled = errorType{ErrorCanceled, "canceled"}
+ errorExec = errorType{ErrorExec, "execution"}
+ errorBadData = errorType{ErrorBadData, "bad_data"}
+ errorInternal = errorType{ErrorInternal, "internal"}
+ errorUnavailable = errorType{ErrorUnavailable, "unavailable"}
+ errorNotFound = errorType{ErrorNotFound, "not_found"}
+ errorNotAcceptable = errorType{ErrorNotAcceptable, "not_acceptable"}
)
+// OverrideErrorCode can be used to override the HTTP status code returned for different error types.
+// Return false to fall back to the default status code.
+type OverrideErrorCode func(errorNum, error) (code int, override bool)
+
var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"}
type apiError struct {
@@ -95,7 +118,7 @@ type apiError struct {
}
func (e *apiError) Error() string {
- return fmt.Sprintf("%s: %s", e.typ, e.err)
+ return fmt.Sprintf("%s: %s", e.typ.str, e.err)
}
// ScrapePoolsRetriever provides the list of all scrape pools.
@@ -108,6 +131,7 @@ type TargetRetriever interface {
TargetsActive() map[string][]*scrape.Target
TargetsDropped() map[string][]*scrape.Target
TargetsDroppedCounts() map[string]int
+ ScrapePoolConfig(string) (*config.ScrapeConfig, error)
}
// AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
@@ -162,16 +186,16 @@ type RuntimeInfo struct {
// Response contains a response to an HTTP API request.
type Response struct {
- Status status `json:"status"`
- Data interface{} `json:"data,omitempty"`
- ErrorType errorType `json:"errorType,omitempty"`
- Error string `json:"error,omitempty"`
- Warnings []string `json:"warnings,omitempty"`
- Infos []string `json:"infos,omitempty"`
+ Status status `json:"status"`
+ Data any `json:"data,omitempty"`
+ ErrorType string `json:"errorType,omitempty"`
+ Error string `json:"error,omitempty"`
+ Warnings []string `json:"warnings,omitempty"`
+ Infos []string `json:"infos,omitempty"`
}
type apiFuncResult struct {
- data interface{}
+ data any
err *apiError
warnings annotations.Annotations
finalizer func()
@@ -223,6 +247,8 @@ type API struct {
statsRenderer StatsRenderer
notificationsGetter func() []notifications.Notification
notificationsSub func() (<-chan notifications.Notification, func(), bool)
+ // overrideErrorCode allows customizing the default mapping of error types to HTTP status codes.
+ overrideErrorCode OverrideErrorCode
remoteWriteHandler http.Handler
remoteReadHandler http.Handler
@@ -262,11 +288,13 @@ func NewAPI(
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
- acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
+ acceptRemoteWriteProtoMsgs remoteapi.MessageTypes,
otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool,
ctZeroIngestionEnabled bool,
lookbackDelta time.Duration,
enableTypeAndUnitLabels bool,
+ appendMetadata bool,
+ overrideErrorCode OverrideErrorCode,
) *API {
a := &API{
QueryEngine: qe,
@@ -295,6 +323,7 @@ func NewAPI(
statsRenderer: DefaultStatsRenderer,
notificationsGetter: notificationsGetter,
notificationsSub: notificationsSub,
+ overrideErrorCode: overrideErrorCode,
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
@@ -310,13 +339,14 @@ func NewAPI(
}
if rwEnabled {
- a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled)
+ a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled, enableTypeAndUnitLabels, appendMetadata)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{
ConvertDelta: otlpDeltaToCumulative,
NativeDelta: otlpNativeDeltaIngestion,
LookbackDelta: lookbackDelta,
+ IngestCTZeroSample: ctZeroIngestionEnabled,
EnableTypeAndUnitLabels: enableTypeAndUnitLabels,
})
}
@@ -403,6 +433,7 @@ func (api *API) Register(r *route.Router) {
r.Get("/scrape_pools", wrap(api.scrapePools))
r.Get("/targets", wrap(api.targets))
r.Get("/targets/metadata", wrap(api.targetMetadata))
+ r.Get("/targets/relabel_steps", wrap(api.targetRelabelSteps))
r.Get("/alertmanagers", wrapAgent(api.alertmanagers))
r.Get("/metadata", wrap(api.metricMetadata))
@@ -445,7 +476,7 @@ func invalidParamError(err error, parameter string) apiFuncResult {
}, nil, nil}
}
-func (api *API) options(*http.Request) apiFuncResult {
+func (*API) options(*http.Request) apiFuncResult {
return apiFuncResult{nil, nil, nil, nil}
}
@@ -518,7 +549,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
}, nil, warnings, qry.Close}
}
-func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
+func (*API) formatQuery(r *http.Request) (result apiFuncResult) {
expr, err := parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
@@ -527,7 +558,7 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
-func (api *API) parseQuery(r *http.Request) apiFuncResult {
+func (*API) parseQuery(r *http.Request) apiFuncResult {
expr, err := parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
@@ -790,8 +821,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
name = model.UnescapeName(name, model.ValueEncodingEscaping)
}
- label := model.LabelName(name)
- if !label.IsValid() {
+ if !model.UTF8Validation.IsValidLabelName(name) {
return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil}
}
@@ -998,7 +1028,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
return apiFuncResult{metrics, nil, warnings, closer}
}
-func (api *API) dropSeries(_ *http.Request) apiFuncResult {
+func (*API) dropSeries(*http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil}
}
@@ -1278,6 +1308,49 @@ type metricMetadata struct {
Unit string `json:"unit"`
}
+type RelabelStep struct {
+ Rule *relabel.Config `json:"rule"`
+ Output labels.Labels `json:"output"`
+ Keep bool `json:"keep"`
+}
+
+type RelabelStepsResponse struct {
+ Steps []RelabelStep `json:"steps"`
+}
+
+func (api *API) targetRelabelSteps(r *http.Request) apiFuncResult {
+ scrapePool := r.FormValue("scrapePool")
+ if scrapePool == "" {
+ return apiFuncResult{nil, &apiError{errorBadData, errors.New("no scrapePool parameter provided")}, nil, nil}
+ }
+ labelsJSON := r.FormValue("labels")
+ if labelsJSON == "" {
+ return apiFuncResult{nil, &apiError{errorBadData, errors.New("no labels parameter provided")}, nil, nil}
+ }
+ var lbls labels.Labels
+ if err := json.Unmarshal([]byte(labelsJSON), &lbls); err != nil {
+ return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing labels: %w", err)}, nil, nil}
+ }
+
+ scrapeConfig, err := api.targetRetriever(r.Context()).ScrapePoolConfig(scrapePool)
+ if err != nil {
+ return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error retrieving scrape config: %w", err)}, nil, nil}
+ }
+
+ rules := scrapeConfig.RelabelConfigs
+ steps := make([]RelabelStep, len(rules))
+ for i, rule := range rules {
+ outLabels, keep := relabel.Process(lbls, rules[:i+1]...)
+ steps[i] = RelabelStep{
+ Rule: rule,
+ Output: outLabels,
+ Keep: keep,
+ }
+ }
+
+ return apiFuncResult{&RelabelStepsResponse{Steps: steps}, nil, nil, nil}
+}
+
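A hypothetical client call against the new endpoint; the path and the scrapePool/labels form values come from the handler above, while the host, pool name, and label set are invented for illustration:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("scrapePool", "node")
	// labels.Labels unmarshals from a JSON object of name/value pairs.
	params.Set("labels", `{"__address__":"10.0.0.1:9100","job":"node"}`)

	resp, err := http.Get("http://localhost:9090/api/v1/targets/relabel_steps?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The data field is a RelabelStepsResponse: one entry per relabel rule,
	// each holding the rule, the labels after applying rules[:i+1], and a keep flag.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}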
// AlertmanagerDiscovery has all the active Alertmanagers.
type AlertmanagerDiscovery struct {
ActiveAlertmanagers []*AlertmanagerTarget `json:"activeAlertmanagers"`
@@ -1444,7 +1517,7 @@ type RuleGroup struct {
LastEvaluation time.Time `json:"lastEvaluation"`
}
-type Rule interface{}
+type Rule any
type AlertingRule struct {
// State can be "pending", "firing", "inactive".
@@ -1467,7 +1540,7 @@ type AlertingRule struct {
type RecordingRule struct {
Name string `json:"name"`
Query string `json:"query"`
- Labels labels.Labels `json:"labels,omitempty"`
+ Labels labels.Labels `json:"labels"`
Health rules.RuleHealth `json:"health"`
LastError string `json:"lastError,omitempty"`
EvaluationTime float64 `json:"evaluationTime"`
@@ -1692,7 +1765,7 @@ type prometheusConfig struct {
YAML string `json:"yaml"`
}
-func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult {
+func (api *API) serveRuntimeInfo(*http.Request) apiFuncResult {
status, err := api.runtimeInfo()
if err != nil {
return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil}
@@ -1700,18 +1773,18 @@ func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult {
return apiFuncResult{status, nil, nil, nil}
}
-func (api *API) serveBuildInfo(_ *http.Request) apiFuncResult {
+func (api *API) serveBuildInfo(*http.Request) apiFuncResult {
return apiFuncResult{api.buildInfo, nil, nil, nil}
}
-func (api *API) serveConfig(_ *http.Request) apiFuncResult {
+func (api *API) serveConfig(*http.Request) apiFuncResult {
cfg := &prometheusConfig{
YAML: api.config().String(),
}
return apiFuncResult{cfg, nil, nil, nil}
}
-func (api *API) serveFlags(_ *http.Request) apiFuncResult {
+func (api *API) serveFlags(*http.Request) apiFuncResult {
return apiFuncResult{api.flagsMap, nil, nil, nil}
}
@@ -1749,7 +1822,7 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
return result
}
-func (api *API) serveTSDBBlocks(_ *http.Request) apiFuncResult {
+func (api *API) serveTSDBBlocks(*http.Request) apiFuncResult {
blockMetas, err := api.db.BlockMetas()
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error getting block metadata: %w", err)}, nil, nil}
@@ -1978,7 +2051,7 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult {
// Query string is needed to get the position information for the annotations, and it
// can be empty if the position information isn't needed.
-func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
+func (api *API) respond(w http.ResponseWriter, req *http.Request, data any, warnings annotations.Annotations, query string) {
statusMessage := statusSuccess
warn, info := warnings.AsStrings(query, 10, 10)
@@ -2026,11 +2099,11 @@ func (api *API) negotiateCodec(req *http.Request, resp *Response) (Codec, error)
return defaultCodec, nil
}
-func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
+func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data any) {
json := jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(&Response{
Status: statusError,
- ErrorType: apiErr.typ,
+ ErrorType: apiErr.typ.str,
Error: apiErr.err.Error(),
Data: data,
})
@@ -2041,29 +2114,41 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter
}
var code int
- switch apiErr.typ {
+ if api.overrideErrorCode != nil {
+ if newCode, override := api.overrideErrorCode(apiErr.typ.num, apiErr.err); override {
+ code = newCode
+ } else {
+ code = getDefaultErrorCode(apiErr.typ)
+ }
+ } else {
+ code = getDefaultErrorCode(apiErr.typ)
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(code)
+ if n, err := w.Write(b); err != nil {
+ api.logger.Error("error writing response", "bytesWritten", n, "err", err)
+ }
+}
+
+func getDefaultErrorCode(errType errorType) int {
+ switch errType {
case errorBadData:
- code = http.StatusBadRequest
+ return http.StatusBadRequest
case errorExec:
- code = http.StatusUnprocessableEntity
+ return http.StatusUnprocessableEntity
case errorCanceled:
- code = statusClientClosedConnection
+ return statusClientClosedConnection
case errorTimeout:
- code = http.StatusServiceUnavailable
+ return http.StatusServiceUnavailable
case errorInternal:
- code = http.StatusInternalServerError
+ return http.StatusInternalServerError
case errorNotFound:
- code = http.StatusNotFound
+ return http.StatusNotFound
case errorNotAcceptable:
- code = http.StatusNotAcceptable
+ return http.StatusNotAcceptable
default:
- code = http.StatusInternalServerError
- }
-
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(code)
- if n, err := w.Write(b); err != nil {
- api.logger.Error("error writing response", "bytesWritten", n, "err", err)
+ return http.StatusInternalServerError
}
}
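A sketch of a custom mapping, written as if inside the package since errorNum is unexported; the 504-for-timeouts choice is purely illustrative, and the function would be passed as the new overrideErrorCode argument to NewAPI:

// Hypothetical override: surface timeouts as 504 instead of the default 503,
// deferring to getDefaultErrorCode for everything else.
var gatewayTimeouts OverrideErrorCode = func(num errorNum, _ error) (int, bool) {
	if num == ErrorTimeout {
		return http.StatusGatewayTimeout, true
	}
	return 0, false // respondError falls back to getDefaultErrorCode
}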
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go
index 6bd095a8f3d..4f3a23e976b 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/json_codec.go
@@ -38,20 +38,20 @@ func init() {
// JSONCodec is a Codec that encodes API responses as JSON.
type JSONCodec struct{}
-func (j JSONCodec) ContentType() MIMEType {
+func (JSONCodec) ContentType() MIMEType {
return MIMEType{Type: "application", SubType: "json"}
}
-func (j JSONCodec) CanEncode(_ *Response) bool {
+func (JSONCodec) CanEncode(*Response) bool {
return true
}
-func (j JSONCodec) Encode(resp *Response) ([]byte, error) {
+func (JSONCodec) Encode(resp *Response) ([]byte, error) {
json := jsoniter.ConfigCompatibleWithStandardLibrary
return json.Marshal(resp)
}
-// marshalSeriesJSON writes something like the following:
+// unsafeMarshalSeriesJSON writes something like the following:
//
// {
// "metric" : {
@@ -108,7 +108,7 @@ func neverEmpty(unsafe.Pointer) bool {
return false
}
-// marshalSampleJSON writes something like the following for normal value samples:
+// unsafeMarshalSampleJSON writes something like the following for normal value samples:
//
// {
// "metric" : {
diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go
index afa11f16b9d..dc2e7e29015 100644
--- a/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go
+++ b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go
@@ -24,14 +24,14 @@ import (
// for the tree view in the UI.
// TODO: Could it make sense to do this via the normal JSON marshalling methods? Maybe
// too UI-specific though.
-func translateAST(node parser.Expr) interface{} {
+func translateAST(node parser.Expr) any {
if node == nil {
return nil
}
switch n := node.(type) {
case *parser.AggregateExpr:
- return map[string]interface{}{
+ return map[string]any{
"type": "aggregation",
"op": n.Op.String(),
"expr": translateAST(n.Expr),
@@ -40,9 +40,9 @@ func translateAST(node parser.Expr) interface{} {
"without": n.Without,
}
case *parser.BinaryExpr:
- var matching interface{}
+ var matching any
if m := n.VectorMatching; m != nil {
- matching = map[string]interface{}{
+ matching = map[string]any{
"card": m.Card.String(),
"labels": sanitizeList(m.MatchingLabels),
"on": m.On,
@@ -50,7 +50,7 @@ func translateAST(node parser.Expr) interface{} {
}
}
- return map[string]interface{}{
+ return map[string]any{
"type": "binaryExpr",
"op": n.Op.String(),
"lhs": translateAST(n.LHS),
@@ -59,14 +59,14 @@ func translateAST(node parser.Expr) interface{} {
"bool": n.ReturnBool,
}
case *parser.Call:
- args := []interface{}{}
+ args := []any{}
for _, arg := range n.Args {
args = append(args, translateAST(arg))
}
- return map[string]interface{}{
+ return map[string]any{
"type": "call",
- "func": map[string]interface{}{
+ "func": map[string]any{
"name": n.Func.Name,
"argTypes": n.Func.ArgTypes,
"variadic": n.Func.Variadic,
@@ -76,7 +76,7 @@ func translateAST(node parser.Expr) interface{} {
}
case *parser.MatrixSelector:
vs := n.VectorSelector.(*parser.VectorSelector)
- return map[string]interface{}{
+ return map[string]any{
"type": "matrixSelector",
"name": vs.Name,
"range": n.Range.Milliseconds(),
@@ -84,9 +84,11 @@ func translateAST(node parser.Expr) interface{} {
"matchers": translateMatchers(vs.LabelMatchers),
"timestamp": vs.Timestamp,
"startOrEnd": getStartOrEnd(vs.StartOrEnd),
+ "anchored": vs.Anchored,
+ "smoothed": vs.Smoothed,
}
case *parser.SubqueryExpr:
- return map[string]interface{}{
+ return map[string]any{
"type": "subquery",
"expr": translateAST(n.Expr),
"range": n.Range.Milliseconds(),
@@ -101,29 +103,31 @@ func translateAST(node parser.Expr) interface{} {
"val": strconv.FormatFloat(n.Val, 'f', -1, 64),
}
case *parser.ParenExpr:
- return map[string]interface{}{
+ return map[string]any{
"type": "parenExpr",
"expr": translateAST(n.Expr),
}
case *parser.StringLiteral:
- return map[string]interface{}{
+ return map[string]any{
"type": "stringLiteral",
"val": n.Val,
}
case *parser.UnaryExpr:
- return map[string]interface{}{
+ return map[string]any{
"type": "unaryExpr",
"op": n.Op.String(),
"expr": translateAST(n.Expr),
}
case *parser.VectorSelector:
- return map[string]interface{}{
+ return map[string]any{
"type": "vectorSelector",
"name": n.Name,
"offset": n.OriginalOffset.Milliseconds(),
"matchers": translateMatchers(n.LabelMatchers),
"timestamp": n.Timestamp,
"startOrEnd": getStartOrEnd(n.StartOrEnd),
+ "anchored": n.Anchored,
+ "smoothed": n.Smoothed,
}
}
panic("unsupported node type")
@@ -136,10 +140,10 @@ func sanitizeList(l []string) []string {
return l
}
-func translateMatchers(in []*labels.Matcher) interface{} {
- out := []map[string]interface{}{}
+func translateMatchers(in []*labels.Matcher) any {
+ out := []map[string]any{}
for _, m := range in {
- out = append(out, map[string]interface{}{
+ out = append(out, map[string]any{
"name": m.Name,
"value": m.Value,
"type": m.Type.String(),
@@ -148,7 +152,7 @@ func translateMatchers(in []*labels.Matcher) interface{} {
return out
}
-func getStartOrEnd(startOrEnd parser.ItemType) interface{} {
+func getStartOrEnd(startOrEnd parser.ItemType) any {
if startOrEnd == 0 {
return nil
}
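As a sketch (written as if inside the package, since translateAST is unexported), the new anchored/smoothed fields surface on plain selectors too; the query string is arbitrary:

func translateExample() {
	expr, err := parser.ParseExpr(`up{job="api"}`)
	if err != nil {
		panic(err)
	}
	// Yields a map[string]any with "type": "vectorSelector" plus name, matchers,
	// offset, timestamp, startOrEnd, and now "anchored" and "smoothed" booleans.
	fmt.Printf("%v\n", translateAST(expr))
}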
diff --git a/vendor/github.com/prometheus/sigv4/.golangci.yml b/vendor/github.com/prometheus/sigv4/.golangci.yml
index 255980fa81b..070f76e9107 100644
--- a/vendor/github.com/prometheus/sigv4/.golangci.yml
+++ b/vendor/github.com/prometheus/sigv4/.golangci.yml
@@ -2,41 +2,31 @@ version: "2"
linters:
enable:
- errorlint
+ - gocritic
- misspell
- revive
- testifylint
settings:
+ gocritic:
+ enable-all: true
+ disabled-checks:
+ - whyNoLint
revive:
rules:
- name: unused-parameter
severity: warning
disabled: true
testifylint:
- enable:
- - bool-compare
- - compares
- - empty
- - error-is-as
- - error-nil
- - expected-actual
- - len
- - require-error
- - suite-dont-use-pkg
- - suite-extra-assert-call
+ enable-all: true
disable:
- float-compare
- go-require
exclusions:
- generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
- paths:
- - third_party$
- - builtin$
- - examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
@@ -48,9 +38,4 @@ formatters:
goimports:
local-prefixes:
- github.com/prometheus/sigv4
- exclusions:
- generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
+
diff --git a/vendor/github.com/prometheus/sigv4/sigv4.go b/vendor/github.com/prometheus/sigv4/sigv4.go
index 8ad1a2cbe37..e225ab5ffbf 100644
--- a/vendor/github.com/prometheus/sigv4/sigv4.go
+++ b/vendor/github.com/prometheus/sigv4/sigv4.go
@@ -119,7 +119,7 @@ func NewSigV4RoundTripper(cfg *SigV4Config, next http.RoundTripper) (http.RoundT
return rt, nil
}
-func (rt *sigV4RoundTripper) newBuf() interface{} {
+func (rt *sigV4RoundTripper) newBuf() any {
return bytes.NewBuffer(make([]byte, 0, 1024))
}
diff --git a/vendor/github.com/prometheus/sigv4/sigv4_config.go b/vendor/github.com/prometheus/sigv4/sigv4_config.go
index d4b88f6d236..f756151d61d 100644
--- a/vendor/github.com/prometheus/sigv4/sigv4_config.go
+++ b/vendor/github.com/prometheus/sigv4/sigv4_config.go
@@ -39,7 +39,7 @@ func (c *SigV4Config) Validate() error {
return nil
}
-func (c *SigV4Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *SigV4Config) UnmarshalYAML(unmarshal func(any) error) error {
type plain SigV4Config
*c = SigV4Config{}
if err := unmarshal((*plain)(c)); err != nil {
diff --git a/vendor/github.com/thanos-io/promql-engine/compute/aggregators.go b/vendor/github.com/thanos-io/promql-engine/compute/aggregators.go
index 31c56527811..a7c1efc45bd 100644
--- a/vendor/github.com/thanos-io/promql-engine/compute/aggregators.go
+++ b/vendor/github.com/thanos-io/promql-engine/compute/aggregators.go
@@ -21,19 +21,30 @@ const (
MixedTypeValue
)
+// CounterResetState tracks which counter reset hints have been seen during aggregation.
+// Used to detect collisions between CounterReset and NotCounterReset hints.
+type CounterResetState uint8
+
+const (
+ SeenCounterReset CounterResetState = 1 << iota // histogram with CounterReset hint was seen
+ SeenNotCounterReset // histogram with NotCounterReset hint was seen
+)
+
+// HasCollision returns true if both CounterReset and NotCounterReset hints were seen.
+func (s CounterResetState) HasCollision() bool {
+ return s&SeenCounterReset != 0 && s&SeenNotCounterReset != 0
+}
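The bitset makes collision detection two OR-writes plus one check; a minimal illustration:

var state CounterResetState
state |= SeenCounterReset    // one input histogram carried a CounterReset hint
state |= SeenNotCounterReset // another carried NotCounterReset
fmt.Println(state.HasCollision()) // true: the inputs disagree about resets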
+
// Accumulators mirror Prometheus behavior for aggregations, either operators or
// "[...]_over_time" functions. The caller is responsible for adding all errors
// returned by Add as annotations.
-// Accumulators might ignore histograms (for example min or max), if they do
-// the caller can check the HasIgnoredHistograms method and add appropriate
-// annotations.
-// The ValueType function can be checked to see if the aggregator encountered
-// mixed values for its slot so the caller can again add the appropriate annotations.
+// The Warnings method returns a bitset of warning conditions that occurred
+// during accumulation (e.g., ignored histograms, mixed types).
type Accumulator interface {
Add(v float64, h *histogram.FloatHistogram) error
Value() (float64, *histogram.FloatHistogram)
ValueType() ValueType
- HasIgnoredHistograms() bool
+ Warnings() warnings.Warnings
Reset(float64)
}
@@ -42,16 +53,18 @@ type VectorAccumulator interface {
AddVector(vs []float64, hs []*histogram.FloatHistogram) error
Value() (float64, *histogram.FloatHistogram)
ValueType() ValueType
- HasIgnoredHistograms() bool
+ Warnings() warnings.Warnings
Reset(float64)
}
type SumAcc struct {
- value float64
- compensation float64
- histSum *histogram.FloatHistogram
- hasFloatVal bool
- hasError bool // histogram error occurred; accumulator becomes no-op
+ value float64
+ compensation float64
+ histSum *histogram.FloatHistogram
+ hasFloatVal bool
+ hasError bool // histogram error occurred; accumulator becomes no-op
+ warn warnings.Warnings
+ counterResetState CounterResetState
}
func NewSumAcc() *SumAcc {
@@ -68,8 +81,22 @@ func (s *SumAcc) AddVector(float64s []float64, histograms []*histogram.FloatHist
}
if len(histograms) > 0 {
- var err error
- s.histSum, err = histogramSum(s.histSum, histograms)
+ // Track counter reset hints for collision detection.
+ for _, h := range histograms {
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ s.counterResetState |= SeenCounterReset
+ case histogram.NotCounterReset:
+ s.counterResetState |= SeenNotCounterReset
+ }
+ }
+
+ var (
+ err error
+ warn warnings.Warnings
+ )
+ s.histSum, warn, err = histogramSum(s.histSum, histograms)
+ s.warn |= warn
if err != nil {
s.hasError = true
return err
@@ -91,21 +118,35 @@ func (s *SumAcc) Add(v float64, h *histogram.FloatHistogram) error {
}
func (s *SumAcc) addHistogram(h *histogram.FloatHistogram) error {
+ // Track counter reset hints for collision detection.
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ s.counterResetState |= SeenCounterReset
+ case histogram.NotCounterReset:
+ s.counterResetState |= SeenNotCounterReset
+ }
+
if s.histSum == nil {
s.histSum = h.Copy()
return nil
}
// The histogram being added must have an equal or larger schema.
// https://github.com/prometheus/prometheus/blob/57bcbf18880f7554ae34c5b341d52fc53f059a97/promql/engine.go#L2448-L2456
- var err error
+ var (
+ err error
+ nhcbBoundsReconciled bool
+ )
if h.Schema >= s.histSum.Schema {
- s.histSum, err = s.histSum.Add(h)
+ s.histSum, _, nhcbBoundsReconciled, err = s.histSum.Add(h)
} else {
t := h.Copy()
- if s.histSum, err = t.Add(s.histSum); err == nil {
+ if s.histSum, _, nhcbBoundsReconciled, err = t.Add(s.histSum); err == nil {
s.histSum = t
}
}
+ if nhcbBoundsReconciled {
+ s.warn |= warnings.WarnNHCBBoundsReconciledAgg
+ }
if err != nil {
s.histSum = nil
s.hasError = true
@@ -131,16 +172,26 @@ func (s *SumAcc) ValueType() ValueType {
return NoValue
}
-func (s *SumAcc) HasIgnoredHistograms() bool {
- return false // Sum handles histograms; use ValueType() instead
+func (s *SumAcc) Warnings() warnings.Warnings {
+ warn := s.warn
+ if s.ValueType() == MixedTypeValue {
+ warn |= warnings.WarnMixedFloatsHistograms
+ }
+ // Report a counter reset collision when both CounterReset and NotCounterReset hints were seen.
+ if s.counterResetState.HasCollision() {
+ warn |= warnings.WarnCounterResetCollision
+ }
+ return warn
}
func (s *SumAcc) Reset(_ float64) {
s.histSum = nil
s.hasFloatVal = false
s.hasError = false
+ s.warn = 0
s.value = 0
s.compensation = 0
+ s.counterResetState = 0
}
func NewMaxAcc() *MaxAcc {
@@ -148,14 +199,14 @@ func NewMaxAcc() *MaxAcc {
}
type MaxAcc struct {
- value float64
- hasValue bool
- ignoredHist bool
+ value float64
+ hasValue bool
+ warn warnings.Warnings
}
func (c *MaxAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) error {
if len(hs) > 0 {
- c.ignoredHist = true
+ c.warn |= warnings.WarnHistogramIgnoredInAggregation
}
if len(vs) == 0 {
return nil
@@ -171,15 +222,15 @@ func (c *MaxAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) error {
func (c *MaxAcc) Add(v float64, h *histogram.FloatHistogram) error {
if h != nil {
- c.ignoredHist = true
+ c.warn |= warnings.WarnHistogramIgnoredInAggregation
return nil
}
c.addFloat(v)
return nil
}
-func (c *MaxAcc) HasIgnoredHistograms() bool {
- return c.ignoredHist
+func (c *MaxAcc) Warnings() warnings.Warnings {
+ return c.warn
}
func (c *MaxAcc) addFloat(v float64) {
@@ -207,7 +258,7 @@ func (c *MaxAcc) ValueType() ValueType {
func (c *MaxAcc) Reset(_ float64) {
c.hasValue = false
- c.ignoredHist = false
+ c.warn = 0
c.value = 0
}
@@ -216,14 +267,14 @@ func NewMinAcc() *MinAcc {
}
type MinAcc struct {
- value float64
- hasValue bool
- ignoredHist bool
+ value float64
+ hasValue bool
+ warn warnings.Warnings
}
func (c *MinAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) error {
if len(hs) > 0 {
- c.ignoredHist = true
+ c.warn |= warnings.WarnHistogramIgnoredInAggregation
}
if len(vs) == 0 {
return nil
@@ -239,15 +290,15 @@ func (c *MinAcc) AddVector(vs []float64, hs []*histogram.FloatHistogram) error {
func (c *MinAcc) Add(v float64, h *histogram.FloatHistogram) error {
if h != nil {
- c.ignoredHist = true
+ c.warn |= warnings.WarnHistogramIgnoredInAggregation
return nil
}
c.addFloat(v)
return nil
}
-func (c *MinAcc) HasIgnoredHistograms() bool {
- return c.ignoredHist
+func (c *MinAcc) Warnings() warnings.Warnings {
+ return c.warn
}
func (c *MinAcc) addFloat(v float64) {
@@ -275,7 +326,7 @@ func (c *MinAcc) ValueType() ValueType {
func (c *MinAcc) Reset(_ float64) {
c.hasValue = false
- c.ignoredHist = false
+ c.warn = 0
c.value = 0
}
@@ -315,8 +366,8 @@ func (c *GroupAcc) ValueType() ValueType {
}
}
-func (c *GroupAcc) HasIgnoredHistograms() bool {
- return false
+func (c *GroupAcc) Warnings() warnings.Warnings {
+ return 0
}
func (c *GroupAcc) Reset(_ float64) {
@@ -358,8 +409,8 @@ func (c *CountAcc) ValueType() ValueType {
return NoValue
}
}
-func (c *CountAcc) HasIgnoredHistograms() bool {
- return false
+func (c *CountAcc) Warnings() warnings.Warnings {
+ return 0
}
func (c *CountAcc) Reset(_ float64) {
@@ -376,10 +427,12 @@ type AvgAcc struct {
hasValue bool
hasError bool // histogram error occurred; accumulator becomes no-op
- histSum *histogram.FloatHistogram
- histScratch *histogram.FloatHistogram
- histSumScratch *histogram.FloatHistogram
- histCount float64
+ histSum *histogram.FloatHistogram
+ histScratch *histogram.FloatHistogram
+ histSumScratch *histogram.FloatHistogram
+ histCount float64
+ warn warnings.Warnings
+ counterResetState CounterResetState
}
func NewAvgAcc() *AvgAcc {
@@ -397,6 +450,14 @@ func (a *AvgAcc) Add(v float64, h *histogram.FloatHistogram) error {
}
func (a *AvgAcc) addHistogram(h *histogram.FloatHistogram) error {
+ // Track counter reset hints for collision detection.
+ switch h.CounterResetHint {
+ case histogram.CounterReset:
+ a.counterResetState |= SeenCounterReset
+ case histogram.NotCounterReset:
+ a.counterResetState |= SeenNotCounterReset
+ }
+
a.histCount++
if a.histSum == nil {
a.histSum = h.Copy()
@@ -405,13 +466,24 @@ func (a *AvgAcc) addHistogram(h *histogram.FloatHistogram) error {
return nil
}
+ var (
+ err error
+ nhcbBoundsReconciled bool
+ )
h.CopyTo(a.histScratch)
left := a.histScratch.Div(a.histCount)
a.histSum.CopyTo(a.histSumScratch)
right := a.histSumScratch.Div(a.histCount)
- toAdd, err := left.Sub(right)
+ toAdd, _, nhcbBoundsReconciled, err := left.Sub(right)
+ if nhcbBoundsReconciled {
+ a.warn |= warnings.WarnNHCBBoundsReconciledAgg
+ }
if err == nil {
- a.histSum, err = a.histSum.Add(toAdd)
+ var nbr bool
+ a.histSum, _, nbr, err = a.histSum.Add(toAdd)
+ if nbr {
+ a.warn |= warnings.WarnNHCBBoundsReconciledAgg
+ }
}
if err != nil {
a.histSum = nil
@@ -518,8 +590,16 @@ func (a *AvgAcc) ValueType() ValueType {
return NoValue
}
-func (a *AvgAcc) HasIgnoredHistograms() bool {
- return false // Avg handles histograms; use ValueType() instead
+func (a *AvgAcc) Warnings() warnings.Warnings {
+ warn := a.warn
+ if a.ValueType() == MixedTypeValue {
+ warn |= warnings.WarnMixedFloatsHistograms
+ }
+ // Report a counter reset collision when both CounterReset and NotCounterReset hints were seen.
+ if a.counterResetState.HasCollision() {
+ warn |= warnings.WarnCounterResetCollision
+ }
+ return warn
}
func (a *AvgAcc) Reset(_ float64) {
@@ -532,17 +612,19 @@ func (a *AvgAcc) Reset(_ float64) {
a.histCount = 0
a.histSum = nil
+ a.warn = 0
+ a.counterResetState = 0
}
type statAcc struct {
- count float64
- mean float64
- cMean float64
- value float64
- cValue float64
- hasValue bool
- hasNaN bool
- ignoredHist bool
+ count float64
+ mean float64
+ cMean float64
+ value float64
+ cValue float64
+ hasValue bool
+ hasNaN bool
+ warn warnings.Warnings
}
func (s *statAcc) ValueType() ValueType {
@@ -552,14 +634,14 @@ func (s *statAcc) ValueType() ValueType {
return NoValue
}
-func (s *statAcc) HasIgnoredHistograms() bool {
- return s.ignoredHist
+func (s *statAcc) Warnings() warnings.Warnings {
+ return s.warn
}
func (s *statAcc) Reset(_ float64) {
s.hasValue = false
s.hasNaN = false
- s.ignoredHist = false
+ s.warn = 0
s.count = 0
s.mean = 0
s.cMean = 0
@@ -596,7 +678,7 @@ func NewStdDevAcc() *StdDevAcc {
func (s *StdDevAcc) Add(v float64, h *histogram.FloatHistogram) error {
if h != nil {
- s.ignoredHist = true
+ s.warn |= warnings.WarnHistogramIgnoredInAggregation
return nil
}
s.add(v)
@@ -617,7 +699,7 @@ func NewStdVarAcc() *StdVarAcc {
func (s *StdVarAcc) Add(v float64, h *histogram.FloatHistogram) error {
if h != nil {
- s.ignoredHist = true
+ s.warn |= warnings.WarnHistogramIgnoredInAggregation
return nil
}
s.add(v)
@@ -629,10 +711,10 @@ func (s *StdVarAcc) Value() (float64, *histogram.FloatHistogram) {
}
type QuantileAcc struct {
- arg float64
- points []float64
- hasValue bool
- ignoredHist bool
+ arg float64
+ points []float64
+ hasValue bool
+ warn warnings.Warnings
}
func NewQuantileAcc() Accumulator {
@@ -641,7 +723,7 @@ func NewQuantileAcc() Accumulator {
func (q *QuantileAcc) Add(v float64, h *histogram.FloatHistogram) error {
if h != nil {
- q.ignoredHist = true
+ q.warn |= warnings.WarnHistogramIgnoredInAggregation
return nil
}
@@ -662,13 +744,13 @@ func (q *QuantileAcc) ValueType() ValueType {
}
}
-func (q *QuantileAcc) HasIgnoredHistograms() bool {
- return q.ignoredHist
+func (q *QuantileAcc) Warnings() warnings.Warnings {
+ return q.warn
}
func (q *QuantileAcc) Reset(f float64) {
q.hasValue = false
- q.ignoredHist = false
+ q.warn = 0
q.arg = f
q.points = q.points[:0]
}
@@ -694,12 +776,12 @@ func (acc *HistogramAvgAcc) Add(v float64, h *histogram.FloatHistogram) error {
}
var err error
if h.Schema >= acc.sum.Schema {
- if acc.sum, err = acc.sum.Add(h); err != nil {
+ if acc.sum, _, _, err = acc.sum.Add(h); err != nil {
return err
}
} else {
t := h.Copy()
- if _, err = t.Add(acc.sum); err != nil {
+ if _, _, _, err = t.Add(acc.sum); err != nil {
return err
}
acc.sum = t
@@ -719,8 +801,8 @@ func (acc *HistogramAvgAcc) ValueType() ValueType {
return NoValue
}
-func (acc *HistogramAvgAcc) HasIgnoredHistograms() bool {
- return false // HistogramAvg handles histograms; use ValueType() instead
+func (acc *HistogramAvgAcc) Warnings() warnings.Warnings {
+ return 0
}
func (acc *HistogramAvgAcc) Reset(f float64) {
@@ -768,8 +850,8 @@ func (l *LastAcc) ValueType() ValueType {
return NoValue
}
-func (l *LastAcc) HasIgnoredHistograms() bool {
- return false // Last handles histograms; use ValueType() instead
+func (l *LastAcc) Warnings() warnings.Warnings {
+ return 0
}
func (l *LastAcc) Reset(_ float64) {
@@ -818,12 +900,12 @@ func Quantile(q float64, points []float64) float64 {
return points[int(lowerIndex)]*(1-weight) + points[int(upperIndex)]*weight
}
-func histogramSum(current *histogram.FloatHistogram, histograms []*histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
+func histogramSum(current *histogram.FloatHistogram, histograms []*histogram.FloatHistogram) (*histogram.FloatHistogram, warnings.Warnings, error) {
if len(histograms) == 0 {
- return current, nil
+ return current, 0, nil
}
if current == nil && len(histograms) == 1 {
- return histograms[0].Copy(), nil
+ return histograms[0].Copy(), 0, nil
}
var histSum *histogram.FloatHistogram
if current != nil {
@@ -833,19 +915,26 @@ func histogramSum(current *histogram.FloatHistogram, histograms []*histogram.Flo
histograms = histograms[1:]
}
- var err error
+ var (
+ err error
+ warn warnings.Warnings
+ nhcbBoundsReconciled bool
+ )
for i := range histograms {
if histograms[i].Schema >= histSum.Schema {
- histSum, err = histSum.Add(histograms[i])
+ histSum, _, nhcbBoundsReconciled, err = histSum.Add(histograms[i])
} else {
t := histograms[i].Copy()
- histSum, err = t.Add(histSum)
+ histSum, _, nhcbBoundsReconciled, err = t.Add(histSum)
+ }
+ if nhcbBoundsReconciled {
+ warn |= warnings.WarnNHCBBoundsReconciledAgg
}
if err != nil {
- return nil, warnings.ConvertHistogramError(err)
+ return nil, warn, warnings.ConvertHistogramError(err)
}
}
- return histSum, nil
+ return histSum, warn, nil
}
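histogramSum now threads a warnings bitset back to callers alongside the error; a sketch of how an accumulator folds it in (h1 and h2 are placeholder histograms):

sum, warn, err := histogramSum(s.histSum, []*histogram.FloatHistogram{h1, h2})
if err != nil {
	return err // already converted via warnings.ConvertHistogramError
}
s.histSum = sum
s.warn |= warn // e.g. warnings.WarnNHCBBoundsReconciledAgg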
// compensatedSum returns the sum of the elements of the slice calculated with greater
diff --git a/vendor/github.com/thanos-io/promql-engine/engine/engine.go b/vendor/github.com/thanos-io/promql-engine/engine/engine.go
index 3a4c47b7dd1..6e92ca37c90 100644
--- a/vendor/github.com/thanos-io/promql-engine/engine/engine.go
+++ b/vendor/github.com/thanos-io/promql-engine/engine/engine.go
@@ -548,53 +548,54 @@ func (q *compatibilityQuery) Exec(ctx context.Context) (ret *promql.Result) {
return newErrResult(ret, err)
}
+ totalSteps := q.opts.TotalSteps()
series := make([]promql.Series, len(resultSeries))
for i, s := range resultSeries {
series[i].Metric = s
}
- totalSteps := q.opts.TotalSteps()
+
+ buf := make([]model.StepVector, q.opts.StepsBatch)
loop:
for {
select {
case <-ctx.Done():
return newErrResult(ret, ctx.Err())
default:
- r, err := q.Query.exec.Next(ctx)
+ n, err := q.Query.exec.Next(ctx, buf)
if err != nil {
return newErrResult(ret, err)
}
- if r == nil {
+ if n == 0 {
break loop
}
// Case where Series call might return nil, but samples are present.
// For example scalar(http_request_total) where http_request_total has multiple values.
- if len(series) == 0 && len(r) != 0 {
- series = make([]promql.Series, len(r[0].Samples))
+ if len(series) == 0 && n > 0 {
+ series = make([]promql.Series, len(buf[0].Samples))
}
- for _, vector := range r {
- for i, s := range vector.SampleIDs {
- if len(series[s].Floats) == 0 {
+ for i := range n {
+ vector := &buf[i]
+ for j, s := range vector.SampleIDs {
+ if series[s].Floats == nil {
series[s].Floats = make([]promql.FPoint, 0, totalSteps)
}
series[s].Floats = append(series[s].Floats, promql.FPoint{
T: vector.T,
- F: vector.Samples[i],
+ F: vector.Samples[j],
})
}
- for i, s := range vector.HistogramIDs {
- if len(series[s].Histograms) == 0 {
+ for j, s := range vector.HistogramIDs {
+ if series[s].Histograms == nil {
series[s].Histograms = make([]promql.HPoint, 0, totalSteps)
}
series[s].Histograms = append(series[s].Histograms, promql.HPoint{
T: vector.T,
- H: vector.Histograms[i],
+ H: vector.Histograms[j],
})
}
- q.Query.exec.GetPool().PutStepVector(vector)
}
- q.Query.exec.GetPool().PutVectors(r)
}
}
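The engine now drives operators through a caller-owned buffer instead of pool-managed batches; a sketch of the new contract, with op and process standing in for any operator and consumer:

buf := make([]model.StepVector, stepsBatch)
for {
	n, err := op.Next(ctx, buf) // fills up to len(buf) step vectors
	if err != nil {
		return err
	}
	if n == 0 {
		break // operator exhausted
	}
	for i := range buf[:n] {
		process(&buf[i]) // vectors stay owned by buf; no PutStepVector/PutVectors needed
	}
}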
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/count_values.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/count_values.go
index 7d527952c70..f218456acc5 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/count_values.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/count_values.go
@@ -20,7 +20,6 @@ import (
)
type countValuesOperator struct {
- pool *model.VectorPool
next model.VectorOperator
param string
@@ -34,16 +33,16 @@ type countValuesOperator struct {
counts []map[int]int
series []labels.Labels
- once sync.Once
+ once sync.Once
+ tempBuf []model.StepVector
}
-func NewCountValues(pool *model.VectorPool, next model.VectorOperator, param string, by bool, grouping []string, opts *query.Options) model.VectorOperator {
+func NewCountValues(next model.VectorOperator, param string, by bool, grouping []string, opts *query.Options) model.VectorOperator {
// Grouping labels need to be sorted in order for metric hashing to work.
// https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181
slices.Sort(grouping)
op := &countValuesOperator{
- pool: pool,
next: next,
param: param,
stepsBatch: opts.StepsBatch,
@@ -57,10 +56,6 @@ func (c *countValuesOperator) Explain() []model.VectorOperator {
return []model.VectorOperator{c.next}
}
-func (c *countValuesOperator) GetPool() *model.VectorPool {
- return c.pool
-}
-
func (c *countValuesOperator) String() string {
if c.by {
return fmt.Sprintf("[countValues] by (%v) - param (%v)", c.grouping, c.param)
@@ -74,36 +69,38 @@ func (c *countValuesOperator) Series(ctx context.Context) ([]labels.Labels, erro
return c.series, err
}
-func (c *countValuesOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (c *countValuesOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
var err error
c.once.Do(func() { err = c.initSeriesOnce(ctx) })
if err != nil {
- return nil, err
+ return 0, err
}
if c.curStep >= len(c.ts) {
- return nil, nil
+ return 0, nil
}
- batch := c.pool.GetVectorBatch()
- for range c.stepsBatch {
+ n := 0
+ maxSteps := min(c.stepsBatch, len(buf))
+
+ for range maxSteps {
if c.curStep >= len(c.ts) {
break
}
- sv := c.pool.GetStepVector(c.ts[c.curStep])
- for i, v := range c.counts[c.curStep] {
- sv.AppendSample(c.pool, uint64(i), float64(v))
+ buf[n] = model.StepVector{T: c.ts[c.curStep]}
+ for id, v := range c.counts[c.curStep] {
+ buf[n].AppendSample(uint64(id), float64(v))
}
- batch = append(batch, sv)
c.curStep++
+ n++
}
- return batch, nil
+ return n, nil
}
func (c *countValuesOperator) initSeriesOnce(ctx context.Context) error {
@@ -115,6 +112,11 @@ func (c *countValuesOperator) initSeriesOnce(ctx context.Context) error {
if err != nil {
return err
}
+
+ // Allocate outer slice for buffer; inner slices will be allocated by child operators
+ // or grow on demand.
+ c.tempBuf = make([]model.StepVector, c.stepsBatch)
+
var (
inputIdToHashBucket = make(map[int]uint64)
hashToBucketLabels = make(map[uint64]labels.Labels)
@@ -147,13 +149,14 @@ func (c *countValuesOperator) initSeriesOnce(ctx context.Context) error {
default:
}
- in, err := c.next.Next(ctx)
+ n, err := c.next.Next(ctx, c.tempBuf)
if err != nil {
return err
}
- if in == nil {
+ if n == 0 {
break
}
+ in := c.tempBuf[:n]
for i := range in {
ts = append(ts, in[i].T)
countPerHashbucket := make(map[uint64]map[string]int, len(inputIdToHashBucket))
@@ -195,7 +198,6 @@ func (c *countValuesOperator) initSeriesOnce(ctx context.Context) error {
}
counts = append(counts, countsPerOutputId)
}
- c.next.GetPool().PutVectors(in)
}
c.ts = ts
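With the pool removed, step vectors are built directly in the caller's buffer; a minimal sketch of the pattern used above (ts, seriesID, and value are placeholders):

sv := model.StepVector{T: ts} // fresh vector for this step
sv.AppendSample(seriesID, value)

// or, when reusing a buffer slot:
buf[n].Reset(ts)
buf[n].AppendSample(seriesID, value)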
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go
index 5d2477bef9f..05af86ddb98 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/hashaggregate.go
@@ -24,26 +24,26 @@ import (
)
type aggregate struct {
- next model.VectorOperator
- paramOp model.VectorOperator
- // params holds the aggregate parameter for each step.
- params []float64
- lastBatch []model.StepVector
-
- vectorPool *model.VectorPool
-
+ next model.VectorOperator
+ paramOp model.VectorOperator
by bool
labels []string
aggregation parser.ItemType
-
- once sync.Once
- tables []aggregateTable
- series []labels.Labels
- stepsBatch int
+ stepsBatch int
+
+ once sync.Once
+ series []labels.Labels
+ tables []aggregateTable
+ params []float64
+
+ lastBatch []model.StepVector
+ tempBuf []model.StepVector
+ paramBuf []model.StepVector
+ lastBatchBuf []model.StepVector
+ inputSeriesCount int
}
func NewHashAggregate(
- points *model.VectorPool,
next model.VectorOperator,
paramOp model.VectorOperator,
aggregation parser.ItemType,
@@ -60,15 +60,13 @@ func NewHashAggregate(
// https://github.com/prometheus/prometheus/blob/8ed39fdab1ead382a354e45ded999eb3610f8d5f/model/labels/labels.go#L162-L181
slices.Sort(labels)
a := &aggregate{
-
next: next,
paramOp: paramOp,
- params: make([]float64, opts.StepsBatch),
- vectorPool: points,
by: by,
- aggregation: aggregation,
labels: labels,
+ aggregation: aggregation,
stepsBatch: opts.StepsBatch,
+ params: make([]float64, opts.StepsBatch),
}
return telemetry.NewOperator(telemetry.NewTelemetry(a, opts), a), nil
@@ -99,83 +97,83 @@ func (a *aggregate) Series(ctx context.Context) ([]labels.Labels, error) {
return a.series, nil
}
-func (a *aggregate) GetPool() *model.VectorPool {
- return a.vectorPool
-}
-
-func (a *aggregate) Next(ctx context.Context) ([]model.StepVector, error) {
+func (a *aggregate) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
var err error
a.once.Do(func() { err = a.initializeTables(ctx) })
if err != nil {
- return nil, err
+ return 0, err
}
if a.paramOp != nil {
- args, err := a.paramOp.Next(ctx)
+ n, err := a.paramOp.Next(ctx, a.paramBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- for i := range args {
- a.params[i] = args[i].Samples[0]
+ for i := range n {
+ a.params[i] = a.paramBuf[i].Samples[0]
if sample := a.params[i]; math.IsNaN(sample) || sample < 0 || sample > 1 {
warnings.AddToContext(annotations.NewInvalidQuantileWarning(sample, posrange.PositionRange{}), ctx)
}
- a.paramOp.GetPool().PutStepVector(args[i])
}
- a.paramOp.GetPool().PutVectors(args)
}
for i, p := range a.params {
a.tables[i].reset(p)
}
+
+ // Track how many tables are populated during aggregation.
+ numTables := 0
if a.lastBatch != nil {
- a.aggregate(ctx, a.lastBatch)
+ numTables = len(a.lastBatch)
+ if warn := a.aggregate(a.lastBatch); warn != nil {
+ warnings.AddToContext(warn, ctx)
+ }
a.lastBatch = nil
}
+
for {
- next, err := a.next.Next(ctx)
+ n, err := a.next.Next(ctx, a.tempBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- if next == nil {
+ if n == 0 {
break
}
+ next := a.tempBuf[:n]
// Keep aggregating samples as long as timestamps of batches are equal.
currentTs := a.tables[0].timestamp()
if currentTs == math.MinInt64 || next[0].T == currentTs {
- a.aggregate(ctx, next)
+ numTables = n
+ if warn := a.aggregate(next); warn != nil {
+ warnings.AddToContext(warn, ctx)
+ }
continue
}
- a.lastBatch = next
+ a.lastBatch = a.lastBatchBuf[:n]
+ copy(a.lastBatch, next)
break
}
- if a.tables[0].timestamp() == math.MinInt64 {
- return nil, nil
- }
-
- result := a.vectorPool.GetVectorBatch()
- for i := range a.tables {
- if a.tables[i].timestamp() == math.MinInt64 {
- break
- }
- result = append(result, a.tables[i].toVector(ctx, a.vectorPool))
+ n := min(numTables, len(buf))
+ for i := range n {
+ buf[i].Reset(a.tables[i].timestamp())
+ a.tables[i].populateVector(ctx, &buf[i])
}
- return result, nil
+ return n, nil
}
-func (a *aggregate) aggregate(ctx context.Context, in []model.StepVector) {
+func (a *aggregate) aggregate(in []model.StepVector) error {
+ var err error
for i, vector := range in {
- a.tables[i].aggregate(ctx, vector)
- a.next.GetPool().PutStepVector(vector)
+ err = warnings.Coalesce(err, a.tables[i].aggregate(vector))
}
- a.next.GetPool().PutVectors(in)
+ return err
}
func (a *aggregate) initializeTables(ctx context.Context) error {
@@ -195,16 +193,25 @@ func (a *aggregate) initializeTables(ctx context.Context) error {
}
a.tables = tables
a.series = series
- a.vectorPool.SetStepSize(len(a.series))
+
+ // Allocate outer slice for buffers; inner slices will be allocated by child operators
+ // or grow on demand. This avoids over-allocation when aggregating many series to few.
+ a.tempBuf = make([]model.StepVector, a.stepsBatch)
+ a.lastBatchBuf = make([]model.StepVector, a.stepsBatch)
+ if a.paramOp != nil {
+ a.paramBuf = make([]model.StepVector, len(a.params))
+ }
return nil
}
func (a *aggregate) initializeVectorizedTables(ctx context.Context) ([]aggregateTable, []labels.Labels, error) {
// perform initialization of the underlying operator even if we are aggregating the labels away
- if _, err := a.next.Series(ctx); err != nil {
+ series, err := a.next.Series(ctx)
+ if err != nil {
return nil, nil, err
}
+ a.inputSeriesCount = len(series)
tables, err := newVectorizedTables(a.stepsBatch, a.aggregation)
if errors.Is(err, parse.ErrNotSupportedExpr) {
return a.initializeScalarTables(ctx)
@@ -222,6 +229,7 @@ func (a *aggregate) initializeScalarTables(ctx context.Context) ([]aggregateTabl
if err != nil {
return nil, nil, err
}
+ a.inputSeriesCount = len(series)
var (
// inputCache is an index from input seriesID to output seriesID.
inputCache = make([]uint64, len(series))
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go
index 4a2ea55d776..e1c825a08fe 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/khashaggregate.go
@@ -26,26 +26,25 @@ import (
)
type kAggregate struct {
- next model.VectorOperator
- paramOp model.VectorOperator
- // params holds the aggregate parameter for each step.
- params []float64
-
- vectorPool *model.VectorPool
-
+ next model.VectorOperator
+ paramOp model.VectorOperator
by bool
labels []string
aggregation parser.ItemType
+ stepsBatch int
+ compare func(float64, float64) bool
once sync.Once
series []labels.Labels
inputToHeap []*samplesHeap
heaps []*samplesHeap
- compare func(float64, float64) bool
+ params []float64
+
+ tempBuf []model.StepVector
+ paramBuf []model.StepVector
}
func NewKHashAggregate(
- points *model.VectorPool,
next model.VectorOperator,
paramOp model.VectorOperator,
aggregation parser.ItemType,
@@ -72,54 +71,63 @@ func NewKHashAggregate(
op := &kAggregate{
next: next,
- vectorPool: points,
by: by,
aggregation: aggregation,
labels: labels,
paramOp: paramOp,
compare: compare,
params: make([]float64, opts.StepsBatch),
+ stepsBatch: opts.StepsBatch,
}
return telemetry.NewOperator(telemetry.NewTelemetry(op, opts), op), nil
}
-func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) {
+func (a *kAggregate) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
- in, err := a.next.Next(ctx)
+ var err error
+ a.once.Do(func() { err = a.init(ctx) })
+ if err != nil {
+ return 0, err
+ }
+
+ nIn, err := a.next.Next(ctx, a.tempBuf)
if err != nil {
- return nil, err
+ return 0, err
}
+ if nIn == 0 {
+ return 0, nil
+ }
+ in := a.tempBuf[:nIn]
- args, err := a.paramOp.Next(ctx)
+ nParam, err := a.paramOp.Next(ctx, a.paramBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- for i := range args {
- a.params[i] = args[i].Samples[0]
- a.paramOp.GetPool().PutStepVector(args[i])
+ for i := range nParam {
+ a.params[i] = a.paramBuf[i].Samples[0]
val := a.params[i]
switch a.aggregation {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
if math.IsNaN(val) {
- return nil, errors.New("Parameter value is NaN")
+ return 0, errors.New("Parameter value is NaN")
}
if val > math.MaxInt64 {
- return nil, errors.Newf("Scalar value %v overflows int64", val)
+ return 0, errors.Newf("Scalar value %v overflows int64", val)
}
if val < math.MinInt64 {
- return nil, errors.Newf("Scalar value %v underflows int64", val)
+ return 0, errors.Newf("Scalar value %v underflows int64", val)
}
case parser.LIMIT_RATIO:
if math.IsNaN(val) {
- return nil, errors.Newf("Ratio value is NaN")
+ return 0, errors.Newf("Ratio value is NaN")
}
switch {
case val < -1.0:
@@ -132,22 +140,14 @@ func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) {
a.params[i] = val
}
}
- a.paramOp.GetPool().PutVectors(args)
- if in == nil {
- return nil, nil
- }
-
- a.once.Do(func() { err = a.init(ctx) })
- if err != nil {
- return nil, err
- }
-
- result := a.vectorPool.GetVectorBatch()
- for i, vector := range in {
+ n := 0
+ for i := 0; i < nIn && n < len(buf); i++ {
+ vector := in[i]
// Skip steps where the argument is less than or equal to 0; limit_ratio is an exception.
if (a.aggregation != parser.LIMIT_RATIO && int(a.params[i]) <= 0) || (a.aggregation == parser.LIMIT_RATIO && a.params[i] == 0) {
- result = append(result, a.GetPool().GetStepVector(vector.T))
+ buf[n] = model.StepVector{T: vector.T}
+ n++
continue
}
if a.aggregation != parser.LIMITK && a.aggregation != parser.LIMIT_RATIO && len(vector.Histograms) > 0 {
@@ -163,12 +163,12 @@ func (a *kAggregate) Next(ctx context.Context) ([]model.StepVector, error) {
k = int(a.params[i])
}
- a.aggregate(vector.T, &result, k, ratio, vector.SampleIDs, vector.Samples, vector.HistogramIDs, vector.Histograms)
- a.next.GetPool().PutStepVector(vector)
+ buf[n].Reset(vector.T)
+ a.aggregate(&buf[n], k, ratio, vector.SampleIDs, vector.Samples, vector.HistogramIDs, vector.Histograms)
+ n++
}
- a.next.GetPool().PutVectors(in)
- return result, nil
+ return n, nil
}
func (a *kAggregate) Series(ctx context.Context) ([]labels.Labels, error) {
@@ -180,10 +180,6 @@ func (a *kAggregate) Series(ctx context.Context) ([]labels.Labels, error) {
return a.series, nil
}
-func (a *kAggregate) GetPool() *model.VectorPool {
- return a.vectorPool
-}
-
func (a *kAggregate) String() string {
if a.by {
return fmt.Sprintf("[kaggregate] %v by (%v)", a.aggregation.String(), a.labels)
@@ -222,8 +218,13 @@ func (a *kAggregate) init(ctx context.Context) error {
}
a.inputToHeap = append(a.inputToHeap, h)
}
- a.vectorPool.SetStepSize(len(series))
a.series = series
+
+ // Allocate outer slice for buffers; inner slices will be allocated by child operators
+ // or grow on demand. This avoids over-allocation when aggregating many series to few.
+ a.tempBuf = make([]model.StepVector, a.stepsBatch)
+ a.paramBuf = make([]model.StepVector, a.stepsBatch)
+
return nil
}
@@ -232,7 +233,7 @@ func (a *kAggregate) init(ctx context.Context) error {
// bottomk: gives the 'k' smallest element based on the sample values
// limitk: samples the first 'k' element from the given timeseries (has native histogram support)
// limit_ratio: deterministically samples out the 'ratio' amount of the samples from the given timeseries (also has native histogram support).
-func (a *kAggregate) aggregate(t int64, result *[]model.StepVector, k int, ratio float64, sampleIDs []uint64, samples []float64, histogramIDs []uint64, histograms []*histogram.FloatHistogram) {
+func (a *kAggregate) aggregate(out *model.StepVector, k int, ratio float64, sampleIDs []uint64, samples []float64, histogramIDs []uint64, histograms []*histogram.FloatHistogram) {
groupsRemaining := len(a.heaps)
switch a.aggregation {
@@ -332,16 +333,24 @@ func (a *kAggregate) aggregate(t int64, result *[]model.StepVector, k int, ratio
}
}
- s := a.vectorPool.GetStepVector(t)
+ // Add results from all heaps to the output step vector.
+ inputSize := len(sampleIDs) + len(histogramIDs)
+ hint := inputSize
+ if k > 0 && k*len(a.heaps) < inputSize {
+ hint = k * len(a.heaps)
+ } else if ratio != 0 {
+ estimated := int(float64(inputSize) * math.Abs(ratio))
+ if estimated < hint {
+ hint = estimated
+ }
+ }
for _, sampleHeap := range a.heaps {
// for topk and bottomk the heap keeps the lowest value on top, so reverse it.
if a.aggregation == parser.TOPK || a.aggregation == parser.BOTTOMK {
sort.Sort(sort.Reverse(sampleHeap))
}
- sampleHeap.addSamplesToPool(a.vectorPool, &s)
+ sampleHeap.addSamplesToPool(out, hint)
}
-
- *result = append(*result, s)
}
type entry struct {
@@ -360,12 +369,12 @@ func (s samplesHeap) Len() int {
return len(s.entries)
}
-func (s *samplesHeap) addSamplesToPool(pool *model.VectorPool, stepVector *model.StepVector) {
+func (s *samplesHeap) addSamplesToPool(stepVector *model.StepVector, hint int) {
for _, e := range s.entries {
if e.histogramSample == nil {
- stepVector.AppendSample(pool, e.sId, e.total)
+ stepVector.AppendSampleWithSizeHint(e.sId, e.total, hint)
} else {
- stepVector.AppendHistogram(pool, e.histId, e.histogramSample)
+ stepVector.AppendHistogramWithSizeHint(e.histId, e.histogramSample, hint)
}
}
s.entries = s.entries[:0]
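The `hint` computed above caps per-step allocations for topk/bottomk/limitk at `k * len(a.heaps)` and for limit_ratio at `|ratio|` of the input, instead of reserving the full input size. A standalone sketch of that estimate (the function name is illustrative; the real code feeds the hint into `AppendSampleWithSizeHint`):

```go
package main

import (
	"fmt"
	"math"
)

// outputHint estimates how many samples a topk/bottomk/limitk step can emit:
// at most k per group, never more than the input size. For limit_ratio the
// bound is |ratio| of the input instead.
func outputHint(inputSize, k, numGroups int, ratio float64) int {
	hint := inputSize
	if k > 0 && k*numGroups < inputSize {
		hint = k * numGroups
	} else if ratio != 0 {
		if est := int(float64(inputSize) * math.Abs(ratio)); est < hint {
			hint = est
		}
	}
	return hint
}

func main() {
	fmt.Println(outputHint(1000, 5, 3, 0))   // 15: topk(5) over 3 groups
	fmt.Println(outputHint(1000, 0, 3, 0.1)) // 100: limit_ratio(0.1)
	fmt.Println(outputHint(10, 5, 3, 0))     // 10: input smaller than k*groups
}
```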
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go
index 4c9845968fd..0e182e0e383 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/scalar_table.go
@@ -14,8 +14,10 @@ import (
"github.com/thanos-io/promql-engine/warnings"
"github.com/efficientgo/core/errors"
+ "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -26,10 +28,9 @@ type aggregateTable interface {
// If the table is empty, it returns math.MinInt64.
timestamp() int64
// aggregate aggregates the given vector into the table.
- aggregate(ctx context.Context, vector model.StepVector)
- // toVector writes out the accumulated result to the given vector and
- // resets the table.
- toVector(ctx context.Context, pool *model.VectorPool) model.StepVector
+ aggregate(vector model.StepVector) error
+ // populateVector writes out the accumulated result into the provided vector.
+ populateVector(ctx context.Context, vec *model.StepVector)
// reset resets the table with a new aggregation argument.
// The argument is currently used for quantile aggregation.
reset(arg float64)
@@ -75,23 +76,31 @@ func newScalarTable(inputSampleIDs []uint64, outputs []*model.Series, aggregatio
}, nil
}
-func (t *scalarTable) aggregate(ctx context.Context, vector model.StepVector) {
+func (t *scalarTable) aggregate(vector model.StepVector) error {
t.ts = vector.T
+ var err error
for i := range vector.Samples {
- outputSampleID := t.inputs[vector.SampleIDs[i]]
- output := t.outputs[outputSampleID]
- if err := t.accumulators[output.ID].Add(vector.Samples[i], nil); err != nil {
- warnings.AddToContext(err, ctx)
- }
+ err = warnings.Coalesce(err, t.addSample(vector.SampleIDs[i], vector.Samples[i]))
}
for i := range vector.Histograms {
- outputSampleID := t.inputs[vector.HistogramIDs[i]]
- output := t.outputs[outputSampleID]
- if err := t.accumulators[output.ID].Add(0, vector.Histograms[i]); err != nil {
- warnings.AddToContext(err, ctx)
- }
+ err = warnings.Coalesce(err, t.addHistogram(vector.HistogramIDs[i], vector.Histograms[i]))
}
+ return err
+}
+
+func (t *scalarTable) addSample(sampleID uint64, sample float64) error {
+ outputSampleID := t.inputs[sampleID]
+ output := t.outputs[outputSampleID]
+
+ return t.accumulators[output.ID].Add(sample, nil)
+}
+
+func (t *scalarTable) addHistogram(sampleID uint64, h *histogram.FloatHistogram) error {
+ outputSampleID := t.inputs[sampleID]
+ output := t.outputs[outputSampleID]
+
+ return t.accumulators[output.ID].Add(0, h)
}
func (t *scalarTable) reset(arg float64) {
@@ -101,28 +110,44 @@ func (t *scalarTable) reset(arg float64) {
t.ts = math.MinInt64
}
-func (t *scalarTable) toVector(ctx context.Context, pool *model.VectorPool) model.StepVector {
- result := pool.GetStepVector(t.ts)
+func (t *scalarTable) populateVector(ctx context.Context, vec *model.StepVector) {
+ hint := len(t.outputs)
for i, v := range t.outputs {
acc := t.accumulators[i]
- if acc.HasIgnoredHistograms() {
- warnings.AddToContext(annotations.HistogramIgnoredInAggregationInfo, ctx)
- }
+ emitAccumulatorWarnings(ctx, acc.Warnings())
switch acc.ValueType() {
- case compute.NoValue:
+ case compute.NoValue, compute.MixedTypeValue:
+ // MixedTypeValue: warning already emitted by emitAccumulatorWarnings
+ // for accumulators that track mixed floats/histograms.
continue
case compute.SingleTypeValue:
f, h := acc.Value()
if h == nil {
- result.AppendSample(pool, v.ID, f)
+ vec.AppendSampleWithSizeHint(v.ID, f, hint)
} else {
- result.AppendHistogram(pool, v.ID, h)
+ vec.AppendHistogramWithSizeHint(v.ID, h, hint)
}
- case compute.MixedTypeValue:
- warnings.AddToContext(warnings.MixedFloatsHistogramsAggWarning, ctx)
}
}
- return result
+}
+
+// emitAccumulatorWarnings converts accumulator warning flags to annotations and adds them to context.
+func emitAccumulatorWarnings(ctx context.Context, warn warnings.Warnings) {
+ if warn == 0 {
+ return
+ }
+ if warn&warnings.WarnHistogramIgnoredInAggregation != 0 {
+ warnings.AddToContext(annotations.HistogramIgnoredInAggregationInfo, ctx)
+ }
+ if warn&warnings.WarnMixedFloatsHistograms != 0 {
+ warnings.AddToContext(warnings.MixedFloatsHistogramsAggWarning, ctx)
+ }
+ if warn&warnings.WarnCounterResetCollision != 0 {
+ warnings.AddToContext(annotations.NewHistogramCounterResetCollisionWarning(posrange.PositionRange{}, annotations.HistogramAgg), ctx)
+ }
+ if warn&warnings.WarnNHCBBoundsReconciledAgg != 0 {
+ warnings.AddToContext(annotations.NewMismatchedCustomBucketsHistogramsInfo(posrange.PositionRange{}, annotations.HistogramAgg), ctx)
+ }
}
func hashMetric(
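Judging from `emitAccumulatorWarnings`, accumulators now report conditions as a `warnings.Warnings` bit set that is translated into annotations once per flush rather than once per sample. A sketch of that flag pattern, assuming `Warnings` is an integer bit set (all names below are illustrative, not the engine's):

```go
package main

import "fmt"

// Warnings is a bit set of warning conditions.
type Warnings uint32

const (
	WarnHistogramIgnored Warnings = 1 << iota
	WarnMixedFloatsHistograms
	WarnCounterResetCollision
)

// Coalesce merges warning sets; the error-typed warnings.Coalesce in the
// diff plays the same role for errors carried through aggregate().
func Coalesce(a, b Warnings) Warnings { return a | b }

// emit reports each warning class at most once, regardless of how many
// samples triggered it.
func emit(w Warnings) {
	if w == 0 {
		return
	}
	if w&WarnHistogramIgnored != 0 {
		fmt.Println("info: histogram ignored in aggregation")
	}
	if w&WarnMixedFloatsHistograms != 0 {
		fmt.Println("warn: mixed floats and histograms")
	}
}

func main() {
	var w Warnings
	w = Coalesce(w, WarnHistogramIgnored)
	w = Coalesce(w, WarnMixedFloatsHistograms)
	emit(w) // each class reported once per flush, not once per sample
}
```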
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/vector_table.go b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/vector_table.go
index 6d7d81c237a..d5deec49e28 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/aggregate/vector_table.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/aggregate/vector_table.go
@@ -11,11 +11,9 @@ import (
"github.com/thanos-io/promql-engine/compute"
"github.com/thanos-io/promql-engine/execution/model"
"github.com/thanos-io/promql-engine/execution/parse"
- "github.com/thanos-io/promql-engine/warnings"
"github.com/efficientgo/core/errors"
"github.com/prometheus/prometheus/promql/parser"
- "github.com/prometheus/prometheus/util/annotations"
)
type vectorTable struct {
@@ -47,32 +45,25 @@ func (t *vectorTable) timestamp() int64 {
return t.ts
}
-func (t *vectorTable) aggregate(ctx context.Context, vector model.StepVector) {
+func (t *vectorTable) aggregate(vector model.StepVector) error {
t.ts = vector.T
- if err := t.accumulator.AddVector(vector.Samples, vector.Histograms); err != nil {
- warnings.AddToContext(err, ctx)
- }
+ return t.accumulator.AddVector(vector.Samples, vector.Histograms)
}
-func (t *vectorTable) toVector(ctx context.Context, pool *model.VectorPool) model.StepVector {
- result := pool.GetStepVector(t.ts)
- if t.accumulator.HasIgnoredHistograms() {
- warnings.AddToContext(annotations.HistogramIgnoredInAggregationInfo, ctx)
- }
+func (t *vectorTable) populateVector(ctx context.Context, vec *model.StepVector) {
+ emitAccumulatorWarnings(ctx, t.accumulator.Warnings())
switch t.accumulator.ValueType() {
- case compute.NoValue:
- return result
+ case compute.NoValue, compute.MixedTypeValue:
+ // MixedTypeValue: warning already emitted by emitAccumulatorWarnings
+ return
case compute.SingleTypeValue:
v, h := t.accumulator.Value()
if h == nil {
- result.AppendSample(pool, 0, v)
+ vec.AppendSample(0, v)
} else {
- result.AppendHistogram(pool, 0, h)
+ vec.AppendHistogram(0, h)
}
- case compute.MixedTypeValue:
- warnings.AddToContext(warnings.MixedFloatsHistogramsAggWarning, ctx)
}
- return result
}
func (t *vectorTable) reset(p float64) {
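Both table implementations now share the same two-phase shape: `aggregate` returns an error instead of writing warnings to the context, and `populateVector` appends into a caller-owned vector instead of drawing one from a pool. A condensed sketch with stand-in types:

```go
package main

import "fmt"

type StepVector struct {
	T         int64
	SampleIDs []uint64
	Samples   []float64
}

func (sv *StepVector) AppendSample(id uint64, v float64) {
	sv.SampleIDs = append(sv.SampleIDs, id)
	sv.Samples = append(sv.Samples, v)
}

// aggregateTable mirrors the reshaped interface from the hunks above.
type aggregateTable interface {
	aggregate(vec StepVector) error
	populateVector(out *StepVector)
}

// sumTable is a toy single-output table.
type sumTable struct {
	ts  int64
	sum float64
}

var _ aggregateTable = (*sumTable)(nil)

func (t *sumTable) aggregate(vec StepVector) error {
	t.ts = vec.T
	for _, s := range vec.Samples {
		t.sum += s
	}
	return nil
}

func (t *sumTable) populateVector(out *StepVector) {
	out.T = t.ts
	out.AppendSample(0, t.sum)
}

func main() {
	t := &sumTable{}
	_ = t.aggregate(StepVector{T: 1, Samples: []float64{1, 2, 3}})
	var out StepVector
	t.populateVector(&out)
	fmt.Println(out.T, out.Samples) // 1 [6]
}
```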
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go b/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go
index 65c63cd14c0..683e414b778 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/binary/scalar.go
@@ -21,23 +21,22 @@ import (
// scalarOperator evaluates expressions where one operand is a scalarOperator.
type scalarOperator struct {
- seriesOnce sync.Once
- series []labels.Labels
-
- pool *model.VectorPool
- lhs model.VectorOperator
- rhs model.VectorOperator
- opType parser.ItemType
-
- // If true then return the comparison result as 0/1.
+ lhs model.VectorOperator
+ rhs model.VectorOperator
+ lhsType parser.ValueType
+ rhsType parser.ValueType
+ opType parser.ItemType
returnBool bool
+ stepsBatch int
+
+ once sync.Once
+ series []labels.Labels
- lhsType parser.ValueType
- rhsType parser.ValueType
+ lhsBuf []model.StepVector
+ rhsBuf []model.StepVector
}
func NewScalar(
- pool *model.VectorPool,
lhs model.VectorOperator,
rhs model.VectorOperator,
lhsType parser.ValueType,
@@ -47,13 +46,13 @@ func NewScalar(
opts *query.Options,
) (model.VectorOperator, error) {
op := &scalarOperator{
- pool: pool,
lhs: lhs,
rhs: rhs,
lhsType: lhsType,
rhsType: rhsType,
opType: opType,
returnBool: returnBool,
+ stepsBatch: opts.StepsBatch,
}
return telemetry.NewOperator(telemetry.NewTelemetry(op, opts), op), nil
@@ -65,7 +64,7 @@ func (o *scalarOperator) Explain() (next []model.VectorOperator) {
func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) {
var err error
- o.seriesOnce.Do(func() { err = o.loadSeries(ctx) })
+ o.once.Do(func() { err = o.loadSeries(ctx) })
if err != nil {
return nil, err
}
@@ -76,64 +75,55 @@ func (o *scalarOperator) String() string {
return fmt.Sprintf("[vectorScalarBinary] %s", parser.ItemTypeStr[o.opType])
}
-func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *scalarOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
var err error
- o.seriesOnce.Do(func() { err = o.loadSeries(ctx) })
+ o.once.Do(func() { err = o.loadSeries(ctx) })
if err != nil {
- return nil, err
+ return 0, err
}
- var lhs []model.StepVector
+ var lhsN int
var lerrChan = make(chan error, 1)
go func() {
var err error
- lhs, err = o.lhs.Next(ctx)
+ lhsN, err = o.lhs.Next(ctx, o.lhsBuf)
if err != nil {
lerrChan <- err
}
close(lerrChan)
}()
- rhs, rerr := o.rhs.Next(ctx)
+ rhsN, rerr := o.rhs.Next(ctx, o.rhsBuf)
lerr := <-lerrChan
if rerr != nil {
- return nil, rerr
+ return 0, rerr
}
if lerr != nil {
- return nil, lerr
+ return 0, lerr
}
// TODO(fpetkovski): When one operator becomes empty,
// we might want to drain or close the other one.
// We don't have a concept of closing an operator yet.
- if len(lhs) == 0 || len(rhs) == 0 {
- return nil, nil
+ if lhsN == 0 || rhsN == 0 {
+ return 0, nil
}
- batch := o.pool.GetVectorBatch()
- for i := range lhs {
- if i < len(rhs) {
- step := o.execBinaryOperation(ctx, lhs[i], rhs[i])
- batch = append(batch, step)
- o.rhs.GetPool().PutStepVector(rhs[i])
- }
- o.lhs.GetPool().PutStepVector(lhs[i])
- }
- o.lhs.GetPool().PutVectors(lhs)
- o.rhs.GetPool().PutVectors(rhs)
+ n := 0
+ minN := min(rhsN, lhsN)
- return batch, nil
-
-}
+ for i := 0; i < minN && n < len(buf); i++ {
+ o.execBinaryOperation(ctx, o.lhsBuf[i], o.rhsBuf[i], &buf[n])
+ n++
+ }
-func (o *scalarOperator) GetPool() *model.VectorPool {
- return o.pool
+ return n, nil
}
func (o *scalarOperator) loadSeries(ctx context.Context) error {
@@ -161,12 +151,37 @@ func (o *scalarOperator) loadSeries(ctx context.Context) error {
}
o.series = series
+
+ // Pre-allocate buffers with appropriate inner slice capacities.
+ // One side is a scalar (1 sample), the other is a vector (len(vectorSeries) samples).
+ o.lhsBuf = make([]model.StepVector, o.stepsBatch)
+ o.rhsBuf = make([]model.StepVector, o.stepsBatch)
+
+ var lhsSeriesCount, rhsSeriesCount int
+ if o.lhsType == parser.ValueTypeScalar {
+ lhsSeriesCount = 1
+ rhsSeriesCount = len(vectorSeries)
+ } else {
+ lhsSeriesCount = len(vectorSeries)
+ rhsSeriesCount = 1
+ }
+
+ // Pre-allocate float sample slices; histogram slices will grow on demand.
+ for i := range o.lhsBuf {
+ o.lhsBuf[i].SampleIDs = make([]uint64, 0, lhsSeriesCount)
+ o.lhsBuf[i].Samples = make([]float64, 0, lhsSeriesCount)
+ }
+ for i := range o.rhsBuf {
+ o.rhsBuf[i].SampleIDs = make([]uint64, 0, rhsSeriesCount)
+ o.rhsBuf[i].Samples = make([]float64, 0, rhsSeriesCount)
+ }
+
return nil
}
-func (o *scalarOperator) execBinaryOperation(ctx context.Context, lhs, rhs model.StepVector) model.StepVector {
+func (o *scalarOperator) execBinaryOperation(ctx context.Context, lhs, rhs model.StepVector, step *model.StepVector) {
ts := lhs.T
- step := o.pool.GetStepVector(ts)
+ step.Reset(ts)
scalar, other := lhs, rhs
if o.lhsType != parser.ValueTypeScalar {
@@ -179,18 +194,23 @@ func (o *scalarOperator) execBinaryOperation(ctx context.Context, lhs, rhs model
keep bool
err error
)
+ var warn warnings.Warnings
+ sampleHint := len(other.Samples)
for i, otherVal := range other.Samples {
scalarVal := scalar.Samples[0]
if o.lhsType == parser.ValueTypeScalar {
- v, _, keep, err = binOp(o.opType, scalarVal, otherVal, nil, nil)
+ v, _, keep, warn, err = binOp(o.opType, scalarVal, otherVal, nil, nil)
} else {
- v, _, keep, err = binOp(o.opType, otherVal, scalarVal, nil, nil)
+ v, _, keep, warn, err = binOp(o.opType, otherVal, scalarVal, nil, nil)
}
if err != nil {
warnings.AddToContext(err, ctx)
continue
}
+ if warn != 0 {
+ emitBinaryOpWarnings(ctx, warn, o.opType)
+ }
// in comparison operations between scalars and vectors, the vectors are filtered, regardless of whether the scalar is on the lhs or the rhs
if keep && o.opType.IsComparisonOperator() && (o.lhsType == parser.ValueTypeVector || o.rhsType == parser.ValueTypeVector) {
v = otherVal
@@ -203,22 +223,27 @@ func (o *scalarOperator) execBinaryOperation(ctx context.Context, lhs, rhs model
} else if !keep {
continue
}
- step.AppendSample(o.pool, other.SampleIDs[i], v)
+ step.AppendSampleWithSizeHint(other.SampleIDs[i], v, sampleHint)
}
+ histogramHint := len(other.Histograms)
for i, otherVal := range other.Histograms {
scalarVal := scalar.Samples[0]
if o.lhsType == parser.ValueTypeScalar {
- _, h, _, err = binOp(o.opType, scalarVal, 0., nil, otherVal)
+ _, h, keep, warn, err = binOp(o.opType, scalarVal, 0., nil, otherVal)
} else {
- _, h, _, err = binOp(o.opType, 0., scalarVal, otherVal, nil)
+ _, h, keep, warn, err = binOp(o.opType, 0., scalarVal, otherVal, nil)
}
if err != nil {
warnings.AddToContext(err, ctx)
continue
}
- step.AppendHistogram(o.pool, other.HistogramIDs[i], h)
+ if warn != 0 {
+ emitBinaryOpWarnings(ctx, warn, o.opType)
+ }
+ if !keep {
+ continue
+ }
+ step.AppendHistogramWithSizeHint(other.HistogramIDs[i], h, histogramHint)
}
-
- return step
}
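The scalar operator still pulls both sides concurrently; only the buffering changed. A sketch of the goroutine-plus-buffered-error-channel shape the diff preserves (the `next` function here is a placeholder for a child operator):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// next is a stand-in for a child operator's Next call.
func next(ctx context.Context, side string) (int, error) {
	if side == "" {
		return 0, errors.New("empty side")
	}
	return len(side), nil
}

// pullBoth runs the left side on a goroutine while the right side runs on
// the calling goroutine, then joins on a one-slot error channel.
func pullBoth(ctx context.Context) (int, int, error) {
	var lhsN int
	lerr := make(chan error, 1)
	go func() {
		var err error
		lhsN, err = next(ctx, "lhs")
		if err != nil {
			lerr <- err
		}
		close(lerr)
	}()
	rhsN, rerr := next(ctx, "rhs")
	lhsErr := <-lerr // join: only safe to read lhsN after this receive
	if rerr != nil {
		return 0, 0, rerr
	}
	if lhsErr != nil {
		return 0, 0, lhsErr
	}
	return lhsN, rhsN, nil
}

func main() {
	l, r, err := pullBoth(context.Background())
	fmt.Println(l, r, err)
}
```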
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/binary/utils.go b/vendor/github.com/thanos-io/promql-engine/execution/binary/utils.go
index e5d4cf3f741..a11557cadf4 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/binary/utils.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/binary/utils.go
@@ -4,15 +4,16 @@
package binary
import (
+ "context"
"fmt"
"math"
"github.com/thanos-io/promql-engine/warnings"
- "github.com/efficientgo/core/errors"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -56,84 +57,136 @@ func shouldDropMetricName(op parser.ItemType, returnBool bool) bool {
}
// binOp evaluates a binary operation between two values.
-func binOp(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
+// Returns: value, histogram, keep, warnings, error.
+func binOp(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
switch {
case hlhs == nil && hrhs == nil:
{
switch op {
case parser.ADD:
- return lhs + rhs, nil, true, nil
+ return lhs + rhs, nil, true, 0, nil
case parser.SUB:
- return lhs - rhs, nil, true, nil
+ return lhs - rhs, nil, true, 0, nil
case parser.MUL:
- return lhs * rhs, nil, true, nil
+ return lhs * rhs, nil, true, 0, nil
case parser.DIV:
- return lhs / rhs, nil, true, nil
+ return lhs / rhs, nil, true, 0, nil
case parser.POW:
- return math.Pow(lhs, rhs), nil, true, nil
+ return math.Pow(lhs, rhs), nil, true, 0, nil
case parser.MOD:
- return math.Mod(lhs, rhs), nil, true, nil
+ return math.Mod(lhs, rhs), nil, true, 0, nil
case parser.EQLC:
- return lhs, nil, lhs == rhs, nil
+ return lhs, nil, lhs == rhs, 0, nil
case parser.NEQ:
- return lhs, nil, lhs != rhs, nil
+ return lhs, nil, lhs != rhs, 0, nil
case parser.GTR:
- return lhs, nil, lhs > rhs, nil
+ return lhs, nil, lhs > rhs, 0, nil
case parser.LSS:
- return lhs, nil, lhs < rhs, nil
+ return lhs, nil, lhs < rhs, 0, nil
case parser.GTE:
- return lhs, nil, lhs >= rhs, nil
+ return lhs, nil, lhs >= rhs, 0, nil
case parser.LTE:
- return lhs, nil, lhs <= rhs, nil
+ return lhs, nil, lhs <= rhs, 0, nil
case parser.ATAN2:
- return math.Atan2(lhs, rhs), nil, true, nil
+ return math.Atan2(lhs, rhs), nil, true, 0, nil
}
}
case hlhs == nil && hrhs != nil:
{
switch op {
case parser.MUL:
- return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil
+ return 0, hrhs.Copy().Mul(lhs).Compact(0), true, 0, nil
case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.IncompatibleTypesInBinOpInfo
+ return 0, nil, false, warnings.WarnIncompatibleTypesInBinOp, nil
}
}
case hlhs != nil && hrhs == nil:
{
switch op {
case parser.MUL:
- return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil
+ return 0, hlhs.Copy().Mul(rhs).Compact(0), true, 0, nil
case parser.DIV:
- return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil
+ return 0, hlhs.Copy().Div(rhs).Compact(0), true, 0, nil
case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.IncompatibleTypesInBinOpInfo
+ return 0, nil, false, warnings.WarnIncompatibleTypesInBinOp, nil
}
}
case hlhs != nil && hrhs != nil:
{
switch op {
case parser.ADD:
- res, err := hlhs.Copy().Add(hrhs)
+ res, counterResetCollision, nhcbBoundsReconciled, err := hlhs.Copy().Add(hrhs)
if err != nil {
- return 0, nil, false, warnings.ConvertHistogramError(err)
+ return 0, nil, false, 0, err
}
- return 0, res.Compact(0), true, nil
+ var warn warnings.Warnings
+ if counterResetCollision {
+ warn |= warnings.WarnCounterResetCollision
+ }
+ if nhcbBoundsReconciled {
+ warn |= warnings.WarnNHCBBoundsReconciled
+ }
+ return 0, res.Compact(0), true, warn, nil
case parser.SUB:
- res, err := hlhs.Copy().Sub(hrhs)
+ res, counterResetCollision, nhcbBoundsReconciled, err := hlhs.Copy().Sub(hrhs)
if err != nil {
- return 0, nil, false, warnings.ConvertHistogramError(err)
+ return 0, nil, false, 0, err
+ }
+ var warn warnings.Warnings
+ if counterResetCollision {
+ warn |= warnings.WarnCounterResetCollision
+ }
+ if nhcbBoundsReconciled {
+ warn |= warnings.WarnNHCBBoundsReconciled
}
- return 0, res.Compact(0), true, nil
+ return 0, res.Compact(0), true, warn, nil
case parser.EQLC:
// This operation expects that both histograms are compacted.
- return 0, hlhs, hlhs.Equals(hrhs), nil
+ return 0, hlhs, hlhs.Equals(hrhs), 0, nil
case parser.NEQ:
// This operation expects that both histograms are compacted.
- return 0, hlhs, !hlhs.Equals(hrhs), nil
+ return 0, hlhs, !hlhs.Equals(hrhs), 0, nil
case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
- return 0, nil, false, annotations.IncompatibleTypesInBinOpInfo
+ return 0, nil, false, warnings.WarnIncompatibleTypesInBinOp, nil
}
}
}
- return 0, nil, false, errors.Newf("%s, operator %q not allowed for operations between vectors", annotations.PromQLWarning, op)
+ return 0, nil, false, 0, nil
+}
+
+// emitBinaryOpWarnings emits warnings for binary operation side effects.
+func emitBinaryOpWarnings(ctx context.Context, warn warnings.Warnings, opType parser.ItemType) {
+ if warn == 0 {
+ return
+ }
+ if warn&warnings.WarnMixedExponentialCustomBuckets != 0 {
+ warnings.AddToContext(annotations.NewMixedExponentialCustomHistogramsWarning("", posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnCounterResetCollision != 0 {
+ var op annotations.HistogramOperation
+ switch opType {
+ case parser.ADD:
+ op = annotations.HistogramAdd
+ case parser.SUB:
+ op = annotations.HistogramSub
+ default:
+ return
+ }
+ warnings.AddToContext(annotations.NewHistogramCounterResetCollisionWarning(posrange.PositionRange{}, op), ctx)
+ }
+ if warn&warnings.WarnNHCBBoundsReconciled != 0 {
+ var op annotations.HistogramOperation
+ switch opType {
+ case parser.ADD:
+ op = annotations.HistogramAdd
+ case parser.SUB:
+ op = annotations.HistogramSub
+ default:
+ return
+ }
+ warnings.AddToContext(annotations.NewMismatchedCustomBucketsHistogramsInfo(posrange.PositionRange{}, op), ctx)
+ }
+ if warn&warnings.WarnIncompatibleTypesInBinOp != 0 {
+ warnings.AddToContext(annotations.IncompatibleTypesInBinOpInfo, ctx)
+ }
}
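With `binOp` returning warning flags instead of sentinel annotation errors, each caller decides whether to emit the warning and whether to drop the sample. A sketch of a caller under that five-value contract, with simplified types and without the histogram return value:

```go
package main

import "fmt"

type Warnings uint32

const WarnIncompatibleTypes Warnings = 1 << 0

// binOp mirrors the new shape: value, keep, warnings, error.
// (The real function also returns a *histogram.FloatHistogram.)
func binOp(op string, lhs, rhs float64) (float64, bool, Warnings, error) {
	switch op {
	case "+":
		return lhs + rhs, true, 0, nil
	case ">":
		return lhs, lhs > rhs, 0, nil
	default:
		// e.g. histogram % float in the real code
		return 0, false, WarnIncompatibleTypes, nil
	}
}

func main() {
	for _, op := range []string{"+", ">", "%"} {
		v, keep, warn, err := binOp(op, 3, 2)
		if err != nil {
			continue
		}
		if warn&WarnIncompatibleTypes != 0 {
			fmt.Printf("%s: incompatible types, sample dropped\n", op)
			continue // skip output entirely, as in the vector operator
		}
		if keep {
			fmt.Printf("%s: %v\n", op, v)
		}
	}
}
```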
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go b/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go
index 7d168270161..6c707ea17f3 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/binary/vector.go
@@ -31,33 +31,28 @@ type joinBucket struct {
// vectorOperator evaluates an expression between two step vectors.
type vectorOperator struct {
- pool *model.VectorPool
- once sync.Once
+ lhs model.VectorOperator
+ rhs model.VectorOperator
+ matching *parser.VectorMatching
+ opType parser.ItemType
+ returnBool bool
+ stepsBatch int
+ sigFunc func(labels.Labels) uint64
- lhs model.VectorOperator
- rhs model.VectorOperator
+ once sync.Once
+ series []labels.Labels
lhsSampleIDs []labels.Labels
rhsSampleIDs []labels.Labels
- series []labels.Labels
-
- // join signature
- sigFunc func(labels.Labels) uint64
+ outputMap map[uint64]uint64
- // join helpers
lcJoinBuckets []*joinBucket
hcJoinBuckets []*joinBucket
- outputMap map[uint64]uint64
-
- matching *parser.VectorMatching
- opType parser.ItemType
-
- // If true then 1/0 needs to be returned instead of the value.
- returnBool bool
+ lhsBuf []model.StepVector
+ rhsBuf []model.StepVector
}
func NewVectorOperator(
- pool *model.VectorPool,
lhs model.VectorOperator,
rhs model.VectorOperator,
matching *parser.VectorMatching,
@@ -66,13 +61,13 @@ func NewVectorOperator(
opts *query.Options,
) (model.VectorOperator, error) {
op := &vectorOperator{
- pool: pool,
lhs: lhs,
rhs: rhs,
matching: matching,
opType: opType,
returnBool: returnBool,
sigFunc: signatureFunc(matching.On, matching.MatchingLabels...),
+ stepsBatch: opts.StepsBatch,
}
return telemetry.NewOperator(telemetry.NewTelemetry(op, opts), op), nil
@@ -96,65 +91,56 @@ func (o *vectorOperator) Series(ctx context.Context) ([]labels.Labels, error) {
return o.series, nil
}
-func (o *vectorOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *vectorOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
// Some operators do not call Series of all their children.
if err := o.initOnce(ctx); err != nil {
- return nil, err
+ return 0, err
}
- var lhs []model.StepVector
+ var lhsN int
var lerrChan = make(chan error, 1)
go func() {
var err error
- lhs, err = o.lhs.Next(ctx)
+ lhsN, err = o.lhs.Next(ctx, o.lhsBuf)
if err != nil {
lerrChan <- err
}
close(lerrChan)
}()
- rhs, rerr := o.rhs.Next(ctx)
+ rhsN, rerr := o.rhs.Next(ctx, o.rhsBuf)
lerr := <-lerrChan
if rerr != nil {
- return nil, rerr
+ return 0, rerr
}
if lerr != nil {
- return nil, lerr
+ return 0, lerr
}
// TODO(fpetkovski): When one operator becomes empty,
// we might want to drain or close the other one.
// We don't have a concept of closing an operator yet.
- if len(lhs) == 0 || len(rhs) == 0 {
- return nil, nil
+ if lhsN == 0 || rhsN == 0 {
+ return 0, nil
}
- batch := o.pool.GetVectorBatch()
- for i, vector := range lhs {
- if i < len(rhs) {
- step, err := o.execBinaryOperation(ctx, lhs[i], rhs[i])
- if err != nil {
- return nil, err
- }
- batch = append(batch, step)
- o.rhs.GetPool().PutStepVector(rhs[i])
+ n := 0
+ minN := min(rhsN, lhsN)
+
+ for i := 0; i < minN && n < len(buf); i++ {
+ if err := o.execBinaryOperation(ctx, o.lhsBuf[i], o.rhsBuf[i], &buf[n]); err != nil {
+ return 0, err
}
- o.lhs.GetPool().PutStepVector(vector)
+ n++
}
- o.lhs.GetPool().PutVectors(lhs)
- o.rhs.GetPool().PutVectors(rhs)
-
- return batch, nil
-}
-func (o *vectorOperator) GetPool() *model.VectorPool {
- return o.pool
+ return n, nil
}
func (o *vectorOperator) initOnce(ctx context.Context) error {
@@ -191,25 +177,43 @@ func (o *vectorOperator) init(ctx context.Context) error {
o.initJoinTables(highCardSide, lowCardSide)
+ // Pre-allocate buffers with appropriate inner slice capacities
+ // based on series counts from each side.
+ lhsSeriesCount := len(o.lhsSampleIDs)
+ rhsSeriesCount := len(o.rhsSampleIDs)
+
+ o.lhsBuf = make([]model.StepVector, o.stepsBatch)
+ o.rhsBuf = make([]model.StepVector, o.stepsBatch)
+
+ // Pre-allocate float sample slices; histogram slices will grow on demand.
+ for i := range o.lhsBuf {
+ o.lhsBuf[i].SampleIDs = make([]uint64, 0, lhsSeriesCount)
+ o.lhsBuf[i].Samples = make([]float64, 0, lhsSeriesCount)
+ }
+ for i := range o.rhsBuf {
+ o.rhsBuf[i].SampleIDs = make([]uint64, 0, rhsSeriesCount)
+ o.rhsBuf[i].Samples = make([]float64, 0, rhsSeriesCount)
+ }
+
return nil
}
-func (o *vectorOperator) execBinaryOperation(ctx context.Context, lhs, rhs model.StepVector) (model.StepVector, error) {
+func (o *vectorOperator) execBinaryOperation(ctx context.Context, lhs, rhs model.StepVector, step *model.StepVector) error {
switch o.opType {
case parser.LAND:
- return o.execBinaryAnd(lhs, rhs)
+ return o.execBinaryAnd(lhs, rhs, step)
case parser.LOR:
- return o.execBinaryOr(lhs, rhs)
+ return o.execBinaryOr(lhs, rhs, step)
case parser.LUNLESS:
- return o.execBinaryUnless(lhs, rhs)
+ return o.execBinaryUnless(lhs, rhs, step)
default:
- return o.execBinaryArithmetic(ctx, lhs, rhs)
+ return o.execBinaryArithmetic(ctx, lhs, rhs, step)
}
}
-func (o *vectorOperator) execBinaryAnd(lhs, rhs model.StepVector) (model.StepVector, error) {
+func (o *vectorOperator) execBinaryAnd(lhs, rhs model.StepVector, step *model.StepVector) error {
ts := lhs.T
- step := o.pool.GetStepVector(ts)
+ step.Reset(ts)
for _, sampleID := range rhs.SampleIDs {
jp := o.lcJoinBuckets[sampleID]
@@ -221,54 +225,58 @@ func (o *vectorOperator) execBinaryAnd(lhs, rhs model.StepVector) (model.StepVec
jp.ats = ts
}
+ sampleHint := len(lhs.Samples)
for i, sampleID := range lhs.SampleIDs {
if jp := o.hcJoinBuckets[sampleID]; jp.ats == ts {
- step.AppendSample(o.pool, o.outputSeriesID(sampleID+1, 0), lhs.Samples[i])
+ step.AppendSampleWithSizeHint(o.outputSeriesID(sampleID+1, 0), lhs.Samples[i], sampleHint)
}
}
+ histogramHint := len(lhs.Histograms)
for i, histogramID := range lhs.HistogramIDs {
if jp := o.hcJoinBuckets[histogramID]; jp.ats == ts {
- step.AppendHistogram(o.pool, o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i])
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i], histogramHint)
}
}
- return step, nil
+ return nil
}
-func (o *vectorOperator) execBinaryOr(lhs, rhs model.StepVector) (model.StepVector, error) {
+func (o *vectorOperator) execBinaryOr(lhs, rhs model.StepVector, step *model.StepVector) error {
ts := lhs.T
- step := o.pool.GetStepVector(ts)
+ step.Reset(ts)
+ sampleHint := len(lhs.Samples) + len(rhs.Samples)
for i, sampleID := range lhs.SampleIDs {
jp := o.hcJoinBuckets[sampleID]
jp.ats = ts
- step.AppendSample(o.pool, o.outputSeriesID(sampleID+1, 0), lhs.Samples[i])
+ step.AppendSampleWithSizeHint(o.outputSeriesID(sampleID+1, 0), lhs.Samples[i], sampleHint)
}
+ histogramHint := len(lhs.Histograms) + len(rhs.Histograms)
for i, histogramID := range lhs.HistogramIDs {
jp := o.hcJoinBuckets[histogramID]
jp.ats = ts
- step.AppendHistogram(o.pool, o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i])
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i], histogramHint)
}
for i, sampleID := range rhs.SampleIDs {
if jp := o.lcJoinBuckets[sampleID]; jp.ats != ts {
- step.AppendSample(o.pool, o.outputSeriesID(0, sampleID+1), rhs.Samples[i])
+ step.AppendSampleWithSizeHint(o.outputSeriesID(0, sampleID+1), rhs.Samples[i], sampleHint)
}
}
for i, histogramID := range rhs.HistogramIDs {
if jp := o.lcJoinBuckets[histogramID]; jp.ats != ts {
- step.AppendHistogram(o.pool, o.outputSeriesID(0, histogramID+1), rhs.Histograms[i])
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(0, histogramID+1), rhs.Histograms[i], histogramHint)
}
}
- return step, nil
+ return nil
}
-func (o *vectorOperator) execBinaryUnless(lhs, rhs model.StepVector) (model.StepVector, error) {
+func (o *vectorOperator) execBinaryUnless(lhs, rhs model.StepVector, step *model.StepVector) error {
ts := lhs.T
- step := o.pool.GetStepVector(ts)
+ step.Reset(ts)
for _, sampleID := range rhs.SampleIDs {
jp := o.lcJoinBuckets[sampleID]
@@ -279,32 +287,32 @@ func (o *vectorOperator) execBinaryUnless(lhs, rhs model.StepVector) (model.Step
jp.ats = ts
}
+ sampleHint := len(lhs.Samples)
for i, sampleID := range lhs.SampleIDs {
if jp := o.hcJoinBuckets[sampleID]; jp.ats != ts {
- step.AppendSample(o.pool, o.outputSeriesID(sampleID+1, 0), lhs.Samples[i])
+ step.AppendSampleWithSizeHint(o.outputSeriesID(sampleID+1, 0), lhs.Samples[i], sampleHint)
}
}
+ histogramHint := len(lhs.Histograms)
for i, histogramID := range lhs.HistogramIDs {
if jp := o.hcJoinBuckets[histogramID]; jp.ats != ts {
- step.AppendHistogram(o.pool, o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i])
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(histogramID+1, 0), lhs.Histograms[i], histogramHint)
}
}
- return step, nil
+ return nil
}
-func (o *vectorOperator) computeBinaryPairing(hval, lval float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
+func (o *vectorOperator) computeBinaryPairing(hval, lval float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
// operand is not commutative so we need to address potential swapping
if o.matching.Card == parser.CardOneToMany {
- v, h, keep, err := binOp(o.opType, lval, hval, hlhs, hrhs)
- return v, h, keep, err
+ return binOp(o.opType, lval, hval, hlhs, hrhs)
}
- v, h, keep, err := binOp(o.opType, hval, lval, hlhs, hrhs)
- return v, h, keep, err
+ return binOp(o.opType, hval, lval, hlhs, hrhs)
}
-func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs model.StepVector) (model.StepVector, error) {
+func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs model.StepVector, step *model.StepVector) error {
ts := lhs.T
- step := o.pool.GetStepVector(ts)
+ step.Reset(ts)
var (
hcs, lcs model.StepVector
@@ -319,18 +327,18 @@ func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs mode
case parser.CardOneToMany:
hcs, lcs = rhs, lhs
default:
- return step, errors.Newf("Unexpected matching cardinality: %s", o.matching.Card.String())
+ return errors.Newf("Unexpected matching cardinality: %s", o.matching.Card.String())
}
// shortcut: if we have no samples and histograms on the high card side we cannot compute pairings
if len(hcs.Samples) == 0 && len(hcs.Histograms) == 0 {
- return step, nil
+ return nil
}
for i, sampleID := range lcs.SampleIDs {
jp := o.lcJoinBuckets[sampleID]
// Hash collisions on the low-card-side would imply a many-to-many relation.
if jp.ats == ts {
- return model.StepVector{}, o.newManyToManyMatchErrorOnLowCardSide(jp.sid, sampleID)
+ return o.newManyToManyMatchErrorOnLowCardSide(jp.sid, sampleID)
}
jp.sid = sampleID
jp.val = lcs.Samples[i]
@@ -341,13 +349,16 @@ func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs mode
jp := o.lcJoinBuckets[histogramID]
// Hash collisions on the low-card-side would imply a many-to-many relation.
if jp.ats == ts {
- return model.StepVector{}, o.newManyToManyMatchErrorOnLowCardSide(jp.sid, histogramID)
+ return o.newManyToManyMatchErrorOnLowCardSide(jp.sid, histogramID)
}
jp.sid = histogramID
jp.histogramVal = lcs.Histograms[i]
jp.ats = ts
}
+ sampleHint := len(hcs.Samples) + len(hcs.Histograms)
+ histogramHint := len(hcs.Samples) + len(hcs.Histograms)
+
for i, histogramID := range hcs.HistogramIDs {
jp := o.hcJoinBuckets[histogramID]
if jp.ats != ts {
@@ -356,34 +367,42 @@ func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs mode
// Hash collisions on the high card side are expected except if a one-to-one
// matching was requested and we have an implicit many-to-one match instead.
if jp.bts == ts && o.matching.Card == parser.CardOneToOne {
- return model.StepVector{}, o.newImplicitManyToOneError()
+ return o.newImplicitManyToOneError()
}
jp.bts = ts
+ var warn warnings.Warnings
if jp.histogramVal != nil {
- _, h, keep, err = o.computeBinaryPairing(0, 0, hcs.Histograms[i], jp.histogramVal)
+ _, h, keep, warn, err = o.computeBinaryPairing(0, 0, hcs.Histograms[i], jp.histogramVal)
} else {
- _, h, keep, err = o.computeBinaryPairing(0, jp.val, hcs.Histograms[i], nil)
+ _, h, keep, warn, err = o.computeBinaryPairing(0, jp.val, hcs.Histograms[i], nil)
}
if err != nil {
warnings.AddToContext(err, ctx)
continue
}
+ if warn != 0 {
+ emitBinaryOpWarnings(ctx, warn, o.opType)
+ // For incompatible types, skip entirely and don't produce any output.
+ if warn&warnings.WarnIncompatibleTypesInBinOp != 0 {
+ continue
+ }
+ }
switch {
case o.returnBool:
h = nil
if keep {
- step.AppendSample(o.pool, o.outputSeriesID(histogramID+1, jp.sid+1), 1.0)
+ step.AppendSampleWithSizeHint(o.outputSeriesID(histogramID+1, jp.sid+1), 1.0, sampleHint)
} else {
- step.AppendSample(o.pool, o.outputSeriesID(histogramID+1, jp.sid+1), 0.0)
+ step.AppendSampleWithSizeHint(o.outputSeriesID(histogramID+1, jp.sid+1), 0.0, sampleHint)
}
case !keep:
continue
}
if h != nil {
- step.AppendHistogram(o.pool, o.outputSeriesID(histogramID+1, jp.sid+1), h)
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(histogramID+1, jp.sid+1), h, histogramHint)
}
}
@@ -395,24 +414,37 @@ func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs mode
// Hash collisions on the high card side are expected except if a one-to-one
// matching was requested and we have an implicit many-to-one match instead.
if jp.bts == ts && o.matching.Card == parser.CardOneToOne {
- return model.StepVector{}, o.newImplicitManyToOneError()
+ return o.newImplicitManyToOneError()
}
jp.bts = ts
var val float64
+ var warn warnings.Warnings
if jp.histogramVal != nil {
- _, h, _, err = o.computeBinaryPairing(hcs.Samples[i], 0, nil, jp.histogramVal)
+ _, h, keep, warn, err = o.computeBinaryPairing(hcs.Samples[i], 0, nil, jp.histogramVal)
if err != nil {
warnings.AddToContext(err, ctx)
continue
}
- step.AppendHistogram(o.pool, o.outputSeriesID(sampleID+1, jp.sid+1), h)
+ if warn != 0 {
+ emitBinaryOpWarnings(ctx, warn, o.opType)
+ if warn&warnings.WarnIncompatibleTypesInBinOp != 0 {
+ continue
+ }
+ }
+ if !keep {
+ continue
+ }
+ step.AppendHistogramWithSizeHint(o.outputSeriesID(sampleID+1, jp.sid+1), h, histogramHint)
} else {
- val, _, keep, err = o.computeBinaryPairing(hcs.Samples[i], jp.val, nil, nil)
+ val, _, keep, warn, err = o.computeBinaryPairing(hcs.Samples[i], jp.val, nil, nil)
if err != nil {
warnings.AddToContext(err, ctx)
continue
}
+ if warn != 0 {
+ emitBinaryOpWarnings(ctx, warn, o.opType)
+ }
if o.returnBool {
val = 0
if keep {
@@ -421,10 +453,10 @@ func (o *vectorOperator) execBinaryArithmetic(ctx context.Context, lhs, rhs mode
} else if !keep {
continue
}
- step.AppendSample(o.pool, o.outputSeriesID(sampleID+1, jp.sid+1), val)
+ step.AppendSampleWithSizeHint(o.outputSeriesID(sampleID+1, jp.sid+1), val, sampleHint)
}
}
- return step, nil
+ return nil
}
func (o *vectorOperator) newManyToManyMatchErrorOnLowCardSide(originalSampleId, duplicateSampleId uint64) error {
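The set-operation paths (`and`/`or`/`unless`) rely on stamping each join bucket with the current step timestamp (`jp.ats = ts`) so membership tests need no per-step clearing. A toy version of that trick, using a single shared bucket slice for simplicity (the real code keeps separate low-card and high-card bucket views):

```go
package main

import "fmt"

// bucket records the last step timestamp at which its series was present.
type bucket struct{ ats int64 }

// and keeps lhs samples whose series also appeared on the rhs in this step.
func and(ts int64, lhsIDs, rhsIDs []uint64, buckets []*bucket) []uint64 {
	for _, id := range rhsIDs {
		buckets[id].ats = ts // mark presence for this step only
	}
	var out []uint64
	for _, id := range lhsIDs {
		if buckets[id].ats == ts { // stale marks from older steps don't match
			out = append(out, id)
		}
	}
	return out
}

func main() {
	buckets := []*bucket{{ats: -1}, {ats: -1}, {ats: -1}}
	fmt.Println(and(10, []uint64{0, 1, 2}, []uint64{1}, buckets)) // [1]
	fmt.Println(and(20, []uint64{0, 1, 2}, []uint64{2}, buckets)) // [2]
}
```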
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go
index c0a2cadd305..029d5ec441f 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/coalesce.go
@@ -14,6 +14,7 @@ import (
"github.com/thanos-io/promql-engine/query"
"github.com/efficientgo/core/errors"
+ "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
)
@@ -37,7 +38,6 @@ type coalesce struct {
once sync.Once
series []labels.Labels
- pool *model.VectorPool
wg sync.WaitGroup
operators []model.VectorOperator
batchSize int64
@@ -46,14 +46,17 @@ type coalesce struct {
inVectors [][]model.StepVector
// sampleOffsets holds per-operator offsets needed to map an input sample ID to an output sample ID.
sampleOffsets []uint64
+ // seriesCounts holds the number of series per operator for pre-allocation.
+ seriesCounts []int
+ // tempBufs are reusable buffers for reading from operators.
+ tempBufs [][]model.StepVector
}
-func NewCoalesce(pool *model.VectorPool, opts *query.Options, batchSize int64, operators ...model.VectorOperator) model.VectorOperator {
+func NewCoalesce(opts *query.Options, batchSize int64, operators ...model.VectorOperator) model.VectorOperator {
if len(operators) == 1 {
return operators[0]
}
oper := &coalesce{
- pool: pool,
sampleOffsets: make([]uint64, len(operators)),
operators: operators,
inVectors: make([][]model.StepVector, len(operators)),
@@ -71,10 +74,6 @@ func (c *coalesce) String() string {
return "[coalesce]"
}
-func (c *coalesce) GetPool() *model.VectorPool {
- return c.pool
-}
-
func (c *coalesce) Series(ctx context.Context) ([]labels.Labels, error) {
var err error
c.once.Do(func() { err = c.loadSeries(ctx) })
@@ -84,25 +83,41 @@ func (c *coalesce) Series(ctx context.Context) ([]labels.Labels, error) {
return c.series, nil
}
-func (c *coalesce) Next(ctx context.Context) ([]model.StepVector, error) {
+func (c *coalesce) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
var err error
c.once.Do(func() { err = c.loadSeries(ctx) })
if err != nil {
- return nil, err
+ return 0, err
+ }
+
+ // Allocate temporary buffers on first use.
+ // Inner slices will be lazily pre-allocated by child operators when they append data.
+ if c.tempBufs == nil {
+ c.tempBufs = make([][]model.StepVector, len(c.operators))
+ for i := range c.tempBufs {
+ c.tempBufs[i] = make([]model.StepVector, len(buf))
+ }
}
var mu sync.Mutex
var minTs int64 = math.MaxInt64
var errChan = make(errorChan, len(c.operators))
+ vectorCounts := make([]int, len(c.operators))
+
for idx, o := range c.operators {
// We already have a batch from the previous iteration.
if c.inVectors[idx] != nil {
+ mu.Lock()
+ if len(c.inVectors[idx]) > 0 {
+ minTs = min(minTs, c.inVectors[idx][0].T)
+ }
+ mu.Unlock()
continue
}
@@ -110,67 +125,90 @@ func (c *coalesce) Next(ctx context.Context) ([]model.StepVector, error) {
go func(opIdx int, o model.VectorOperator) {
defer c.wg.Done()
- in, err := o.Next(ctx)
+ n, err := o.Next(ctx, c.tempBufs[opIdx])
if err != nil {
errChan <- err
return
}
+ vectorCounts[opIdx] = n
// Map input IDs to output IDs.
- for _, vector := range in {
- for i := range vector.SampleIDs {
- vector.SampleIDs[i] = vector.SampleIDs[i] + c.sampleOffsets[opIdx]
+ for i := range n {
+ vector := &c.tempBufs[opIdx][i]
+ for j := range vector.SampleIDs {
+ vector.SampleIDs[j] = vector.SampleIDs[j] + c.sampleOffsets[opIdx]
}
- for i := range vector.HistogramIDs {
- vector.HistogramIDs[i] = vector.HistogramIDs[i] + c.sampleOffsets[opIdx]
+ for j := range vector.HistogramIDs {
+ vector.HistogramIDs[j] = vector.HistogramIDs[j] + c.sampleOffsets[opIdx]
}
}
- c.inVectors[opIdx] = in
- if in == nil {
- return
- }
- mu.Lock()
- if minTs > in[0].T {
- minTs = in[0].T
+ if n > 0 {
+ c.inVectors[opIdx] = c.tempBufs[opIdx][:n]
+ mu.Lock()
+ minTs = min(minTs, c.tempBufs[opIdx][0].T)
+ mu.Unlock()
+ } else {
+ c.inVectors[opIdx] = nil
}
- mu.Unlock()
}(idx, o)
}
c.wg.Wait()
close(errChan)
if err := errChan.getError(); err != nil {
- return nil, err
+ return 0, err
}
- var out []model.StepVector = nil
+ // Count vectors with minTs and prepare output
+ n := 0
for opIdx, vectors := range c.inVectors {
if len(vectors) == 0 || vectors[0].T != minTs {
continue
}
- if len(vectors) > 0 && out == nil {
- out = c.pool.GetVectorBatch()
- for i := range vectors {
- out = append(out, c.pool.GetStepVector(vectors[i].T))
+ // Initialize output vectors if needed
+ if n == 0 {
+ maxSteps := min(len(vectors), len(buf))
+ for i := range maxSteps {
+ buf[i].Reset(vectors[i].T)
+ // Ensure sufficient capacity for float samples.
+ // Histogram slices will grow on demand since most queries don't use them.
+ totalSamples := 0
+ totalHistograms := 0
+ for _, v := range c.inVectors {
+ if len(v) > i {
+ totalSamples += len(v[i].SampleIDs)
+ totalHistograms += len(v[i].HistogramIDs)
+ }
+ }
+ if cap(buf[i].SampleIDs) < totalSamples {
+ buf[i].SampleIDs = make([]uint64, 0, totalSamples)
+ buf[i].Samples = make([]float64, 0, totalSamples)
+ }
+ if totalHistograms > 0 && cap(buf[i].HistogramIDs) < totalHistograms {
+ buf[i].HistogramIDs = make([]uint64, 0, totalHistograms)
+ buf[i].Histograms = make([]*histogram.FloatHistogram, 0, totalHistograms)
+ }
}
+ n = maxSteps
}
- for i := range vectors {
- out[i].AppendSamples(c.pool, vectors[i].SampleIDs, vectors[i].Samples)
- out[i].AppendHistograms(c.pool, vectors[i].HistogramIDs, vectors[i].Histograms)
- c.operators[opIdx].GetPool().PutStepVector(vectors[i])
+ // Append samples from this operator
+ for i := 0; i < n && i < len(vectors); i++ {
+ buf[i].AppendSamples(vectors[i].SampleIDs, vectors[i].Samples)
+ buf[i].AppendHistograms(vectors[i].HistogramIDs, vectors[i].Histograms)
}
- c.inVectors[opIdx] = nil
- c.operators[opIdx].GetPool().PutVectors(vectors)
- }
- if out == nil {
- return nil, nil
+ // Keep remaining vectors for next iteration
+ if n < len(vectors) {
+ c.inVectors[opIdx] = vectors[n:]
+ } else {
+ c.inVectors[opIdx] = nil
+ }
}
- return out, nil
+ return n, nil
}
func (c *coalesce) loadSeries(ctx context.Context) error {
@@ -211,15 +249,16 @@ func (c *coalesce) loadSeries(ctx context.Context) error {
}
c.sampleOffsets = make([]uint64, len(c.operators))
+ c.seriesCounts = make([]int, len(c.operators))
c.series = make([]labels.Labels, 0, numSeries)
for i, series := range allSeries {
c.sampleOffsets[i] = uint64(len(c.series))
+ c.seriesCounts[i] = len(series)
c.series = append(c.series, series...)
}
if c.batchSize == 0 || c.batchSize > int64(len(c.series)) {
c.batchSize = int64(len(c.series))
}
- c.pool.SetStepSize(int(c.batchSize))
return nil
}
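The reworked coalesce keeps only the vectors whose head timestamp equals the global minimum and carries the remainder of each operator's batch into the next call. A simplified sketch of that merge over plain timestamps:

```go
package main

import (
	"fmt"
	"math"
)

// batch is a per-operator queue of step timestamps (a stand-in for the
// queued step vectors in c.inVectors).
type batch []int64

// coalesceStep merges all batches whose head matches the minimum timestamp,
// keeping the rest for the next call, mirroring the carry-over logic above.
func coalesceStep(pending []batch) (int64, []int, bool) {
	minTs := int64(math.MaxInt64)
	found := false
	for _, b := range pending {
		if len(b) > 0 && b[0] < minTs {
			minTs, found = b[0], true
		}
	}
	if !found {
		return 0, nil, false
	}
	var merged []int
	for i := range pending {
		if len(pending[i]) > 0 && pending[i][0] == minTs {
			merged = append(merged, i)  // operator i contributes this step
			pending[i] = pending[i][1:] // carry the remainder forward
		}
	}
	return minTs, merged, true
}

func main() {
	pending := []batch{{10, 20}, {20}, {10}}
	for {
		ts, ops, ok := coalesceStep(pending)
		if !ok {
			break
		}
		fmt.Println("ts", ts, "from operators", ops)
	}
}
```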
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go
index 78ea488cbc8..3015926999c 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/concurrent.go
@@ -16,15 +16,25 @@ import (
)
type maybeStepVector struct {
- err error
- stepVector []model.StepVector
+ err error
+ vectors []model.StepVector // The actual buffer with data
+ n int
}
type concurrencyOperator struct {
once sync.Once
+ seriesOnce sync.Once
next model.VectorOperator
buffer chan maybeStepVector
bufferSize int
+ opts *query.Options
+
+ // Buffer management for zero-copy swapping:
+ // we maintain a pool of buffers that are swapped between the producer and the consumer.
+ returnChan chan []model.StepVector // Channel to return buffers for reuse
+
+ // seriesCount is used to pre-allocate inner slices of StepVectors
+ seriesCount int
}
func NewConcurrent(next model.VectorOperator, bufferSize int, opts *query.Options) model.VectorOperator {
@@ -32,6 +42,8 @@ func NewConcurrent(next model.VectorOperator, bufferSize int, opts *query.Option
next: next,
buffer: make(chan maybeStepVector, bufferSize),
bufferSize: bufferSize,
+ opts: opts,
+ returnChan: make(chan []model.StepVector, bufferSize+2),
}
return telemetry.NewOperator(telemetry.NewTelemetry(oper, opts), oper)
@@ -50,20 +62,34 @@ func (c *concurrencyOperator) Series(ctx context.Context) ([]labels.Labels, erro
if err != nil {
return nil, err
}
- return series, nil
-}
-func (c *concurrencyOperator) GetPool() *model.VectorPool {
- return c.next.GetPool()
+ // Initialize buffers. Inner slices will be allocated by the child operator
+ // which knows the actual batch size for pre-allocation.
+ c.seriesOnce.Do(func() {
+ c.seriesCount = len(series)
+ for i := 0; i < c.bufferSize+1; i++ {
+ c.returnChan <- make([]model.StepVector, c.opts.StepsBatch)
+ }
+ })
+
+ return series, nil
}
-func (c *concurrencyOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (c *concurrencyOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
+ // Ensure buffers are initialized (in case Series() wasn't called first)
+ c.seriesOnce.Do(func() {
+ // Fallback: create buffers without pre-sized inner slices
+ for i := 0; i < c.bufferSize+1; i++ {
+ c.returnChan <- make([]model.StepVector, c.opts.StepsBatch)
+ }
+ })
+
c.once.Do(func() {
go c.pull(ctx)
go c.drainBufferOnCancel(ctx)
@@ -71,13 +97,24 @@ func (c *concurrencyOperator) Next(ctx context.Context) ([]model.StepVector, err
r, ok := <-c.buffer
if !ok {
- return nil, nil
+ return 0, nil
}
if r.err != nil {
- return nil, r.err
+ return 0, r.err
+ }
+
+ // Zero-copy swap: move data from internal buffer to caller's buffer
+ // by swapping the slice contents directly
+ n := min(r.n, len(buf))
+ for i := range n {
+ // Swap the step vector contents (slice-header swaps only; no data is copied)
+ buf[i], r.vectors[i] = r.vectors[i], buf[i]
}
- return r.stepVector, nil
+ // Return the (now empty) buffer for reuse by the producer
+ c.returnChan <- r.vectors
+
+ return n, nil
}
func (c *concurrencyOperator) pull(ctx context.Context) {
@@ -89,21 +126,40 @@ func (c *concurrencyOperator) pull(ctx context.Context) {
c.buffer <- maybeStepVector{err: ctx.Err()}
return
default:
- r, err := c.next.Next(ctx)
+ // Get an available buffer from the return channel
+ var readBuf []model.StepVector
+ select {
+ case readBuf = <-c.returnChan:
+ case <-ctx.Done():
+ c.buffer <- maybeStepVector{err: ctx.Err()}
+ return
+ }
+
+ n, err := c.next.Next(ctx, readBuf)
if err != nil {
+ // Return the buffer
+ c.returnChan <- readBuf
c.buffer <- maybeStepVector{err: err}
return
}
- if r == nil {
+ if n == 0 {
+ // Return the buffer
+ c.returnChan <- readBuf
return
}
- c.buffer <- maybeStepVector{stepVector: r}
+
+ // Send the buffer with data
+ c.buffer <- maybeStepVector{vectors: readBuf, n: n}
}
}
}
func (c *concurrencyOperator) drainBufferOnCancel(ctx context.Context) {
<-ctx.Done()
- for range c.buffer {
+ for r := range c.buffer {
+ if r.vectors != nil {
+ // Return the buffer
+ c.returnChan <- r.vectors
+ }
}
}
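The producer/consumer buffer recycling above reduces to two channels: a free list and a full queue. A minimal sketch of the same ownership handoff, with plain `[]int` buffers standing in for `[]model.StepVector`:

```go
package main

import "fmt"

// The producer takes an empty buffer from free, fills it, and sends it on
// full; the consumer reads it and hands the buffer back on free. No buffer
// is ever owned by both sides at once, so no copying or locking is needed.
func main() {
	const slots = 2
	free := make(chan []int, slots)
	full := make(chan []int, slots)
	for i := 0; i < slots; i++ {
		free <- make([]int, 0, 4)
	}

	go func() {
		for v := 0; v < 5; v++ {
			buf := <-free // block until a buffer is recycled
			buf = append(buf[:0], v)
			full <- buf
		}
		close(full)
	}()

	for buf := range full {
		fmt.Println(buf)
		free <- buf // return the buffer for reuse; no new allocations
	}
}
```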
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go
index 95c8d0e31c6..f07ce4b667a 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/dedup.go
@@ -34,38 +34,39 @@ type dedupOperator struct {
once sync.Once
series []labels.Labels
- pool *model.VectorPool
next model.VectorOperator
// outputIndex is a slice that is used as an index from input sample ID to output sample ID.
outputIndex []uint64
dedupCache dedupCache
}
-func NewDedupOperator(pool *model.VectorPool, next model.VectorOperator, opts *query.Options) model.VectorOperator {
+func NewDedupOperator(next model.VectorOperator, opts *query.Options) model.VectorOperator {
oper := &dedupOperator{
next: next,
- pool: pool,
}
return telemetry.NewOperator(telemetry.NewTelemetry(oper, opts), oper)
}
-func (d *dedupOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (d *dedupOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
var err error
d.once.Do(func() { err = d.loadSeries(ctx) })
if err != nil {
- return nil, err
+ return 0, err
}
- in, err := d.next.Next(ctx)
+ n, err := d.next.Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- if in == nil {
- return nil, nil
+ if n == 0 {
+ return 0, nil
}
- result := d.pool.GetVectorBatch()
- for _, vector := range in {
+ // Process each input vector and overwrite it with the deduplicated output
+ for idx := range n {
+ vector := &buf[idx]
+
+ // Update dedup cache with all samples from this vector
for i, inputSampleID := range vector.SampleIDs {
outputSampleID := d.outputIndex[inputSampleID]
d.dedupCache[outputSampleID].t = vector.T
@@ -78,24 +79,27 @@ func (d *dedupOperator) Next(ctx context.Context) ([]model.StepVector, error) {
d.dedupCache[outputSampleID].h = vector.Histograms[i]
}
- out := d.pool.GetStepVector(vector.T)
+ // Clear the vector and rebuild it with deduplicated data
+ t := vector.T
+ buf[idx].Reset(t)
+
+ hint := len(d.series)
for outputSampleID, sample := range d.dedupCache {
// To avoid clearing the dedup cache for each step vector, we use the `t` field
// to detect whether a sample for the current step should be mapped to the output.
// If the timestamp of the sample does not match the input vector timestamp, it means that
// the sample was added in a previous iteration and should be skipped.
- if sample.t == vector.T {
+ if sample.t == t {
if sample.h == nil {
- out.AppendSample(d.pool, uint64(outputSampleID), sample.v)
+ buf[idx].AppendSampleWithSizeHint(uint64(outputSampleID), sample.v, hint)
} else {
- out.AppendHistogram(d.pool, uint64(outputSampleID), sample.h)
+ buf[idx].AppendHistogramWithSizeHint(uint64(outputSampleID), sample.h, hint)
}
}
}
- result = append(result, out)
}
- return result, nil
+ return n, nil
}
func (d *dedupOperator) Series(ctx context.Context) ([]labels.Labels, error) {
@@ -107,10 +111,6 @@ func (d *dedupOperator) Series(ctx context.Context) ([]labels.Labels, error) {
return d.series, nil
}
-func (d *dedupOperator) GetPool() *model.VectorPool {
- return d.pool
-}
-
func (d *dedupOperator) Explain() (next []model.VectorOperator) {
return []model.VectorOperator{d.next}
}
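The dedup cache avoids per-step clearing by tagging each entry with the timestamp at which it was written; entries from earlier steps simply fail the timestamp check. A toy version:

```go
package main

import "fmt"

type entry struct {
	t int64 // step timestamp at which this value was written
	v float64
}

// dedupStep overwrites duplicates in place and reads back only entries
// written for the current timestamp, so the cache never needs clearing.
func dedupStep(ts int64, ids []uint64, vals []float64, cache []entry) map[uint64]float64 {
	for i, id := range ids {
		cache[id] = entry{t: ts, v: vals[i]} // last write wins
	}
	out := map[uint64]float64{}
	for id, e := range cache {
		if e.t == ts { // entries from earlier steps are implicitly stale
			out[uint64(id)] = e.v
		}
	}
	return out
}

func main() {
	cache := make([]entry, 3)
	cache[0].t, cache[1].t, cache[2].t = -1, -1, -1
	fmt.Println(dedupStep(10, []uint64{1, 1, 2}, []float64{5, 7, 9}, cache))
	// map[1:7 2:9]: the duplicate series 1 keeps its last value
}
```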
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go b/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go
index c6599067fba..b18fb35c465 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/exchange/duplicate_label.go
@@ -32,23 +32,23 @@ func NewDuplicateLabelCheck(next model.VectorOperator, opts *query.Options) mode
return telemetry.NewOperator(telemetry.NewTelemetry(oper, opts), oper)
}
-func (d *duplicateLabelCheckOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (d *duplicateLabelCheckOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if err := d.init(ctx); err != nil {
- return nil, err
+ return 0, err
}
- in, err := d.next.Next(ctx)
+ n, err := d.next.Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- if in == nil {
- return nil, nil
+ if n == 0 {
+ return 0, nil
}
// TODO: currently there is a bug, we need to reset 'd.c's state
@@ -60,19 +60,20 @@ func (d *duplicateLabelCheckOperator) Next(ctx context.Context) ([]model.StepVec
d.c[d.p[i].a] = 0
d.c[d.p[i].b] = 0
}
- for i, sv := range in {
+ for i := range n {
+ sv := &buf[i]
for _, sid := range sv.SampleIDs {
d.c[sid] |= 2 << i
}
}
for i := range d.p {
if d.c[d.p[i].a]&d.c[d.p[i].b] > 0 {
- return nil, extlabels.ErrDuplicateLabelSet
+ return 0, extlabels.ErrDuplicateLabelSet
}
}
}
- return in, nil
+ return n, nil
}
func (d *duplicateLabelCheckOperator) Series(ctx context.Context) ([]labels.Labels, error) {
@@ -86,10 +87,6 @@ func (d *duplicateLabelCheckOperator) Series(ctx context.Context) ([]labels.Labe
return series, nil
}
-func (d *duplicateLabelCheckOperator) GetPool() *model.VectorPool {
- return d.next.GetPool()
-}
-
func (d *duplicateLabelCheckOperator) Explain() (next []model.VectorOperator) {
return []model.VectorOperator{d.next}
}
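
The `d.c[sid] |= 2 << i` bookkeeping above encodes batch membership as a bitmask: each step in the batch contributes one bit, and two series that share a label set are only an error if their masks overlap, i.e. if both emitted a sample at the same step. A small sketch under those assumptions:

```go
package main

import "fmt"

func main() {
	// c[seriesID] accumulates one bit per step of the current batch.
	c := make([]uint64, 2)

	// Suppose series 0 and 1 end up with identical label sets.
	// Step 0 emits only series 0; step 1 emits both.
	stepSampleIDs := [][]uint64{{0}, {0, 1}}
	for i, ids := range stepSampleIDs {
		for _, sid := range ids {
			c[sid] |= 2 << i
		}
	}

	// Overlapping bits mean both series produced a point at the same
	// step, which would yield duplicate label sets in the output.
	fmt.Println(c[0]&c[1] > 0) // true: step 1 saw both series
}
```
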
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/execution.go b/vendor/github.com/thanos-io/promql-engine/execution/execution.go
index 7f170fda3ce..6d20339bec4 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/execution.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/execution.go
@@ -56,7 +56,7 @@ func New(ctx context.Context, expr logicalplan.Node, storage storage.Scanners, o
func newOperator(ctx context.Context, expr logicalplan.Node, storage storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
switch e := expr.(type) {
case *logicalplan.NumberLiteral:
- return scan.NewNumberLiteralSelector(model.NewVectorPool(opts.StepsBatch), opts, e.Val), nil
+ return scan.NewNumberLiteralSelector(opts, e.Val), nil
case *logicalplan.VectorSelector:
return newVectorSelector(ctx, e, storage, opts, hints)
case *logicalplan.FunctionCall:
@@ -80,7 +80,7 @@ func newOperator(ctx context.Context, expr logicalplan.Node, storage storage.Sca
case logicalplan.Noop:
return noop.NewOperator(opts), nil
case logicalplan.UserDefinedExpr:
- return e.MakeExecutionOperator(ctx, model.NewVectorPool(opts.StepsBatch), opts, hints)
+ return e.MakeExecutionOperator(ctx, opts, hints)
default:
return nil, errors.Wrapf(parse.ErrNotSupportedExpr, "got: %s (%T)", e, e)
}
@@ -239,7 +239,7 @@ func newSubqueryFunction(ctx context.Context, e *logicalplan.FunctionCall, t *lo
}
}
- return scan.NewSubqueryOperator(model.NewVectorPool(opts.StepsBatch), inner, scalarArg, scalarArg2, &outerOpts, e, t)
+ return scan.NewSubqueryOperator(inner, scalarArg, scalarArg2, &outerOpts, e, t)
}
func newInstantVectorFunction(ctx context.Context, e *logicalplan.FunctionCall, storage storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
@@ -270,7 +270,7 @@ func newAggregateExpression(ctx context.Context, e *logicalplan.Aggregation, sca
}
if e.Op == parser.COUNT_VALUES {
param := logicalplan.UnsafeUnwrapString(e.Param)
- return aggregate.NewCountValues(model.NewVectorPool(opts.StepsBatch), next, param, !e.Without, e.Grouping, opts), nil
+ return aggregate.NewCountValues(next, param, !e.Without, e.Grouping, opts), nil
}
// parameter is only required for count_values, quantile, topk, bottomk, limitk, and limit_ratio.
@@ -283,9 +283,9 @@ func newAggregateExpression(ctx context.Context, e *logicalplan.Aggregation, sca
}
}
if e.Op == parser.TOPK || e.Op == parser.BOTTOMK || e.Op == parser.LIMITK || e.Op == parser.LIMIT_RATIO {
- next, err = aggregate.NewKHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts)
+ next, err = aggregate.NewKHashAggregate(next, paramOp, e.Op, !e.Without, e.Grouping, opts)
} else {
- next, err = aggregate.NewHashAggregate(model.NewVectorPool(opts.StepsBatch), next, paramOp, e.Op, !e.Without, e.Grouping, opts)
+ next, err = aggregate.NewHashAggregate(next, paramOp, e.Op, !e.Without, e.Grouping, opts)
}
if err != nil {
return nil, err
@@ -310,7 +310,7 @@ func newVectorBinaryOperator(ctx context.Context, e *logicalplan.Binary, storage
if err != nil {
return nil, err
}
- return binary.NewVectorOperator(model.NewVectorPool(opts.StepsBatch), leftOperator, rightOperator, e.VectorMatching, e.Op, e.ReturnBool, opts)
+ return binary.NewVectorOperator(leftOperator, rightOperator, e.VectorMatching, e.Op, e.ReturnBool, opts)
}
func newScalarBinaryOperator(ctx context.Context, e *logicalplan.Binary, storage storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
@@ -323,7 +323,7 @@ func newScalarBinaryOperator(ctx context.Context, e *logicalplan.Binary, storage
return nil, err
}
- return binary.NewScalar(model.NewVectorPoolWithSize(opts.StepsBatch, 1), lhs, rhs, e.LHS.ReturnType(), e.RHS.ReturnType(), e.Op, e.ReturnBool, opts)
+ return binary.NewScalar(lhs, rhs, e.LHS.ReturnType(), e.RHS.ReturnType(), e.Op, e.ReturnBool, opts)
}
func newUnaryExpression(ctx context.Context, e *logicalplan.Unary, scanners storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
@@ -346,13 +346,13 @@ func newUnaryExpression(ctx context.Context, e *logicalplan.Unary, scanners stor
func newStepInvariantExpression(ctx context.Context, e *logicalplan.StepInvariantExpr, scanners storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
switch t := e.Expr.(type) {
case *logicalplan.NumberLiteral:
- return scan.NewNumberLiteralSelector(model.NewVectorPool(opts.StepsBatch), opts, t.Val), nil
+ return scan.NewNumberLiteralSelector(opts, t.Val), nil
}
next, err := newOperator(ctx, e.Expr, scanners, opts.WithEndTime(opts.Start), hints)
if err != nil {
return nil, err
}
- return step_invariant.NewStepInvariantOperator(model.NewVectorPoolWithSize(opts.StepsBatch, 1), next, e.Expr, opts)
+ return step_invariant.NewStepInvariantOperator(next, e.Expr, opts)
}
func newDeduplication(ctx context.Context, e logicalplan.Deduplicate, scanners storage.Scanners, opts *query.Options, hints promstorage.SelectHints) (model.VectorOperator, error) {
@@ -372,8 +372,8 @@ func newDeduplication(ctx context.Context, e logicalplan.Deduplicate, scanners s
}
operators[i] = operator
}
- coalesce := exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, 0, operators...)
- dedup := exchange.NewDedupOperator(model.NewVectorPool(opts.StepsBatch), coalesce, opts)
+ coalesce := exchange.NewCoalesce(opts, 0, operators...)
+ dedup := exchange.NewDedupOperator(coalesce, opts)
return exchange.NewConcurrent(dedup, 2, opts), nil
}
@@ -389,7 +389,7 @@ func newRemoteExecution(ctx context.Context, e logicalplan.RemoteExecution, opts
// We need to set the lookback for the selector to 0 since the remote query already applies one lookback.
selectorOpts := *opts
selectorOpts.LookbackDelta = 0
- remoteExec := remote.NewExecution(qry, model.NewVectorPool(opts.StepsBatch), e.QueryRangeStart, e.QueryRangeEnd, e.Engine.LabelSets(), &selectorOpts, hints)
+ remoteExec := remote.NewExecution(qry, e.QueryRangeStart, e.QueryRangeEnd, e.Engine.LabelSets(), &selectorOpts, hints)
return exchange.NewConcurrent(remoteExec, 2, opts), nil
}
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go b/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go
index e605ab62cf3..deb3e7a503d 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/absent.go
@@ -19,19 +19,16 @@ type absentOperator struct {
once sync.Once
funcExpr *logicalplan.FunctionCall
series []labels.Labels
- pool *model.VectorPool
next model.VectorOperator
}
func newAbsentOperator(
funcExpr *logicalplan.FunctionCall,
- pool *model.VectorPool,
next model.VectorOperator,
opts *query.Options,
) model.VectorOperator {
oper := &absentOperator{
funcExpr: funcExpr,
- pool: pool,
next: next,
}
return telemetry.NewOperator(telemetry.NewTelemetry(oper, opts), oper)
@@ -53,8 +50,6 @@ func (o *absentOperator) Series(_ context.Context) ([]labels.Labels, error) {
func (o *absentOperator) loadSeries() {
// we need to put the filtered labels back for absent to compute its series properly
o.once.Do(func() {
- o.pool.SetStepSize(1)
-
// https://github.com/prometheus/prometheus/blob/df1b4da348a7c2f8c0b294ffa1f05db5f6641278/promql/functions.go#L1857
var lm []*labels.Matcher
switch n := o.funcExpr.Args[0].(type) {
@@ -85,36 +80,27 @@ func (o *absentOperator) loadSeries() {
})
}
-func (o *absentOperator) GetPool() *model.VectorPool {
- return o.pool
-}
-
-func (o *absentOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *absentOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
o.loadSeries()
- vectors, err := o.next.Next(ctx)
+ n, err := o.next.Next(ctx, buf)
if err != nil {
- return nil, err
- }
- if len(vectors) == 0 {
- return nil, nil
+ return 0, err
}
- result := o.GetPool().GetVectorBatch()
- for i := range vectors {
- sv := o.GetPool().GetStepVector(vectors[i].T)
- if len(vectors[i].Samples) == 0 && len(vectors[i].Histograms) == 0 {
- sv.AppendSample(o.GetPool(), 0, 1)
+ for i := range n {
+ vector := &buf[i]
+ isEmpty := len(vector.Samples) == 0 && len(vector.Histograms) == 0
+ vector.Reset(vector.T)
+ if isEmpty {
+ vector.AppendSample(0, 1)
}
- result = append(result, sv)
- o.next.GetPool().PutStepVector(vectors[i])
}
- o.next.GetPool().PutVectors(vectors)
- return result, nil
+ return n, nil
}
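
The per-step rule for `absent` is unchanged by the buffer rewrite: a step with no samples and no histograms yields a single sample of value 1, while a step with any data yields nothing. Reduced to a pure function (histograms omitted for brevity):

```go
package main

import "fmt"

type StepVector struct {
	T       int64
	Samples []float64
}

// absentStep mirrors the operator's per-step behavior: an empty input
// step becomes a single sample with value 1, any data an empty step.
func absentStep(in StepVector) StepVector {
	out := StepVector{T: in.T}
	if len(in.Samples) == 0 {
		out.Samples = []float64{1}
	}
	return out
}

func main() {
	fmt.Println(absentStep(StepVector{T: 10}))                        // {10 [1]}
	fmt.Println(absentStep(StepVector{T: 20, Samples: []float64{5}})) // {20 []}
}
```
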
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go b/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go
index 29d5179e978..b59345643d2 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/histogram.go
@@ -25,9 +25,10 @@ import (
)
type histogramSeries struct {
- outputID int
- upperBound float64
- hasBucketValue bool
+ outputID int
+ upperBound float64
+ hasBucketValue bool
+ bucketLabelValue string // original bucket label value for use in warnings
}
// histogramOperator is a function operator that calculates percentiles.
@@ -35,12 +36,12 @@ type histogramOperator struct {
once sync.Once
series []labels.Labels
- pool *model.VectorPool
- funcName string
- funcArgs logicalplan.Nodes
- vectorOp model.VectorOperator
- scalar1Op model.VectorOperator
- scalar2Op model.VectorOperator
+ funcName string
+ funcArgs logicalplan.Nodes
+ stepsBatch int
+ vectorOp model.VectorOperator
+ scalar1Op model.VectorOperator
+ scalar2Op model.VectorOperator
// scalarPoints is a reusable buffer for points from the first argument of histogram_quantile.
scalar1Points []float64
@@ -56,31 +57,38 @@ type histogramOperator struct {
// seriesBuckets are the buckets for each individual conventional histogram series.
seriesBuckets []promql.Buckets
+
+ // badBucketWarned tracks which series have already emitted bad bucket label warnings.
+ badBucketWarned map[uint64]bool
+
+ vectorBuf []model.StepVector
+ scalar1Buf []model.StepVector
+ scalar2Buf []model.StepVector
}
func newHistogramOperator(
- pool *model.VectorPool,
call *logicalplan.FunctionCall,
nextOps []model.VectorOperator,
+ stepsBatch int,
opts *query.Options,
) model.VectorOperator {
o := &histogramOperator{
- pool: pool,
- funcName: call.Func.Name,
- funcArgs: call.Args,
+ funcName: call.Func.Name,
+ funcArgs: call.Args,
+ stepsBatch: stepsBatch,
}
switch o.funcName {
case "histogram_quantile":
o.scalar1Op = nextOps[0]
o.vectorOp = nextOps[1]
- o.scalar1Points = make([]float64, opts.StepsBatch)
+ o.scalar1Points = make([]float64, stepsBatch)
case "histogram_fraction":
o.scalar1Op = nextOps[0]
o.scalar2Op = nextOps[1]
o.vectorOp = nextOps[2]
- o.scalar1Points = make([]float64, opts.StepsBatch)
- o.scalar2Points = make([]float64, opts.StepsBatch)
+ o.scalar1Points = make([]float64, stepsBatch)
+ o.scalar2Points = make([]float64, stepsBatch)
default:
panic("unsupported function passed")
}
@@ -110,36 +118,43 @@ func (o *histogramOperator) Series(ctx context.Context) ([]labels.Labels, error)
return o.series, nil
}
-func (o *histogramOperator) GetPool() *model.VectorPool {
- return o.pool
-}
-
-func (o *histogramOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *histogramOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
var err error
o.once.Do(func() { err = o.loadSeries(ctx) })
if err != nil {
- return nil, err
+ return 0, err
+ }
+
+ // First, get the vector data to know how many steps this batch covers
+ vectorN, err := o.vectorOp.Next(ctx, o.vectorBuf)
+ if err != nil {
+ return 0, err
+ }
+ if vectorN == 0 {
+ return 0, nil
}
+ // Now get scalar data for the same number of steps
switch o.funcName {
case "histogram_quantile":
- scalars1, err := o.scalar1Op.Next(ctx)
+ scalar1N, err := o.scalar1Op.Next(ctx, o.scalar1Buf[:vectorN])
if err != nil {
- return nil, err
+ return 0, err
}
- if len(scalars1) == 0 {
- return nil, nil
+ if scalar1N == 0 {
+ return 0, nil
}
o.scalar1Points = o.scalar1Points[:0]
- for _, scalar := range scalars1 {
+ for i := range scalar1N {
+ scalar := o.scalar1Buf[i]
if len(scalar.Samples) > 0 {
sample := scalar.Samples[0]
if math.IsNaN(sample) || sample < 0 || sample > 1 {
@@ -147,66 +162,73 @@ func (o *histogramOperator) Next(ctx context.Context) ([]model.StepVector, error
}
o.scalar1Points = append(o.scalar1Points, sample)
}
- o.scalar1Op.GetPool().PutStepVector(scalar)
}
- o.scalar1Op.GetPool().PutVectors(scalars1)
case "histogram_fraction":
- scalars1, err := o.scalar1Op.Next(ctx)
+ scalar1N, err := o.scalar1Op.Next(ctx, o.scalar1Buf[:vectorN])
if err != nil {
- return nil, err
+ return 0, err
}
- if len(scalars1) == 0 {
- return nil, nil
+ if scalar1N == 0 {
+ return 0, nil
}
o.scalar1Points = o.scalar1Points[:0]
- for _, scalar := range scalars1 {
+ for i := range scalar1N {
+ scalar := o.scalar1Buf[i]
if len(scalar.Samples) > 0 {
sample := scalar.Samples[0]
o.scalar1Points = append(o.scalar1Points, sample)
}
- o.scalar1Op.GetPool().PutStepVector(scalar)
}
- o.scalar1Op.GetPool().PutVectors(scalars1)
- scalars2, err := o.scalar2Op.Next(ctx)
+ scalar2N, err := o.scalar2Op.Next(ctx, o.scalar2Buf[:vectorN])
if err != nil {
- return nil, err
+ return 0, err
}
- if len(scalars2) == 0 {
- return nil, nil
+ if scalar2N == 0 {
+ return 0, nil
}
o.scalar2Points = o.scalar2Points[:0]
- for _, scalar := range scalars2 {
+ for i := range scalar2N {
+ scalar := o.scalar2Buf[i]
if len(scalar.Samples) > 0 {
sample := scalar.Samples[0]
o.scalar2Points = append(o.scalar2Points, sample)
}
- o.scalar2Op.GetPool().PutStepVector(scalar)
}
- o.scalar2Op.GetPool().PutVectors(scalars2)
}
- vectors, err := o.vectorOp.Next(ctx)
+ // Process the vector data and write the results into the output buffer
+ vectors := o.vectorBuf[:vectorN]
+ n, err := o.processInputSeries(ctx, vectors, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- return o.processInputSeries(ctx, vectors)
+ return n, nil
}
// nolint: unparam
-func (o *histogramOperator) processInputSeries(ctx context.Context, vectors []model.StepVector) ([]model.StepVector, error) {
- out := o.pool.GetVectorBatch()
+func (o *histogramOperator) processInputSeries(ctx context.Context, vectors []model.StepVector, buf []model.StepVector) (int, error) {
+ n := 0
for stepIndex, vector := range vectors {
+ if n >= len(buf) {
+ break
+ }
o.resetBuckets()
for i, seriesID := range vector.SampleIDs {
outputSeries := o.outputIndex[seriesID]
// This means that it has an invalid `le` label.
if outputSeries == nil || !outputSeries.hasBucketValue {
+ // Emit warning for invalid bucket label (only once per series).
+ if outputSeries != nil && !o.badBucketWarned[uint64(seriesID)] {
+ o.badBucketWarned[uint64(seriesID)] = true
+ metricName := o.inputSeriesNames[seriesID]
+ warnings.AddToContext(annotations.NewBadBucketLabelWarning(metricName, outputSeries.bucketLabelValue, posrange.PositionRange{}), ctx)
+ }
continue
}
@@ -218,7 +240,7 @@ func (o *histogramOperator) processInputSeries(ctx context.Context, vectors []mo
o.seriesBuckets[outputSeriesID] = append(o.seriesBuckets[outputSeriesID], bucket)
}
- step := o.pool.GetStepVector(vector.T)
+ buf[n].Reset(vector.T)
for i, seriesID := range vector.HistogramIDs {
outputSeriesID := o.outputIndex[seriesID].outputID
// We need to check if there is a conventional histogram mapped to this output series ID.
@@ -230,10 +252,10 @@ func (o *histogramOperator) processInputSeries(ctx context.Context, vectors []mo
switch o.funcName {
case "histogram_quantile":
v, annos = promql.HistogramQuantile(o.scalar1Points[stepIndex], vector.Histograms[i], o.inputSeriesNames[seriesID], posrange.PositionRange{})
- step.AppendSample(o.pool, uint64(outputSeriesID), v)
+ buf[n].AppendSample(uint64(outputSeriesID), v)
case "histogram_fraction":
v, annos = promql.HistogramFraction(o.scalar1Points[stepIndex], o.scalar2Points[stepIndex], vector.Histograms[i], o.inputSeriesNames[seriesID], posrange.PositionRange{})
- step.AppendSample(o.pool, uint64(outputSeriesID), v)
+ buf[n].AppendSample(uint64(outputSeriesID), v)
}
warnings.MergeToContext(annos, ctx)
} else {
@@ -247,33 +269,43 @@ func (o *histogramOperator) processInputSeries(ctx context.Context, vectors []mo
if len(stepBuckets) == 0 {
continue
}
- // If there is only bucket or if we are after how many
- // scalar points we have then it needs to be NaN.
- if len(stepBuckets) == 1 || stepIndex >= len(o.scalar1Points) {
- step.AppendSample(o.pool, uint64(i), math.NaN())
+ // If the step index is past the number of scalar points we have, the result is NaN.
+ if stepIndex >= len(o.scalar1Points) {
+ buf[n].AppendSample(uint64(i), math.NaN())
continue
}
switch o.funcName {
case "histogram_quantile":
+ // histogram_quantile needs at least 2 buckets.
+ if len(stepBuckets) == 1 {
+ buf[n].AppendSample(uint64(i), math.NaN())
+ continue
+ }
v, forcedMonotonicity, _ := promql.BucketQuantile(o.scalar1Points[stepIndex], stepBuckets)
- step.AppendSample(o.pool, uint64(i), v)
+ buf[n].AppendSample(uint64(i), v)
if forcedMonotonicity {
warnings.AddToContext(annotations.NewHistogramQuantileForcedMonotonicityInfo(o.inputSeriesNames[i], posrange.PositionRange{}), ctx)
}
case "histogram_fraction":
+ // BucketFraction handles the single-bucket and other edge cases properly.
v := promql.BucketFraction(o.scalar1Points[stepIndex], o.scalar2Points[stepIndex], stepBuckets)
- step.AppendSample(o.pool, uint64(i), v)
+ buf[n].AppendSample(uint64(i), v)
}
}
- out = append(out, step)
- o.vectorOp.GetPool().PutStepVector(vector)
+ n++
}
- o.vectorOp.GetPool().PutVectors(vectors)
- return out, nil
+ return n, nil
}
func (o *histogramOperator) loadSeries(ctx context.Context) error {
+
+ o.vectorBuf = make([]model.StepVector, o.stepsBatch)
+ o.scalar1Buf = make([]model.StepVector, o.stepsBatch)
+ if o.scalar2Op != nil {
+ o.scalar2Buf = make([]model.StepVector, o.stepsBatch)
+ }
+
series, err := o.vectorOp.Series(ctx)
if err != nil {
return err
@@ -318,13 +350,14 @@ func (o *histogramOperator) loadSeries(ctx context.Context) error {
o.inputSeriesNames[i] = s.Get(labels.MetricName)
o.outputIndex[i] = &histogramSeries{
- outputID: seriesID,
- upperBound: value,
- hasBucketValue: hasBucketValue,
+ outputID: seriesID,
+ upperBound: value,
+ hasBucketValue: hasBucketValue,
+ bucketLabelValue: bucketLabel.Value,
}
}
o.seriesBuckets = make([]promql.Buckets, len(o.series))
- o.pool.SetStepSize(len(o.series))
+ o.badBucketWarned = make(map[uint64]bool)
return nil
}
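
The new `badBucketWarned` map exists so that a series with a malformed `le` label produces one annotation for the whole query instead of one per step per batch. The deduplication pattern, reduced to its core:

```go
package main

import "fmt"

func main() {
	warned := make(map[uint64]bool)
	warn := func(seriesID uint64, msg string) {
		if warned[seriesID] {
			return // this series has already been reported
		}
		warned[seriesID] = true
		fmt.Println("warning:", msg)
	}

	// The same series failing on every step still yields one annotation.
	for step := 0; step < 3; step++ {
		warn(7, `bucket label "le" has value "abc", which is not a float`)
	}
}
```
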
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go b/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go
index c07a752ca73..3758d90754f 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/noarg.go
@@ -20,7 +20,6 @@ type noArgFunctionOperator struct {
stepsBatch int
funcExpr *logicalplan.FunctionCall
call noArgFunctionCall
- vectorPool *model.VectorPool
series []labels.Labels
sampleIDs []uint64
}
@@ -37,29 +36,26 @@ func (o *noArgFunctionOperator) Series(_ context.Context) ([]labels.Labels, erro
return o.series, nil
}
-func (o *noArgFunctionOperator) GetPool() *model.VectorPool {
- return o.vectorPool
-}
-
-func (o *noArgFunctionOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *noArgFunctionOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if o.currentStep > o.maxt {
- return nil, nil
+ return 0, nil
}
- ret := o.vectorPool.GetVectorBatch()
- for i := 0; i < o.stepsBatch && o.currentStep <= o.maxt; i++ {
- sv := o.vectorPool.GetStepVector(o.currentStep)
- sv.Samples = []float64{o.call(o.currentStep)}
- sv.SampleIDs = o.sampleIDs
- ret = append(ret, sv)
+ n := 0
+ maxSteps := min(o.stepsBatch, len(buf))
+
+ for i := 0; i < maxSteps && o.currentStep <= o.maxt; i++ {
+ buf[n].Reset(o.currentStep)
+ buf[n].AppendSample(o.sampleIDs[0], o.call(o.currentStep))
+ n++
o.currentStep += o.step
}
- return ret, nil
+ return n, nil
}
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go
index a8a6355bea0..d5d0641b9c9 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/operator.go
@@ -25,15 +25,15 @@ func NewFunctionOperator(funcExpr *logicalplan.FunctionCall, nextOps []model.Vec
// Some functions need to be handled in special operators
switch funcExpr.Func.Name {
case "scalar":
- return newScalarOperator(model.NewVectorPoolWithSize(stepsBatch, 1), nextOps[0], opts), nil
+ return newScalarOperator(nextOps[0], opts), nil
case "timestamp":
return newTimestampOperator(nextOps[0], opts), nil
case "label_join", "label_replace":
return newRelabelOperator(nextOps[0], funcExpr, opts), nil
case "absent":
- return newAbsentOperator(funcExpr, model.NewVectorPool(stepsBatch), nextOps[0], opts), nil
+ return newAbsentOperator(funcExpr, nextOps[0], opts), nil
case "histogram_quantile", "histogram_fraction":
- return newHistogramOperator(model.NewVectorPool(stepsBatch), funcExpr, nextOps, opts), nil
+ return newHistogramOperator(funcExpr, nextOps, stepsBatch, opts), nil
}
// Short-circuit functions that take no args. Their only input is the step's timestamp.
@@ -64,7 +64,6 @@ func newNoArgsFunctionOperator(funcExpr *logicalplan.FunctionCall, stepsBatch in
stepsBatch: stepsBatch,
funcExpr: funcExpr,
call: call,
- vectorPool: model.NewVectorPool(stepsBatch),
}
switch funcExpr.Func.Name {
@@ -87,9 +86,11 @@ type functionOperator struct {
vectorIndex int
nextOps []model.VectorOperator
+ stepsBatch int
call functionCall
scalarPoints [][]float64
+ scalarBuf []model.StepVector
}
func newInstantVectorFunctionOperator(funcExpr *logicalplan.FunctionCall, nextOps []model.VectorOperator, stepsBatch int, opts *query.Options) (model.VectorOperator, error) {
@@ -107,6 +108,7 @@ func newInstantVectorFunctionOperator(funcExpr *logicalplan.FunctionCall, nextOp
call: call,
funcExpr: funcExpr,
vectorIndex: 0,
+ stepsBatch: stepsBatch,
scalarPoints: scalarPoints,
}
@@ -141,85 +143,85 @@ func (o *functionOperator) Series(ctx context.Context) ([]labels.Labels, error)
return o.series, nil
}
-func (o *functionOperator) GetPool() *model.VectorPool {
- return o.nextOps[o.vectorIndex].GetPool()
-}
-
-func (o *functionOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *functionOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if err := o.loadSeries(ctx); err != nil {
- return nil, err
+ return 0, err
}
// Process non-variadic single/multi-arg instant vector and scalar input functions.
// Call next on vector input.
- vectors, err := o.nextOps[o.vectorIndex].Next(ctx)
+ n, err := o.nextOps[o.vectorIndex].Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- if len(vectors) == 0 {
- return nil, nil
+ if n == 0 {
+ return 0, nil
}
+
scalarIndex := 0
for i := range o.nextOps {
if i == o.vectorIndex {
continue
}
- scalarVectors, err := o.nextOps[i].Next(ctx)
+ scalarN, err := o.nextOps[i].Next(ctx, o.scalarBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- for batchIndex := range vectors {
+ for batchIndex := range n {
val := math.NaN()
- if len(scalarVectors) > 0 && len(scalarVectors[batchIndex].Samples) > 0 {
- val = scalarVectors[batchIndex].Samples[0]
- o.nextOps[i].GetPool().PutStepVector(scalarVectors[batchIndex])
+ if batchIndex < scalarN && len(o.scalarBuf[batchIndex].Samples) > 0 {
+ val = o.scalarBuf[batchIndex].Samples[0]
}
o.scalarPoints[batchIndex][scalarIndex] = val
}
- o.nextOps[i].GetPool().PutVectors(scalarVectors)
scalarIndex++
}
- for batchIndex, vector := range vectors {
+
+ for batchIndex := range n {
+ vector := &buf[batchIndex]
i := 0
- for i < len(vectors[batchIndex].Samples) {
+ for i < len(vector.Samples) {
if v, ok := o.call(vector.Samples[i], nil, o.scalarPoints[batchIndex]...); ok {
vector.Samples[i] = v
i++
} else {
// This operator modifies samples directly in the input vector to avoid allocations.
// In case of an invalid output sample, we need to do an in-place removal of the input sample.
- vectors[batchIndex].RemoveSample(i)
+ vector.RemoveSample(i)
}
}
i = 0
- for i < len(vectors[batchIndex].Histograms) {
+ for i < len(vector.Histograms) {
v, ok := o.call(0., vector.Histograms[i], o.scalarPoints[batchIndex]...)
// This operator modifies samples directly in the input vector to avoid allocations.
// All current functions for histograms produce a float64 sample. It's therefore safe to
// always remove the input histogram so that it does not propagate to the output.
- sampleID := vectors[batchIndex].HistogramIDs[i]
- vectors[batchIndex].RemoveHistogram(i)
+ sampleID := vector.HistogramIDs[i]
+ vector.RemoveHistogram(i)
if ok {
- vectors[batchIndex].AppendSample(o.GetPool(), sampleID, v)
+ vector.AppendSample(sampleID, v)
}
}
}
- return vectors, nil
+ return n, nil
}
func (o *functionOperator) loadSeries(ctx context.Context) error {
var err error
o.once.Do(func() {
+
+ o.scalarBuf = make([]model.StepVector, o.stepsBatch)
+
if o.funcExpr.Func.Name == "vector" {
o.series = []labels.Labels{labels.New()}
return
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go b/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go
index 383b89c72a0..ba0aa299c19 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/relabel.go
@@ -52,12 +52,8 @@ func (o *relabelOperator) Series(ctx context.Context) ([]labels.Labels, error) {
return o.series, err
}
-func (o *relabelOperator) GetPool() *model.VectorPool {
- return o.next.GetPool()
-}
-
-func (o *relabelOperator) Next(ctx context.Context) ([]model.StepVector, error) {
- return o.next.Next(ctx)
+func (o *relabelOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
+ return o.next.Next(ctx, buf)
}
func (o *relabelOperator) loadSeries(ctx context.Context) (err error) {
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go b/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go
index 97e56be9a4d..16d25d3774b 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/scalar.go
@@ -15,13 +15,11 @@ import (
)
type scalarOperator struct {
- pool *model.VectorPool
next model.VectorOperator
}
-func newScalarOperator(pool *model.VectorPool, next model.VectorOperator, opts *query.Options) model.VectorOperator {
+func newScalarOperator(next model.VectorOperator, opts *query.Options) model.VectorOperator {
oper := &scalarOperator{
- pool: pool,
next: next,
}
@@ -40,37 +38,29 @@ func (o *scalarOperator) Series(ctx context.Context) ([]labels.Labels, error) {
return nil, nil
}
-func (o *scalarOperator) GetPool() *model.VectorPool {
- return o.pool
-}
-
-func (o *scalarOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *scalarOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
- in, err := o.next.Next(ctx)
+ n, err := o.next.Next(ctx, buf)
if err != nil {
- return nil, err
- }
- if len(in) == 0 {
- return nil, nil
+ return 0, err
}
- result := o.GetPool().GetVectorBatch()
- for _, vector := range in {
- sv := o.GetPool().GetStepVector(vector.T)
- if len(vector.Samples) != 1 {
- sv.AppendSample(o.GetPool(), 0, math.NaN())
+ for i := range n {
+ vector := &buf[i]
+ var val float64
+ if len(vector.Samples) == 1 {
+ val = vector.Samples[0]
} else {
- sv.AppendSample(o.GetPool(), 0, vector.Samples[0])
+ val = math.NaN()
}
- result = append(result, sv)
- o.next.GetPool().PutStepVector(vector)
+ vector.Reset(vector.T)
+ vector.AppendSample(0, val)
}
- o.next.GetPool().PutVectors(in)
- return result, nil
+ return n, nil
}
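
The scalar semantics themselves are untouched by the rewrite: a step collapses to its sample when exactly one is present and to NaN otherwise. As a pure function:

```go
package main

import (
	"fmt"
	"math"
)

// scalarValue mirrors the operator's rule: a vector becomes a scalar
// only when it holds exactly one sample; anything else is NaN.
func scalarValue(samples []float64) float64 {
	if len(samples) == 1 {
		return samples[0]
	}
	return math.NaN()
}

func main() {
	fmt.Println(scalarValue([]float64{3}))    // 3
	fmt.Println(scalarValue(nil))             // NaN
	fmt.Println(scalarValue([]float64{1, 2})) // NaN
}
```
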
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go b/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go
index 997ea1c8ad5..514cfadb49c 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/function/timestamp.go
@@ -64,25 +64,22 @@ func (o *timestampOperator) loadSeries(ctx context.Context) error {
return err
}
-func (o *timestampOperator) GetPool() *model.VectorPool {
- return o.next.GetPool()
-}
-
-func (o *timestampOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *timestampOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
- in, err := o.next.Next(ctx)
+ n, err := o.next.Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- for _, vector := range in {
- for i := range vector.Samples {
- vector.Samples[i] = float64(vector.T / 1000)
+ for i := range n {
+ vector := &buf[i]
+ for j := range vector.Samples {
+ vector.Samples[j] = float64(vector.T / 1000)
}
}
- return in, nil
+ return n, nil
}
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go
index 12f82d66a14..aea70b74ab2 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/model/operator.go
@@ -13,16 +13,16 @@ import (
// VectorOperator performs operations on series in step by step fashion.
type VectorOperator interface {
// Next yields vectors of samples from all series for one or more execution steps.
- Next(ctx context.Context) ([]StepVector, error)
+ // The caller provides a buffer (buf) to be filled with StepVectors.
+ // Returns the number of StepVectors written to buf and any error encountered.
+ // A return value of 0 indicates no more data is available.
+ Next(ctx context.Context, buf []StepVector) (int, error)
// Series returns all series that the operator will process during Next results.
// The result can be used by upstream operators to allocate output tables and buffers
// before starting to process samples.
Series(ctx context.Context) ([]labels.Labels, error)
- // GetPool returns pool of vectors that can be shared across operators.
- GetPool() *VectorPool
-
// Explain returns human-readable explanation of the current operator and optional nested operators.
Explain() (next []VectorOperator)
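
This interface change is the heart of the patch: instead of returning pool-managed batches, `Next` fills a caller-provided buffer and reports how many steps it wrote, with 0 signalling exhaustion. A minimal sketch of the calling convention (simplified stand-ins, not the vendored types):

```go
package main

import (
	"context"
	"fmt"
)

// StepVector is a simplified stand-in for model.StepVector.
type StepVector struct {
	T       int64
	Samples []float64
}

// countdown is a toy operator emitting one step per buffer slot until done.
type countdown struct{ remaining int64 }

func (c *countdown) Next(_ context.Context, buf []StepVector) (int, error) {
	n := 0
	for n < len(buf) && c.remaining > 0 {
		buf[n] = StepVector{T: c.remaining, Samples: []float64{1}}
		c.remaining--
		n++
	}
	return n, nil // n == 0 means no more data
}

func main() {
	op := &countdown{remaining: 5}
	buf := make([]StepVector, 3) // allocated once, sized to the steps batch
	for {
		n, err := op.Next(context.Background(), buf)
		if err != nil || n == 0 {
			break
		}
		for _, sv := range buf[:n] {
			fmt.Println(sv.T, sv.Samples)
		}
	}
}
```
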
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/model/pool.go b/vendor/github.com/thanos-io/promql-engine/execution/model/pool.go
deleted file mode 100644
index 521be5ffe86..00000000000
--- a/vendor/github.com/thanos-io/promql-engine/execution/model/pool.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) The Thanos Community Authors.
-// Licensed under the Apache License 2.0.
-
-package model
-
-import (
- "sync"
-
- "github.com/prometheus/prometheus/model/histogram"
-)
-
-type VectorPool struct {
- vectors sync.Pool
-
- stepSize int
- samples sync.Pool
- sampleIDs sync.Pool
- histograms sync.Pool
-}
-
-func NewVectorPoolWithSize(stepsBatch, size int) *VectorPool {
- pool := NewVectorPool(stepsBatch)
- pool.SetStepSize(size)
- return pool
-}
-
-func NewVectorPool(stepsBatch int) *VectorPool {
- pool := &VectorPool{}
- pool.vectors = sync.Pool{
- New: func() any {
- sv := make([]StepVector, 0, stepsBatch)
- return &sv
- },
- }
- pool.samples = sync.Pool{
- New: func() any {
- samples := make([]float64, 0, pool.stepSize)
- return &samples
- },
- }
- pool.sampleIDs = sync.Pool{
- New: func() any {
- sampleIDs := make([]uint64, 0, pool.stepSize)
- return &sampleIDs
- },
- }
- pool.histograms = sync.Pool{
- New: func() any {
- histograms := make([]*histogram.FloatHistogram, pool.stepSize)[:0]
- return &histograms
- },
- }
-
- return pool
-}
-
-func (p *VectorPool) GetVectorBatch() []StepVector {
- return *p.vectors.Get().(*[]StepVector)
-}
-
-func (p *VectorPool) PutVectors(vector []StepVector) {
- vector = vector[:0]
- p.vectors.Put(&vector)
-}
-
-func (p *VectorPool) GetStepVector(t int64) StepVector {
- return StepVector{T: t}
-}
-
-func (p *VectorPool) getSampleBuffers() ([]uint64, []float64) {
- return *p.sampleIDs.Get().(*[]uint64), *p.samples.Get().(*[]float64)
-}
-
-func (p *VectorPool) getHistogramBuffers() ([]uint64, []*histogram.FloatHistogram) {
- return *p.sampleIDs.Get().(*[]uint64), *p.histograms.Get().(*[]*histogram.FloatHistogram)
-}
-
-func (p *VectorPool) PutStepVector(v StepVector) {
- if v.SampleIDs != nil {
- v.SampleIDs = v.SampleIDs[:0]
- p.sampleIDs.Put(&v.SampleIDs)
-
- v.Samples = v.Samples[:0]
- p.samples.Put(&v.Samples)
- }
-
- if v.HistogramIDs != nil {
- v.Histograms = v.Histograms[:0]
- p.histograms.Put(&v.Histograms)
-
- v.HistogramIDs = v.HistogramIDs[:0]
- p.sampleIDs.Put(&v.HistogramIDs)
- }
-}
-
-func (p *VectorPool) SetStepSize(n int) {
- p.stepSize = n
-}
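
With every operator now writing into caller-owned buffers, the `sync.Pool`-based recycling above has no remaining users; reuse instead comes from `StepVector.Reset`, which truncates the slices while keeping their backing arrays. A small sketch of why that suffices (simplified type mirroring the new `Reset`):

```go
package main

import "fmt"

// StepVector mirrors the shape of model.StepVector (simplified).
type StepVector struct {
	T         int64
	SampleIDs []uint64
	Samples   []float64
}

// Reset truncates the slices but keeps their backing arrays, which is
// what makes repeated reuse of a buffer cheap without a pool.
func (s *StepVector) Reset(t int64) {
	s.T = t
	s.SampleIDs = s.SampleIDs[:0]
	s.Samples = s.Samples[:0]
}

func main() {
	sv := StepVector{T: 1, SampleIDs: []uint64{1, 2}, Samples: []float64{3, 4}}
	before := cap(sv.Samples)
	sv.Reset(2)
	// Length drops to zero, capacity survives: the next appends
	// write into the same memory.
	fmt.Println(len(sv.Samples), cap(sv.Samples) == before) // 0 true
}
```
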
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/model/vector.go b/vendor/github.com/thanos-io/promql-engine/execution/model/vector.go
index 6c324639e26..185717e732b 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/model/vector.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/model/vector.go
@@ -26,21 +26,48 @@ type StepVector struct {
Histograms []*histogram.FloatHistogram
}
-func (s *StepVector) AppendSample(pool *VectorPool, id uint64, val float64) {
- if s.Samples == nil {
- s.SampleIDs, s.Samples = pool.getSampleBuffers()
+// Reset resets the StepVector to the given timestamp while preserving slice capacity.
+func (s *StepVector) Reset(t int64) {
+ s.T = t
+ if s.SampleIDs != nil {
+ s.SampleIDs = s.SampleIDs[:0]
+ }
+ if s.Samples != nil {
+ s.Samples = s.Samples[:0]
+ }
+ if s.HistogramIDs != nil {
+ s.HistogramIDs = s.HistogramIDs[:0]
+ }
+ if s.Histograms != nil {
+ s.Histograms = s.Histograms[:0]
+ }
+}
+
+func (s *StepVector) AppendSample(id uint64, val float64) {
+ s.SampleIDs = append(s.SampleIDs, id)
+ s.Samples = append(s.Samples, val)
+}
+
+// AppendSampleWithSizeHint appends a sample and lazily pre-allocates capacity if needed.
+// Use this when you know the expected number of samples to avoid repeated slice growth.
+func (s *StepVector) AppendSampleWithSizeHint(id uint64, val float64, hint int) {
+ if s.SampleIDs == nil || cap(s.SampleIDs) < hint {
+ newSampleIDs := make([]uint64, len(s.SampleIDs), hint)
+ copy(newSampleIDs, s.SampleIDs)
+ s.SampleIDs = newSampleIDs
+
+ newSamples := make([]float64, len(s.Samples), hint)
+ copy(newSamples, s.Samples)
+ s.Samples = newSamples
}
s.SampleIDs = append(s.SampleIDs, id)
s.Samples = append(s.Samples, val)
}
-func (s *StepVector) AppendSamples(pool *VectorPool, ids []uint64, vals []float64) {
+func (s *StepVector) AppendSamples(ids []uint64, vals []float64) {
if len(ids) == 0 && len(vals) == 0 {
return
}
- if s.Samples == nil {
- s.SampleIDs, s.Samples = pool.getSampleBuffers()
- }
s.SampleIDs = append(s.SampleIDs, ids...)
s.Samples = append(s.Samples, vals...)
}
@@ -50,21 +77,31 @@ func (s *StepVector) RemoveSample(index int) {
s.SampleIDs = slices.Delete(s.SampleIDs, index, index+1)
}
-func (s *StepVector) AppendHistogram(pool *VectorPool, histogramID uint64, h *histogram.FloatHistogram) {
- if s.Histograms == nil {
- s.HistogramIDs, s.Histograms = pool.getHistogramBuffers()
+func (s *StepVector) AppendHistogram(histogramID uint64, h *histogram.FloatHistogram) {
+ s.HistogramIDs = append(s.HistogramIDs, histogramID)
+ s.Histograms = append(s.Histograms, h)
+}
+
+// AppendHistogramWithSizeHint appends a histogram and lazily pre-allocates capacity if needed.
+// Use this when you know the expected number of histograms to avoid repeated slice growth.
+func (s *StepVector) AppendHistogramWithSizeHint(histogramID uint64, h *histogram.FloatHistogram, hint int) {
+ if s.HistogramIDs == nil || cap(s.HistogramIDs) < hint {
+ newHistogramIDs := make([]uint64, len(s.HistogramIDs), hint)
+ copy(newHistogramIDs, s.HistogramIDs)
+ s.HistogramIDs = newHistogramIDs
+
+ newHistograms := make([]*histogram.FloatHistogram, len(s.Histograms), hint)
+ copy(newHistograms, s.Histograms)
+ s.Histograms = newHistograms
}
s.HistogramIDs = append(s.HistogramIDs, histogramID)
s.Histograms = append(s.Histograms, h)
}
-func (s *StepVector) AppendHistograms(pool *VectorPool, histogramIDs []uint64, hs []*histogram.FloatHistogram) {
+func (s *StepVector) AppendHistograms(histogramIDs []uint64, hs []*histogram.FloatHistogram) {
if len(histogramIDs) == 0 && len(hs) == 0 {
return
}
- if s.Histograms == nil {
- s.HistogramIDs, s.Histograms = pool.getHistogramBuffers()
- }
s.HistogramIDs = append(s.HistogramIDs, histogramIDs...)
s.Histograms = append(s.Histograms, hs...)
}
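
The size-hint variants take over the role of the pool's `SetStepSize`: the first append grows both slices to the hinted capacity in a single allocation, and subsequent appends reuse it. A self-contained sketch mirroring the vendored helper:

```go
package main

import "fmt"

type StepVector struct {
	SampleIDs []uint64
	Samples   []float64
}

// AppendSampleWithSizeHint grows the slices to the hinted capacity once,
// then appends without further reallocation.
func (s *StepVector) AppendSampleWithSizeHint(id uint64, val float64, hint int) {
	if s.SampleIDs == nil || cap(s.SampleIDs) < hint {
		ids := make([]uint64, len(s.SampleIDs), hint)
		copy(ids, s.SampleIDs)
		s.SampleIDs = ids

		vals := make([]float64, len(s.Samples), hint)
		copy(vals, s.Samples)
		s.Samples = vals
	}
	s.SampleIDs = append(s.SampleIDs, id)
	s.Samples = append(s.Samples, val)
}

func main() {
	var sv StepVector
	for i := 0; i < 100; i++ {
		sv.AppendSampleWithSizeHint(uint64(i), float64(i), 100)
	}
	fmt.Println(len(sv.Samples), cap(sv.Samples)) // 100 100: one allocation
}
```
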
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/noop/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/noop/operator.go
index 8f10b4b7823..5b4f03dc352 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/noop/operator.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/noop/operator.go
@@ -19,10 +19,13 @@ type operator struct {
func NewOperator(opts *query.Options) model.VectorOperator {
scanner := prometheus.NewVectorSelector(
- model.NewVectorPool(0),
noopSelector{},
opts,
- 0, 0, false, 0, 1,
+ 0, // offset
+ 0, // batchSize
+ false, // selectTimestamp
+ 0, // shard
+ 1, // numShards
)
return &operator{VectorOperator: scanner}
}
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go b/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go
index 7e869c3b73e..fa71bef077c 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/remote/operator.go
@@ -32,7 +32,7 @@ type Execution struct {
vectorSelector model.VectorOperator
}
-func NewExecution(query promql.Query, pool *model.VectorPool, queryRangeStart, queryRangeEnd time.Time, engineLabels []labels.Labels, opts *query.Options, _ storage.SelectHints) model.VectorOperator {
+func NewExecution(query promql.Query, queryRangeStart, queryRangeEnd time.Time, engineLabels []labels.Labels, opts *query.Options, _ storage.SelectHints) model.VectorOperator {
storage := newStorageFromQuery(query, opts, engineLabels)
oper := &Execution{
storage: storage,
@@ -40,7 +40,7 @@ func NewExecution(query promql.Query, pool *model.VectorPool, queryRangeStart, q
opts: opts,
queryRangeStart: queryRangeStart,
queryRangeEnd: queryRangeEnd,
- vectorSelector: promstorage.NewVectorSelector(pool, storage, opts, 0, 0, false, 0, 1),
+ vectorSelector: promstorage.NewVectorSelector(storage, opts, 0, 0, false, 0, 1),
}
return telemetry.NewOperator(telemetry.NewTelemetry(oper, opts), oper)
@@ -58,19 +58,15 @@ func (e *Execution) String() string {
return fmt.Sprintf("[remoteExec] %s (%d, %d)", e.query, e.queryRangeStart.Unix(), e.queryRangeEnd.Unix())
}
-func (e *Execution) Next(ctx context.Context) ([]model.StepVector, error) {
- next, err := e.vectorSelector.Next(ctx)
- if next == nil {
+func (e *Execution) Next(ctx context.Context, buf []model.StepVector) (int, error) {
+ n, err := e.vectorSelector.Next(ctx, buf)
+ if n == 0 {
// Closing the storage prematurely can lead to results from the query
// engine to be recycled. Because of this, we close the storage only
// when we are done with processing all samples returned by the query.
e.storage.Close()
}
- return next, err
-}
-
-func (e *Execution) GetPool() *model.VectorPool {
- return e.vectorSelector.GetPool()
+ return n, err
}
func (e *Execution) Explain() (next []model.VectorOperator) {
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go
index f3387dc8797..7b8aba70b9f 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/literal_selector.go
@@ -17,8 +17,6 @@ import (
// numberLiteralSelector returns []model.StepVector with same sample value across time range.
type numberLiteralSelector struct {
- vectorPool *model.VectorPool
-
numSteps int
mint int64
maxt int64
@@ -30,10 +28,9 @@ type numberLiteralSelector struct {
val float64
}
-func NewNumberLiteralSelector(pool *model.VectorPool, opts *query.Options, val float64) model.VectorOperator {
+func NewNumberLiteralSelector(opts *query.Options, val float64) model.VectorOperator {
oper := &numberLiteralSelector{
- vectorPool: pool,
- numSteps: opts.NumSteps(),
+ numSteps: opts.NumStepsPerBatch(),
mint: opts.Start.UnixMilli(),
maxt: opts.End.UnixMilli(),
step: opts.Step.Milliseconds(),
@@ -57,31 +54,27 @@ func (o *numberLiteralSelector) Series(context.Context) ([]labels.Labels, error)
return o.series, nil
}
-func (o *numberLiteralSelector) GetPool() *model.VectorPool {
- return o.vectorPool
-}
-
-func (o *numberLiteralSelector) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *numberLiteralSelector) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if o.currentStep > o.maxt {
- return nil, nil
+ return 0, nil
}
o.loadSeries()
ts := o.currentStep
- vectors := o.vectorPool.GetVectorBatch()
- for currStep := 0; currStep < o.numSteps && ts <= o.maxt; currStep++ {
- stepVector := o.vectorPool.GetStepVector(ts)
- stepVector.AppendSample(o.vectorPool, 0, o.val)
- vectors = append(vectors, stepVector)
+ n := 0
+ for n < len(buf) && n < o.numSteps && ts <= o.maxt {
+ buf[n].Reset(ts)
+ buf[n].AppendSample(0, o.val)
ts += o.step
+ n++
}
// For instant queries, set the step to a positive value
@@ -89,15 +82,13 @@ func (o *numberLiteralSelector) Next(ctx context.Context) ([]model.StepVector, e
if o.step == 0 {
o.step = 1
}
- o.currentStep += o.step * int64(o.numSteps)
+ o.currentStep += o.step * int64(n)
- return vectors, nil
+ return n, nil
}
func (o *numberLiteralSelector) loadSeries() {
- // If number literal is included within function, []labels.labels must be initialized.
o.once.Do(func() {
- o.series = make([]labels.Labels, 1)
- o.vectorPool.SetStepSize(len(o.series))
+ o.series = []labels.Labels{{}}
})
}
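
One subtle fix here: the cursor now advances by the number of steps actually written (`n`) rather than by the full `numSteps` budget, so a caller buffer smaller than the batch no longer skips steps. A worked example of the loop shape under those assumptions:

```go
package main

import "fmt"

func main() {
	const (
		maxt     = 100
		step     = 10
		numSteps = 8 // per-batch step budget
	)
	currentStep := int64(0)
	buf := make([]int64, 4) // caller buffer smaller than the budget
	for currentStep <= maxt {
		n := 0
		ts := currentStep
		for n < len(buf) && n < numSteps && ts <= maxt {
			buf[n] = ts
			ts += step
			n++
		}
		// Advancing by n (not numSteps) keeps the cursor aligned with
		// what was actually emitted when the buffer is the limit.
		currentStep += step * int64(n)
		fmt.Println(buf[:n]) // [0 10 20 30] [40 50 60 70] [80 90 100]
	}
}
```
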
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go b/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go
index 8d5c55300a0..90f319572e0 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/scan/subquery.go
@@ -21,23 +21,20 @@ import (
)
type subqueryOperator struct {
+ next model.VectorOperator
+ paramOp model.VectorOperator
+ paramOp2 model.VectorOperator
+ call ringbuffer.FunctionCall
telemetry telemetry.OperatorTelemetry
+ funcExpr *logicalplan.FunctionCall
+ subQuery *logicalplan.Subquery
+ opts *query.Options
- next model.VectorOperator
- paramOp model.VectorOperator
- paramOp2 model.VectorOperator
-
- pool *model.VectorPool
- call ringbuffer.FunctionCall
mint int64
maxt int64
currentStep int64
step int64
stepsBatch int
- opts *query.Options
-
- funcExpr *logicalplan.FunctionCall
- subQuery *logicalplan.Subquery
onceSeries sync.Once
series []labels.Labels
@@ -51,9 +48,13 @@ type subqueryOperator struct {
// double_exponential_smoothing uses two (params, params2) for (sf, tf)
params []float64
params2 []float64
+
+ paramBuf []model.StepVector
+ param2Buf []model.StepVector
+ tempBuf []model.StepVector
}
-func NewSubqueryOperator(pool *model.VectorPool, next, paramOp, paramOp2 model.VectorOperator, opts *query.Options, funcExpr *logicalplan.FunctionCall, subQuery *logicalplan.Subquery) (model.VectorOperator, error) {
+func NewSubqueryOperator(next, paramOp, paramOp2 model.VectorOperator, opts *query.Options, funcExpr *logicalplan.FunctionCall, subQuery *logicalplan.Subquery) (model.VectorOperator, error) {
call, err := ringbuffer.NewRangeVectorFunc(funcExpr.Func.Name)
if err != nil {
return nil, err
@@ -68,7 +69,6 @@ func NewSubqueryOperator(pool *model.VectorPool, next, paramOp, paramOp2 model.V
paramOp: paramOp,
paramOp2: paramOp2,
call: call,
- pool: pool,
funcExpr: funcExpr,
subQuery: subQuery,
opts: opts,
@@ -100,53 +100,51 @@ func (o *subqueryOperator) Explain() (next []model.VectorOperator) {
}
}
-func (o *subqueryOperator) GetPool() *model.VectorPool { return o.pool }
-
-func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *subqueryOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if o.currentStep > o.maxt {
- return nil, nil
+ return 0, nil
}
if err := o.initSeries(ctx); err != nil {
- return nil, err
+ return 0, err
}
if o.paramOp != nil {
- args, err := o.paramOp.Next(ctx)
+ n, err := o.paramOp.Next(ctx, o.paramBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- for i := range args {
+ for i := range n {
o.params[i] = math.NaN()
- if len(args[i].Samples) == 1 {
- o.params[i] = args[i].Samples[0]
+ if len(o.paramBuf[i].Samples) == 1 {
+ o.params[i] = o.paramBuf[i].Samples[0]
}
- o.paramOp.GetPool().PutStepVector(args[i])
}
- o.paramOp.GetPool().PutVectors(args)
}
if o.paramOp2 != nil { // double_exponential_smoothing
- args, err := o.paramOp2.Next(ctx)
+ n, err := o.paramOp2.Next(ctx, o.param2Buf)
if err != nil {
- return nil, err
+ return 0, err
}
- for i := range args {
+ for i := range n {
o.params2[i] = math.NaN()
- if len(args[i].Samples) == 1 {
- o.params2[i] = args[i].Samples[0]
+ if len(o.param2Buf[i].Samples) == 1 {
+ o.params2[i] = o.param2Buf[i].Samples[0]
}
- o.paramOp2.GetPool().PutStepVector(args[i])
}
- o.paramOp2.GetPool().PutVectors(args)
}
- res := o.pool.GetVectorBatch()
- for i := 0; o.currentStep <= o.maxt && i < o.stepsBatch; i++ {
+ n := 0
+ maxSteps := min(o.stepsBatch, len(buf))
+
+ for i := 0; o.currentStep <= o.maxt && i < maxSteps; i++ {
mint := o.currentStep - o.subQuery.Range.Milliseconds() - o.subQuery.OriginalOffset.Milliseconds() + 1
maxt := o.currentStep - o.subQuery.OriginalOffset.Milliseconds()
for _, b := range o.buffers {
@@ -161,7 +159,7 @@ func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error)
o.lastCollected++
}
if o.lastCollected == len(o.lastVectors)-1 {
- o.next.GetPool().PutVectors(o.lastVectors)
o.lastVectors = nil
o.lastCollected = -1
}
@@ -169,13 +167,14 @@ func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error)
ACC:
for len(o.lastVectors) == 0 {
- vectors, err := o.next.Next(ctx)
+ vecN, err := o.next.Next(ctx, o.tempBuf)
if err != nil {
- return nil, err
+ return 0, err
}
- if len(vectors) == 0 {
+ if vecN == 0 {
break ACC
}
+ vectors := o.tempBuf[:vecN]
for j, vector := range vectors {
if vector.T > maxt {
o.lastVectors = vectors
@@ -184,34 +183,35 @@ func (o *subqueryOperator) Next(ctx context.Context) ([]model.StepVector, error)
}
o.collect(vector, mint)
}
- o.next.GetPool().PutVectors(vectors)
}
- sv := o.pool.GetStepVector(o.currentStep)
+ buf[n].Reset(o.currentStep)
+ hint := len(o.buffers)
for sampleId, rangeSamples := range o.buffers {
- f, h, ok, err := rangeSamples.Eval(ctx, o.params[i], o.params2[i], math.MinInt64)
+ f, h, ok, _, err := rangeSamples.Eval(ctx, o.params[i], o.params2[i], math.MinInt64)
if err != nil {
- return nil, err
+ return 0, err
}
+ // Note: warnings from subqueries are currently ignored since we don't have metric names here
if ok {
if h != nil {
- sv.AppendHistogram(o.pool, uint64(sampleId), h)
+ buf[n].AppendHistogramWithSizeHint(uint64(sampleId), h, hint)
} else {
- sv.AppendSample(o.pool, uint64(sampleId), f)
+ buf[n].AppendSampleWithSizeHint(uint64(sampleId), f, hint)
}
}
- o.telemetry.IncrementSamplesAtTimestamp(rangeSamples.SampleCount(), sv.T)
+ o.telemetry.IncrementSamplesAtTimestamp(rangeSamples.SampleCount(), buf[n].T)
}
- res = append(res, sv)
+ n++
o.currentStep += o.step
}
- return res, nil
+ return n, nil
}
func (o *subqueryOperator) collect(v model.StepVector, mint int64) {
if v.T < mint {
- o.next.GetPool().PutStepVector(v)
return
}
for i, s := range v.Samples {
@@ -246,7 +246,7 @@ func (o *subqueryOperator) collect(v model.StepVector, mint int64) {
}
buffer.Push(v.T, ringbuffer.Value{H: s})
}
- o.next.GetPool().PutStepVector(v)
}
func (o *subqueryOperator) Series(ctx context.Context) ([]labels.Labels, error) {
@@ -259,6 +259,15 @@ func (o *subqueryOperator) Series(ctx context.Context) ([]labels.Labels, error)
func (o *subqueryOperator) initSeries(ctx context.Context) error {
var err error
o.onceSeries.Do(func() {
+
+ o.tempBuf = make([]model.StepVector, o.stepsBatch)
+ if o.paramOp != nil {
+ o.paramBuf = make([]model.StepVector, o.stepsBatch)
+ }
+ if o.paramOp2 != nil {
+ o.param2Buf = make([]model.StepVector, o.stepsBatch)
+ }
+
var series []labels.Labels
series, err = o.next.Series(ctx)
if err != nil {
@@ -278,7 +287,7 @@ func (o *subqueryOperator) initSeries(ctx context.Context) error {
}
o.series[i] = lbls
}
- o.pool.SetStepSize(len(o.series))
})
return err
}
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go b/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go
index 6a1c78f3245..1c80c4b9411 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/step_invariant/step_invariant.go
@@ -17,7 +17,6 @@ import (
)
type stepInvariantOperator struct {
- vectorPool *model.VectorPool
next model.VectorOperator
cacheResult bool
@@ -42,14 +41,12 @@ func (u *stepInvariantOperator) String() string {
}
func NewStepInvariantOperator(
- pool *model.VectorPool,
next model.VectorOperator,
expr logicalplan.Node,
opts *query.Options,
) (model.VectorOperator, error) {
// We set interval to be at least 1.
u := &stepInvariantOperator{
- vectorPool: pool,
next: next,
currentStep: opts.Start.UnixMilli(),
mint: opts.Start.UnixMilli(),
@@ -75,7 +72,6 @@ func (u *stepInvariantOperator) Series(ctx context.Context) ([]labels.Labels, er
var err error
u.seriesOnce.Do(func() {
u.series, err = u.next.Series(ctx)
- u.vectorPool.SetStepSize(len(u.series))
})
if err != nil {
return nil, err
@@ -83,57 +79,56 @@ func (u *stepInvariantOperator) Series(ctx context.Context) ([]labels.Labels, er
return u.series, nil
}
-func (u *stepInvariantOperator) GetPool() *model.VectorPool {
- return u.vectorPool
-}
-
-func (u *stepInvariantOperator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (u *stepInvariantOperator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if u.currentStep > u.maxt {
- return nil, nil
+ return 0, nil
}
if !u.cacheResult {
- return u.next.Next(ctx)
+ return u.next.Next(ctx, buf)
}
if err := u.cacheInputVector(ctx); err != nil {
- return nil, err
+ return 0, err
}
- result := u.vectorPool.GetVectorBatch()
- for i := 0; i < u.stepsBatch && u.currentStep <= u.maxt; i++ {
- outVector := u.vectorPool.GetStepVector(u.currentStep)
- outVector.AppendSamples(u.vectorPool, u.cachedVector.SampleIDs, u.cachedVector.Samples)
- outVector.AppendHistograms(u.vectorPool, u.cachedVector.HistogramIDs, u.cachedVector.Histograms)
- result = append(result, outVector)
+ n := 0
+ maxSteps := min(u.stepsBatch, len(buf))
+
+ for i := 0; i < maxSteps && u.currentStep <= u.maxt; i++ {
+ buf[n].Reset(u.currentStep)
+ buf[n].AppendSamples(u.cachedVector.SampleIDs, u.cachedVector.Samples)
+ buf[n].AppendHistograms(u.cachedVector.HistogramIDs, u.cachedVector.Histograms)
+ n++
u.currentStep += u.step
}
- return result, nil
+ return n, nil
}
func (u *stepInvariantOperator) cacheInputVector(ctx context.Context) error {
var err error
- var in []model.StepVector
u.cacheVectorOnce.Do(func() {
- in, err = u.next.Next(ctx)
- if err != nil {
+ // Create a temporary buffer for reading one vector
+ tempBuf := make([]model.StepVector, 1)
+ n, readErr := u.next.Next(ctx, tempBuf)
+ if readErr != nil {
+ err = readErr
return
}
- defer u.next.GetPool().PutVectors(in)
- if len(in) == 0 || (len(in[0].Samples) == 0 && len(in[0].Histograms) == 0) {
+ if n == 0 || (len(tempBuf[0].Samples) == 0 && len(tempBuf[0].Histograms) == 0) {
return
}
// Make sure we only have exactly one step vector.
- if len(in) != 1 {
+ if n != 1 {
err = errors.New("unexpected number of samples")
return
}
@@ -141,10 +136,9 @@ func (u *stepInvariantOperator) cacheInputVector(ctx context.Context) error {
// Copy the evaluated step vector.
// The timestamp of the vector is not relevant since we will produce
// new output vectors with the current step's timestamp.
- u.cachedVector = u.vectorPool.GetStepVector(0)
- u.cachedVector.AppendSamples(u.vectorPool, in[0].SampleIDs, in[0].Samples)
- u.cachedVector.AppendHistograms(u.vectorPool, in[0].HistogramIDs, in[0].Histograms)
- u.next.GetPool().PutStepVector(in[0])
+ u.cachedVector = model.StepVector{T: 0}
+ u.cachedVector.AppendSamples(tempBuf[0].SampleIDs, tempBuf[0].Samples)
+ u.cachedVector.AppendHistograms(tempBuf[0].HistogramIDs, tempBuf[0].Histograms)
})
return err
}
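
The caching strategy is unchanged in spirit: the inner expression is evaluated once into a timestamp-less vector, and every output step re-stamps a copy of it. A simplified sketch:

```go
package main

import "fmt"

type StepVector struct {
	T       int64
	Samples []float64
}

func main() {
	// The step-invariant inner expression is evaluated exactly once...
	cached := StepVector{T: 0, Samples: []float64{42}}

	// ...and fanned out to every output step with a fresh timestamp.
	buf := make([]StepVector, 3)
	current, step := int64(100), int64(10)
	for i := range buf {
		buf[i] = StepVector{
			T:       current,
			Samples: append([]float64(nil), cached.Samples...),
		}
		current += step
	}
	fmt.Println(buf) // [{100 [42]} {110 [42]} {120 [42]}]
}
```
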
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/telemetry/telemetry.go b/vendor/github.com/thanos-io/promql-engine/execution/telemetry/telemetry.go
index ca486eb2e49..10cb1c8b850 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/telemetry/telemetry.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/telemetry/telemetry.go
@@ -203,7 +203,7 @@ func (t *Operator) Series(ctx context.Context) ([]labels.Labels, error) {
return s, err
}
-func (t *Operator) Next(ctx context.Context) ([]model.StepVector, error) {
+func (t *Operator) Next(ctx context.Context, buf []model.StepVector) (int, error) {
start := time.Now()
var totalSamplesBeforeCount int64
totalSamplesBefore := t.OperatorTelemetry.Samples()
@@ -214,9 +214,9 @@ func (t *Operator) Next(ctx context.Context) ([]model.StepVector, error) {
}
defer func() { t.OperatorTelemetry.AddNextExecutionTime(time.Since(start)) }()
- out, err := t.inner.Next(ctx)
+ n, err := t.inner.Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
var totalSamplesAfter int64
@@ -229,11 +229,7 @@ func (t *Operator) Next(ctx context.Context) ([]model.StepVector, error) {
t.OperatorTelemetry.UpdatePeak(int(totalSamplesAfter) - int(totalSamplesBeforeCount))
- return out, err
-}
-
-func (t *Operator) GetPool() *model.VectorPool {
- return t.inner.GetPool()
+ return n, err
}
func (t *Operator) Explain() []model.VectorOperator {
diff --git a/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go b/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go
index 646b076a63a..29e9736448f 100644
--- a/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go
+++ b/vendor/github.com/thanos-io/promql-engine/execution/unary/unary.go
@@ -64,29 +64,25 @@ func (u *unaryNegation) loadSeries(ctx context.Context) error {
return err
}
-func (u *unaryNegation) GetPool() *model.VectorPool {
- return u.next.GetPool()
-}
-
-func (u *unaryNegation) Next(ctx context.Context) ([]model.StepVector, error) {
+func (u *unaryNegation) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
- in, err := u.next.Next(ctx)
+ n, err := u.next.Next(ctx, buf)
if err != nil {
- return nil, err
+ return 0, err
}
- if in == nil {
- return nil, nil
+ if n == 0 {
+ return 0, nil
}
- for i := range in {
- floats.Scale(-1, in[i].Samples)
- negateHistograms(in[i].Histograms)
+ for i := range n {
+ floats.Scale(-1, buf[i].Samples)
+ negateHistograms(buf[i].Histograms)
}
- return in, nil
+ return n, nil
}
func negateHistograms(hists []*histogram.FloatHistogram) {
diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go
index 446ce5d9a03..42ee5f8b254 100644
--- a/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go
+++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/distribute.go
@@ -579,7 +579,7 @@ func numSteps(start, end time.Time, step time.Duration) int64 {
// down since each engine's top 10 won't overlap with other engines' top 10.
func preservesPartitionLabels(expr Node, partitionLabels map[string]struct{}) bool {
if len(partitionLabels) == 0 {
- return true
+ return false
}
switch e := expr.(type) {
diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/histogram_stats.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/histogram_stats.go
index f5ba8fbcc11..806925c3e8e 100644
--- a/vendor/github.com/thanos-io/promql-engine/logicalplan/histogram_stats.go
+++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/histogram_stats.go
@@ -24,6 +24,13 @@ func (d DetectHistogramStatsOptimizer) optimize(plan Node, decodeStats bool) (No
switch n := (*node).(type) {
case *VectorSelector:
n.DecodeNativeHistogramStats = decodeStats
+ case *Subquery:
+ // Do not propagate decodeStats through subqueries.
+ // Subqueries may apply functions like increase/rate that need
+ // full histogram bucket data for proper counter reset detection.
+ n.Expr, _ = d.optimize(n.Expr, false)
+ stop = true
+ return
case *FunctionCall:
switch n.Func.Name {
case "histogram_count", "histogram_sum", "histogram_avg":
diff --git a/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go b/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go
index 6525837ea2f..6f19f21fdee 100644
--- a/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go
+++ b/vendor/github.com/thanos-io/promql-engine/logicalplan/user_defined.go
@@ -17,7 +17,6 @@ type UserDefinedExpr interface {
Node
MakeExecutionOperator(
ctx context.Context,
- vectors *model.VectorPool,
opts *query.Options,
hints storage.SelectHints,
) (model.VectorOperator, error)
diff --git a/vendor/github.com/thanos-io/promql-engine/query/options.go b/vendor/github.com/thanos-io/promql-engine/query/options.go
index 6bd01f6658b..72b45755681 100644
--- a/vendor/github.com/thanos-io/promql-engine/query/options.go
+++ b/vendor/github.com/thanos-io/promql-engine/query/options.go
@@ -20,19 +20,6 @@ type Options struct {
DecodingConcurrency int
}
-func (o *Options) NumSteps() int {
- // Instant evaluation is executed as a range evaluation with one step.
- if o.Step.Milliseconds() == 0 {
- return 1
- }
-
- totalSteps := (o.End.UnixMilli()-o.Start.UnixMilli())/o.Step.Milliseconds() + 1
- if int64(o.StepsBatch) < totalSteps {
- return o.StepsBatch
- }
- return int(totalSteps)
-}
-
// TotalSteps returns the total number of steps in the query, regardless of batching.
// This is useful for pre-allocating result slices.
func (o *Options) TotalSteps() int {
@@ -43,8 +30,16 @@ func (o *Options) TotalSteps() int {
return int((o.End.UnixMilli()-o.Start.UnixMilli())/o.Step.Milliseconds() + 1)
}
+func (o *Options) NumStepsPerBatch() int {
+ totalSteps := o.TotalSteps()
+ if o.StepsBatch < totalSteps {
+ return o.StepsBatch
+ }
+ return totalSteps
+}
+
func (o *Options) IsInstantQuery() bool {
- return o.NumSteps() == 1
+ return o.TotalSteps() == 1
}
func (o *Options) WithEndTime(end time.Time) *Options {
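
A hedged worked example of the renamed batching arithmetic; the struct-literal fields are the ones the methods above read:

package main

import (
	"fmt"
	"time"

	"github.com/thanos-io/promql-engine/query"
)

func main() {
	// A 1h range at a 30s step: (3600000ms / 30000ms) + 1 = 121 total steps.
	opts := &query.Options{
		Start:      time.Unix(0, 0),
		End:        time.Unix(3600, 0),
		Step:       30 * time.Second,
		StepsBatch: 64,
	}
	fmt.Println(opts.TotalSteps())       // 121
	fmt.Println(opts.NumStepsPerBatch()) // 64, capped at StepsBatch
	fmt.Println(opts.IsInstantQuery())   // false, more than one step
}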
diff --git a/vendor/github.com/thanos-io/promql-engine/ringbuffer/functions.go b/vendor/github.com/thanos-io/promql-engine/ringbuffer/functions.go
index 09299a452c7..bbf6c054a1b 100644
--- a/vendor/github.com/thanos-io/promql-engine/ringbuffer/functions.go
+++ b/vendor/github.com/thanos-io/promql-engine/ringbuffer/functions.go
@@ -4,7 +4,6 @@
package ringbuffer
import (
- "context"
"math"
"sort"
@@ -12,15 +11,13 @@ import (
"github.com/thanos-io/promql-engine/execution/parse"
"github.com/thanos-io/promql-engine/warnings"
+ "github.com/efficientgo/core/errors"
"github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/promql/parser/posrange"
- "github.com/prometheus/prometheus/util/annotations"
)
type SamplesBuffer GenericRingBuffer
type FunctionArgs struct {
- ctx context.Context
Samples []Sample
StepTime int64
SelectRange int64
@@ -32,15 +29,16 @@ type FunctionArgs struct {
ScalarPoint2 float64 // only for double_exponential_smoothing (trend factor)
}
-type FunctionCall func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error)
+type FunctionCall func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error)
-func instantValue(ctx context.Context, samples []Sample, isRate bool) (float64, *histogram.FloatHistogram, bool) {
+func instantValue(samples []Sample, isRate bool) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
var (
- ss = make([]Sample, 0, 2)
+ ss = make([]Sample, 0, 2)
+ warn warnings.Warnings
)
if len(samples) < 2 {
- return 0, nil, false
+ return 0, nil, false, 0, nil
}
for i := range samples {
@@ -89,7 +87,7 @@ func instantValue(ctx context.Context, samples []Sample, isRate bool) (float64,
sampledInterval := ss[1].T - ss[0].T
if sampledInterval == 0 {
// Avoid dividing by 0.
- return 0, nil, false
+ return 0, nil, false, 0, nil
}
resultSample := ss[1]
@@ -106,18 +104,25 @@ func instantValue(ctx context.Context, samples []Sample, isRate bool) (float64,
resultSample.V.H = ss[1].V.H.Copy()
// irate should only be applied to counters.
if isRate && (ss[1].V.H.CounterResetHint == histogram.GaugeType || ss[0].V.H.CounterResetHint == histogram.GaugeType) {
- warnings.AddToContext(annotations.NewNativeHistogramNotCounterWarning("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnNotCounter
}
// idelta should only be applied to gauges.
if !isRate && (ss[1].V.H.CounterResetHint != histogram.GaugeType || ss[0].V.H.CounterResetHint != histogram.GaugeType) {
- warnings.AddToContext(annotations.NewNativeHistogramNotGaugeWarning("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnNotGauge
}
if !isRate || !ss[1].V.H.DetectReset(ss[0].V.H) {
- _, err := resultSample.V.H.Sub(ss[0].V.H)
+ _, _, nhcbBoundsReconciled, err := resultSample.V.H.Sub(ss[0].V.H)
if err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return 0, nil, false
+ // Convert incompatible schema error to warning
+ if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+ warn |= warnings.WarnMixedExponentialCustomBuckets
+ return 0, nil, false, warn, nil
+ }
+ return 0, nil, false, warn, err
+ }
+ if nhcbBoundsReconciled {
+ warn |= warnings.WarnNHCBBoundsReconciled
}
}
@@ -125,9 +130,7 @@ func instantValue(ctx context.Context, samples []Sample, isRate bool) (float64,
resultSample.V.H.Compact(0)
default:
// Mix of a float and a histogram.
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
-
- return 0, nil, false
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
if isRate {
@@ -139,91 +142,102 @@ func instantValue(ctx context.Context, samples []Sample, isRate bool) (float64,
}
}
- return resultSample.V.F, resultSample.V.H, true
+ return resultSample.V.F, resultSample.V.H, true, warn, nil
}
var rangeVectorFuncs = map[string]FunctionCall{
- "sum_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "sum_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return sumOverTime(f.ctx, f.Samples)
+ return sumOverTime(f.Samples)
},
- "avg_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "avg_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return avgOverTime(f.ctx, f.Samples)
+ return avgOverTime(f.Samples)
},
- "mad_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "mad_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- val, ok := madOverTime(f.ctx, f.Samples)
- return val, nil, ok, nil
+ val, ok, warn := madOverTime(f.Samples)
+ return val, nil, ok, warn, nil
},
- "max_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "max_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- v, _, ok := maxOverTime(f.ctx, f.Samples)
- return v, nil, ok, nil
+ v, _, ok, warn := maxOverTime(f.Samples)
+ return v, nil, ok, warn, nil
},
- "min_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "min_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- v, _, ok := minOverTime(f.ctx, f.Samples)
- return v, nil, ok, nil
+ v, _, ok, warn := minOverTime(f.Samples)
+ return v, nil, ok, warn, nil
},
- "ts_of_max_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "ts_of_max_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- _, t, ok := maxOverTime(f.ctx, f.Samples)
- return float64(t) / 1000, nil, ok, nil
+ _, t, ok, warn := maxOverTime(f.Samples)
+ return float64(t) / 1000, nil, ok, warn, nil
},
- "ts_of_min_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "ts_of_min_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- _, t, ok := minOverTime(f.ctx, f.Samples)
- return float64(t) / 1000, nil, ok, nil
+ _, t, ok, warn := minOverTime(f.Samples)
+ return float64(t) / 1000, nil, ok, warn, nil
},
- "ts_of_last_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "ts_of_last_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
var t int64
for _, s := range f.Samples {
t = max(t, s.T)
}
- return float64(t) / 1000, nil, true, nil
+ return float64(t) / 1000, nil, true, 0, nil
},
- "stddev_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "ts_of_first_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- v, ok := stddevOverTime(f.ctx, f.Samples)
- return v, nil, ok, nil
+
+ t := f.Samples[0].T
+ for _, s := range f.Samples[1:] {
+ t = min(t, s.T)
+ }
+ return float64(t) / 1000, nil, true, 0, nil
},
- "stdvar_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "stddev_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- v, ok := stdvarOverTime(f.ctx, f.Samples)
- return v, nil, ok, nil
+ v, ok, warn := stddevOverTime(f.Samples)
+ return v, nil, ok, warn, nil
},
- "count_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "stdvar_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return countOverTime(f.Samples), nil, true, nil
+ v, ok, warn := stdvarOverTime(f.Samples)
+ return v, nil, ok, warn, nil
},
- "last_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "count_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
+ }
+ return countOverTime(f.Samples), nil, true, 0, nil
+ },
+ "last_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ if len(f.Samples) == 0 {
+ return 0., nil, false, 0, nil
}
var fi, hi int = -1, -1
@@ -236,33 +250,67 @@ var rangeVectorFuncs = map[string]FunctionCall{
}
if hi == -1 {
- return f.Samples[len(f.Samples)-1].V.F, nil, true, nil
+ return f.Samples[len(f.Samples)-1].V.F, nil, true, 0, nil
}
if fi == -1 {
- return 0, f.Samples[hi].V.H.Copy(), true, nil
+ return 0, f.Samples[hi].V.H.Copy(), true, 0, nil
}
if f.Samples[hi].T > f.Samples[fi].T {
- return 0, f.Samples[hi].V.H.Copy(), true, nil
+ return 0, f.Samples[hi].V.H.Copy(), true, 0, nil
+ }
+ return f.Samples[fi].V.F, nil, true, 0, nil
+ },
+ "first_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ if len(f.Samples) == 0 {
+ return 0., nil, false, 0, nil
+ }
+
+ var fi, hi int = -1, -1
+ var ft, ht int64 = math.MaxInt64, math.MaxInt64
+ for i, s := range f.Samples {
+ if s.V.H != nil {
+ if s.T < ht {
+ ht = s.T
+ hi = i
+ }
+ } else {
+ if s.T < ft {
+ ft = s.T
+ fi = i
+ }
+ }
+ }
+
+ if hi == -1 {
+ return f.Samples[fi].V.F, nil, true, 0, nil
+ }
+ if fi == -1 {
+ return 0, f.Samples[hi].V.H.Copy(), true, 0, nil
+ }
+
+ if ht < ft {
+ return 0, f.Samples[hi].V.H.Copy(), true, 0, nil
}
- return f.Samples[fi].V.F, nil, true, nil
+ return f.Samples[fi].V.F, nil, true, 0, nil
},
- "present_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "present_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return 1., nil, true, nil
+ return 1., nil, true, 0, nil
},
- "quantile_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "quantile_over_time": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
floats := make([]float64, 0, len(f.Samples))
+ var warn warnings.Warnings
for _, sample := range f.Samples {
if sample.V.H != nil {
if len(floats) > 0 {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), f.ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
}
continue
}
@@ -270,110 +318,117 @@ var rangeVectorFuncs = map[string]FunctionCall{
}
if len(floats) == 0 {
- return 0, nil, false, nil
+ return 0, nil, false, warn, nil
}
- return compute.Quantile(f.ScalarPoint, floats), nil, true, nil
+ return compute.Quantile(f.ScalarPoint, floats), nil, true, warn, nil
},
- "changes": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "changes": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return changes(f.Samples), nil, true, nil
+ return changes(f.Samples), nil, true, 0, nil
},
- "resets": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "resets": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return resets(f.Samples), nil, true, nil
+ return resets(f.Samples), nil, true, 0, nil
},
- "deriv": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "deriv": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) < 2 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- v, ok := deriv(f.ctx, f.Samples)
- return v, nil, ok, nil
+ v, ok, warn := deriv(f.Samples)
+ return v, nil, ok, warn, nil
},
- "irate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
- v, fh, ok := instantValue(f.ctx, f.Samples, true)
+ "irate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ v, fh, ok, warn, err := instantValue(f.Samples, true)
+ if err != nil {
+ return 0., nil, false, warn, err
+ }
if !ok {
- return 0., nil, false, nil
+ return 0., nil, false, warn, nil
}
- return v, fh, true, nil
+ return v, fh, true, warn, nil
},
- "idelta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
- v, fh, ok := instantValue(f.ctx, f.Samples, false)
+ "idelta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ v, fh, ok, warn, err := instantValue(f.Samples, false)
+ if err != nil {
+ return 0., nil, false, warn, err
+ }
if !ok {
- return 0., nil, false, nil
+ return 0., nil, false, warn, nil
}
- return v, fh, true, nil
+ return v, fh, true, warn, nil
},
- "rate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "rate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) < 2 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return extrapolatedRate(f.ctx, f.Samples, len(f.Samples), true, true, f.StepTime, f.SelectRange, f.Offset)
+ return extrapolatedRate(f.Samples, len(f.Samples), true, true, f.StepTime, f.SelectRange, f.Offset)
},
- "delta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "delta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) < 2 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return extrapolatedRate(f.ctx, f.Samples, len(f.Samples), false, false, f.StepTime, f.SelectRange, f.Offset)
+ return extrapolatedRate(f.Samples, len(f.Samples), false, false, f.StepTime, f.SelectRange, f.Offset)
},
- "increase": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "increase": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) < 2 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
- return extrapolatedRate(f.ctx, f.Samples, len(f.Samples), true, false, f.StepTime, f.SelectRange, f.Offset)
+ return extrapolatedRate(f.Samples, len(f.Samples), true, false, f.StepTime, f.SelectRange, f.Offset)
},
- "xrate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "xrate": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
if f.MetricAppearedTs == math.MinInt64 {
panic("BUG: we got some Samples but metric still hasn't appeared")
}
- v, h := extendedRate(f.ctx, f.Samples, true, true, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
- return v, h, true, nil
+ v, h := extendedRate(f.Samples, true, true, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
+ return v, h, true, 0, nil
},
- "xdelta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "xdelta": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
if f.MetricAppearedTs == math.MinInt64 {
panic("BUG: we got some Samples but metric still hasn't appeared")
}
- v, h := extendedRate(f.ctx, f.Samples, false, false, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
- return v, h, true, nil
+ v, h := extendedRate(f.Samples, false, false, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
+ return v, h, true, 0, nil
},
- "xincrease": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "xincrease": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if len(f.Samples) == 0 {
- return 0., nil, false, nil
+ return 0., nil, false, 0, nil
}
if f.MetricAppearedTs == math.MinInt64 {
panic("BUG: we got some Samples but metric still hasn't appeared")
}
- v, h := extendedRate(f.ctx, f.Samples, true, false, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
- return v, h, true, nil
+ v, h := extendedRate(f.Samples, true, false, f.StepTime, f.SelectRange, f.Offset, f.MetricAppearedTs)
+ return v, h, true, 0, nil
},
- "predict_linear": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
- v, ok := predictLinear(f.ctx, f.Samples, f.ScalarPoint, f.StepTime)
- return v, nil, ok, nil
+ "predict_linear": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ v, ok, warn := predictLinear(f.Samples, f.ScalarPoint, f.StepTime)
+ return v, nil, ok, warn, nil
},
- "double_exponential_smoothing": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, error) {
+ "double_exponential_smoothing": func(f FunctionArgs) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
floats, numHistograms := filterFloatOnlySamples(f.Samples)
+ var warn warnings.Warnings
if numHistograms > 0 && len(floats) > 0 {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), f.ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
}
if len(floats) < 2 {
- return 0, nil, false, nil
+ return 0, nil, false, warn, nil
}
sf := f.ScalarPoint // smoothing factor or alpha
tf := f.ScalarPoint2 // trend factor argument or beta
v, ok := doubleExponentialSmoothing(floats, sf, tf)
- return v, nil, ok, nil
+ return v, nil, ok, warn, nil
},
}
@@ -389,7 +444,7 @@ func NewRangeVectorFunc(name string) (FunctionCall, error) {
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
-func extrapolatedRate(ctx context.Context, samples []Sample, numSamples int, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (f float64, h *histogram.FloatHistogram, ok bool, err error) {
+func extrapolatedRate(samples []Sample, numSamples int, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64) (f float64, h *histogram.FloatHistogram, ok bool, warn warnings.Warnings, err error) {
var (
rangeStart = stepTime - (selectRange + offset)
rangeEnd = stepTime - offset
@@ -403,12 +458,15 @@ func extrapolatedRate(ctx context.Context, samples []Sample, numSamples int, isC
fd = fd || s.V.H == nil
}
if fd && hd {
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
if samples[0].V.H != nil {
- resultHistogram = histogramRate(ctx, samples, isCounter)
+ var err error
+ resultHistogram, warn, err = histogramRate(samples, isCounter)
+ if err != nil {
+ return 0, nil, false, warn, err
+ }
} else {
resultValue = samples[len(samples)-1].V.F - samples[0].V.F
if isCounter {
@@ -487,17 +545,17 @@ func extrapolatedRate(ctx context.Context, samples []Sample, numSamples int, isC
if samples[0].V.H != nil && resultHistogram == nil {
// to prevent appending sample with 0
- return 0, nil, false, nil
+ return 0, nil, false, warn, nil
}
- return resultValue, resultHistogram, true, nil
+ return resultValue, resultHistogram, true, warn, nil
}
// extendedRate is a utility function for xrate/xincrease/xdelta.
// It calculates the rate (allowing for counter resets if isCounter is true),
// taking into account the last sample before the range start, and returns
// the result as either per-second (if isRate is true) or overall.
-func extendedRate(ctx context.Context, samples []Sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) {
+func extendedRate(samples []Sample, isCounter, isRate bool, stepTime int64, selectRange int64, offset int64, metricAppearedTs int64) (float64, *histogram.FloatHistogram) {
var (
rangeStart = stepTime - (selectRange + offset)
rangeEnd = stepTime - offset
@@ -507,7 +565,7 @@ func extendedRate(ctx context.Context, samples []Sample, isCounter, isRate bool,
if samples[0].V.H != nil {
// TODO - support extended rate for histograms
- resultHistogram = histogramRate(ctx, samples, isCounter)
+ resultHistogram, _, _ = histogramRate(samples, isCounter)
return resultValue, resultHistogram
}
@@ -584,27 +642,26 @@ func extendedRate(ctx context.Context, samples []Sample, isCounter, isRate bool,
// histogramRate is a helper function for extrapolatedRate. It requires
// points[0] to be a histogram. It returns nil if any other Point in points is
// not a histogram.
-func histogramRate(ctx context.Context, points []Sample, isCounter bool) *histogram.FloatHistogram {
+func histogramRate(points []Sample, isCounter bool) (*histogram.FloatHistogram, warnings.Warnings, error) {
// Calculating a rate on a single sample is not defined.
if len(points) < 2 {
- return nil
+ return nil, 0, nil
}
var (
prev = points[0].V.H
usingCustomBuckets = prev.UsesCustomBuckets()
last = points[len(points)-1].V.H
+ warn warnings.Warnings
)
-
if last == nil {
- warnings.AddToContext(annotations.MixedFloatsHistogramsWarning, ctx)
- return nil // Range contains a mix of histograms and floats.
+ return nil, warnings.WarnMixedFloatsHistograms, nil // Range contains a mix of histograms and floats.
}
// We check for gauge type histograms in the loop below, but the loop
// below does not run on the first and last point, so check the first
// and last point now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
- warnings.AddToContext(annotations.NewNativeHistogramNotCounterWarning("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnNotCounter
}
// Null out the 1st sample if there is a counter reset between the 1st
@@ -621,15 +678,13 @@ func histogramRate(ctx context.Context, points []Sample, isCounter bool) *histog
}
if last.UsesCustomBuckets() != usingCustomBuckets {
- warnings.AddToContext(annotations.NewMixedExponentialCustomHistogramsWarning("", posrange.PositionRange{}), ctx)
- return nil
+ return nil, warnings.WarnMixedExponentialCustomBuckets, nil
}
minSchema := min(last.Schema, prev.Schema)
if last.UsesCustomBuckets() != usingCustomBuckets {
- warnings.AddToContext(annotations.MixedExponentialCustomHistogramsWarning, ctx)
- return nil
+ return nil, warnings.WarnMixedExponentialCustomBuckets, nil
}
// https://github.com/prometheus/prometheus/blob/ccea61c7bf1e6bce2196ba8189a209945a204c5b/promql/functions.go#L183
@@ -640,28 +695,27 @@ func histogramRate(ctx context.Context, points []Sample, isCounter bool) *histog
for _, currPoint := range points[1 : len(points)-1] {
curr := currPoint.V.H
if curr == nil {
- warnings.AddToContext(annotations.MixedFloatsHistogramsWarning, ctx)
- return nil // Range contains a mix of histograms and floats.
+ return nil, warn | warnings.WarnMixedFloatsHistograms, nil // Range contains a mix of histograms and floats.
}
if !isCounter {
continue
}
if curr.CounterResetHint == histogram.GaugeType {
- warnings.AddToContext(annotations.NativeHistogramNotCounterWarning, ctx)
+ warn |= warnings.WarnNotCounter
}
if curr.Schema < minSchema {
minSchema = curr.Schema
}
if curr.UsesCustomBuckets() != usingCustomBuckets {
- warnings.AddToContext(annotations.MixedExponentialCustomHistogramsWarning, ctx)
- return nil
+ return nil, warn | warnings.WarnMixedExponentialCustomBuckets, nil
}
}
h := last.CopyToSchema(minSchema)
- if _, err := h.Sub(prev); err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return nil
+ if _, _, nhcbBoundsReconciled, err := h.Sub(prev); err != nil {
+ return nil, warn, err
+ } else if nhcbBoundsReconciled {
+ warn |= warnings.WarnNHCBBoundsReconciled
}
if isCounter {
@@ -669,28 +723,28 @@ func histogramRate(ctx context.Context, points []Sample, isCounter bool) *histog
for _, currPoint := range points[1:] {
curr := currPoint.V.H
if curr.DetectReset(prev) {
- if _, err := h.Add(prev); err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return nil
+ if _, _, _, err := h.Add(prev); err != nil {
+ return nil, warn, err
}
}
prev = curr
}
} else if points[0].V.H.CounterResetHint != histogram.GaugeType || points[len(points)-1].V.H.CounterResetHint != histogram.GaugeType {
- warnings.AddToContext(annotations.NativeHistogramNotGaugeWarning, ctx)
+ warn |= warnings.WarnNotGauge
}
h.CounterResetHint = histogram.GaugeType
- return h.Compact(0)
+ return h.Compact(0), warn, nil
}
-func madOverTime(ctx context.Context, points []Sample) (float64, bool) {
+func madOverTime(points []Sample) (float64, bool, warnings.Warnings) {
values := make([]float64, 0, len(points))
var floatsDetected bool
+ var warn warnings.Warnings
for _, f := range points {
if f.V.H != nil {
if floatsDetected {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
}
continue
} else {
@@ -702,7 +756,7 @@ func madOverTime(ctx context.Context, points []Sample) (float64, bool) {
sort.Float64s(values)
if len(values) == 0 {
- return 0, false
+ return 0, false, warn
}
median := compute.Quantile(0.5, values)
@@ -711,19 +765,17 @@ func madOverTime(ctx context.Context, points []Sample) (float64, bool) {
}
sort.Float64s(values)
- return compute.Quantile(0.5, values), true
+ return compute.Quantile(0.5, values), true, warn
}
-func maxOverTime(ctx context.Context, points []Sample) (float64, int64, bool) {
+func maxOverTime(points []Sample) (float64, int64, bool, warnings.Warnings) {
resv := points[0].V.F
rest := points[0].T
- var foundFloat bool
+ var foundFloat, foundHist bool
for _, v := range points {
if v.V.H != nil {
- if foundFloat {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
- }
+ foundHist = true
} else {
foundFloat = true
}
@@ -734,21 +786,23 @@ func maxOverTime(ctx context.Context, points []Sample) (float64, int64, bool) {
}
if !foundFloat {
- return 0, 0, false
+ return 0, 0, false, 0
+ }
+ var warn warnings.Warnings
+ if foundHist {
+ warn = warnings.WarnHistogramIgnoredInMixedRange
}
- return resv, rest, true
+ return resv, rest, true, warn
}
-func minOverTime(ctx context.Context, points []Sample) (float64, int64, bool) {
+func minOverTime(points []Sample) (float64, int64, bool, warnings.Warnings) {
resv := points[0].V.F
rest := points[0].T
- var foundFloat bool
+ var foundFloat, foundHist bool
for _, v := range points {
if v.V.H != nil {
- if foundFloat {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
- }
+ foundHist = true
} else {
foundFloat = true
}
@@ -759,38 +813,39 @@ func minOverTime(ctx context.Context, points []Sample) (float64, int64, bool) {
}
if !foundFloat {
- return 0, 0, false
+ return 0, 0, false, 0
}
- return resv, rest, true
+ var warn warnings.Warnings
+ if foundHist {
+ warn = warnings.WarnHistogramIgnoredInMixedRange
+ }
+ return resv, rest, true, warn
}
func countOverTime(points []Sample) float64 {
return float64(len(points))
}
-func avgOverTime(ctx context.Context, points []Sample) (float64, *histogram.FloatHistogram, bool, error) {
+func avgOverTime(points []Sample) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
// we sniffed a histogram average
if points[0].V.H != nil {
mean := points[0].V.H.Copy()
for i, sample := range points[1:] {
if sample.V.H == nil {
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
count := float64(i + 2)
left := sample.V.H.Copy().Div(count)
right := mean.Copy().Div(count)
- toAdd, err := left.Sub(right)
+ toAdd, _, _, err := left.Sub(right)
if err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, 0, err
}
- if _, err = mean.Add(toAdd); err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return 0, nil, false, nil
+ if _, _, _, err = mean.Add(toAdd); err != nil {
+ return 0, nil, false, 0, err
}
}
- return 0, mean, true, nil
+ return 0, mean, true, 0, nil
}
// we sniffed a float average
@@ -802,8 +857,7 @@ func avgOverTime(ctx context.Context, points []Sample) (float64, *histogram.Floa
)
for i, p := range points[1:] {
if p.V.H != nil {
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
count = float64(i + 2)
if !incrementalMean {
@@ -824,55 +878,53 @@ func avgOverTime(ctx context.Context, points []Sample) (float64, *histogram.Floa
mean, kahanC = compute.KahanSumInc(p.V.F/count, q*mean, q*kahanC)
}
if incrementalMean {
- return mean + kahanC, nil, true, nil
+ return mean + kahanC, nil, true, 0, nil
}
- return sum/count + kahanC/count, nil, true, nil
+ return sum/count + kahanC/count, nil, true, 0, nil
}
-func sumOverTime(ctx context.Context, points []Sample) (float64, *histogram.FloatHistogram, bool, error) {
+func sumOverTime(points []Sample) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
// we sniffed a histogram sum
if points[0].V.H != nil {
res := points[0].V.H.Copy()
for _, v := range points[1:] {
if v.V.H == nil {
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
- if _, err := res.Add(v.V.H); err != nil {
- warnings.AddToContext(warnings.ConvertHistogramError(err), ctx)
- return 0, nil, false, nil
+ if _, _, _, err := res.Add(v.V.H); err != nil {
+ return 0, nil, false, 0, err
}
}
- return 0, res, true, nil
+ return 0, res, true, 0, nil
}
// we sniffed a float sum
res, c := points[0].V.F, 0.
for _, v := range points[1:] {
if v.V.H != nil {
- warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning("", posrange.PositionRange{}), ctx)
- return 0, nil, false, nil
+ return 0, nil, false, warnings.WarnMixedFloatsHistograms, nil
}
res, c = compute.KahanSumInc(v.V.F, res, c)
}
if math.IsInf(res, 0) {
- return res, nil, true, nil
+ return res, nil, true, 0, nil
}
- return res + c, nil, true, nil
+ return res + c, nil, true, 0, nil
}
-func stddevOverTime(ctx context.Context, points []Sample) (float64, bool) {
+func stddevOverTime(points []Sample) (float64, bool, warnings.Warnings) {
var count float64
var mean, cMean float64
var aux, cAux float64
var foundFloat bool
+ var warn warnings.Warnings
for _, v := range points {
if v.V.H == nil {
foundFloat = true
} else if foundFloat && v.V.H != nil {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
continue
}
count++
@@ -882,22 +934,23 @@ func stddevOverTime(ctx context.Context, points []Sample) (float64, bool) {
}
if !foundFloat {
- return 0, false
+ return 0, false, warn
}
- return math.Sqrt((aux + cAux) / count), true
+ return math.Sqrt((aux + cAux) / count), true, warn
}
-func stdvarOverTime(ctx context.Context, points []Sample) (float64, bool) {
+func stdvarOverTime(points []Sample) (float64, bool, warnings.Warnings) {
var count float64
var mean, cMean float64
var aux, cAux float64
var foundFloat bool
+ var warn warnings.Warnings
for _, v := range points {
if v.V.H == nil {
foundFloat = true
} else if foundFloat && v.V.H != nil {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
continue
}
count++
@@ -907,9 +960,9 @@ func stdvarOverTime(ctx context.Context, points []Sample) (float64, bool) {
}
if !foundFloat {
- return 0, false
+ return 0, false, warn
}
- return ((aux + cAux) / count), true
+ return ((aux + cAux) / count), true, warn
}
func changes(points []Sample) float64 {
@@ -934,8 +987,9 @@ func changes(points []Sample) float64 {
return count
}
-func deriv(ctx context.Context, points []Sample) (float64, bool) {
+func deriv(points []Sample) (float64, bool, warnings.Warnings) {
var floats int
+ var warn warnings.Warnings
for _, p := range points {
if p.V.H == nil {
@@ -943,12 +997,12 @@ func deriv(ctx context.Context, points []Sample) (float64, bool) {
}
if floats > 0 && p.V.H != nil {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
}
}
if floats < 2 {
- return 0, false
+ return 0, false, warn
}
fp := make([]Sample, 0, floats)
@@ -962,11 +1016,12 @@ func deriv(ctx context.Context, points []Sample) (float64, bool) {
// https://github.com/prometheus/prometheus/issues/2674
slope, _ := linearRegression(fp, fp[0].T)
- return slope, true
+ return slope, true, warn
}
-func predictLinear(ctx context.Context, points []Sample, duration float64, stepTime int64) (float64, bool) {
+func predictLinear(points []Sample, duration float64, stepTime int64) (float64, bool, warnings.Warnings) {
var floats int
+ var warn warnings.Warnings
for _, p := range points {
if p.V.H == nil {
@@ -974,12 +1029,12 @@ func predictLinear(ctx context.Context, points []Sample, duration float64, stepT
}
if floats > 0 && p.V.H != nil {
- warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo("", posrange.PositionRange{}), ctx)
+ warn |= warnings.WarnHistogramIgnoredInMixedRange
}
}
if floats < 2 {
- return 0, false
+ return 0, false, warn
}
fp := make([]Sample, 0, floats)
@@ -989,7 +1044,7 @@ func predictLinear(ctx context.Context, points []Sample, duration float64, stepT
}
}
slope, intercept := linearRegression(fp, stepTime)
- return slope*duration + intercept, true
+ return slope*duration + intercept, true, warn
}
// Based on https://github.com/prometheus/prometheus/blob/8baad1a73e471bd3cf3175a1608199e27484f179/promql/functions.go#L438
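
The net effect of this file's changes is that the range functions become pure: they take samples and return a warnings.Warnings bitset alongside the value, instead of writing annotations into the context. A hedged sketch of the new call shape from outside the package (countInWindow is illustrative):

func countInWindow(samples []ringbuffer.Sample, stepTime, selectRange int64) (float64, warnings.Warnings, error) {
	call, err := ringbuffer.NewRangeVectorFunc("count_over_time")
	if err != nil {
		return 0, 0, err
	}
	f, _, ok, warn, err := call(ringbuffer.FunctionArgs{
		Samples:     samples,
		StepTime:    stepTime,
		SelectRange: selectRange,
	})
	if err != nil || !ok {
		return 0, warn, err
	}
	return f, warn, nil
}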
diff --git a/vendor/github.com/thanos-io/promql-engine/ringbuffer/generic.go b/vendor/github.com/thanos-io/promql-engine/ringbuffer/generic.go
index 20315587657..5357f823739 100644
--- a/vendor/github.com/thanos-io/promql-engine/ringbuffer/generic.go
+++ b/vendor/github.com/thanos-io/promql-engine/ringbuffer/generic.go
@@ -8,6 +8,7 @@ import (
"math"
"github.com/thanos-io/promql-engine/execution/telemetry"
+ "github.com/thanos-io/promql-engine/warnings"
"github.com/prometheus/prometheus/model/histogram"
)
@@ -16,7 +17,7 @@ type Buffer interface {
MaxT() int64
Push(t int64, v Value)
Reset(mint int64, evalt int64)
- Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, error)
+ Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error)
SampleCount() int
// to handle extlookback properly, only used by buffers that implement xincrease or xrate
@@ -132,9 +133,8 @@ func (r *GenericRingBuffer) Reset(mint int64, evalt int64) {
r.items = r.items[:keep]
}
-func (r *GenericRingBuffer) Eval(ctx context.Context, scalarArg float64, scalarArg2 float64, metricAppearedTs int64) (float64, *histogram.FloatHistogram, bool, error) {
+func (r *GenericRingBuffer) Eval(ctx context.Context, scalarArg float64, scalarArg2 float64, metricAppearedTs int64) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
return r.call(FunctionArgs{
- ctx: ctx,
Samples: r.items,
StepTime: r.currentStep,
SelectRange: r.selectRange,
diff --git a/vendor/github.com/thanos-io/promql-engine/ringbuffer/overtime.go b/vendor/github.com/thanos-io/promql-engine/ringbuffer/overtime.go
index 10a5f9f4ded..baf09db0236 100644
--- a/vendor/github.com/thanos-io/promql-engine/ringbuffer/overtime.go
+++ b/vendor/github.com/thanos-io/promql-engine/ringbuffer/overtime.go
@@ -13,7 +13,6 @@ import (
"github.com/thanos-io/promql-engine/warnings"
"github.com/prometheus/prometheus/model/histogram"
- "github.com/prometheus/prometheus/util/annotations"
)
// If we use $__interval as steps and $__rate_interval for the sliding window
@@ -189,26 +188,35 @@ func (r *OverTimeBuffer) Reset(mint int64, evalt int64) {
func (r *OverTimeBuffer) ReadIntoLast(func(*Sample)) {}
-func (r *OverTimeBuffer) Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, error) {
+func (r *OverTimeBuffer) Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
+ var warn warnings.Warnings
+
if r.stepStates[0].warn != nil {
- warnings.AddToContext(r.stepStates[0].warn, ctx)
+ return 0, nil, false, warn, r.stepStates[0].warn
}
if r.firstTimestamps[0] == math.MaxInt64 {
- return 0, nil, false, nil
+ return 0, nil, false, warn, nil
}
acc := r.stepStates[0].acc
f, h := acc.Value()
+ // Include accumulator warnings (mixed types, ignored histograms)
+ accWarn := acc.Warnings()
+
if acc.ValueType() == compute.MixedTypeValue {
- warnings.AddToContext(annotations.MixedFloatsHistogramsWarning, ctx)
- return 0, nil, false, nil
+ warn |= accWarn
+ return 0, nil, false, warn, nil
}
- // Float-only accumulators track skipped histograms; emit info-level warning
- if acc.HasIgnoredHistograms() && acc.ValueType() == compute.SingleTypeValue {
- warnings.AddToContext(annotations.HistogramIgnoredInMixedRangeInfo, ctx)
+ // For _over_time functions returning a float value, translate WarnHistogramIgnoredInAggregation
+ // to WarnHistogramIgnoredInMixedRange (which indicates histograms were ignored in a mixed range).
+ // Only do this when we actually have a float result, not when returning no value.
+ if acc.ValueType() == compute.SingleTypeValue && h == nil && accWarn&warnings.WarnHistogramIgnoredInAggregation != 0 {
+ accWarn = (accWarn &^ warnings.WarnHistogramIgnoredInAggregation) | warnings.WarnHistogramIgnoredInMixedRange
}
- return f, h, acc.ValueType() == compute.SingleTypeValue, nil
+ warn |= accWarn
+
+ return f, h, acc.ValueType() == compute.SingleTypeValue, warn, nil
}
diff --git a/vendor/github.com/thanos-io/promql-engine/ringbuffer/rate.go b/vendor/github.com/thanos-io/promql-engine/ringbuffer/rate.go
index c1612eb9563..a3f021d9992 100644
--- a/vendor/github.com/thanos-io/promql-engine/ringbuffer/rate.go
+++ b/vendor/github.com/thanos-io/promql-engine/ringbuffer/rate.go
@@ -10,6 +10,7 @@ import (
"github.com/thanos-io/promql-engine/execution/telemetry"
"github.com/thanos-io/promql-engine/query"
+ "github.com/thanos-io/promql-engine/warnings"
"github.com/prometheus/prometheus/model/histogram"
)
@@ -177,9 +178,9 @@ func (r *RateBuffer) Reset(mint int64, evalt int64) {
r.firstSamples[lastSample].T = math.MaxInt64
}
-func (r *RateBuffer) Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, error) {
+func (r *RateBuffer) Eval(ctx context.Context, _, _ float64, _ int64) (float64, *histogram.FloatHistogram, bool, warnings.Warnings, error) {
if r.firstSamples[0].T == math.MaxInt64 || r.firstSamples[0].T == r.lastSample.T {
- return 0, nil, false, nil
+ return 0, nil, false, 0, nil
}
r.rateBuffer = append(append(
@@ -189,7 +190,7 @@ func (r *RateBuffer) Eval(ctx context.Context, _, _ float64, _ int64) (float64,
)
r.rateBuffer = slices.CompactFunc(r.rateBuffer, func(s1 Sample, s2 Sample) bool { return s1.T == s2.T })
numSamples := r.stepRanges[0].numSamples
- return extrapolatedRate(ctx, r.rateBuffer, numSamples, r.isCounter, r.isRate, r.evalTs, r.selectRange, r.offset)
+ return extrapolatedRate(r.rateBuffer, numSamples, r.isCounter, r.isRate, r.evalTs, r.selectRange, r.offset)
}
func (r *RateBuffer) ReadIntoLast(func(*Sample)) {}
diff --git a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/matrix_selector.go b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/matrix_selector.go
index aecf7921ce0..9b9951fb162 100644
--- a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/matrix_selector.go
+++ b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/matrix_selector.go
@@ -29,8 +29,9 @@ import (
)
type matrixScanner struct {
- labels labels.Labels
- signature uint64
+ labels labels.Labels
+ metricName string
+ signature uint64
buffer ringbuffer.Buffer
iterator chunkenc.Iterator
@@ -41,7 +42,6 @@ type matrixScanner struct {
type matrixSelector struct {
telemetry telemetry.OperatorTelemetry
- vectorPool *model.VectorPool
storage SeriesSelector
scalarArg float64
scalarArg2 float64
@@ -80,7 +80,6 @@ var ErrNativeHistogramsNotSupported = errors.New("native histograms are not supp
// NewMatrixSelector creates operator which selects vector of series over time.
func NewMatrixSelector(
- pool *model.VectorPool,
selector SeriesSelector,
functionName string,
arg float64,
@@ -98,13 +97,12 @@ func NewMatrixSelector(
storage: selector,
call: call,
functionName: functionName,
- vectorPool: pool,
scalarArg: arg,
scalarArg2: arg2,
fhReader: &histogram.FloatHistogram{},
opts: opts,
- numSteps: opts.NumSteps(),
+ numSteps: opts.NumStepsPerBatch(),
mint: opts.Start.UnixMilli(),
maxt: opts.End.UnixMilli(),
step: opts.Step.Milliseconds(),
@@ -142,14 +140,10 @@ func (o *matrixSelector) Series(ctx context.Context) ([]labels.Labels, error) {
return o.series, nil
}
-func (o *matrixSelector) GetPool() *model.VectorPool {
- return o.vectorPool
-}
-
-func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *matrixSelector) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
@@ -158,16 +152,27 @@ func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) {
warnings.AddToContext(annotations.NewPossibleNonCounterInfo(o.nonCounterMetric, posrange.PositionRange{}), ctx)
}
- return nil, nil
+ return 0, nil
}
if err := o.loadSeries(ctx); err != nil {
- return nil, err
+ return 0, err
}
ts := o.currentStep
- vectors := o.vectorPool.GetVectorBatch()
- for currStep := 0; currStep < o.numSteps && ts <= o.maxt; currStep++ {
- vectors = append(vectors, o.vectorPool.GetStepVector(ts))
+ n := 0
+ maxSteps := min(o.numSteps, len(buf))
+
+ // Calculate the expected samples per step: the actual number of series we'll process in this batch,
+ // i.e. min(seriesBatchSize, remaining series to process).
+ remainingSeries := int64(len(o.scanners)) - o.currentSeries
+ expectedSamples := int(min(o.seriesBatchSize, remainingSeries))
+ if expectedSamples <= 0 {
+ expectedSamples = len(o.scanners)
+ }
+
+ for currStep := 0; currStep < maxSteps && ts <= o.maxt; currStep++ {
+ buf[n].Reset(ts)
+ n++
ts += o.step
}
@@ -180,27 +185,32 @@ func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) {
seriesTs = ts
)
- for currStep := 0; currStep < o.numSteps && seriesTs <= o.maxt; currStep++ {
+ for currStep := 0; currStep < n && seriesTs <= o.maxt; currStep++ {
maxt := seriesTs - o.offset
mint := maxt - o.selectRange
if err := scanner.selectPoints(mint, maxt, seriesTs, o.fhReader, o.isExtFunction); err != nil {
- return nil, err
+ return 0, err
}
// TODO(saswatamcode): Handle multi-arg functions for matrixSelectors.
// Also, allow operator to exist independently without being nested
// under parser.Call by implementing new data model.
// https://github.com/thanos-io/promql-engine/issues/39
- f, h, ok, err := scanner.buffer.Eval(ctx, o.scalarArg, o.scalarArg2, scanner.metricAppearedTs)
+ f, h, ok, warn, err := scanner.buffer.Eval(ctx, o.scalarArg, o.scalarArg2, scanner.metricAppearedTs)
if err != nil {
- return nil, err
+ return 0, err
+ }
+ if warn != 0 {
+ emitRingbufferWarnings(ctx, warn, scanner.metricName)
}
if ok {
- vectors[currStep].T = seriesTs
+ buf[currStep].T = seriesTs
if h != nil {
- vectors[currStep].AppendHistogram(o.vectorPool, scanner.signature, h)
+ // Lazily pre-allocate histogram slices only when we actually have histograms.
+ buf[currStep].AppendHistogramWithSizeHint(scanner.signature, h, expectedSamples)
} else {
- vectors[currStep].AppendSample(o.vectorPool, scanner.signature, f)
+ // Lazily pre-allocate sample slices with a capacity hint.
+ buf[currStep].AppendSampleWithSizeHint(scanner.signature, f, expectedSamples)
o.hasFloats = true
}
}
@@ -209,10 +219,10 @@ func (o *matrixSelector) Next(ctx context.Context) ([]model.StepVector, error) {
}
}
if o.currentSeries == int64(len(o.scanners)) {
- o.currentStep += o.step * int64(o.numSteps)
+ o.currentStep += o.step * int64(n)
o.currentSeries = 0
}
- return vectors, nil
+ return n, nil
}
func (o *matrixSelector) loadSeries(ctx context.Context) error {
@@ -229,17 +239,14 @@ func (o *matrixSelector) loadSeries(ctx context.Context) error {
var b labels.ScratchBuilder
for i, s := range series {
- lbls := s.Labels()
- if o.functionName != "last_over_time" {
- // This modifies the array in place. Because labels.Labels
- // can be re-used between different Select() calls, it means that
- // we have to copy it here.
- // TODO(GiedriusS): could we identify somehow whether labels.Labels
- // is reused between Select() calls?
+ origLbls := s.Labels()
+ lbls := origLbls
+ if o.functionName != "last_over_time" && o.functionName != "first_over_time" {
lbls = extlabels.DropReserved(lbls, b)
}
o.scanners[i] = matrixScanner{
labels: lbls,
+ metricName: origLbls.Get(labels.MetricName),
signature: s.Signature,
iterator: s.Iterator(nil),
lastSample: ringbuffer.Sample{T: math.MinInt64},
@@ -252,7 +259,6 @@ func (o *matrixSelector) loadSeries(ctx context.Context) error {
if o.seriesBatchSize == 0 || numSeries < o.seriesBatchSize {
o.seriesBatchSize = numSeries
}
- o.vectorPool.SetStepSize(int(o.seriesBatchSize))
// Add a warning if rate or increase is applied on metrics which are not named like counters.
if o.functionName == "rate" || o.functionName == "increase" {
@@ -398,3 +404,31 @@ func (m *matrixScanner) selectPoints(
}
return m.iterator.Err()
}
+
+// emitRingbufferWarnings converts warnings.Warnings flags into the corresponding annotations,
+// attaching the metric name where the annotation constructor accepts one.
+func emitRingbufferWarnings(ctx context.Context, warn warnings.Warnings, metricName string) {
+ if warn&warnings.WarnNotCounter != 0 {
+ warnings.AddToContext(annotations.NewNativeHistogramNotCounterWarning(metricName, posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnNotGauge != 0 {
+ warnings.AddToContext(annotations.NewNativeHistogramNotGaugeWarning(metricName, posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnMixedFloatsHistograms != 0 {
+ warnings.AddToContext(annotations.NewMixedFloatsHistogramsWarning(metricName, posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnMixedExponentialCustomBuckets != 0 {
+ warnings.AddToContext(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnHistogramIgnoredInMixedRange != 0 {
+ warnings.AddToContext(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, posrange.PositionRange{}), ctx)
+ }
+ if warn&warnings.WarnCounterResetCollision != 0 {
+ warnings.AddToContext(annotations.NewHistogramCounterResetCollisionWarning(posrange.PositionRange{}, annotations.HistogramAgg), ctx)
+ }
+ if warn&warnings.WarnNHCBBoundsReconciled != 0 {
+ warnings.AddToContext(annotations.NewMismatchedCustomBucketsHistogramsInfo(posrange.PositionRange{}, annotations.HistogramSub), ctx)
+ }
+ if warn&warnings.WarnNHCBBoundsReconciledAgg != 0 {
+ warnings.AddToContext(annotations.NewMismatchedCustomBucketsHistogramsInfo(posrange.PositionRange{}, annotations.HistogramAgg), ctx)
+ }
+}
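
Because Warnings is a plain bitset (declared in warnings/context.go below), the operator-side handling reduces to a few bit operations. A hedged sketch that mirrors what overtime.go and the selectors do (forwardWarnings is illustrative):

func forwardWarnings(ctx context.Context, warn warnings.Warnings, metricName string) {
	// Retarget the aggregation flag for _over_time-style float results,
	// clearing one bit with &^ and setting another with |, as overtime.go does.
	if warn&warnings.WarnHistogramIgnoredInAggregation != 0 {
		warn = (warn &^ warnings.WarnHistogramIgnoredInAggregation) | warnings.WarnHistogramIgnoredInMixedRange
	}
	if warn != 0 {
		emitRingbufferWarnings(ctx, warn, metricName)
	}
}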
diff --git a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/scanners.go b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/scanners.go
index dc7888f5710..bac38431cd0 100644
--- a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/scanners.go
+++ b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/scanners.go
@@ -68,7 +68,6 @@ func (p Scanners) NewVectorSelector(
for i := range opts.DecodingConcurrency {
operator := exchange.NewConcurrent(
NewVectorSelector(
- model.NewVectorPool(opts.StepsBatch),
selector,
opts,
logicalNode.Offset,
@@ -80,7 +79,7 @@ func (p Scanners) NewVectorSelector(
operators = append(operators, operator)
}
- return exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, logicalNode.BatchSize*int64(opts.DecodingConcurrency), operators...), nil
+ return exchange.NewCoalesce(opts, logicalNode.BatchSize*int64(opts.DecodingConcurrency), operators...), nil
}
func (p Scanners) NewMatrixSelector(
@@ -140,7 +139,6 @@ func (p Scanners) NewMatrixSelector(
operators := make([]model.VectorOperator, 0, opts.DecodingConcurrency)
for i := range opts.DecodingConcurrency {
operator, err := NewMatrixSelector(
- model.NewVectorPool(opts.StepsBatch),
selector,
call.Func.Name,
arg,
@@ -158,7 +156,7 @@ func (p Scanners) NewMatrixSelector(
operators = append(operators, exchange.NewConcurrent(operator, 2, opts))
}
- return exchange.NewCoalesce(model.NewVectorPool(opts.StepsBatch), opts, vs.BatchSize*int64(opts.DecodingConcurrency), operators...), nil
+ return exchange.NewCoalesce(opts, vs.BatchSize*int64(opts.DecodingConcurrency), operators...), nil
}
type histogramStatsSelector struct {
diff --git a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/vector_selector.go b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/vector_selector.go
index 725616ab0ee..ff7e11933e1 100644
--- a/vendor/github.com/thanos-io/promql-engine/storage/prometheus/vector_selector.go
+++ b/vendor/github.com/thanos-io/promql-engine/storage/prometheus/vector_selector.go
@@ -35,8 +35,7 @@ type vectorSelector struct {
scanners []vectorScanner
series []labels.Labels
- once sync.Once
- vectorPool *model.VectorPool
+ once sync.Once
numSteps int
mint int64
@@ -57,7 +56,6 @@ type vectorSelector struct {
// NewVectorSelector creates operator which selects vector of series.
func NewVectorSelector(
- pool *model.VectorPool,
selector SeriesSelector,
queryOpts *query.Options,
offset time.Duration,
@@ -66,8 +64,7 @@ func NewVectorSelector(
shard, numShards int,
) model.VectorOperator {
o := &vectorSelector{
- storage: selector,
- vectorPool: pool,
+ storage: selector,
mint: queryOpts.Start.UnixMilli(),
maxt: queryOpts.End.UnixMilli(),
@@ -75,7 +72,7 @@ func NewVectorSelector(
currentStep: queryOpts.Start.UnixMilli(),
lookbackDelta: queryOpts.LookbackDelta.Milliseconds(),
offset: offset.Milliseconds(),
- numSteps: queryOpts.NumSteps(),
+ numSteps: queryOpts.NumStepsPerBatch(),
seriesBatchSize: batchSize,
shard: shard,
@@ -109,28 +106,35 @@ func (o *vectorSelector) Series(ctx context.Context) ([]labels.Labels, error) {
return o.series, nil
}
-func (o *vectorSelector) GetPool() *model.VectorPool {
- return o.vectorPool
-}
-
-func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) {
+func (o *vectorSelector) Next(ctx context.Context, buf []model.StepVector) (int, error) {
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return 0, ctx.Err()
default:
}
if o.currentStep > o.maxt {
- return nil, nil
+ return 0, nil
}
if err := o.loadSeries(ctx); err != nil {
- return nil, err
+ return 0, err
}
ts := o.currentStep
- vectors := o.vectorPool.GetVectorBatch()
- for currStep := 0; currStep < o.numSteps && ts <= o.maxt; currStep++ {
- vectors = append(vectors, o.vectorPool.GetStepVector(ts))
+ n := 0
+ maxSteps := min(o.numSteps, len(buf))
+
+ // Calculate the expected samples per step: the actual number of series we'll process in this batch,
+ // i.e. min(seriesBatchSize, remaining series to process).
+ remainingSeries := int64(len(o.scanners)) - o.currentSeries
+ expectedSamples := int(min(o.seriesBatchSize, remainingSeries))
+ if expectedSamples <= 0 {
+ expectedSamples = len(o.scanners)
+ }
+
+ for currStep := 0; currStep < maxSteps && ts <= o.maxt; currStep++ {
+ buf[n].Reset(ts)
+ n++
ts += o.step
}
@@ -144,21 +148,23 @@ func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) {
series = o.scanners[o.currentSeries]
seriesTs = ts
)
- for currStep := 0; currStep < o.numSteps && seriesTs <= o.maxt; currStep++ {
+ for currStep := 0; currStep < n && seriesTs <= o.maxt; currStep++ {
currStepSamples = 0
t, v, h, ok, err := selectPoint(series.samples, seriesTs, o.lookbackDelta, o.offset)
if err != nil {
- return nil, err
+ return 0, err
}
if o.selectTimestamp {
v = float64(t) / 1000
}
if ok {
if h != nil && !o.selectTimestamp {
- vectors[currStep].AppendHistogram(o.vectorPool, series.signature, h)
+ // Lazily pre-allocate histogram slices only when we actually have histograms.
+ buf[currStep].AppendHistogramWithSizeHint(series.signature, h, expectedSamples)
currStepSamples += telemetry.CalculateHistogramSampleCount(h)
} else {
- vectors[currStep].AppendSample(o.vectorPool, series.signature, v)
+ // Lazily pre-allocate sample slices with a capacity hint.
+ buf[currStep].AppendSampleWithSizeHint(series.signature, v, expectedSamples)
currStepSamples++
}
}
@@ -168,10 +174,10 @@ func (o *vectorSelector) Next(ctx context.Context) ([]model.StepVector, error) {
}
if o.currentSeries == int64(len(o.scanners)) {
- o.currentStep += o.step * int64(o.numSteps)
+ o.currentStep += o.step * int64(n)
o.currentSeries = 0
}
- return vectors, nil
+ return n, nil
}
func (o *vectorSelector) loadSeries(ctx context.Context) error {
@@ -207,7 +213,6 @@ func (o *vectorSelector) loadSeries(ctx context.Context) error {
if o.seriesBatchSize == 0 || numSeries < o.seriesBatchSize {
o.seriesBatchSize = numSeries
}
- o.vectorPool.SetStepSize(int(o.seriesBatchSize))
})
return err
}
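// Sketch, not part of this diff: the hunks above replace the pool-based
// Next() ([]model.StepVector, error) with a caller-supplied buffer. A minimal
// consumer of the new contract could look like the following; `process` is a
// hypothetical callback, and only Next(ctx, buf) (int, error) comes from the
// code above.
func drain(ctx context.Context, op model.VectorOperator, stepsBatch int, process func(model.StepVector)) error {
	buf := make([]model.StepVector, stepsBatch)
	for {
		n, err := op.Next(ctx, buf)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // operator exhausted
		}
		for i := 0; i < n; i++ {
			process(buf[i]) // buf is owned by the caller and reused on the next call
		}
	}
}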
diff --git a/vendor/github.com/thanos-io/promql-engine/warnings/context.go b/vendor/github.com/thanos-io/promql-engine/warnings/context.go
index 8409f661353..3d9b7c01f9a 100644
--- a/vendor/github.com/thanos-io/promql-engine/warnings/context.go
+++ b/vendor/github.com/thanos-io/promql-engine/warnings/context.go
@@ -21,6 +21,24 @@ import (
//lint:ignore faillint We need fmt.Errorf to match Prometheus error format exactly.
var MixedFloatsHistogramsAggWarning = fmt.Errorf("%w aggregation", annotations.MixedFloatsHistogramsWarning)
+// Warnings is a bitset of warning flags that can be returned by functions
+// to indicate warning conditions. The actual warning messages with metric
+// names are emitted by operators that have access to series labels.
+type Warnings uint32
+
+const (
+ WarnNotCounter Warnings = 1 << iota
+ WarnNotGauge
+ WarnMixedFloatsHistograms
+ WarnMixedExponentialCustomBuckets
+ WarnHistogramIgnoredInMixedRange // for _over_time functions, only when both floats and histograms are present
+ WarnHistogramIgnoredInAggregation // for aggregations (max, min, stddev, etc.), always when histograms are ignored
+ WarnCounterResetCollision
+ WarnNHCBBoundsReconciled // for subtraction operations (rate, irate, delta)
+ WarnNHCBBoundsReconciledAgg // for aggregation operations (sum, avg, sum_over_time, avg_over_time)
+ WarnIncompatibleTypesInBinOp
+)
+
type warningKey string
const key warningKey = "promql-warnings"
@@ -87,8 +105,5 @@ func ConvertHistogramError(err error) error {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
return annotations.MixedExponentialCustomHistogramsWarning
}
- if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
- return annotations.IncompatibleCustomBucketsHistogramsWarning
- }
return err
}
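// Sketch, not part of this diff: the Warnings bitset lets a deep function
// report several conditions in a single uint32; an operator that has access
// to series labels can expand them into full annotations later. The `emit`
// callback is hypothetical; the flag names come from the const block above.
func reportWarnings(w Warnings, metricName string, emit func(string)) {
	if w&WarnNotCounter != 0 {
		emit("metric might not be a counter: " + metricName)
	}
	if w&WarnMixedFloatsHistograms != 0 {
		emit("mixed floats and histograms for: " + metricName)
	}
	if w&WarnHistogramIgnoredInAggregation != 0 {
		emit("histograms ignored in aggregation of: " + metricName)
	}
}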
diff --git a/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/batch.go b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/batch.go
new file mode 100644
index 00000000000..6de6fa012a3
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/batch.go
@@ -0,0 +1,30 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package querypb
+
+import (
+ "github.com/thanos-io/thanos/pkg/store/storepb/prompb"
+)
+
+// NewQueryBatchResponse creates a QueryResponse with a batch of timeseries.
+func NewQueryBatchResponse(series []*prompb.TimeSeries) *QueryResponse {
+ return &QueryResponse{
+ Result: &QueryResponse_TimeseriesBatch{
+ TimeseriesBatch: &TimeSeriesBatch{
+ Series: series,
+ },
+ },
+ }
+}
+
+// NewQueryRangeBatchResponse creates a QueryRangeResponse with a batch of timeseries.
+func NewQueryRangeBatchResponse(series []*prompb.TimeSeries) *QueryRangeResponse {
+ return &QueryRangeResponse{
+ Result: &QueryRangeResponse_TimeseriesBatch{
+ TimeseriesBatch: &TimeSeriesBatch{
+ Series: series,
+ },
+ },
+ }
+}
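// Sketch, not part of this diff: the sending side these constructors imply.
// Accumulate series up to the negotiated batch size and flush; fall back to
// one series per message when the size is 0 or 1. Query_QueryServer is the
// generated streaming interface; the wrapper field names follow gogo's
// conventions as seen in query.pb.go.
func sendBatched(srv Query_QueryServer, all []*prompb.TimeSeries, batchSize int64) error {
	if batchSize <= 1 {
		for _, ts := range all {
			resp := &QueryResponse{Result: &QueryResponse_Timeseries{Timeseries: ts}}
			if err := srv.Send(resp); err != nil {
				return err
			}
		}
		return nil
	}
	for len(all) > 0 {
		n := min(int64(len(all)), batchSize)
		if err := srv.Send(NewQueryBatchResponse(all[:n])); err != nil {
			return err
		}
		all = all[n:]
	}
	return nil
}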
diff --git a/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.pb.go b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.pb.go
index ebeec71d80b..0ceeccc2ffe 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.pb.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.pb.go
@@ -96,6 +96,43 @@ func (m *QueryStats) XXX_DiscardUnknown() {
var xxx_messageInfo_QueryStats proto.InternalMessageInfo
+type TimeSeriesBatch struct {
+ Series []*prompb.TimeSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"`
+}
+
+func (m *TimeSeriesBatch) Reset() { *m = TimeSeriesBatch{} }
+func (m *TimeSeriesBatch) String() string { return proto.CompactTextString(m) }
+func (*TimeSeriesBatch) ProtoMessage() {}
+func (*TimeSeriesBatch) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4b2aba43925d729f, []int{1}
+}
+func (m *TimeSeriesBatch) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TimeSeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TimeSeriesBatch.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TimeSeriesBatch) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeriesBatch.Merge(m, src)
+}
+func (m *TimeSeriesBatch) XXX_Size() int {
+ return m.Size()
+}
+func (m *TimeSeriesBatch) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeriesBatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeriesBatch proto.InternalMessageInfo
+
type QueryRequest struct {
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
QueryPlan *QueryPlan `protobuf:"bytes,14,opt,name=queryPlan,proto3" json:"queryPlan,omitempty"`
@@ -110,13 +147,16 @@ type QueryRequest struct {
ShardInfo *storepb.ShardInfo `protobuf:"bytes,11,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"`
LookbackDeltaSeconds int64 `protobuf:"varint,12,opt,name=lookback_delta_seconds,json=lookbackDeltaSeconds,proto3" json:"lookback_delta_seconds,omitempty"`
Engine EngineType `protobuf:"varint,13,opt,name=engine,proto3,enum=thanos.EngineType" json:"engine,omitempty"`
+ // response_batch_size controls how many TimeSeries are batched per response message.
+ // If set to 0 or 1, each TimeSeries is sent as a separate response (default behavior).
+ ResponseBatchSize int64 `protobuf:"varint,15,opt,name=response_batch_size,json=responseBatchSize,proto3" json:"response_batch_size,omitempty"`
}
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{1}
+ return fileDescriptor_4b2aba43925d729f, []int{2}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -153,7 +193,7 @@ func (m *StoreMatchers) Reset() { *m = StoreMatchers{} }
func (m *StoreMatchers) String() string { return proto.CompactTextString(m) }
func (*StoreMatchers) ProtoMessage() {}
func (*StoreMatchers) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{2}
+ return fileDescriptor_4b2aba43925d729f, []int{3}
}
func (m *StoreMatchers) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -187,6 +227,7 @@ type QueryResponse struct {
// *QueryResponse_Warnings
// *QueryResponse_Timeseries
// *QueryResponse_Stats
+ // *QueryResponse_TimeseriesBatch
Result isQueryResponse_Result `protobuf_oneof:"result"`
}
@@ -194,7 +235,7 @@ func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (m *QueryResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{3}
+ return fileDescriptor_4b2aba43925d729f, []int{4}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -238,10 +279,14 @@ type QueryResponse_Timeseries struct {
type QueryResponse_Stats struct {
Stats *QueryStats `protobuf:"bytes,3,opt,name=stats,proto3,oneof" json:"stats,omitempty"`
}
+type QueryResponse_TimeseriesBatch struct {
+ TimeseriesBatch *TimeSeriesBatch `protobuf:"bytes,4,opt,name=timeseries_batch,json=timeseriesBatch,proto3,oneof" json:"timeseries_batch,omitempty"`
+}
-func (*QueryResponse_Warnings) isQueryResponse_Result() {}
-func (*QueryResponse_Timeseries) isQueryResponse_Result() {}
-func (*QueryResponse_Stats) isQueryResponse_Result() {}
+func (*QueryResponse_Warnings) isQueryResponse_Result() {}
+func (*QueryResponse_Timeseries) isQueryResponse_Result() {}
+func (*QueryResponse_Stats) isQueryResponse_Result() {}
+func (*QueryResponse_TimeseriesBatch) isQueryResponse_Result() {}
func (m *QueryResponse) GetResult() isQueryResponse_Result {
if m != nil {
@@ -271,12 +316,20 @@ func (m *QueryResponse) GetStats() *QueryStats {
return nil
}
+func (m *QueryResponse) GetTimeseriesBatch() *TimeSeriesBatch {
+ if x, ok := m.GetResult().(*QueryResponse_TimeseriesBatch); ok {
+ return x.TimeseriesBatch
+ }
+ return nil
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*QueryResponse) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*QueryResponse_Warnings)(nil),
(*QueryResponse_Timeseries)(nil),
(*QueryResponse_Stats)(nil),
+ (*QueryResponse_TimeseriesBatch)(nil),
}
}
@@ -290,7 +343,7 @@ func (m *QueryPlan) Reset() { *m = QueryPlan{} }
func (m *QueryPlan) String() string { return proto.CompactTextString(m) }
func (*QueryPlan) ProtoMessage() {}
func (*QueryPlan) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{4}
+ return fileDescriptor_4b2aba43925d729f, []int{5}
}
func (m *QueryPlan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -368,13 +421,16 @@ type QueryRangeRequest struct {
ShardInfo *storepb.ShardInfo `protobuf:"bytes,13,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"`
LookbackDeltaSeconds int64 `protobuf:"varint,14,opt,name=lookback_delta_seconds,json=lookbackDeltaSeconds,proto3" json:"lookback_delta_seconds,omitempty"`
Engine EngineType `protobuf:"varint,15,opt,name=engine,proto3,enum=thanos.EngineType" json:"engine,omitempty"`
+ // response_batch_size controls how many TimeSeries are batched per response message.
+ // If set to 0 or 1, each TimeSeries is sent as a separate response (default behavior).
+ ResponseBatchSize int64 `protobuf:"varint,17,opt,name=response_batch_size,json=responseBatchSize,proto3" json:"response_batch_size,omitempty"`
}
func (m *QueryRangeRequest) Reset() { *m = QueryRangeRequest{} }
func (m *QueryRangeRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRangeRequest) ProtoMessage() {}
func (*QueryRangeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{5}
+ return fileDescriptor_4b2aba43925d729f, []int{6}
}
func (m *QueryRangeRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -408,6 +464,7 @@ type QueryRangeResponse struct {
// *QueryRangeResponse_Warnings
// *QueryRangeResponse_Timeseries
// *QueryRangeResponse_Stats
+ // *QueryRangeResponse_TimeseriesBatch
Result isQueryRangeResponse_Result `protobuf_oneof:"result"`
}
@@ -415,7 +472,7 @@ func (m *QueryRangeResponse) Reset() { *m = QueryRangeResponse{} }
func (m *QueryRangeResponse) String() string { return proto.CompactTextString(m) }
func (*QueryRangeResponse) ProtoMessage() {}
func (*QueryRangeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4b2aba43925d729f, []int{6}
+ return fileDescriptor_4b2aba43925d729f, []int{7}
}
func (m *QueryRangeResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -459,10 +516,14 @@ type QueryRangeResponse_Timeseries struct {
type QueryRangeResponse_Stats struct {
Stats *QueryStats `protobuf:"bytes,3,opt,name=stats,proto3,oneof" json:"stats,omitempty"`
}
+type QueryRangeResponse_TimeseriesBatch struct {
+ TimeseriesBatch *TimeSeriesBatch `protobuf:"bytes,4,opt,name=timeseries_batch,json=timeseriesBatch,proto3,oneof" json:"timeseries_batch,omitempty"`
+}
-func (*QueryRangeResponse_Warnings) isQueryRangeResponse_Result() {}
-func (*QueryRangeResponse_Timeseries) isQueryRangeResponse_Result() {}
-func (*QueryRangeResponse_Stats) isQueryRangeResponse_Result() {}
+func (*QueryRangeResponse_Warnings) isQueryRangeResponse_Result() {}
+func (*QueryRangeResponse_Timeseries) isQueryRangeResponse_Result() {}
+func (*QueryRangeResponse_Stats) isQueryRangeResponse_Result() {}
+func (*QueryRangeResponse_TimeseriesBatch) isQueryRangeResponse_Result() {}
func (m *QueryRangeResponse) GetResult() isQueryRangeResponse_Result {
if m != nil {
@@ -492,18 +553,27 @@ func (m *QueryRangeResponse) GetStats() *QueryStats {
return nil
}
+func (m *QueryRangeResponse) GetTimeseriesBatch() *TimeSeriesBatch {
+ if x, ok := m.GetResult().(*QueryRangeResponse_TimeseriesBatch); ok {
+ return x.TimeseriesBatch
+ }
+ return nil
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*QueryRangeResponse) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*QueryRangeResponse_Warnings)(nil),
(*QueryRangeResponse_Timeseries)(nil),
(*QueryRangeResponse_Stats)(nil),
+ (*QueryRangeResponse_TimeseriesBatch)(nil),
}
}
func init() {
proto.RegisterEnum("thanos.EngineType", EngineType_name, EngineType_value)
proto.RegisterType((*QueryStats)(nil), "thanos.QueryStats")
+ proto.RegisterType((*TimeSeriesBatch)(nil), "thanos.TimeSeriesBatch")
proto.RegisterType((*QueryRequest)(nil), "thanos.QueryRequest")
proto.RegisterType((*StoreMatchers)(nil), "thanos.StoreMatchers")
proto.RegisterType((*QueryResponse)(nil), "thanos.QueryResponse")
@@ -515,60 +585,65 @@ func init() {
func init() { proto.RegisterFile("api/query/querypb/query.proto", fileDescriptor_4b2aba43925d729f) }
var fileDescriptor_4b2aba43925d729f = []byte{
- // 848 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xcf, 0x8e, 0xdb, 0x44,
- 0x1c, 0xb6, 0xbb, 0x49, 0x36, 0xf9, 0x39, 0xc9, 0xba, 0xa3, 0x2c, 0xb8, 0x01, 0x82, 0x09, 0xaa,
- 0x08, 0x2b, 0xd8, 0x54, 0xa1, 0x70, 0x43, 0x82, 0xa5, 0x95, 0x16, 0x54, 0xa4, 0xd6, 0xc9, 0x89,
- 0x8b, 0x35, 0x89, 0x7f, 0x9b, 0x98, 0x38, 0x33, 0xae, 0x67, 0x0c, 0xdd, 0x17, 0xe0, 0xcc, 0x33,
- 0x70, 0xe1, 0x15, 0x78, 0x84, 0x3d, 0x56, 0xe2, 0xc2, 0x09, 0xc1, 0xee, 0x8b, 0x20, 0x8f, 0xff,
- 0xc4, 0xae, 0x56, 0x90, 0xa8, 0x17, 0x2e, 0xf6, 0xcc, 0xf7, 0x7d, 0x33, 0xe3, 0xdf, 0xe7, 0xdf,
- 0x27, 0x1b, 0xde, 0xa1, 0xa1, 0x3f, 0x7e, 0x1e, 0x63, 0x74, 0x99, 0x5e, 0xc3, 0x79, 0x7a, 0x3f,
- 0x0d, 0x23, 0x2e, 0x39, 0x69, 0xc8, 0x15, 0x65, 0x5c, 0xf4, 0x7b, 0x4b, 0xbe, 0xe4, 0x0a, 0x1a,
- 0x27, 0xa3, 0x94, 0xed, 0xdf, 0x13, 0x92, 0x47, 0x38, 0x56, 0xd7, 0x70, 0x3e, 0x96, 0x97, 0x21,
- 0x8a, 0x8c, 0x7a, 0xb3, 0x4a, 0x45, 0xe1, 0x22, 0x23, 0xec, 0x2a, 0x11, 0x46, 0x7c, 0x53, 0x5d,
- 0x3a, 0x9c, 0x01, 0x3c, 0x4b, 0x1e, 0x61, 0x2a, 0xa9, 0x14, 0xe4, 0x7d, 0xe8, 0x08, 0xba, 0x09,
- 0x03, 0x14, 0xae, 0xe4, 0x92, 0x06, 0x96, 0x6e, 0xeb, 0xa3, 0x03, 0xa7, 0x9d, 0x81, 0xb3, 0x04,
- 0x23, 0xef, 0x41, 0x3b, 0x44, 0xba, 0x76, 0x33, 0xd0, 0xba, 0xa3, 0x34, 0x46, 0x82, 0x4d, 0x53,
- 0x68, 0xf8, 0x7b, 0x0d, 0xda, 0x6a, 0x5b, 0x07, 0x9f, 0xc7, 0x28, 0x24, 0xe9, 0x41, 0x5d, 0x55,
- 0xaa, 0x36, 0x6c, 0x39, 0xe9, 0x84, 0x8c, 0xa1, 0xa5, 0x06, 0x4f, 0x03, 0xca, 0xac, 0xae, 0xad,
- 0x8f, 0x8c, 0xc9, 0xdd, 0xd3, 0xd4, 0x84, 0xd3, 0x82, 0x70, 0xb6, 0x9a, 0xe4, 0x68, 0xe9, 0x6f,
- 0xd0, 0x15, 0xb8, 0xe0, 0xcc, 0x2b, 0x8e, 0x4e, 0xb0, 0x69, 0x0a, 0x91, 0x0f, 0xe0, 0x28, 0x99,
- 0xf2, 0x58, 0x16, 0xaa, 0x03, 0xa5, 0xea, 0x66, 0x70, 0x2e, 0x7c, 0x08, 0x6f, 0x6c, 0xe8, 0x0b,
- 0x37, 0x42, 0xc1, 0x83, 0x58, 0xfa, 0x9c, 0x15, 0xfa, 0x9a, 0xd2, 0xf7, 0x36, 0xf4, 0x85, 0x53,
- 0x90, 0xf9, 0xaa, 0xfb, 0xd0, 0x8d, 0x30, 0x0c, 0xfc, 0x05, 0x75, 0x03, 0x3a, 0xc7, 0x40, 0x58,
- 0x75, 0xfb, 0x60, 0xd4, 0x72, 0x3a, 0x19, 0xfa, 0x44, 0x81, 0xe4, 0x4b, 0xe8, 0x28, 0xd3, 0xbf,
- 0xa5, 0x72, 0xb1, 0xc2, 0x48, 0x58, 0x0d, 0xfb, 0x60, 0x64, 0x4c, 0x8e, 0xf3, 0xea, 0xa6, 0x65,
- 0xf2, 0xac, 0x76, 0xf5, 0xe7, 0xbb, 0x9a, 0x53, 0x5d, 0x41, 0x6c, 0x30, 0x90, 0xd1, 0x79, 0x80,
- 0x8f, 0xd0, 0x8b, 0x43, 0xeb, 0xd0, 0xd6, 0x47, 0x4d, 0xa7, 0x0c, 0x91, 0x87, 0x70, 0x9c, 0x4e,
- 0x9f, 0xd2, 0x48, 0xfa, 0x34, 0x70, 0x50, 0x84, 0x9c, 0x09, 0xb4, 0x9a, 0x4a, 0x7b, 0x3b, 0x49,
- 0x06, 0x00, 0x62, 0xed, 0x87, 0x5f, 0xad, 0x62, 0xb6, 0x16, 0x16, 0x28, 0x69, 0x09, 0x21, 0x0f,
- 0x00, 0xc4, 0x8a, 0x46, 0x9e, 0xeb, 0xb3, 0x0b, 0x6e, 0x19, 0xd5, 0xb7, 0x32, 0x4d, 0x98, 0xaf,
- 0xd9, 0x05, 0x77, 0x5a, 0x22, 0x1f, 0x26, 0x4e, 0x06, 0x9c, 0xaf, 0xe7, 0x74, 0xb1, 0x76, 0x3d,
- 0x0c, 0x24, 0x2d, 0x9c, 0x6c, 0xa7, 0x4e, 0xe6, 0xec, 0xa3, 0x84, 0xcc, 0x9d, 0x3c, 0x81, 0x06,
- 0xb2, 0xa5, 0xcf, 0xd0, 0xea, 0xd8, 0xfa, 0xa8, 0x3b, 0x21, 0xf9, 0x19, 0x8f, 0x15, 0x3a, 0xbb,
- 0x0c, 0xd1, 0xc9, 0x14, 0xdf, 0xd4, 0x9a, 0x2d, 0x13, 0x86, 0xcf, 0xa0, 0x53, 0xf1, 0x8d, 0x7c,
- 0x01, 0x1d, 0xf5, 0x12, 0x0a, 0x97, 0x75, 0xe5, 0x72, 0x2f, 0xdf, 0xe9, 0x49, 0x89, 0xcc, 0x4d,
- 0xae, 0x2c, 0x18, 0xfe, 0xa2, 0x43, 0x27, 0x6b, 0xd4, 0xcc, 0x9e, 0xb7, 0xa1, 0xf9, 0x23, 0x8d,
- 0x98, 0xcf, 0x96, 0x22, 0x6d, 0xd6, 0x73, 0xcd, 0x29, 0x10, 0xf2, 0x39, 0x40, 0xd2, 0x46, 0x02,
- 0x23, 0x3f, 0xeb, 0x7c, 0x63, 0xf2, 0x56, 0x12, 0xa5, 0x0d, 0xca, 0x15, 0xc6, 0xc2, 0x5d, 0xf0,
- 0xf0, 0xf2, 0x74, 0xa6, 0xfa, 0x31, 0x91, 0x9c, 0x6b, 0x4e, 0x69, 0x01, 0x39, 0x81, 0xba, 0x48,
- 0x82, 0xa6, 0x5a, 0xd2, 0xd8, 0x96, 0xbc, 0x8d, 0xe0, 0xb9, 0xe6, 0xa4, 0x92, 0xb3, 0x26, 0x34,
- 0x22, 0x14, 0x71, 0x20, 0x87, 0x1f, 0x97, 0x62, 0x42, 0x7a, 0x50, 0xfb, 0x5e, 0x70, 0xa6, 0x9e,
- 0xad, 0x7d, 0xae, 0x39, 0x6a, 0x76, 0x06, 0xd0, 0x44, 0xb6, 0xe0, 0x9e, 0xcf, 0x96, 0xc3, 0xdf,
- 0xea, 0x70, 0x37, 0xad, 0x89, 0xb2, 0x25, 0xee, 0x91, 0x40, 0x73, 0x87, 0x04, 0x7e, 0x04, 0x44,
- 0x48, 0x1a, 0x49, 0xf7, 0x96, 0x1c, 0x9a, 0x8a, 0x99, 0x95, 0xc2, 0x38, 0x02, 0x13, 0x99, 0x57,
- 0xd5, 0x66, 0x69, 0x44, 0xe6, 0x95, 0x95, 0x1f, 0x82, 0xe9, 0x33, 0x89, 0xd1, 0x0f, 0x34, 0x78,
- 0x25, 0x87, 0x47, 0x39, 0xfe, 0x2f, 0x09, 0xaf, 0xef, 0x99, 0xf0, 0xc6, 0x5e, 0x09, 0x3f, 0xdc,
- 0x29, 0xe1, 0xcd, 0xd7, 0x4d, 0x78, 0x6b, 0x8f, 0x84, 0xc3, 0xee, 0x09, 0x6f, 0xff, 0x47, 0xc2,
- 0x3b, 0xaf, 0x95, 0xf0, 0xee, 0x4e, 0x09, 0x3f, 0xda, 0x21, 0xe1, 0x86, 0xd9, 0x1e, 0xfe, 0xaa,
- 0x03, 0x29, 0xb7, 0xee, 0xff, 0x36, 0x93, 0x27, 0x9f, 0x02, 0x6c, 0xab, 0x20, 0x06, 0x1c, 0x7a,
- 0x78, 0x41, 0xe3, 0x40, 0x9a, 0x1a, 0xe9, 0x02, 0x6c, 0x0f, 0x37, 0x75, 0x02, 0x90, 0x7d, 0xd8,
- 0xcd, 0x3b, 0x93, 0x9f, 0x74, 0xa8, 0xab, 0x8d, 0xc9, 0x67, 0xf9, 0xa0, 0x57, 0x39, 0x30, 0x8b,
- 0x6b, 0xff, 0xf8, 0x15, 0x34, 0x75, 0xe2, 0x81, 0x4e, 0x1e, 0x67, 0x1f, 0x6c, 0xe5, 0x10, 0xb9,
- 0x57, 0x95, 0x95, 0x02, 0xdf, 0xef, 0xdf, 0x46, 0xe5, 0xdb, 0x9c, 0xdd, 0xbf, 0xfa, 0x7b, 0xa0,
- 0x5d, 0x5d, 0x0f, 0xf4, 0x97, 0xd7, 0x03, 0xfd, 0xaf, 0xeb, 0x81, 0xfe, 0xf3, 0xcd, 0x40, 0x7b,
- 0x79, 0x33, 0xd0, 0xfe, 0xb8, 0x19, 0x68, 0xdf, 0x1d, 0x66, 0xbf, 0x27, 0xf3, 0x86, 0xfa, 0x4b,
- 0xf8, 0xe4, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x44, 0xb0, 0xce, 0xba, 0x08, 0x00, 0x00,
+ // 926 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xcf, 0x73, 0xdb, 0x44,
+ 0x14, 0x96, 0x92, 0xd8, 0xb1, 0x9f, 0xfc, 0x43, 0x59, 0x1c, 0xaa, 0x1a, 0x30, 0xc6, 0x4c, 0x07,
+ 0x93, 0x01, 0xbb, 0xe3, 0x16, 0x6e, 0xcc, 0x80, 0x49, 0x19, 0xc3, 0x94, 0x99, 0x56, 0xf6, 0x89,
+ 0x8b, 0x66, 0x6d, 0xbf, 0xd8, 0xc2, 0xf2, 0xae, 0xaa, 0x5d, 0x41, 0xd3, 0x2b, 0x33, 0x9c, 0xf9,
+ 0xb3, 0x72, 0xec, 0x91, 0x13, 0x03, 0xc9, 0xa5, 0x17, 0xfe, 0x07, 0x46, 0x2b, 0xc9, 0x96, 0x32,
+ 0xa1, 0x24, 0xe4, 0xd6, 0x8b, 0xbd, 0xfb, 0x7d, 0xef, 0xd9, 0x6f, 0xbf, 0x7d, 0xef, 0x9b, 0x85,
+ 0xf7, 0xa8, 0xef, 0xf6, 0x9f, 0x85, 0x18, 0x9c, 0xc6, 0x9f, 0xfe, 0x34, 0xfe, 0xee, 0xf9, 0x01,
+ 0x97, 0x9c, 0x14, 0xe5, 0x92, 0x32, 0x2e, 0x9a, 0x8d, 0x05, 0x5f, 0x70, 0x05, 0xf5, 0xa3, 0x55,
+ 0xcc, 0x36, 0xef, 0x0a, 0xc9, 0x03, 0xec, 0xab, 0x4f, 0x7f, 0xda, 0x97, 0xa7, 0x3e, 0x8a, 0x84,
+ 0xba, 0x93, 0xa7, 0x02, 0x7f, 0x96, 0x10, 0xed, 0x3c, 0xe1, 0x07, 0x7c, 0x9d, 0x4f, 0xed, 0x4c,
+ 0x00, 0x9e, 0x46, 0x25, 0x8c, 0x25, 0x95, 0x82, 0x7c, 0x08, 0x55, 0x41, 0xd7, 0xbe, 0x87, 0xc2,
+ 0x91, 0x5c, 0x52, 0xcf, 0xd2, 0xdb, 0x7a, 0x77, 0xd7, 0xae, 0x24, 0xe0, 0x24, 0xc2, 0xc8, 0x07,
+ 0x50, 0xf1, 0x91, 0xae, 0x9c, 0x04, 0xb4, 0x76, 0x54, 0x8c, 0x11, 0x61, 0xe3, 0x18, 0xea, 0x7c,
+ 0x03, 0xf5, 0x89, 0xbb, 0xc6, 0x31, 0x06, 0x2e, 0x8a, 0x21, 0x95, 0xb3, 0x25, 0x79, 0x00, 0x45,
+ 0xa1, 0xb6, 0x96, 0xde, 0xde, 0xed, 0x1a, 0x83, 0x77, 0xa2, 0x02, 0xd6, 0x28, 0x97, 0x18, 0x0a,
+ 0x67, 0xc6, 0xfd, 0xd3, 0xde, 0x36, 0xc3, 0x4e, 0x42, 0x3b, 0xbf, 0x14, 0xa0, 0xa2, 0xca, 0xb3,
+ 0xf1, 0x59, 0x88, 0x42, 0x92, 0x06, 0x14, 0x94, 0x62, 0xaa, 0xb0, 0xb2, 0x1d, 0x6f, 0x48, 0x1f,
+ 0xca, 0x6a, 0xf1, 0xc4, 0xa3, 0xcc, 0xaa, 0xb5, 0xf5, 0xae, 0x31, 0x38, 0xe8, 0xc5, 0x62, 0xf6,
+ 0x36, 0x84, 0xbd, 0x8d, 0x89, 0x8e, 0x20, 0xdd, 0x35, 0x3a, 0x02, 0x67, 0x9c, 0xcd, 0x37, 0x47,
+ 0x90, 0xaa, 0x02, 0x05, 0x91, 0x8f, 0xa0, 0x1e, 0x6d, 0x79, 0x28, 0x37, 0x51, 0xbb, 0x2a, 0xaa,
+ 0x96, 0xc0, 0x69, 0xe0, 0x43, 0x78, 0x7b, 0x4d, 0x9f, 0x3b, 0x01, 0x0a, 0xee, 0x85, 0xd2, 0xe5,
+ 0x6c, 0x13, 0xbf, 0xa7, 0xe2, 0x1b, 0x6b, 0xfa, 0xdc, 0xde, 0x90, 0x69, 0xd6, 0x3d, 0xa8, 0x05,
+ 0xe8, 0x7b, 0xee, 0x8c, 0x3a, 0x1e, 0x9d, 0xa2, 0x27, 0xac, 0x42, 0x7b, 0xb7, 0x5b, 0xb6, 0xab,
+ 0x09, 0xfa, 0x58, 0x81, 0xe4, 0x2b, 0xa8, 0xaa, 0xcb, 0xfb, 0x3e, 0xd2, 0x10, 0x03, 0x61, 0x15,
+ 0x95, 0x78, 0x87, 0xe9, 0xe9, 0xc6, 0x59, 0x72, 0xb8, 0x77, 0xf6, 0xc7, 0xfb, 0x9a, 0x9d, 0xcf,
+ 0x20, 0x6d, 0x30, 0x90, 0xd1, 0xa9, 0x87, 0xc7, 0x38, 0x0f, 0x7d, 0x6b, 0xbf, 0xad, 0x77, 0x4b,
+ 0x76, 0x16, 0x22, 0x0f, 0xe1, 0x30, 0xde, 0x3e, 0xa1, 0x81, 0x74, 0xa9, 0x67, 0xa3, 0xf0, 0x39,
+ 0x13, 0x68, 0x95, 0x54, 0xec, 0xd5, 0x24, 0x69, 0x01, 0x88, 0x95, 0xeb, 0x7f, 0xbd, 0x0c, 0xd9,
+ 0x4a, 0x58, 0xa0, 0x42, 0x33, 0x08, 0xb9, 0x0f, 0x20, 0x96, 0x34, 0x98, 0x3b, 0x2e, 0x3b, 0xe1,
+ 0x96, 0x91, 0xbf, 0x95, 0x71, 0xc4, 0x7c, 0xcb, 0x4e, 0xb8, 0x5d, 0x16, 0xe9, 0x32, 0x52, 0xd2,
+ 0xe3, 0x7c, 0x35, 0xa5, 0xb3, 0x95, 0x33, 0x47, 0x4f, 0xd2, 0x8d, 0x92, 0x95, 0x58, 0xc9, 0x94,
+ 0x3d, 0x8e, 0xc8, 0x54, 0xc9, 0x23, 0x28, 0x22, 0x5b, 0xb8, 0x0c, 0xad, 0x6a, 0x5b, 0xef, 0xd6,
+ 0x06, 0x24, 0xfd, 0x8f, 0x47, 0x0a, 0x9d, 0x9c, 0xfa, 0x68, 0x27, 0x11, 0xa4, 0x07, 0x6f, 0x05,
+ 0x49, 0xfd, 0xce, 0x34, 0x12, 0xc8, 0x11, 0xee, 0x0b, 0xb4, 0xea, 0xea, 0xe7, 0x0f, 0x52, 0x4a,
+ 0x35, 0xec, 0xd8, 0x7d, 0x81, 0xdf, 0xed, 0x95, 0xca, 0x26, 0x74, 0x9e, 0x42, 0x35, 0xa7, 0x33,
+ 0xf9, 0x12, 0xaa, 0xea, 0xd2, 0x36, 0xb7, 0x12, 0xb7, 0x74, 0x23, 0xfd, 0xe7, 0xc7, 0x19, 0x32,
+ 0xbd, 0x94, 0x5c, 0x42, 0xe7, 0x95, 0x0e, 0xd5, 0xa4, 0xb1, 0x13, 0x39, 0xdf, 0x85, 0xd2, 0xcf,
+ 0x34, 0x60, 0x2e, 0x5b, 0x88, 0xb8, 0xb9, 0x47, 0x9a, 0xbd, 0x41, 0xc8, 0x17, 0x00, 0x51, 0xdb,
+ 0x25, 0x13, 0xb4, 0xa3, 0xc4, 0x7c, 0xdd, 0x04, 0x8d, 0x34, 0x3b, 0x93, 0x40, 0x8e, 0xa0, 0x20,
+ 0xa2, 0x01, 0x57, 0x2d, 0x6c, 0x6c, 0x25, 0xda, 0x8e, 0xfe, 0x48, 0xb3, 0xe3, 0x10, 0x72, 0x0c,
+ 0xe6, 0x36, 0x33, 0x56, 0x49, 0x75, 0xb2, 0x31, 0xb8, 0x93, 0xa6, 0x5d, 0x9a, 0xed, 0x91, 0x66,
+ 0xd7, 0xb7, 0x29, 0x0a, 0x1a, 0x96, 0xa0, 0x18, 0xa0, 0x08, 0x3d, 0xd9, 0xf9, 0x34, 0x33, 0x9c,
+ 0xa4, 0x01, 0x7b, 0x3f, 0x0a, 0xce, 0xd4, 0x09, 0x2b, 0x23, 0xcd, 0x56, 0xbb, 0x21, 0x40, 0x09,
+ 0xd9, 0x8c, 0xcf, 0x5d, 0xb6, 0xe8, 0xbc, 0x2a, 0xc0, 0x41, 0xac, 0x0c, 0x65, 0x0b, 0xbc, 0xc1,
+ 0xdc, 0x9b, 0xd7, 0x98, 0xfb, 0x4f, 0x80, 0x08, 0x49, 0x03, 0xe9, 0x5c, 0x31, 0xfd, 0xa6, 0x62,
+ 0x26, 0x19, 0x0b, 0xe8, 0x82, 0x89, 0x6c, 0x9e, 0x8f, 0x4d, 0x3c, 0x00, 0xd9, 0x3c, 0x1b, 0xf9,
+ 0x31, 0x98, 0x2e, 0x93, 0x18, 0xfc, 0x44, 0xbd, 0x4b, 0xd3, 0x5f, 0x4f, 0xf1, 0xd7, 0xf8, 0x4a,
+ 0xe1, 0x86, 0xbe, 0x52, 0xbc, 0x91, 0xaf, 0xec, 0x5f, 0xcb, 0x57, 0x4a, 0xb7, 0xf5, 0x95, 0xf2,
+ 0x0d, 0x7c, 0x05, 0xae, 0xef, 0x2b, 0x95, 0xff, 0xf0, 0x95, 0xea, 0xad, 0x7c, 0xa5, 0x76, 0x2d,
+ 0x5f, 0xa9, 0xff, 0x5f, 0x5f, 0x39, 0xf8, 0x77, 0x5f, 0x31, 0xcc, 0x4a, 0xe7, 0x6f, 0x1d, 0x48,
+ 0xb6, 0xd5, 0xdf, 0x70, 0x27, 0x38, 0xfa, 0x0c, 0x60, 0xab, 0x1d, 0x31, 0x60, 0x7f, 0x8e, 0x27,
+ 0x34, 0xf4, 0xa4, 0xa9, 0x91, 0x1a, 0xc0, 0xf6, 0x08, 0xa6, 0x4e, 0x00, 0x92, 0xc7, 0x90, 0xb9,
+ 0x33, 0xf8, 0x55, 0x87, 0x82, 0x2a, 0x8f, 0x7c, 0x9e, 0x2e, 0x1a, 0xb9, 0xb2, 0x13, 0x93, 0x68,
+ 0x1e, 0x5e, 0x42, 0x63, 0x3d, 0xef, 0xeb, 0xe4, 0x51, 0xf2, 0xc8, 0x51, 0x3a, 0x93, 0xbb, 0xf9,
+ 0xb0, 0x8c, 0xcd, 0x34, 0x9b, 0x57, 0x51, 0xe9, 0xcf, 0x0c, 0xef, 0x9d, 0xfd, 0xd5, 0xd2, 0xce,
+ 0xce, 0x5b, 0xfa, 0xcb, 0xf3, 0x96, 0xfe, 0xe7, 0x79, 0x4b, 0xff, 0xed, 0xa2, 0xa5, 0xbd, 0xbc,
+ 0x68, 0x69, 0xbf, 0x5f, 0xb4, 0xb4, 0x1f, 0xf6, 0x93, 0x27, 0xdd, 0xb4, 0xa8, 0x5e, 0x56, 0x0f,
+ 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xdd, 0xe8, 0xcf, 0xee, 0x09, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -774,6 +849,43 @@ func (m *QueryStats) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *TimeSeriesBatch) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimeSeriesBatch) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TimeSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Series[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *QueryRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -794,6 +906,11 @@ func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
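+ // Tag byte: field 15 with wire type 0 (varint) packs into a single byte,
+ // (15<<3)|0 = 120 = 0x78; the buffer is filled backwards, so the value is
+ // written before its tag.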
+ if m.ResponseBatchSize != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.ResponseBatchSize))
+ i--
+ dAtA[i] = 0x78
+ }
if m.QueryPlan != nil {
{
size, err := m.QueryPlan.MarshalToSizedBuffer(dAtA[:i])
@@ -1031,6 +1148,27 @@ func (m *QueryResponse_Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
return len(dAtA) - i, nil
}
+func (m *QueryResponse_TimeseriesBatch) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryResponse_TimeseriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TimeseriesBatch != nil {
+ {
+ size, err := m.TimeseriesBatch.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
func (m *QueryPlan) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1099,6 +1237,13 @@ func (m *QueryRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
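+ // Field 17 no longer fits a one-byte tag: (17<<3)|0 = 136 encodes as the
+ // two-byte varint 0x88 0x01. The buffer is filled backwards, so 0x01 is
+ // written before 0x88 below.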
+ if m.ResponseBatchSize != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.ResponseBatchSize))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
+ }
if m.QueryPlan != nil {
{
size, err := m.QueryPlan.MarshalToSizedBuffer(dAtA[:i])
@@ -1311,6 +1456,27 @@ func (m *QueryRangeResponse_Stats) MarshalToSizedBuffer(dAtA []byte) (int, error
}
return len(dAtA) - i, nil
}
+func (m *QueryRangeResponse_TimeseriesBatch) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryRangeResponse_TimeseriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TimeseriesBatch != nil {
+ {
+ size, err := m.TimeseriesBatch.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
offset -= sovQuery(v)
base := offset
@@ -1337,6 +1503,21 @@ func (m *QueryStats) Size() (n int) {
return n
}
+func (m *TimeSeriesBatch) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, e := range m.Series {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ return n
+}
+
func (m *QueryRequest) Size() (n int) {
if m == nil {
return 0
@@ -1391,6 +1572,9 @@ func (m *QueryRequest) Size() (n int) {
l = m.QueryPlan.Size()
n += 1 + l + sovQuery(uint64(l))
}
+ if m.ResponseBatchSize != 0 {
+ n += 1 + sovQuery(uint64(m.ResponseBatchSize))
+ }
return n
}
@@ -1455,6 +1639,18 @@ func (m *QueryResponse_Stats) Size() (n int) {
}
return n
}
+func (m *QueryResponse_TimeseriesBatch) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TimeseriesBatch != nil {
+ l = m.TimeseriesBatch.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
func (m *QueryPlan) Size() (n int) {
if m == nil {
return 0
@@ -1539,6 +1735,9 @@ func (m *QueryRangeRequest) Size() (n int) {
l = m.QueryPlan.Size()
n += 2 + l + sovQuery(uint64(l))
}
+ if m.ResponseBatchSize != 0 {
+ n += 2 + sovQuery(uint64(m.ResponseBatchSize))
+ }
return n
}
@@ -1588,6 +1787,18 @@ func (m *QueryRangeResponse_Stats) Size() (n int) {
}
return n
}
+func (m *QueryRangeResponse_TimeseriesBatch) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TimeseriesBatch != nil {
+ l = m.TimeseriesBatch.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
func sovQuery(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
@@ -1683,6 +1894,90 @@ func (m *QueryStats) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *TimeSeriesBatch) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimeSeriesBatch: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimeSeriesBatch: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Series = append(m.Series, &prompb.TimeSeries{})
+ if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *QueryRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -2037,6 +2332,25 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseBatchSize", wireType)
+ }
+ m.ResponseBatchSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ResponseBatchSize |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
@@ -2273,6 +2587,41 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
}
m.Result = &QueryResponse_Stats{v}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeseriesBatch", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TimeSeriesBatch{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Result = &QueryResponse_TimeseriesBatch{v}
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
@@ -2769,6 +3118,25 @@ func (m *QueryRangeRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseBatchSize", wireType)
+ }
+ m.ResponseBatchSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ResponseBatchSize |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
@@ -2921,6 +3289,41 @@ func (m *QueryRangeResponse) Unmarshal(dAtA []byte) error {
}
m.Result = &QueryRangeResponse_Stats{v}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeseriesBatch", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TimeSeriesBatch{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Result = &QueryRangeResponse_TimeseriesBatch{v}
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
diff --git a/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.proto b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.proto
index 6a55365a1b5..0f4e29e6e83 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.proto
+++ b/vendor/github.com/thanos-io/thanos/pkg/api/query/querypb/query.proto
@@ -33,6 +33,10 @@ message QueryStats {
int64 peak_samples = 2;
}
+message TimeSeriesBatch {
+ repeated prometheus_copy.TimeSeries series = 1;
+}
+
message QueryRequest {
string query = 1;
queryPlan queryPlan = 14;
@@ -54,6 +58,10 @@ message QueryRequest {
int64 lookback_delta_seconds = 12;
EngineType engine = 13;
+ // response_batch_size controls how many TimeSeries are batched per response message.
+ // If set to 0 or 1, each TimeSeries is sent as a separate response (default behavior).
+ int64 response_batch_size = 15;
+
reserved 9;
}
@@ -71,6 +79,9 @@ message QueryResponse {
// performance stats
QueryStats stats = 3;
+
+ /// timeseries_batch is a batch of series from the result of the executed query.
+ TimeSeriesBatch timeseries_batch = 4;
}
}
@@ -103,6 +114,10 @@ message QueryRangeRequest {
int64 lookback_delta_seconds = 14;
EngineType engine = 15;
+ // response_batch_size controls how many TimeSeries are batched per response message.
+ // If set to 0 or 1, each TimeSeries is sent as a separate response (default behavior).
+ int64 response_batch_size = 17;
+
reserved 11;
}
@@ -116,6 +131,9 @@ message QueryRangeResponse {
// performance stats
QueryStats stats = 3;
+
+ /// timeseries_batch is a batch of series from the result of the executed query.
+ TimeSeriesBatch timeseries_batch = 4;
}
}
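// Sketch, not part of this diff: a client can type-switch on the oneof to
// handle both the legacy per-series responses and the new batches. Servers
// that predate field 4 never set it, so the batch case is simply not hit.
// `handle` is a hypothetical per-series callback; wrapper field names follow
// the generated code above.
func consume(resp *QueryResponse, handle func(*prompb.TimeSeries)) {
	switch r := resp.GetResult().(type) {
	case *QueryResponse_Timeseries:
		handle(r.Timeseries)
	case *QueryResponse_TimeseriesBatch:
		for _, ts := range r.TimeseriesBatch.Series {
			handle(ts)
		}
	}
}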
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
index 76c14d80a94..fe25e3c62c4 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
@@ -24,6 +24,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ prommodel "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/thanos-io/objstore"
@@ -1174,6 +1175,11 @@ func ParseRelabelConfig(contentYaml []byte, supportedActions map[relabel.Action]
if err := yaml.Unmarshal(contentYaml, &relabelConfig); err != nil {
return nil, errors.Wrap(err, "parsing relabel configuration")
}
+ for _, cfg := range relabelConfig {
+ if err := cfg.Validate(prommodel.UTF8Validation); err != nil {
+ return nil, errors.Wrap(err, "validate relabel config")
+ }
+ }
if supportedActions != nil {
for _, cfg := range relabelConfig {
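// Sketch, not part of this diff: what the added Validate call buys. A rule
// that parses as YAML but is semantically incomplete, such as a replace
// action without target_label, is now rejected up front; the exact error
// text comes from Prometheus' relabel package.
func demoRelabelValidation() error {
	_, err := ParseRelabelConfig([]byte("- action: replace\n  source_labels: [__name__]\n"), nil)
	return err // non-nil, wrapped as "validate relabel config: ..."
}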
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
index 3c0b07d03a3..5e479a5ca2d 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
@@ -136,6 +136,11 @@ func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt
return NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.metrics.lazyReader, p.metrics.binaryReader, p.onLazyReaderClosed, p.lazyDownloadFunc(meta))
})
+ if err != nil {
+ level.Error(p.logger).Log("msg", "failed to create lazy index-header reader", "block", id.String(), "err", err)
+ return nil, err
+ }
+
reader := lazyReader.(Reader)
// Keep track of lazy readers only if required.
diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go
index f6a6f9472ee..043a419f4de 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go
@@ -338,9 +338,11 @@ func (h *histogramAggregator) reset() {
h.sum = nil
}
-func mustHistogramOp(_ *histogram.FloatHistogram, err error) {
+func mustHistogramOp(_ *histogram.FloatHistogram, _, _ bool, err error) {
// NOTE(GiedriusS): this can only happen with custom
// boundaries. We do not support them yet.
+ // The two boolean return values are for NHCB (Native Histogram Custom Buckets),
+ // which we do not support yet, so we ignore them.
if err != nil {
panic(fmt.Sprintf("unexpected error: %v", err))
}
@@ -367,7 +369,8 @@ func (h *histogramAggregator) add(s sample) {
mustHistogramOp(h.counter.Add(fh))
} else {
// Add delta with previous value to the counter.
- deltaFh, err := fh.Copy().Sub(h.previous)
+ // TODO: support NHCB.
+ deltaFh, _, _, err := fh.Copy().Sub(h.previous)
if err != nil {
// TODO(GiedriusS): support native histograms with custom buckets.
// This can only happen with custom buckets.
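// Note, not part of this diff: upstream FloatHistogram.Add and Sub gained two
// boolean return values tied to the NHCB work (compare the Warn* flags added
// in warnings/context.go above); downsampling discards them until custom
// bucket histograms are supported here.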
diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go
index c723981b013..582fb9735c4 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go
@@ -340,7 +340,6 @@ func (e *EndpointSet) Update(ctx context.Context) {
)
for _, spec := range e.endpointSpecs() {
-
if er, existingRef := e.endpoints[spec.Addr()]; existingRef {
wg.Add(1)
go func(spec *GRPCEndpointSpec) {
@@ -620,12 +619,8 @@ func (e *EndpointSet) GetEndpointStatus() []EndpointStatus {
statuses := make([]EndpointStatus, 0, len(e.endpoints))
for _, v := range e.endpoints {
- v.mtx.RLock()
- defer v.mtx.RUnlock()
-
- status := v.status
- if status != nil {
- statuses = append(statuses, *status)
+ if status, ok := v.Status(); ok {
+ statuses = append(statuses, status)
}
}
@@ -782,6 +777,16 @@ func (er *endpointRef) HasStatusAPI() bool {
return er.metadata != nil && er.metadata.Status != nil
}
+func (er *endpointRef) Status() (EndpointStatus, bool) {
+ er.mtx.RLock()
+ defer er.mtx.RUnlock()
+
+ if er.status == nil {
+ return EndpointStatus{}, false
+ }
+ return *er.status, true
+}
+
func (er *endpointRef) LabelSets() []labels.Labels {
er.mtx.RLock()
defer er.mtx.RUnlock()
diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go
index 3fd4e45994b..c6bbec019a5 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go
@@ -31,6 +31,10 @@ import (
grpc_tracing "github.com/thanos-io/thanos/pkg/tracing/tracing_middleware"
)
+// defaultResponseBatchSize is the default number of timeseries to batch per gRPC response message.
+// This value provides a good balance between reducing per-message overhead and keeping message sizes reasonable.
+const defaultResponseBatchSize = 64
+
type RemoteEndpointsCreator func(
replicaLabels []string,
partialResponse bool,
@@ -341,6 +345,7 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result {
ReplicaLabels: r.opts.ReplicaLabels,
MaxResolutionSeconds: maxResolution,
EnableDedup: true,
+ ResponseBatchSize: defaultResponseBatchSize,
}
qry, err := r.client.Query(qctx, request)
@@ -370,22 +375,33 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result {
qryStats = *s
continue
}
-
- ts := msg.GetTimeseries()
- if ts == nil {
+ if batch := msg.GetTimeseriesBatch(); batch != nil {
+ for _, ts := range batch.Series {
+ builder.Reset()
+ for _, l := range ts.Labels {
+ builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
+ }
+ if len(ts.Histograms) > 0 {
+ result = append(result, promql.Sample{Metric: builder.Labels(), H: prompb.FromProtoHistogram(ts.Histograms[0]), T: r.start.UnixMilli()})
+ } else {
+ result = append(result, promql.Sample{Metric: builder.Labels(), F: ts.Samples[0].Value, T: r.start.UnixMilli()})
+ }
+ }
continue
}
- builder.Reset()
- for _, l := range ts.Labels {
- builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
- }
- // Point might have a different timestamp, force it to the evaluation
- // timestamp as that is when we ran the evaluation.
- // See https://github.com/prometheus/prometheus/blob/b727e69b7601b069ded5c34348dca41b80988f4b/promql/engine.go#L693-L699
- if len(ts.Histograms) > 0 {
- result = append(result, promql.Sample{Metric: builder.Labels(), H: prompb.FromProtoHistogram(ts.Histograms[0]), T: r.start.UnixMilli()})
- } else {
- result = append(result, promql.Sample{Metric: builder.Labels(), F: ts.Samples[0].Value, T: r.start.UnixMilli()})
+ if ts := msg.GetTimeseries(); ts != nil {
+ builder.Reset()
+ for _, l := range ts.Labels {
+ builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
+ }
+ // Point might have a different timestamp, force it to the evaluation
+ // timestamp as that is when we ran the evaluation.
+ // See https://github.com/prometheus/prometheus/blob/b727e69b7601b069ded5c34348dca41b80988f4b/promql/engine.go#L693-L699
+ if len(ts.Histograms) > 0 {
+ result = append(result, promql.Sample{Metric: builder.Labels(), H: prompb.FromProtoHistogram(ts.Histograms[0]), T: r.start.UnixMilli()})
+ } else {
+ result = append(result, promql.Sample{Metric: builder.Labels(), F: ts.Samples[0].Value, T: r.start.UnixMilli()})
+ }
}
}
r.samplesStats.UpdatePeak(int(qryStats.PeakSamples))
@@ -408,6 +424,7 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result {
ReplicaLabels: r.opts.ReplicaLabels,
MaxResolutionSeconds: maxResolution,
EnableDedup: true,
+ ResponseBatchSize: defaultResponseBatchSize,
}
qry, err := r.client.QueryRange(qctx, request)
if err != nil {
@@ -437,33 +454,57 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result {
qryStats = *s
continue
}
-
- ts := msg.GetTimeseries()
- if ts == nil {
+ if batch := msg.GetTimeseriesBatch(); batch != nil {
+ for _, ts := range batch.Series {
+ builder.Reset()
+ for _, l := range ts.Labels {
+ builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
+ }
+ series := promql.Series{
+ Metric: builder.Labels(),
+ Floats: make([]promql.FPoint, 0, len(ts.Samples)),
+ Histograms: make([]promql.HPoint, 0, len(ts.Histograms)),
+ }
+ for _, s := range ts.Samples {
+ series.Floats = append(series.Floats, promql.FPoint{
+ T: s.Timestamp,
+ F: s.Value,
+ })
+ }
+ for _, hp := range ts.Histograms {
+ series.Histograms = append(series.Histograms, promql.HPoint{
+ T: hp.Timestamp,
+ H: prompb.FloatHistogramProtoToFloatHistogram(hp),
+ })
+ }
+ result = append(result, series)
+ }
continue
}
- builder.Reset()
- for _, l := range ts.Labels {
- builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
- }
- series := promql.Series{
- Metric: builder.Labels(),
- Floats: make([]promql.FPoint, 0, len(ts.Samples)),
- Histograms: make([]promql.HPoint, 0, len(ts.Histograms)),
- }
- for _, s := range ts.Samples {
- series.Floats = append(series.Floats, promql.FPoint{
- T: s.Timestamp,
- F: s.Value,
- })
- }
- for _, hp := range ts.Histograms {
- series.Histograms = append(series.Histograms, promql.HPoint{
- T: hp.Timestamp,
- H: prompb.FloatHistogramProtoToFloatHistogram(hp),
- })
+ if ts := msg.GetTimeseries(); ts != nil {
+ builder.Reset()
+ for _, l := range ts.Labels {
+ builder.Add(strings.Clone(l.Name), strings.Clone(l.Value))
+ }
+ series := promql.Series{
+ Metric: builder.Labels(),
+ Floats: make([]promql.FPoint, 0, len(ts.Samples)),
+ Histograms: make([]promql.HPoint, 0, len(ts.Histograms)),
+ }
+ for _, s := range ts.Samples {
+ series.Floats = append(series.Floats, promql.FPoint{
+ T: s.Timestamp,
+ F: s.Value,
+ })
+ }
+ for _, hp := range ts.Histograms {
+ series.Histograms = append(series.Histograms, promql.HPoint{
+ T: hp.Timestamp,
+ H: prompb.FloatHistogramProtoToFloatHistogram(hp),
+ })
+ }
+ result = append(result, series)
}
- result = append(result, series)
}
r.samplesStats.UpdatePeak(int(qryStats.PeakSamples))
r.samplesStats.TotalSamples = qryStats.SamplesTotal
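// Sketch, not part of this diff: the batch and single-series range branches
// above decode identically; a helper like this could factor out the
// duplication. It assumes builder is the labels.ScratchBuilder the calls
// above suggest.
func toSeries(b *labels.ScratchBuilder, ts *prompb.TimeSeries) promql.Series {
	b.Reset()
	for _, l := range ts.Labels {
		b.Add(strings.Clone(l.Name), strings.Clone(l.Value))
	}
	s := promql.Series{
		Metric:     b.Labels(),
		Floats:     make([]promql.FPoint, 0, len(ts.Samples)),
		Histograms: make([]promql.HPoint, 0, len(ts.Histograms)),
	}
	for _, sp := range ts.Samples {
		s.Floats = append(s.Floats, promql.FPoint{T: sp.Timestamp, F: sp.Value})
	}
	for _, hp := range ts.Histograms {
		s.Histograms = append(s.Histograms, promql.HPoint{T: hp.Timestamp, H: prompb.FloatHistogramProtoToFloatHistogram(hp)})
	}
	return s
}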
diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
index 5fbabfa9900..ff511a478a6 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
@@ -85,6 +85,7 @@ type Shipper struct {
allowOutOfOrderUploads bool
skipCorruptedBlocks bool
hashFunc metadata.HashFunc
+ uploadConcurrency int
labels func() labels.Labels
mtx sync.RWMutex
@@ -104,6 +105,7 @@ type shipperOptions struct {
uploadCompacted bool
allowOutOfOrderUploads bool
skipCorruptedBlocks bool
+ uploadConcurrency int
}
type Option func(*shipperOptions)
@@ -171,6 +173,13 @@ func WithSkipCorruptedBlocks(skip bool) Option {
}
}
+// WithUploadConcurrency sets the number of goroutines to use when uploading block files.
+func WithUploadConcurrency(concurrency int) Option {
+ return func(o *shipperOptions) {
+ o.uploadConcurrency = concurrency
+ }
+}
+
func applyOptions(opts []Option) *shipperOptions {
so := new(shipperOptions)
for _, o := range opts {
@@ -209,6 +218,7 @@ func New(bucket objstore.Bucket, dir string, opts ...Option) *Shipper {
skipCorruptedBlocks: options.skipCorruptedBlocks,
uploadCompacted: options.uploadCompacted,
hashFunc: options.hashFunc,
+ uploadConcurrency: options.uploadConcurrency,
metadataFilePath: filepath.Join(dir, filepath.Clean(options.metaFileName)),
}
}
@@ -463,7 +473,11 @@ func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error {
if err := meta.WriteToDir(s.logger, updir); err != nil {
return errors.Wrap(err, "write meta file")
}
- return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc)
+ var uploadOptions []objstore.UploadOption
+ if s.uploadConcurrency > 0 {
+ uploadOptions = append(uploadOptions, objstore.WithUploadConcurrency(s.uploadConcurrency))
+ }
+ return block.Upload(ctx, s.logger, s.bucket, updir, s.hashFunc, uploadOptions...)
}
// blockMetasFromOldest returns the block meta of each block found in dir
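// Sketch, not part of this diff: wiring the new option. bucket and dir are
// placeholders for an objstore.Bucket and a local TSDB directory.
func newConcurrentShipper(bucket objstore.Bucket, dir string) *Shipper {
	return New(bucket, dir,
		WithUploadConcurrency(4), // upload each block's files with 4 goroutines
	)
}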
diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go
index ca35ce45ab8..f4e9303daaf 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/e2eutil/prometheus.go
@@ -546,7 +546,6 @@ func createBlock(
headOpts := tsdb.DefaultHeadOptions()
headOpts.ChunkDirRoot = filepath.Join(dir, "chunks")
headOpts.ChunkRange = 10000000000
- headOpts.EnableNativeHistograms = *atomic.NewBool(true)
h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil)
if err != nil {
return id, errors.Wrap(err, "create head block")
diff --git a/vendor/go.opentelemetry.io/collector/component/identifiable.go b/vendor/go.opentelemetry.io/collector/component/identifiable.go
index 6b814768161..417b45b982b 100644
--- a/vendor/go.opentelemetry.io/collector/component/identifiable.go
+++ b/vendor/go.opentelemetry.io/collector/component/identifiable.go
@@ -4,6 +4,7 @@
package component // import "go.opentelemetry.io/collector/component"
import (
+ "encoding"
"errors"
"fmt"
"regexp"
@@ -49,7 +50,7 @@ func (t Type) MarshalText() ([]byte, error) {
// - start with an ASCII alphabetic character and
// - can only contain ASCII alphanumeric characters and '_'.
func NewType(ty string) (Type, error) {
- if len(ty) == 0 {
+ if ty == "" {
return Type{}, errors.New("id must not be empty")
}
if !typeRegexp.MatchString(ty) {
@@ -71,6 +72,12 @@ func MustNewType(strType string) Type {
return ty
}
+var (
+ _ fmt.Stringer = ID{}
+ _ encoding.TextMarshaler = ID{}
+ _ encoding.TextUnmarshaler = (*ID)(nil)
+)
+
// ID represents the identity for a component. It combines two values:
// * type - the Type of the component.
// * name - the name of that component.
@@ -100,7 +107,7 @@ func NewIDWithName(typeVal Type, nameVal string) ID {
// MustNewIDWithName builds a Type and returns a new ID with the given Type and name.
// This is equivalent to NewIDWithName(MustNewType(typeVal), nameVal).
// See MustNewType to check the valid values of typeVal.
-func MustNewIDWithName(typeVal string, nameVal string) ID {
+func MustNewIDWithName(typeVal, nameVal string) ID {
return NewIDWithName(MustNewType(typeVal), nameVal)
}
@@ -116,7 +123,7 @@ func (id ID) Name() string {
// MarshalText implements the encoding.TextMarshaler interface.
// This marshals the type and name as one string in the config.
-func (id ID) MarshalText() (text []byte, err error) {
+func (id ID) MarshalText() ([]byte, error) {
return []byte(id.String()), nil
}
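// Note, not part of this diff: the var block added above is the standard
// compile-time assertion that ID implements fmt.Stringer and the encoding
// text interfaces; the blank assignments cost nothing at runtime.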
diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go
index 461dead4b32..7d39c18ab1d 100644
--- a/vendor/go.opentelemetry.io/collector/component/telemetry.go
+++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go
@@ -4,8 +4,31 @@
package component // import "go.opentelemetry.io/collector/component"
import (
- "go.opentelemetry.io/collector/internal/telemetry"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
)
// TelemetrySettings provides components with APIs to report telemetry.
-type TelemetrySettings = telemetry.TelemetrySettings
+type TelemetrySettings struct {
+ // Logger that the factory can use during creation and can pass to the created
+ // component to be used later as well.
+ Logger *zap.Logger
+
+ // TracerProvider that the factory can pass to other instrumented third-party libraries.
+ //
+ // The service may wrap this provider for attribute injection. The wrapper may implement an
+ // additional `Unwrap() trace.TracerProvider` method to grant access to the underlying SDK.
+ TracerProvider trace.TracerProvider
+
+ // MeterProvider that the factory can pass to other instrumented third-party libraries.
+ MeterProvider metric.MeterProvider
+
+ // Resource contains the resource attributes for the collector's telemetry.
+ Resource pcommon.Resource
+
+ // prevent unkeyed literal initialization
+ _ struct{}
+}
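// Note, not part of this diff: the trailing `_ struct{}` field makes unkeyed
// composite literals a compile error, so new fields can be added to
// TelemetrySettings without breaking callers:
//
//	component.TelemetrySettings{Logger: logger}            // keyed: compiles
//	component.TelemetrySettings{logger, tp, mp, resource}  // unkeyed: rejected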
diff --git a/vendor/go.opentelemetry.io/collector/confmap/README.md b/vendor/go.opentelemetry.io/collector/confmap/README.md
index ca11a972956..237615c8754 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/README.md
+++ b/vendor/go.opentelemetry.io/collector/confmap/README.md
@@ -129,8 +129,6 @@ service:
```yaml
# extra_extension.yaml
-processors:
- batch:
extensions:
healthcheckv2:
@@ -138,7 +136,6 @@ service:
extensions: [ healthcheckv2 ]
pipelines:
traces:
- processors: [ batch ]
```
If you run the Collector with following command,
@@ -157,7 +154,6 @@ processors:
- key: key
value: "value"
action: upsert
- batch:
exporters:
otlp/out:
extensions:
@@ -168,7 +164,7 @@ service:
pipelines:
traces:
receivers: [ otlp/in ]
- processors: [ attributes/example, batch ]
+ processors: [ attributes/example ]
exporters: [ otlp/out ]
extensions: [ file_storage, healthcheckv2 ]
```
diff --git a/vendor/go.opentelemetry.io/collector/confmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/confmap.go
index 4009cd78653..a4848a53cb9 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/confmap.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/confmap.go
@@ -6,677 +6,46 @@
package confmap // import "go.opentelemetry.io/collector/confmap"
import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "slices"
- "strings"
-
- "github.com/go-viper/mapstructure/v2"
- "github.com/knadh/koanf/maps"
- "github.com/knadh/koanf/providers/confmap"
- "github.com/knadh/koanf/v2"
-
- encoder "go.opentelemetry.io/collector/confmap/internal/mapstructure"
- "go.opentelemetry.io/collector/confmap/internal/third_party/composehook"
+ "go.opentelemetry.io/collector/confmap/internal"
)
-const (
- // KeyDelimiter is used as the default key delimiter in the default koanf instance.
- KeyDelimiter = "::"
-)
+// KeyDelimiter is used as the default key delimiter in the default koanf instance.
+var KeyDelimiter = internal.KeyDelimiter
-const (
- // MapstructureTag is the struct field tag used to record marshaling/unmarshaling settings.
- // See https://pkg.go.dev/github.com/go-viper/mapstructure/v2 for supported values.
- MapstructureTag = "mapstructure"
-)
+// MapstructureTag is the struct field tag used to record marshaling/unmarshaling settings.
+// See https://pkg.go.dev/github.com/go-viper/mapstructure/v2 for supported values.
+var MapstructureTag = internal.MapstructureTag
// New creates a new empty confmap.Conf instance.
func New() *Conf {
- return &Conf{k: koanf.New(KeyDelimiter), isNil: false}
+ return internal.New()
}
// NewFromStringMap creates a confmap.Conf from a map[string]any.
func NewFromStringMap(data map[string]any) *Conf {
- p := New()
- if data == nil {
- p.isNil = true
- } else {
- // Cannot return error because the koanf instance is empty.
- _ = p.k.Load(confmap.Provider(data, KeyDelimiter), nil)
- }
- return p
+ return internal.NewFromStringMap(data)
}
// Conf represents the raw configuration map for the OpenTelemetry Collector.
// The confmap.Conf can be unmarshalled into the Collector's config using the "service" package.
-type Conf struct {
- k *koanf.Koanf
- // If true, upon unmarshaling do not call the Unmarshal function on the struct
- // if it implements Unmarshaler and is the top-level struct.
- // This avoids running into an infinite recursion where Unmarshaler.Unmarshal and
- // Conf.Unmarshal would call each other.
- skipTopLevelUnmarshaler bool
- // isNil is true if this Conf was created from a nil field, as opposed to an empty map.
- // AllKeys must return an empty slice if this is true.
- isNil bool
-}
-
-// AllKeys returns all keys holding a value, regardless of where they are set.
-// Nested keys are returned with a KeyDelimiter separator.
-func (l *Conf) AllKeys() []string {
- return l.k.Keys()
-}
-
-type UnmarshalOption interface {
- apply(*unmarshalOption)
-}
+type Conf = internal.Conf
-type unmarshalOption struct {
- ignoreUnused bool
-}
+type UnmarshalOption = internal.UnmarshalOption
// WithIgnoreUnused sets an option to ignore errors if existing
// keys in the original Conf were unused in the decoding process
// (extra keys).
func WithIgnoreUnused() UnmarshalOption {
- return unmarshalOptionFunc(func(uo *unmarshalOption) {
- uo.ignoreUnused = true
- })
-}
-
-type unmarshalOptionFunc func(*unmarshalOption)
-
-func (fn unmarshalOptionFunc) apply(set *unmarshalOption) {
- fn(set)
-}
-
-// Unmarshal unmarshalls the config into a struct using the given options.
-// Tags on the fields of the structure must be properly set.
-func (l *Conf) Unmarshal(result any, opts ...UnmarshalOption) error {
- set := unmarshalOption{}
- for _, opt := range opts {
- opt.apply(&set)
- }
- return decodeConfig(l, result, !set.ignoreUnused, l.skipTopLevelUnmarshaler)
-}
-
-type marshalOption struct{}
-
-type MarshalOption interface {
- apply(*marshalOption)
-}
-
-// Marshal encodes the config and merges it into the Conf.
-func (l *Conf) Marshal(rawVal any, _ ...MarshalOption) error {
- enc := encoder.New(encoderConfig(rawVal))
- data, err := enc.Encode(rawVal)
- if err != nil {
- return err
- }
- out, ok := data.(map[string]any)
- if !ok {
- return errors.New("invalid config encoding")
- }
- return l.Merge(NewFromStringMap(out))
-}
-
-func (l *Conf) unsanitizedGet(key string) any {
- return l.k.Get(key)
-}
-
-// sanitize recursively removes expandedValue references from the given data.
-// It uses the expandedValue.Value field to replace the expandedValue references.
-func sanitize(a any) any {
- return sanitizeExpanded(a, false)
-}
-
-// sanitizeToStringMap recursively removes expandedValue references from the given data.
-// It uses the expandedValue.Original field to replace the expandedValue references.
-func sanitizeToStr(a any) any {
- return sanitizeExpanded(a, true)
-}
-
-func sanitizeExpanded(a any, useOriginal bool) any {
- switch m := a.(type) {
- case map[string]any:
- c := maps.Copy(m)
- for k, v := range m {
- c[k] = sanitizeExpanded(v, useOriginal)
- }
- return c
- case []any:
- // If the value is nil, return nil.
- var newSlice []any
- if m == nil {
- return newSlice
- }
- newSlice = make([]any, 0, len(m))
- for _, e := range m {
- newSlice = append(newSlice, sanitizeExpanded(e, useOriginal))
- }
- return newSlice
- case expandedValue:
- if useOriginal {
- return m.Original
- }
- return m.Value
- }
- return a
-}
-
-// Get can retrieve any value given the key to use.
-func (l *Conf) Get(key string) any {
- val := l.unsanitizedGet(key)
- return sanitizeExpanded(val, false)
-}
-
-// IsSet checks to see if the key has been set in any of the data locations.
-func (l *Conf) IsSet(key string) bool {
- return l.k.Exists(key)
-}
-
-// Merge merges the input given configuration into the existing config.
-// Note that the given map may be modified.
-func (l *Conf) Merge(in *Conf) error {
- if enableMergeAppendOption.IsEnabled() {
- // only use MergeAppend when enableMergeAppendOption featuregate is enabled.
- return l.mergeAppend(in)
- }
- l.isNil = l.isNil && in.isNil
- return l.k.Merge(in.k)
-}
-
-// Delete a path from the Conf.
-// If the path exists, deletes it and returns true.
-// If the path does not exist, does nothing and returns false.
-func (l *Conf) Delete(key string) bool {
- wasSet := l.IsSet(key)
- l.k.Delete(key)
- return wasSet
-}
-
-// mergeAppend merges the input given configuration into the existing config.
-// Note that the given map may be modified.
-// Additionally, mergeAppend performs deduplication when merging lists.
-// For example, if listA = [extension1, extension2] and listB = [extension1, extension3],
-// the resulting list will be [extension1, extension2, extension3].
-func (l *Conf) mergeAppend(in *Conf) error {
- err := l.k.Load(confmap.Provider(in.ToStringMap(), ""), nil, koanf.WithMergeFunc(mergeAppend))
- if err != nil {
- return err
- }
- l.isNil = l.isNil && in.isNil
- return nil
-}
-
-// Sub returns new Conf instance representing a sub-config of this instance.
-// It returns an error is the sub-config is not a map[string]any (use Get()), and an empty Map if none exists.
-func (l *Conf) Sub(key string) (*Conf, error) {
- // Code inspired by the koanf "Cut" func, but returns an error instead of empty map for unsupported sub-config type.
- data := l.unsanitizedGet(key)
- if data == nil {
- c := New()
- c.isNil = true
- return c, nil
- }
-
- switch v := data.(type) {
- case map[string]any:
- return NewFromStringMap(v), nil
- case expandedValue:
- if m, ok := v.Value.(map[string]any); ok {
- return NewFromStringMap(m), nil
- } else if v.Value == nil {
- // If the value is nil, return a new empty Conf.
- c := New()
- c.isNil = true
- return c, nil
- }
- // override data with the original value to make the error message more informative.
- data = v.Value
- }
-
- return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind())
-}
-
-func (l *Conf) toStringMapWithExpand() map[string]any {
- if l.isNil {
- return nil
- }
- m := maps.Unflatten(l.k.All(), KeyDelimiter)
- return m
-}
-
-// ToStringMap creates a map[string]any from a Conf.
-// Values with multiple representations
-// are normalized with the YAML parsed representation.
-//
-// For example, for a Conf created from `foo: ${env:FOO}` and `FOO=123`
-// ToStringMap will return `map[string]any{"foo": 123}`.
-//
-// For any map `m`, `NewFromStringMap(m).ToStringMap() == m`.
-// In particular, if the Conf was created from a nil value,
-// ToStringMap will return map[string]any(nil).
-func (l *Conf) ToStringMap() map[string]any {
- return sanitize(l.toStringMapWithExpand()).(map[string]any)
-}
-
-// decodeConfig decodes the contents of the Conf into the result argument, using a
-// mapstructure decoder with the following notable behaviors. Ensures that maps whose
-// values are nil pointer structs resolved to the zero value of the target struct (see
-// expandNilStructPointers). Converts string to []string by splitting on ','. Ensures
-// uniqueness of component IDs (see mapKeyStringToMapKeyTextUnmarshalerHookFunc).
-// Decodes time.Duration from strings. Allows custom unmarshaling for structs implementing
-// encoding.TextUnmarshaler. Allows custom unmarshaling for structs implementing confmap.Unmarshaler.
-func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler bool) error {
- dc := &mapstructure.DecoderConfig{
- ErrorUnused: errorUnused,
- Result: result,
- TagName: MapstructureTag,
- WeaklyTypedInput: false,
- MatchName: caseSensitiveMatchName,
- DecodeNil: true,
- DecodeHook: composehook.ComposeDecodeHookFunc(
- useExpandValue(),
- expandNilStructPointersHookFunc(),
- mapstructure.StringToSliceHookFunc(","),
- mapKeyStringToMapKeyTextUnmarshalerHookFunc(),
- mapstructure.StringToTimeDurationHookFunc(),
- mapstructure.TextUnmarshallerHookFunc(),
- unmarshalerHookFunc(result, skipTopLevelUnmarshaler),
- // after the main unmarshaler hook is called,
- // we unmarshal the embedded structs if present to merge with the result:
- unmarshalerEmbeddedStructsHookFunc(),
- zeroSliceAndMapHookFunc(),
- ),
- }
- decoder, err := mapstructure.NewDecoder(dc)
- if err != nil {
- return err
- }
- if err = decoder.Decode(m.toStringMapWithExpand()); err != nil {
- if strings.HasPrefix(err.Error(), "error decoding ''") {
- return errors.Unwrap(err)
- }
- return err
- }
- return nil
+ return internal.WithIgnoreUnused()
}
-// encoderConfig returns a default encoder.EncoderConfig that includes
-// an EncodeHook that handles both TextMarshaller and Marshaler
-// interfaces.
-func encoderConfig(rawVal any) *encoder.EncoderConfig {
- return &encoder.EncoderConfig{
- EncodeHook: mapstructure.ComposeDecodeHookFunc(
- encoder.YamlMarshalerHookFunc(),
- encoder.TextMarshalerHookFunc(),
- marshalerHookFunc(rawVal),
- ),
- }
-}
-
-// case-sensitive version of the callback to be used in the MatchName property
-// of the DecoderConfig. The default for MatchEqual is to use strings.EqualFold,
-// which is case-insensitive.
-func caseSensitiveMatchName(a, b string) bool {
- return a == b
-}
-
-func castTo(exp expandedValue, useOriginal bool) any {
- // If the target field is a string, use `exp.Original` or fail if not available.
- if useOriginal {
- return exp.Original
- }
- // Otherwise, use the parsed value (previous behavior).
- return exp.Value
-}
-
-// Check if a reflect.Type is of the form T, where:
-// X is any type or interface
-// T = string | map[X]T | []T | [n]T
-func isStringyStructure(t reflect.Type) bool {
- if t.Kind() == reflect.String {
- return true
- }
- if t.Kind() == reflect.Map {
- return isStringyStructure(t.Elem())
- }
- if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
- return isStringyStructure(t.Elem())
- }
- return false
-}
-
-// safeWrapDecodeHookFunc wraps a DecodeHookFuncValue to ensure fromVal is a valid `reflect.Value`
-// object and therefore it is safe to call `reflect.Value` methods on fromVal.
-//
-// Use this only if the hook does not need to be called on untyped nil values.
-// Typed nil values are safe to call and will be passed to the hook.
-// See https://github.com/golang/go/issues/51649
-func safeWrapDecodeHookFunc(
- f mapstructure.DecodeHookFuncValue,
-) mapstructure.DecodeHookFuncValue {
- return func(fromVal reflect.Value, toVal reflect.Value) (any, error) {
- if !fromVal.IsValid() {
- return nil, nil
- }
- return f(fromVal, toVal)
- }
-}
-
-// When a value has been loaded from an external source via a provider, we keep both the
-// parsed value and the original string value. This allows us to expand the value to its
-// original string representation when decoding into a string field, and use the original otherwise.
-func useExpandValue() mapstructure.DecodeHookFuncType {
- return func(
- _ reflect.Type,
- to reflect.Type,
- data any,
- ) (any, error) {
- if exp, ok := data.(expandedValue); ok {
- v := castTo(exp, to.Kind() == reflect.String)
- // See https://github.com/open-telemetry/opentelemetry-collector/issues/10949
- // If the `to.Kind` is not a string, then expandValue's original value is useless and
- // the casted-to value will be nil. In that scenario, we need to use the default value of `to`'s kind.
- if v == nil {
- return reflect.Zero(to).Interface(), nil
- }
- return v, nil
- }
-
- switch to.Kind() {
- case reflect.Array, reflect.Slice, reflect.Map:
- if isStringyStructure(to) {
- // If the target field is a stringy structure, sanitize to use the original string value everywhere.
- return sanitizeToStr(data), nil
- }
- // Otherwise, sanitize to use the parsed value everywhere.
- return sanitize(data), nil
- }
- return data, nil
- }
-}
-
-// In cases where a config has a mapping of something to a struct pointers
-// we want nil values to resolve to a pointer to the zero value of the
-// underlying struct just as we want nil values of a mapping of something
-// to a struct to resolve to the zero value of that struct.
-//
-// e.g. given a config type:
-// type Config struct { Thing *SomeStruct `mapstructure:"thing"` }
-//
-// and yaml of:
-// config:
-//
-// thing:
-//
-// we want an unmarshaled Config to be equivalent to
-// Config{Thing: &SomeStruct{}} instead of Config{Thing: nil}
-func expandNilStructPointersHookFunc() mapstructure.DecodeHookFuncValue {
- return safeWrapDecodeHookFunc(func(from reflect.Value, to reflect.Value) (any, error) {
- // ensure we are dealing with map to map comparison
- if from.Kind() == reflect.Map && to.Kind() == reflect.Map {
- toElem := to.Type().Elem()
- // ensure that map values are pointers to a struct
- // (that may be nil and require manual setting w/ zero value)
- if toElem.Kind() == reflect.Ptr && toElem.Elem().Kind() == reflect.Struct {
- fromRange := from.MapRange()
- for fromRange.Next() {
- fromKey := fromRange.Key()
- fromValue := fromRange.Value()
- // ensure that we've run into a nil pointer instance
- if fromValue.IsNil() {
- newFromValue := reflect.New(toElem.Elem())
- from.SetMapIndex(fromKey, newFromValue)
- }
- }
- }
- }
- return from.Interface(), nil
- })
-}
-
-// mapKeyStringToMapKeyTextUnmarshalerHookFunc returns a DecodeHookFuncType that checks that a conversion from
-// map[string]any to map[encoding.TextUnmarshaler]any does not overwrite keys,
-// when UnmarshalText produces equal elements from different strings (e.g. trims whitespaces).
-//
-// This is needed in combination with ComponentID, which may produce equal IDs for different strings,
-// and an error needs to be returned in that case, otherwise the last equivalent ID overwrites the previous one.
-func mapKeyStringToMapKeyTextUnmarshalerHookFunc() mapstructure.DecodeHookFuncType {
- return func(from reflect.Type, to reflect.Type, data any) (any, error) {
- if from.Kind() != reflect.Map || from.Key().Kind() != reflect.String {
- return data, nil
- }
-
- if to.Kind() != reflect.Map {
- return data, nil
- }
-
- // Checks that the key type of to implements the TextUnmarshaler interface.
- if _, ok := reflect.New(to.Key()).Interface().(encoding.TextUnmarshaler); !ok {
- return data, nil
- }
-
- // Create a map with key value of to's key to bool.
- fieldNameSet := reflect.MakeMap(reflect.MapOf(to.Key(), reflect.TypeOf(true)))
- for k := range data.(map[string]any) {
- // Create a new value of the to's key type.
- tKey := reflect.New(to.Key())
-
- // Use tKey to unmarshal the key of the map.
- if err := tKey.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(k)); err != nil {
- return nil, err
- }
- // Checks if the key has already been decoded in a previous iteration.
- if fieldNameSet.MapIndex(reflect.Indirect(tKey)).IsValid() {
- return nil, fmt.Errorf("duplicate name %q after unmarshaling %v", k, tKey)
- }
- fieldNameSet.SetMapIndex(reflect.Indirect(tKey), reflect.ValueOf(true))
- }
- return data, nil
- }
-}
-
-// unmarshalerEmbeddedStructsHookFunc provides a mechanism for embedded structs to define their own unmarshal logic,
-// by implementing the Unmarshaler interface.
-func unmarshalerEmbeddedStructsHookFunc() mapstructure.DecodeHookFuncValue {
- return safeWrapDecodeHookFunc(func(from reflect.Value, to reflect.Value) (any, error) {
- if to.Type().Kind() != reflect.Struct {
- return from.Interface(), nil
- }
- fromAsMap, ok := from.Interface().(map[string]any)
- if !ok {
- return from.Interface(), nil
- }
- for i := 0; i < to.Type().NumField(); i++ {
- // embedded structs passed in via `squash` cannot be pointers. We just check if they are structs:
- f := to.Type().Field(i)
- if f.IsExported() && slices.Contains(strings.Split(f.Tag.Get(MapstructureTag), ","), "squash") {
- if unmarshaler, ok := to.Field(i).Addr().Interface().(Unmarshaler); ok {
- c := NewFromStringMap(fromAsMap)
- c.skipTopLevelUnmarshaler = true
- if err := unmarshaler.Unmarshal(c); err != nil {
- return nil, err
- }
- // the struct we receive from this unmarshaling only contains fields related to the embedded struct.
- // we merge this partially unmarshaled struct with the rest of the result.
- // note we already unmarshaled the main struct earlier, and therefore merge with it.
- conf := New()
- if err := conf.Marshal(unmarshaler); err != nil {
- return nil, err
- }
- resultMap := conf.ToStringMap()
- if fromAsMap == nil && len(resultMap) > 0 {
- fromAsMap = make(map[string]any, len(resultMap))
- }
- for k, v := range resultMap {
- fromAsMap[k] = v
- }
- }
- }
- }
- return fromAsMap, nil
- })
-}
-
-// Provides a mechanism for individual structs to define their own unmarshal logic,
-// by implementing the Unmarshaler interface, unless skipTopLevelUnmarshaler is
-// true and the struct matches the top level object being unmarshaled.
-func unmarshalerHookFunc(result any, skipTopLevelUnmarshaler bool) mapstructure.DecodeHookFuncValue {
- return safeWrapDecodeHookFunc(func(from reflect.Value, to reflect.Value) (any, error) {
- if !to.CanAddr() {
- return from.Interface(), nil
- }
-
- toPtr := to.Addr().Interface()
- // Need to ignore the top structure to avoid running into an infinite recursion
- // where Unmarshaler.Unmarshal and Conf.Unmarshal would call each other.
- if toPtr == result && skipTopLevelUnmarshaler {
- return from.Interface(), nil
- }
-
- unmarshaler, ok := toPtr.(Unmarshaler)
- if !ok {
- return from.Interface(), nil
- }
-
- if _, ok = from.Interface().(map[string]any); !ok {
- return from.Interface(), nil
- }
-
- // Use the current object if not nil (to preserve other configs in the object), otherwise zero initialize.
- if to.Addr().IsNil() {
- unmarshaler = reflect.New(to.Type()).Interface().(Unmarshaler)
- }
-
- c := NewFromStringMap(from.Interface().(map[string]any))
- c.skipTopLevelUnmarshaler = true
- if err := unmarshaler.Unmarshal(c); err != nil {
- return nil, err
- }
-
- return unmarshaler, nil
- })
-}
-
-// marshalerHookFunc returns a DecodeHookFuncValue that checks structs that aren't
-// the original to see if they implement the Marshaler interface.
-func marshalerHookFunc(orig any) mapstructure.DecodeHookFuncValue {
- origType := reflect.TypeOf(orig)
- return safeWrapDecodeHookFunc(func(from reflect.Value, _ reflect.Value) (any, error) {
- if from.Kind() != reflect.Struct {
- return from.Interface(), nil
- }
-
- // ignore original to avoid infinite loop.
- if from.Type() == origType && reflect.DeepEqual(from.Interface(), orig) {
- return from.Interface(), nil
- }
- marshaler, ok := from.Interface().(Marshaler)
- if !ok {
- return from.Interface(), nil
- }
- conf := NewFromStringMap(nil)
- if err := marshaler.Marshal(conf); err != nil {
- return nil, err
- }
-
- stringMap := conf.ToStringMap()
- if stringMap == nil {
- // If conf is still nil after marshaling, we want to encode it as an untyped nil
- // instead of a map-typed nil. This ensures the value is a proper null value
- // in the final marshaled output instead of an empty map. We hit this case
- // when marshaling wrapper structs that have no direct representation
- // in the marshaled output that aren't tagged with "squash" on the fields
- // they're used on.
- return nil, nil
- }
- return stringMap, nil
- })
-}
+type MarshalOption = internal.MarshalOption
// Unmarshaler interface may be implemented by types to customize their behavior when being unmarshaled from a Conf.
// Only types with struct or pointer to struct kind are supported.
-type Unmarshaler interface {
- // Unmarshal a Conf into the struct in a custom way.
- // The Conf for this specific component may be nil or empty if no config available.
- // This method should only be called by decoding hooks when calling Conf.Unmarshal.
- Unmarshal(component *Conf) error
-}
+type Unmarshaler = internal.Unmarshaler
// Marshaler defines an optional interface for custom configuration marshaling.
// A configuration struct can implement this interface to override the default
// marshaling.
-type Marshaler interface {
- // Marshal the config into a Conf in a custom way.
- // The Conf will be empty and can be merged into.
- Marshal(component *Conf) error
-}
-
-// This hook is used to solve the issue: https://github.com/open-telemetry/opentelemetry-collector/issues/4001
-// We adopt the suggestion provided in this issue: https://github.com/mitchellh/mapstructure/issues/74#issuecomment-279886492
-// We should empty every slice before unmarshalling unless user provided slice is nil.
-// Assume that we had a struct with a field of type slice called `keys`, which has default values of ["a", "b"]
-//
-// type Config struct {
-// Keys []string `mapstructure:"keys"`
-// }
-//
-// The configuration provided by users may have following cases
-// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overridden
-// - for example, input is {"keys", ["c"]}, then output is Config{ Keys: ["c"]}
-//
-// 2. configuration have `keys` field and have an empty slice for this key, the output should be overridden by empty slices
-// - for example, input is {"keys", []}, then output is Config{ Keys: []}
-//
-// 3. configuration have `keys` field and have nil value for this key, the output should be default config
-// - for example, input is {"keys": nil}, then output is Config{ Keys: ["a", "b"]}
-//
-// 4. configuration have no `keys` field specified, the output should be default config
-// - for example, input is {}, then output is Config{ Keys: ["a", "b"]}
-//
-// This hook is also used to solve https://github.com/open-telemetry/opentelemetry-collector/issues/13117.
-// Since v0.127.0, we decode nil values to avoid creating empty map objects.
-// The nil value is not well understood when layered on top of a default map non-nil value.
-// The fix is to avoid the assignment and return the previous value.
-func zeroSliceAndMapHookFunc() mapstructure.DecodeHookFuncValue {
- return safeWrapDecodeHookFunc(func(from reflect.Value, to reflect.Value) (any, error) {
- if to.CanSet() && to.Kind() == reflect.Slice && from.Kind() == reflect.Slice {
- if !from.IsNil() {
- // input slice is not nil, set the output slice to a new slice of the same type.
- to.Set(reflect.MakeSlice(to.Type(), from.Len(), from.Cap()))
- }
- }
- if to.CanSet() && to.Kind() == reflect.Map && from.Kind() == reflect.Map {
- if from.IsNil() {
- return to.Interface(), nil
- }
- }
-
- return from.Interface(), nil
- })
-}
-
-type moduleFactory[T any, S any] interface {
- Create(s S) T
-}
-
-type createConfmapFunc[T any, S any] func(s S) T
-
-type confmapModuleFactory[T any, S any] struct {
- f createConfmapFunc[T, S]
-}
-
-func (c confmapModuleFactory[T, S]) Create(s S) T {
- return c.f(s)
-}
-
-func newConfmapModuleFactory[T any, S any](f createConfmapFunc[T, S]) moduleFactory[T, S] {
- return confmapModuleFactory[T, S]{
- f: f,
- }
-}
+type Marshaler = internal.Marshaler
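
The net effect of this hunk: `Conf`, `UnmarshalOption`, `MarshalOption`, `Unmarshaler`, and `Marshaler` become type aliases into the new `internal` package, so the exported identities are unchanged. A small sketch of why aliases (rather than new type definitions) keep callers compiling, using hypothetical names:

```go
package main

import "fmt"

type internalConf struct{ keys []string }

// Conf is a type alias, not a definition: it is the *same* type as
// internalConf, so values, methods, and type assertions written against
// the old name keep working after the implementation moves.
type Conf = internalConf

func main() {
	c := Conf{keys: []string{"a"}}
	var i internalConf = c // no conversion needed: identical types
	fmt.Println(i.keys)
}
```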
diff --git a/vendor/go.opentelemetry.io/collector/confmap/expand.go b/vendor/go.opentelemetry.io/collector/confmap/expand.go
index 42f3b6296da..4cd13ad1608 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/expand.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/expand.go
@@ -9,6 +9,8 @@ import (
"fmt"
"regexp"
"strings"
+
+ "go.opentelemetry.io/collector/confmap/internal"
)
// schemePattern defines the regexp pattern for scheme names.
@@ -24,7 +26,7 @@ var (
)
func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, error) {
- for i := 0; i < 1000; i++ {
+ for range 1000 {
val, changed, err := mr.expandValue(ctx, value)
if err != nil {
return nil, err
@@ -39,14 +41,14 @@ func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any,
func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, error) {
switch v := value.(type) {
- case expandedValue:
+ case internal.ExpandedValue:
expanded, changed, err := mr.expandValue(ctx, v.Value)
if err != nil {
return nil, false, err
}
switch exp := expanded.(type) {
- case expandedValue, string:
+ case internal.ExpandedValue, string:
// Return expanded values or strings verbatim.
return exp, changed, nil
}
@@ -60,7 +62,7 @@ func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, erro
if originalExpanded, ok := originalExpanded.(string); ok {
// If the original representation is a string, return the expanded value with the original representation.
- return expandedValue{
+ return internal.ExpandedValue{
Value: expanded,
Original: originalExpanded,
}, changed || originalChanged, nil
@@ -141,17 +143,6 @@ func (mr *Resolver) findURI(input string) string {
return input[openIndex : closeIndex+1]
}
-// expandedValue holds the YAML parsed value and original representation of a value.
-// It keeps track of the original representation to be used by the 'useExpandValue' hook
-// if the target field is a string. We need to keep both representations because we don't know
-// what the target field type is until `Unmarshal` is called.
-type expandedValue struct {
- // Value is the expanded value.
- Value any
- // Original is the original representation of the value.
- Original string
-}
-
// findAndExpandURI attempts to find and expand the first occurrence of an expandable URI in input. If an expandable URI is found it
// returns the input with the URI expanded, true and nil. Otherwise, it returns the unchanged input, false and the expanding error.
// This method expects input to start with ${ and end with }
@@ -175,7 +166,7 @@ func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bo
}
if asStr, err2 := ret.AsString(); err2 == nil {
- return expandedValue{
+ return internal.ExpandedValue{
Value: val,
Original: asStr,
}, true, nil
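
`expandedValue` moves to `internal.ExpandedValue` but keeps the same role: carrying both the YAML-parsed value and the original string until the target field type is known at decode time. A sketch of the selection rule, using a local copy of the type (hypothetical names):

```go
package main

import "fmt"

// expandedValue mirrors the shape in this diff: the YAML-parsed value plus
// the original string representation (local illustrative copy).
type expandedValue struct {
	Value    any
	Original string
}

// castTo mimics the decode-time choice: string targets get the original
// text, everything else gets the parsed value.
func castTo(exp expandedValue, targetIsString bool) any {
	if targetIsString {
		return exp.Original
	}
	return exp.Value
}

func main() {
	// e.g. `foo: ${env:FOO}` with FOO=123
	exp := expandedValue{Value: 123, Original: "123"}
	fmt.Println(castTo(exp, true))  // "123" for a string field
	fmt.Println(castTo(exp, false)) // 123 for an int field
}
```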
diff --git a/vendor/go.opentelemetry.io/collector/confmap/factory.go b/vendor/go.opentelemetry.io/collector/confmap/factory.go
new file mode 100644
index 00000000000..31e3fe9e456
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/factory.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package confmap // import "go.opentelemetry.io/collector/confmap"
+
+type moduleFactory[T any, S any] interface {
+ Create(s S) T
+}
+
+type createConfmapFunc[T any, S any] func(s S) T
+
+type confmapModuleFactory[T any, S any] struct {
+ f createConfmapFunc[T, S]
+}
+
+func (c confmapModuleFactory[T, S]) Create(s S) T {
+ return c.f(s)
+}
+
+func newConfmapModuleFactory[T, S any](f createConfmapFunc[T, S]) moduleFactory[T, S] {
+ return confmapModuleFactory[T, S]{
+ f: f,
+ }
+}
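
The factory code is extracted verbatim into its own file. Assuming the same shape, a self-contained sketch of how such a generic factory adapts a plain constructor function:

```go
package main

import "fmt"

// Same shape as the extracted factory.go (local copy for illustration).
type moduleFactory[T, S any] interface {
	Create(s S) T
}

type createFunc[T, S any] func(s S) T

type funcFactory[T, S any] struct{ f createFunc[T, S] }

func (c funcFactory[T, S]) Create(s S) T { return c.f(s) }

func newModuleFactory[T, S any](f createFunc[T, S]) moduleFactory[T, S] {
	return funcFactory[T, S]{f: f}
}

type settings struct{ scheme string }

func main() {
	// Adapt a plain constructor into the factory interface.
	f := newModuleFactory(func(s settings) string {
		return "provider for " + s.scheme
	})
	fmt.Println(f.Create(settings{scheme: "env"}))
}
```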
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/conf.go b/vendor/go.opentelemetry.io/collector/confmap/internal/conf.go
new file mode 100644
index 00000000000..1760cdd425c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/conf.go
@@ -0,0 +1,235 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/knadh/koanf/maps"
+ "github.com/knadh/koanf/providers/confmap"
+ "github.com/knadh/koanf/v2"
+
+ encoder "go.opentelemetry.io/collector/confmap/internal/mapstructure"
+)
+
+const (
+ // KeyDelimiter is used as the default key delimiter in the default koanf instance.
+ KeyDelimiter = "::"
+)
+
+// Conf represents the raw configuration map for the OpenTelemetry Collector.
+// The confmap.Conf can be unmarshalled into the Collector's config using the "service" package.
+type Conf struct {
+ k *koanf.Koanf
+ // If true, upon unmarshaling do not call the Unmarshal function on the struct
+ // if it implements Unmarshaler and is the top-level struct.
+ // This avoids running into an infinite recursion where Unmarshaler.Unmarshal and
+ // Conf.Unmarshal would call each other.
+ skipTopLevelUnmarshaler bool
+ // isNil is true if this Conf was created from a nil field, as opposed to an empty map.
+ // AllKeys must return an empty slice if this is true.
+ isNil bool
+}
+
+// New creates a new empty confmap.Conf instance.
+func New() *Conf {
+ return &Conf{k: koanf.New(KeyDelimiter), isNil: false}
+}
+
+// NewFromStringMap creates a confmap.Conf from a map[string]any.
+func NewFromStringMap(data map[string]any) *Conf {
+ p := New()
+ if data == nil {
+ p.isNil = true
+ } else {
+ // Cannot return error because the koanf instance is empty.
+ _ = p.k.Load(confmap.Provider(data, KeyDelimiter), nil)
+ }
+ return p
+}
+
+// Unmarshal unmarshals the config into a struct using the given options.
+// Tags on the fields of the structure must be properly set.
+func (l *Conf) Unmarshal(result any, opts ...UnmarshalOption) error {
+ set := UnmarshalOptions{}
+ for _, opt := range opts {
+ opt.apply(&set)
+ }
+ return Decode(l.toStringMapWithExpand(), result, set, l.skipTopLevelUnmarshaler)
+}
+
+// Marshal encodes the config and merges it into the Conf.
+func (l *Conf) Marshal(rawVal any, opts ...MarshalOption) error {
+ set := MarshalOptions{}
+ for _, opt := range opts {
+ opt.apply(&set)
+ }
+ enc := encoder.New(EncoderConfig(rawVal, set))
+ data, err := enc.Encode(rawVal)
+ if err != nil {
+ return err
+ }
+ out, ok := data.(map[string]any)
+ if !ok {
+ return errors.New("invalid config encoding")
+ }
+ return l.Merge(NewFromStringMap(out))
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a KeyDelimiter separator.
+func (l *Conf) AllKeys() []string {
+ return l.k.Keys()
+}
+
+// Get can retrieve any value given the key to use.
+func (l *Conf) Get(key string) any {
+ val := l.unsanitizedGet(key)
+ return sanitizeExpanded(val, false)
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+func (l *Conf) IsSet(key string) bool {
+ return l.k.Exists(key)
+}
+
+// Merge merges the given configuration into the existing config.
+// Note that the given map may be modified.
+func (l *Conf) Merge(in *Conf) error {
+ if EnableMergeAppendOption.IsEnabled() {
+ // Only use mergeAppend when the EnableMergeAppendOption feature gate is enabled.
+ return l.mergeAppend(in)
+ }
+ l.isNil = l.isNil && in.isNil
+ return l.k.Merge(in.k)
+}
+
+// Delete a path from the Conf.
+// If the path exists, deletes it and returns true.
+// If the path does not exist, does nothing and returns false.
+func (l *Conf) Delete(key string) bool {
+ wasSet := l.IsSet(key)
+ l.k.Delete(key)
+ return wasSet
+}
+
+// mergeAppend merges the given configuration into the existing config.
+// Note that the given map may be modified.
+// Additionally, mergeAppend performs deduplication when merging lists.
+// For example, if listA = [extension1, extension2] and listB = [extension1, extension3],
+// the resulting list will be [extension1, extension2, extension3].
+func (l *Conf) mergeAppend(in *Conf) error {
+ err := l.k.Load(confmap.Provider(in.ToStringMap(), ""), nil, koanf.WithMergeFunc(mergeAppend))
+ if err != nil {
+ return err
+ }
+ l.isNil = l.isNil && in.isNil
+ return nil
+}
+
+// Sub returns a new Conf instance representing a sub-config of this instance.
+// It returns an error if the sub-config is not a map[string]any (use Get()), and an empty Conf if none exists.
+func (l *Conf) Sub(key string) (*Conf, error) {
+ // Code inspired by the koanf "Cut" func, but returns an error instead of an empty map for unsupported sub-config types.
+ data := l.unsanitizedGet(key)
+ if data == nil {
+ c := New()
+ c.isNil = true
+ return c, nil
+ }
+
+ switch v := data.(type) {
+ case map[string]any:
+ return NewFromStringMap(v), nil
+ case ExpandedValue:
+ if m, ok := v.Value.(map[string]any); ok {
+ return NewFromStringMap(m), nil
+ } else if v.Value == nil {
+ // If the value is nil, return a new empty Conf.
+ c := New()
+ c.isNil = true
+ return c, nil
+ }
+ // override data with the original value to make the error message more informative.
+ data = v.Value
+ }
+
+ return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind())
+}
+
+func (l *Conf) toStringMapWithExpand() map[string]any {
+ if l.isNil {
+ return nil
+ }
+ m := maps.Unflatten(l.k.All(), KeyDelimiter)
+ return m
+}
+
+// ToStringMap creates a map[string]any from a Conf.
+// Values with multiple representations
+// are normalized with the YAML parsed representation.
+//
+// For example, for a Conf created from `foo: ${env:FOO}` and `FOO=123`
+// ToStringMap will return `map[string]any{"foo": 123}`.
+//
+// For any map `m`, `NewFromStringMap(m).ToStringMap() == m`.
+// In particular, if the Conf was created from a nil value,
+// ToStringMap will return map[string]any(nil).
+func (l *Conf) ToStringMap() map[string]any {
+ return sanitize(l.toStringMapWithExpand()).(map[string]any)
+}
+
+func (l *Conf) unsanitizedGet(key string) any {
+ return l.k.Get(key)
+}
+
+// sanitize recursively removes ExpandedValue references from the given data.
+// It uses the ExpandedValue.Value field to replace the ExpandedValue references.
+func sanitize(a any) any {
+ return sanitizeExpanded(a, false)
+}
+
+// sanitizeToStr recursively removes ExpandedValue references from the given data.
+// It uses the ExpandedValue.Original field to replace the ExpandedValue references.
+func sanitizeToStr(a any) any {
+ return sanitizeExpanded(a, true)
+}
+
+func sanitizeExpanded(a any, useOriginal bool) any {
+ switch m := a.(type) {
+ case map[string]any:
+ c := maps.Copy(m)
+ for k, v := range m {
+ c[k] = sanitizeExpanded(v, useOriginal)
+ }
+ return c
+ case []any:
+ // If the value is nil, return nil.
+ var newSlice []any
+ if m == nil {
+ return newSlice
+ }
+ newSlice = make([]any, 0, len(m))
+ for _, e := range m {
+ newSlice = append(newSlice, sanitizeExpanded(e, useOriginal))
+ }
+ return newSlice
+ case ExpandedValue:
+ if useOriginal {
+ return m.Original
+ }
+ return m.Value
+ }
+ return a
+}
+
+type UnsanitizedGetter struct {
+ Conf *Conf
+}
+
+func (ug *UnsanitizedGetter) UnsanitizedGet(key string) any {
+ return ug.Conf.unsanitizedGet(key)
+}
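
For reviewers unfamiliar with `Conf`: a minimal sketch of the documented behavior of the moved methods, using the public `confmap` API (keys use the `::` delimiter; `ToStringMap` preserves the nil-vs-empty distinction):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

func main() {
	m := map[string]any{
		"receivers": map[string]any{
			"otlp": map[string]any{"endpoint": "localhost:4317"},
		},
	}
	conf := confmap.NewFromStringMap(m)

	// Nested keys are addressed with the "::" delimiter.
	fmt.Println(conf.IsSet("receivers::otlp::endpoint")) // true
	fmt.Println(conf.Get("receivers::otlp::endpoint"))   // localhost:4317

	// Documented invariant: NewFromStringMap(m).ToStringMap() == m,
	// and a Conf built from nil stays distinguishable from an empty map.
	fmt.Println(conf.ToStringMap())
	fmt.Println(confmap.NewFromStringMap(nil).ToStringMap() == nil) // true
}
```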
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/internal/confmap.go
new file mode 100644
index 00000000000..c242873a241
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/confmap.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+// Unmarshaler interface may be implemented by types to customize their behavior when being unmarshaled from a Conf.
+// Only types with struct or pointer to struct kind are supported.
+type Unmarshaler interface {
+ // Unmarshal a Conf into the struct in a custom way.
+ // The Conf for this specific component may be nil or empty if no config available.
+ // This method should only be called by decoding hooks when calling Conf.Unmarshal.
+ Unmarshal(component *Conf) error
+}
+
+// Marshaler defines an optional interface for custom configuration marshaling.
+// A configuration struct can implement this interface to override the default
+// marshaling.
+type Marshaler interface {
+ // Marshal the config into a Conf in a custom way.
+ // The Conf will be empty and can be merged into.
+ Marshal(component *Conf) error
+}
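
The `skipTopLevelUnmarshaler` flag threaded through `Conf` above is what makes the usual custom-unmarshal pattern safe from infinite recursion. A sketch of that pattern against the public API (hypothetical `Config` type):

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

type Config struct {
	Endpoint string `mapstructure:"endpoint"`
}

// Unmarshal customizes decoding: the default decode runs first, then we can
// validate or derive fields. The skip-top-level flag set by the decoder hook
// prevents this inner call from recursing back into Unmarshal.
func (c *Config) Unmarshal(conf *confmap.Conf) error {
	if err := conf.Unmarshal(c); err != nil {
		return err
	}
	if c.Endpoint == "" {
		return errors.New("endpoint must be set")
	}
	return nil
}

func main() {
	conf := confmap.NewFromStringMap(map[string]any{"endpoint": "localhost:4317"})
	var cfg Config
	if err := conf.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Endpoint)
}
```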
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go
new file mode 100644
index 00000000000..17f172a2ec3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/decoder.go
@@ -0,0 +1,361 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "maps"
+ "reflect"
+ "slices"
+ "strings"
+
+ "github.com/go-viper/mapstructure/v2"
+
+ "go.opentelemetry.io/collector/confmap/internal/third_party/composehook"
+)
+
+const (
+ // MapstructureTag is the struct field tag used to record marshaling/unmarshaling settings.
+ // See https://pkg.go.dev/github.com/go-viper/mapstructure/v2 for supported values.
+ MapstructureTag = "mapstructure"
+)
+
+// WithIgnoreUnused sets an option to ignore errors if existing
+// keys in the original Conf were unused in the decoding process
+// (extra keys).
+func WithIgnoreUnused() UnmarshalOption {
+ return UnmarshalOptionFunc(func(uo *UnmarshalOptions) {
+ uo.IgnoreUnused = true
+ })
+}
+
+// Decode decodes the given input map into the result argument, using a
+// mapstructure decoder with the following notable behaviors. Ensures that maps whose
+// values are nil pointer structs resolve to the zero value of the target struct (see
+// expandNilStructPointers). Converts string to []string by splitting on ','. Ensures
+// uniqueness of component IDs (see mapKeyStringToMapKeyTextUnmarshalerHookFunc).
+// Decodes time.Duration from strings. Allows custom unmarshaling for structs implementing
+// encoding.TextUnmarshaler. Allows custom unmarshaling for structs implementing confmap.Unmarshaler.
+func Decode(input, result any, settings UnmarshalOptions, skipTopLevelUnmarshaler bool) error {
+ dc := &mapstructure.DecoderConfig{
+ ErrorUnused: !settings.IgnoreUnused,
+ Result: result,
+ TagName: MapstructureTag,
+ WeaklyTypedInput: false,
+ MatchName: caseSensitiveMatchName,
+ DecodeNil: true,
+ DecodeHook: composehook.ComposeDecodeHookFunc(
+ useExpandValue(),
+ expandNilStructPointersHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ mapKeyStringToMapKeyTextUnmarshalerHookFunc(),
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.TextUnmarshallerHookFunc(),
+ unmarshalerHookFunc(result, skipTopLevelUnmarshaler),
+ // after the main unmarshaler hook is called,
+ // we unmarshal the embedded structs if present to merge with the result:
+ unmarshalerEmbeddedStructsHookFunc(),
+ zeroSliceAndMapHookFunc(),
+ ),
+ }
+ decoder, err := mapstructure.NewDecoder(dc)
+ if err != nil {
+ return err
+ }
+ if err = decoder.Decode(input); err != nil {
+ if strings.HasPrefix(err.Error(), "error decoding ''") {
+ return errors.Unwrap(err)
+ }
+ return err
+ }
+ return nil
+}
+
+// When a value has been loaded from an external source via a provider, we keep both the
+// parsed value and the original string value. This allows us to expand the value to its
+// original string representation when decoding into a string field, and use the original otherwise.
+func useExpandValue() mapstructure.DecodeHookFuncType {
+ return func(
+ _ reflect.Type,
+ to reflect.Type,
+ data any,
+ ) (any, error) {
+ if exp, ok := data.(ExpandedValue); ok {
+ v := castTo(exp, to.Kind() == reflect.String)
+ // See https://github.com/open-telemetry/opentelemetry-collector/issues/10949
+ // If the `to.Kind` is not a string, then expandValue's original value is useless and
+ // the casted-to value will be nil. In that scenario, we need to use the default value of `to`'s kind.
+ if v == nil {
+ return reflect.Zero(to).Interface(), nil
+ }
+ return v, nil
+ }
+
+ switch to.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map:
+ if isStringyStructure(to) {
+ // If the target field is a stringy structure, sanitize to use the original string value everywhere.
+ return sanitizeToStr(data), nil
+ }
+ // Otherwise, sanitize to use the parsed value everywhere.
+ return sanitize(data), nil
+ }
+ return data, nil
+ }
+}
+
+// In cases where a config has a mapping of something to a struct pointers
+// we want nil values to resolve to a pointer to the zero value of the
+// underlying struct just as we want nil values of a mapping of something
+// to a struct to resolve to the zero value of that struct.
+//
+// e.g. given a config type:
+// type Config struct { Thing *SomeStruct `mapstructure:"thing"` }
+//
+// and yaml of:
+// config:
+//
+// thing:
+//
+// we want an unmarshaled Config to be equivalent to
+// Config{Thing: &SomeStruct{}} instead of Config{Thing: nil}
+func expandNilStructPointersHookFunc() mapstructure.DecodeHookFuncValue {
+ return safeWrapDecodeHookFunc(func(from, to reflect.Value) (any, error) {
+ // ensure we are dealing with map to map comparison
+ if from.Kind() == reflect.Map && to.Kind() == reflect.Map {
+ toElem := to.Type().Elem()
+ // ensure that map values are pointers to a struct
+ // (that may be nil and require manual setting w/ zero value)
+ if toElem.Kind() == reflect.Ptr && toElem.Elem().Kind() == reflect.Struct {
+ fromRange := from.MapRange()
+ for fromRange.Next() {
+ fromKey := fromRange.Key()
+ fromValue := fromRange.Value()
+ // ensure that we've run into a nil pointer instance
+ if fromValue.IsNil() {
+ newFromValue := reflect.New(toElem.Elem())
+ from.SetMapIndex(fromKey, newFromValue)
+ }
+ }
+ }
+ }
+ return from.Interface(), nil
+ })
+}
+
+// mapKeyStringToMapKeyTextUnmarshalerHookFunc returns a DecodeHookFuncType that checks that a conversion from
+// map[string]any to map[encoding.TextUnmarshaler]any does not overwrite keys,
+// when UnmarshalText produces equal elements from different strings (e.g. trims whitespaces).
+//
+// This is needed in combination with ComponentID, which may produce equal IDs for different strings,
+// and an error needs to be returned in that case, otherwise the last equivalent ID overwrites the previous one.
+func mapKeyStringToMapKeyTextUnmarshalerHookFunc() mapstructure.DecodeHookFuncType {
+ return func(from, to reflect.Type, data any) (any, error) {
+ if from.Kind() != reflect.Map || from.Key().Kind() != reflect.String {
+ return data, nil
+ }
+
+ if to.Kind() != reflect.Map {
+ return data, nil
+ }
+
+ // Checks that the key type of to implements the TextUnmarshaler interface.
+ if _, ok := reflect.New(to.Key()).Interface().(encoding.TextUnmarshaler); !ok {
+ return data, nil
+ }
+
+ // Create a map from to's key type to bool, used as a set of already-seen keys.
+ fieldNameSet := reflect.MakeMap(reflect.MapOf(to.Key(), reflect.TypeOf(true)))
+ for k := range data.(map[string]any) {
+ // Create a new value of to's key type.
+ tKey := reflect.New(to.Key())
+
+ // Use tKey to unmarshal the key of the map.
+ if err := tKey.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(k)); err != nil {
+ return nil, err
+ }
+ // Checks if the key has already been decoded in a previous iteration.
+ if fieldNameSet.MapIndex(reflect.Indirect(tKey)).IsValid() {
+ return nil, fmt.Errorf("duplicate name %q after unmarshaling %v", k, tKey)
+ }
+ fieldNameSet.SetMapIndex(reflect.Indirect(tKey), reflect.ValueOf(true))
+ }
+ return data, nil
+ }
+}
+
+// unmarshalerEmbeddedStructsHookFunc provides a mechanism for embedded structs to define their own unmarshal logic,
+// by implementing the Unmarshaler interface.
+func unmarshalerEmbeddedStructsHookFunc() mapstructure.DecodeHookFuncValue {
+ return safeWrapDecodeHookFunc(func(from, to reflect.Value) (any, error) {
+ if to.Type().Kind() != reflect.Struct {
+ return from.Interface(), nil
+ }
+ fromAsMap, ok := from.Interface().(map[string]any)
+ if !ok {
+ return from.Interface(), nil
+ }
+ for i := 0; i < to.Type().NumField(); i++ {
+ // embedded structs passed in via `squash` cannot be pointers. We just check if they are structs:
+ f := to.Type().Field(i)
+ if f.IsExported() && slices.Contains(strings.Split(f.Tag.Get(MapstructureTag), ","), "squash") {
+ if unmarshaler, ok := to.Field(i).Addr().Interface().(Unmarshaler); ok {
+ c := NewFromStringMap(fromAsMap)
+ c.skipTopLevelUnmarshaler = true
+ if err := unmarshaler.Unmarshal(c); err != nil {
+ return nil, err
+ }
+ // the struct we receive from this unmarshaling only contains fields related to the embedded struct.
+ // we merge this partially unmarshaled struct with the rest of the result.
+ // note we already unmarshaled the main struct earlier, and therefore merge with it.
+ conf := New()
+ if err := conf.Marshal(unmarshaler); err != nil {
+ return nil, err
+ }
+ resultMap := conf.ToStringMap()
+ if fromAsMap == nil && len(resultMap) > 0 {
+ fromAsMap = make(map[string]any, len(resultMap))
+ }
+ maps.Copy(fromAsMap, resultMap)
+ }
+ }
+ }
+ return fromAsMap, nil
+ })
+}
+
+// Provides a mechanism for individual structs to define their own unmarshal logic,
+// by implementing the Unmarshaler interface, unless skipTopLevelUnmarshaler is
+// true and the struct matches the top level object being unmarshaled.
+func unmarshalerHookFunc(result any, skipTopLevelUnmarshaler bool) mapstructure.DecodeHookFuncValue {
+ return safeWrapDecodeHookFunc(func(from, to reflect.Value) (any, error) {
+ if !to.CanAddr() {
+ return from.Interface(), nil
+ }
+
+ toPtr := to.Addr().Interface()
+ // Need to ignore the top structure to avoid running into an infinite recursion
+ // where Unmarshaler.Unmarshal and Conf.Unmarshal would call each other.
+ if toPtr == result && skipTopLevelUnmarshaler {
+ return from.Interface(), nil
+ }
+
+ unmarshaler, ok := toPtr.(Unmarshaler)
+ if !ok {
+ return from.Interface(), nil
+ }
+
+ if _, ok = from.Interface().(map[string]any); !ok {
+ return from.Interface(), nil
+ }
+
+ // Use the current object if not nil (to preserve other configs in the object), otherwise zero initialize.
+ if to.Addr().IsNil() {
+ unmarshaler = reflect.New(to.Type()).Interface().(Unmarshaler)
+ }
+
+ c := NewFromStringMap(from.Interface().(map[string]any))
+ c.skipTopLevelUnmarshaler = true
+ if err := unmarshaler.Unmarshal(c); err != nil {
+ return nil, err
+ }
+
+ return unmarshaler, nil
+ })
+}
+
+// safeWrapDecodeHookFunc wraps a DecodeHookFuncValue to ensure fromVal is a valid `reflect.Value`
+// object and therefore it is safe to call `reflect.Value` methods on fromVal.
+//
+// Use this only if the hook does not need to be called on untyped nil values.
+// Typed nil values are safe to call and will be passed to the hook.
+// See https://github.com/golang/go/issues/51649
+func safeWrapDecodeHookFunc(
+ f mapstructure.DecodeHookFuncValue,
+) mapstructure.DecodeHookFuncValue {
+ return func(fromVal, toVal reflect.Value) (any, error) {
+ if !fromVal.IsValid() {
+ return nil, nil
+ }
+ return f(fromVal, toVal)
+ }
+}
+
+// This hook is used to solve the issue: https://github.com/open-telemetry/opentelemetry-collector/issues/4001
+// We adopt the suggestion provided in this issue: https://github.com/mitchellh/mapstructure/issues/74#issuecomment-279886492
+// We should empty every slice before unmarshalling unless the user-provided slice is nil.
+// Assume we have a struct with a slice field called `keys` whose default value is ["a", "b"]:
+//
+// type Config struct {
+//   Keys []string `mapstructure:"keys"`
+// }
+//
+// The configuration provided by users may fall into the following cases:
+// 1. the configuration has a `keys` field with a non-nil value; the output should be overridden
+//   - for example, input is {"keys": ["c"]}, then output is Config{ Keys: ["c"]}
+//
+// 2. the configuration has a `keys` field with an empty slice; the output should be overridden by the empty slice
+//   - for example, input is {"keys": []}, then output is Config{ Keys: []}
+//
+// 3. the configuration has a `keys` field with a nil value; the output should be the default config
+//   - for example, input is {"keys": nil}, then output is Config{ Keys: ["a", "b"]}
+//
+// 4. the configuration has no `keys` field; the output should be the default config
+//   - for example, input is {}, then output is Config{ Keys: ["a", "b"]}
+//
+// This hook is also used to solve https://github.com/open-telemetry/opentelemetry-collector/issues/13117.
+// Since v0.127.0, we decode nil values to avoid creating empty map objects.
+// The nil value is not well understood when layered on top of a default map non-nil value.
+// The fix is to avoid the assignment and return the previous value.
+func zeroSliceAndMapHookFunc() mapstructure.DecodeHookFuncValue {
+ return safeWrapDecodeHookFunc(func(from, to reflect.Value) (any, error) {
+ if to.CanSet() && to.Kind() == reflect.Slice && from.Kind() == reflect.Slice {
+ if !from.IsNil() {
+ // input slice is not nil, set the output slice to a new slice of the same type.
+ to.Set(reflect.MakeSlice(to.Type(), from.Len(), from.Cap()))
+ }
+ }
+ if to.CanSet() && to.Kind() == reflect.Map && from.Kind() == reflect.Map {
+ if from.IsNil() {
+ return to.Interface(), nil
+ }
+ }
+
+ return from.Interface(), nil
+ })
+}
+
+// case-sensitive version of the callback to be used in the MatchName property
+// of the DecoderConfig. The default for MatchEqual is to use strings.EqualFold,
+// which is case-insensitive.
+func caseSensitiveMatchName(a, b string) bool {
+ return a == b
+}
+
+func castTo(exp ExpandedValue, useOriginal bool) any {
+ // If the target field is a string, use `exp.Original` or fail if not available.
+ if useOriginal {
+ return exp.Original
+ }
+ // Otherwise, use the parsed value (previous behavior).
+ return exp.Value
+}
+
+// Check if a reflect.Type is of the form T, where:
+// X is any type or interface
+// T = string | map[X]T | []T | [n]T
+func isStringyStructure(t reflect.Type) bool {
+ if t.Kind() == reflect.String {
+ return true
+ }
+ if t.Kind() == reflect.Map {
+ return isStringyStructure(t.Elem())
+ }
+ if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
+ return isStringyStructure(t.Elem())
+ }
+ return false
+}
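
A sketch that exercises the four documented slice cases above through the public API (hypothetical `Config` type; the expected outputs follow the comment block, not an independent verification):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

type Config struct {
	Keys []string `mapstructure:"keys"`
}

func decode(input map[string]any) Config {
	cfg := Config{Keys: []string{"a", "b"}} // defaults
	conf := confmap.NewFromStringMap(input)
	if err := conf.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	return cfg
}

func main() {
	fmt.Println(decode(map[string]any{"keys": []any{"c"}}).Keys) // [c]   case 1: overridden
	fmt.Println(decode(map[string]any{"keys": []any{}}).Keys)    // []    case 2: emptied
	fmt.Println(decode(map[string]any{"keys": nil}).Keys)        // [a b] case 3: defaults kept
	fmt.Println(decode(map[string]any{}).Keys)                   // [a b] case 4: defaults kept
}
```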
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/encoder.go
new file mode 100644
index 00000000000..6df9675b41a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/encoder.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+import (
+ "reflect"
+
+ "github.com/go-viper/mapstructure/v2"
+
+ encoder "go.opentelemetry.io/collector/confmap/internal/mapstructure"
+)
+
+// EncoderConfig returns a default encoder.EncoderConfig that includes
+// an EncodeHook that handles both TextMarshaler and Marshaler
+// interfaces.
+func EncoderConfig(rawVal any, _ MarshalOptions) *encoder.EncoderConfig {
+ return &encoder.EncoderConfig{
+ EncodeHook: mapstructure.ComposeDecodeHookFunc(
+ encoder.YamlMarshalerHookFunc(),
+ encoder.TextMarshalerHookFunc(),
+ marshalerHookFunc(rawVal),
+ ),
+ }
+}
+
+// marshalerHookFunc returns a DecodeHookFuncValue that checks structs that aren't
+// the original to see if they implement the Marshaler interface.
+func marshalerHookFunc(orig any) mapstructure.DecodeHookFuncValue {
+ origType := reflect.TypeOf(orig)
+ return safeWrapDecodeHookFunc(func(from, _ reflect.Value) (any, error) {
+ if from.Kind() != reflect.Struct {
+ return from.Interface(), nil
+ }
+
+ // ignore original to avoid infinite loop.
+ if from.Type() == origType && reflect.DeepEqual(from.Interface(), orig) {
+ return from.Interface(), nil
+ }
+ marshaler, ok := from.Interface().(Marshaler)
+ if !ok {
+ return from.Interface(), nil
+ }
+ conf := NewFromStringMap(nil)
+ if err := marshaler.Marshal(conf); err != nil {
+ return nil, err
+ }
+
+ stringMap := conf.ToStringMap()
+ if stringMap == nil {
+ // If conf is still nil after marshaling, we want to encode it as an untyped nil
+ // instead of a map-typed nil. This ensures the value is a proper null value
+ // in the final marshaled output instead of an empty map. We hit this case
+ // when marshaling wrapper structs that have no direct representation
+ // in the marshaled output and aren't tagged with "squash" on the fields
+ // where they're used.
+ return nil, nil
+ }
+ return stringMap, nil
+ })
+}
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/expand.go b/vendor/go.opentelemetry.io/collector/confmap/internal/expand.go
new file mode 100644
index 00000000000..bf3a4053f04
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/expand.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+// ExpandedValue holds the YAML parsed value and original representation of a value.
+// It keeps track of the original representation to be used by the 'useExpandValue' hook
+// if the target field is a string. We need to keep both representations because we don't know
+// what the target field type is until `Unmarshal` is called.
+type ExpandedValue struct {
+ // Value is the expanded value.
+ Value any
+ // Original is the original representation of the value.
+ Original string
+}
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go
new file mode 100644
index 00000000000..47f9348dc42
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/featuregates.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+import "go.opentelemetry.io/collector/featuregate"
+
+var EnableMergeAppendOption = featuregate.GlobalRegistry().MustRegister(
+ "confmap.enableMergeAppendOption",
+ featuregate.StageAlpha,
+ featuregate.WithRegisterFromVersion("v0.120.0"),
+ featuregate.WithRegisterDescription("Combines lists when resolving configs from different sources. This feature gate will not be stabilized 'as is'; the current behavior will remain the default."),
+ featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/8754"),
+)
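
The gate keeps its `confmap.enableMergeAppendOption` ID, so existing toggles keep working. A sketch of flipping it programmatically; the blank import is assumed to register the gate as an init side effect of the vendored version above:

```go
package main

import (
	"fmt"

	_ "go.opentelemetry.io/collector/confmap" // assumed to register the gate on init
	"go.opentelemetry.io/collector/featuregate"
)

func main() {
	// Flip the alpha gate on programmatically; the collector binary exposes
	// the same toggle via --feature-gates=confmap.enableMergeAppendOption.
	if err := featuregate.GlobalRegistry().Set("confmap.enableMergeAppendOption", true); err != nil {
		panic(err)
	}

	featuregate.GlobalRegistry().VisitAll(func(g *featuregate.Gate) {
		if g.ID() == "confmap.enableMergeAppendOption" {
			fmt.Println(g.ID(), "enabled:", g.IsEnabled())
		}
	})
}
```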
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go
index 948c17d4d54..d8208747d37 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go
@@ -7,11 +7,12 @@ import (
"encoding"
"errors"
"fmt"
+ "maps"
"reflect"
"strings"
"github.com/go-viper/mapstructure/v2"
- yaml "sigs.k8s.io/yaml/goyaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
const (
@@ -121,9 +122,7 @@ func (e *Encoder) encodeStruct(value reflect.Value) (any, error) {
}
if info.squash {
if m, ok := encoded.(map[string]any); ok {
- for k, v := range m {
- result[k] = v
- }
+ maps.Copy(result, m)
}
} else {
result[info.name] = encoded
@@ -225,7 +224,7 @@ func getTagInfo(field reflect.StructField) *tagInfo {
// for the encoding.TextMarshaler interface and calls the MarshalText
// function if found.
func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue {
- return func(from reflect.Value, _ reflect.Value) (any, error) {
+ return func(from, _ reflect.Value) (any, error) {
marshaler, ok := from.Interface().(encoding.TextMarshaler)
if !ok {
return from.Interface(), nil
@@ -243,7 +242,7 @@ func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue {
// to map[string]any using the yaml package, which respects the yaml tags. Ultimately,
// this allows mapstructure to later marshal the map[string]any in a generic way.
func YamlMarshalerHookFunc() mapstructure.DecodeHookFuncValue {
- return func(from reflect.Value, _ reflect.Value) (any, error) {
+ return func(from, _ reflect.Value) (any, error) {
if from.Kind() == reflect.Struct {
for i := 0; i < from.NumField(); i++ {
if _, ok := from.Type().Field(i).Tag.Lookup("mapstructure"); ok {
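
Several hunks in this file (and in decoder.go above) replace manual key-copy loops with `maps.Copy` from the Go 1.21+ standard library; the semantics are identical, as this small sketch shows:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	result := map[string]any{"a": 1}
	encoded := map[string]any{"b": 2, "a": 99}

	// maps.Copy(dst, src) inserts every key/value of src into dst,
	// overwriting existing keys: the same behavior as the loop it replaces.
	maps.Copy(result, encoded)

	fmt.Println(result) // map[a:99 b:2]
}
```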
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/marshaloption.go b/vendor/go.opentelemetry.io/collector/confmap/internal/marshaloption.go
new file mode 100644
index 00000000000..06d5726b918
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/marshaloption.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+type MarshalOption interface {
+ apply(*MarshalOptions)
+}
+
+// MarshalOptions is used by (*Conf).Marshal to toggle marshaling settings.
+// It is in the `internal` package so experimental options can be added in xconfmap.
+type MarshalOptions struct{}
+
+type MarshalOptionFunc func(*MarshalOptions)
+
+func (fn MarshalOptionFunc) apply(set *MarshalOptions) {
+ fn(set)
+}
diff --git a/vendor/go.opentelemetry.io/collector/confmap/merge.go b/vendor/go.opentelemetry.io/collector/confmap/internal/merge.go
similarity index 94%
rename from vendor/go.opentelemetry.io/collector/confmap/merge.go
rename to vendor/go.opentelemetry.io/collector/confmap/internal/merge.go
index 10252c8aeab..aeddad87492 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/merge.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/merge.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package confmap // import "go.opentelemetry.io/collector/confmap"
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
import (
"reflect"
@@ -84,7 +84,7 @@ func mergeSlice(src, dest reflect.Value) any {
return slice.Interface()
}
-func isPresent(slice reflect.Value, val reflect.Value) bool {
+func isPresent(slice, val reflect.Value) bool {
for i := 0; i < slice.Len(); i++ {
if reflect.DeepEqual(val.Interface(), slice.Index(i).Interface()) {
return true
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/third_party/composehook/compose_hook.go b/vendor/go.opentelemetry.io/collector/confmap/internal/third_party/composehook/compose_hook.go
index f51050f66ed..3b24122c8d5 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/internal/third_party/composehook/compose_hook.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/third_party/composehook/compose_hook.go
@@ -41,7 +41,7 @@ func typedDecodeHook(h mapstructure.DecodeHookFunc) mapstructure.DecodeHookFunc
func cachedDecodeHook(raw mapstructure.DecodeHookFunc) func(reflect.Value, reflect.Value) (any, error) {
switch f := typedDecodeHook(raw).(type) {
case mapstructure.DecodeHookFuncType:
- return func(from reflect.Value, to reflect.Value) (any, error) {
+ return func(from, to reflect.Value) (any, error) {
// CHANGE FROM UPSTREAM: check if from is valid and return nil if not
if !from.IsValid() {
return nil, nil
@@ -49,7 +49,7 @@ func cachedDecodeHook(raw mapstructure.DecodeHookFunc) func(reflect.Value, refle
return f(from.Type(), to.Type(), from.Interface())
}
case mapstructure.DecodeHookFuncKind:
- return func(from reflect.Value, to reflect.Value) (any, error) {
+ return func(from, to reflect.Value) (any, error) {
// CHANGE FROM UPSTREAM: check if from is valid and return nil if not
if !from.IsValid() {
return nil, nil
@@ -57,7 +57,7 @@ func cachedDecodeHook(raw mapstructure.DecodeHookFunc) func(reflect.Value, refle
return f(from.Kind(), to.Kind(), from.Interface())
}
case mapstructure.DecodeHookFuncValue:
- return func(from reflect.Value, to reflect.Value) (any, error) {
+ return func(from, to reflect.Value) (any, error) {
return f(from, to)
}
default:
@@ -80,7 +80,7 @@ func ComposeDecodeHookFunc(fs ...mapstructure.DecodeHookFunc) mapstructure.Decod
for _, f := range fs {
cached = append(cached, cachedDecodeHook(f))
}
- return func(f reflect.Value, t reflect.Value) (any, error) {
+ return func(f, t reflect.Value) (any, error) {
var err error
// CHANGE FROM UPSTREAM: check if f is valid before calling f.Interface()
diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go b/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go
new file mode 100644
index 00000000000..e82ce6fd0cc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/confmap/internal/unmarshaloption.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/confmap/internal"
+
+type UnmarshalOption interface {
+ apply(*UnmarshalOptions)
+}
+
+// UnmarshalOptions is used by (*Conf).Unmarshal to toggle unmarshaling settings.
+// It is in the `internal` package so experimental options can be added in xconfmap.
+type UnmarshalOptions struct {
+ IgnoreUnused bool
+}
+
+type UnmarshalOptionFunc func(*UnmarshalOptions)
+
+func (fn UnmarshalOptionFunc) apply(set *UnmarshalOptions) {
+ fn(set)
+}
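
Editor's note: UnmarshalOptions differs from its Marshal counterpart only in carrying a concrete flag, IgnoreUnused. Under the same adapter, a flag-setting option reduces to a one-line closure; the withIgnoreUnused name below is a stand-in, since the exported constructor is not part of this diff:

```go
package main

import "fmt"

type UnmarshalOptions struct{ IgnoreUnused bool }

type UnmarshalOption interface{ apply(*UnmarshalOptions) }

type UnmarshalOptionFunc func(*UnmarshalOptions)

func (fn UnmarshalOptionFunc) apply(set *UnmarshalOptions) { fn(set) }

// withIgnoreUnused mirrors how a public option could set the flag;
// the real exported name is not shown in this diff.
func withIgnoreUnused() UnmarshalOption {
	return UnmarshalOptionFunc(func(o *UnmarshalOptions) { o.IgnoreUnused = true })
}

func main() {
	var set UnmarshalOptions
	withIgnoreUnused().apply(&set)
	fmt.Println(set.IgnoreUnused) // true
}
```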
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go
index 7d2704fe01b..ab3e9a18a5b 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/provider.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go
@@ -9,7 +9,7 @@ import (
"time"
"go.uber.org/zap"
- yaml "sigs.k8s.io/yaml/goyaml.v3"
+ yaml "go.yaml.in/yaml/v3"
)
// ProviderSettings are the settings to initialize a Provider.
@@ -86,6 +86,8 @@ type Provider interface {
// This method must be called when the Collector service ends, either in case of
// success or error. Retrieve cannot be called after Shutdown.
//
+ // Provider MUST shut down and wait for any goroutine(s) it created to call `watcher`.
+ //
// Should never be called concurrently with itself or with Retrieve.
// If ctx is cancelled should return immediately with an error.
Shutdown(ctx context.Context) error
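
Editor's note: the added sentence tightens the Provider contract; any goroutine a provider spawns to invoke `watcher` must be joined before Shutdown returns. One way a provider could satisfy that, sketched with a WaitGroup and a stop channel (all names below are illustrative, not from the collector API):

```go
package main

import (
	"context"
	"sync"
	"time"
)

type fileProvider struct {
	done chan struct{}
	wg   sync.WaitGroup
}

// startWatching launches the goroutine that would invoke the watcher
// callback on changes (reduced here to a ticker loop).
func (p *fileProvider) startWatching(watcher func()) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		t := time.NewTicker(10 * time.Millisecond)
		defer t.Stop()
		for {
			select {
			case <-p.done:
				return
			case <-t.C:
				watcher()
			}
		}
	}()
}

// Shutdown signals the watch goroutine and waits for it to exit,
// satisfying "MUST shut down and wait for any goroutine(s)".
func (p *fileProvider) Shutdown(ctx context.Context) error {
	close(p.done)
	p.wg.Wait()
	return nil
}

func main() {
	p := &fileProvider{done: make(chan struct{})}
	p.startWatching(func() {})
	_ = p.Shutdown(context.Background())
}
```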
diff --git a/vendor/go.opentelemetry.io/collector/confmap/resolver.go b/vendor/go.opentelemetry.io/collector/confmap/resolver.go
index ee8058a5658..4fa84f01632 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/resolver.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/resolver.go
@@ -13,15 +13,7 @@ import (
"go.uber.org/multierr"
"go.uber.org/zap"
- "go.opentelemetry.io/collector/featuregate"
-)
-
-var enableMergeAppendOption = featuregate.GlobalRegistry().MustRegister(
- "confmap.enableMergeAppendOption",
- featuregate.StageAlpha,
- featuregate.WithRegisterFromVersion("v0.120.0"),
- featuregate.WithRegisterDescription("Combines lists when resolving configs from different sources. This feature gate will not be stabilized 'as is'; the current behavior will remain the default."),
- featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/8754"),
+ "go.opentelemetry.io/collector/confmap/internal"
)
// follows drive-letter specification:
@@ -191,7 +183,8 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) {
cfgMap := make(map[string]any)
for _, k := range retMap.AllKeys() {
- val, err := mr.expandValueRecursively(ctx, retMap.unsanitizedGet(k))
+ ug := internal.UnsanitizedGetter{Conf: retMap}
+ val, err := mr.expandValueRecursively(ctx, ug.UnsanitizedGet(k))
if err != nil {
return nil, err
}
@@ -213,7 +206,7 @@ func escapeDollarSigns(val any) any {
switch v := val.(type) {
case string:
return strings.ReplaceAll(v, "$$", "$")
- case expandedValue:
+ case internal.ExpandedValue:
v.Original = strings.ReplaceAll(v.Original, "$$", "$")
v.Value = escapeDollarSigns(v.Value)
return v
@@ -250,14 +243,14 @@ func (mr *Resolver) Watch() <-chan error {
//
// Should never be called concurrently with itself or Get.
func (mr *Resolver) Shutdown(ctx context.Context) error {
- close(mr.watcher)
-
var errs error
errs = multierr.Append(errs, mr.closeIfNeeded(ctx))
for _, p := range mr.providers {
errs = multierr.Append(errs, p.Shutdown(ctx))
}
+ close(mr.watcher)
+
return errs
}
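
Editor's note: moving close(mr.watcher) after the provider shutdowns pairs with the strengthened Provider contract above. A provider goroutine may keep invoking the watcher callback until its own Shutdown returns, so closing the channel first risks a send on a closed channel. A stripped-down illustration of why producers must stop before the channel they send on is closed:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	watcher := make(chan error, 1)
	stop := make(chan struct{})
	var wg sync.WaitGroup

	// Stand-in for a provider goroutine reporting watch events.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-stop:
				return
			case watcher <- nil: // would panic if watcher were already closed
			}
		}
	}()

	// Correct order, as in the patched Shutdown: stop the producer
	// first, then close the channel it sends on.
	close(stop)
	wg.Wait()
	close(watcher)
	fmt.Println("shutdown completed without panic")
}
```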
diff --git a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go
index b9e8edbbaa5..ba90b54bc28 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/xconfmap/config.go
@@ -172,7 +172,7 @@ func fieldName(field reflect.StructField) string {
}
// Even if the mapstructure tag exists, the field name may not
// be available, so set it if it is still blank.
- if len(fieldName) == 0 {
+ if fieldName == "" {
fieldName = strings.ToLower(field.Name)
}
@@ -180,20 +180,17 @@ func fieldName(field reflect.StructField) string {
}
func stringifyMapKey(val reflect.Value) string {
- var key string
-
- if str, ok := val.Interface().(string); ok {
- key = str
- } else if stringer, ok := val.Interface().(fmt.Stringer); ok {
- key = stringer.String()
- } else {
+ switch v := val.Interface().(type) {
+ case string:
+ return v
+ case fmt.Stringer:
+ return v.String()
+ default:
switch val.Kind() {
case reflect.Ptr, reflect.Interface, reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
- key = fmt.Sprintf("[%T key]", val.Interface())
+ return fmt.Sprintf("[%T key]", val.Interface())
default:
- key = fmt.Sprintf("%v", val.Interface())
+ return fmt.Sprintf("%v", val.Interface())
}
}
-
- return key
}
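
Editor's note: the stringifyMapKey rewrite swaps an if/else chain plus a mutable key variable for a type switch with direct returns; behavior is unchanged. A runnable copy of the refactored function, showing how each arm resolves:

```go
package main

import (
	"fmt"
	"net"
	"reflect"
)

func stringifyMapKey(val reflect.Value) string {
	switch v := val.Interface().(type) {
	case string:
		return v
	case fmt.Stringer:
		return v.String()
	default:
		switch val.Kind() {
		case reflect.Ptr, reflect.Interface, reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
			return fmt.Sprintf("[%T key]", val.Interface())
		default:
			return fmt.Sprintf("%v", val.Interface())
		}
	}
}

func main() {
	fmt.Println(stringifyMapKey(reflect.ValueOf("plain")))                // plain
	fmt.Println(stringifyMapKey(reflect.ValueOf(net.IPv4(10, 0, 0, 1)))) // 10.0.0.1, via fmt.Stringer
	fmt.Println(stringifyMapKey(reflect.ValueOf(42)))                    // 42
	fmt.Println(stringifyMapKey(reflect.ValueOf(struct{}{})))            // [struct {} key]
}
```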
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go
index 9309024c38b..00e6ec21518 100644
--- a/vendor/go.opentelemetry.io/collector/featuregate/registry.go
+++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go
@@ -20,7 +20,7 @@ var (
// idRegexp is used to validate the ID of a Gate.
// IDs' characters must be alphanumeric or dots.
- idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`)
+ idRegexp = regexp.MustCompile(`^[0-9a-zA-Z.]*$`)
)
// ErrAlreadyRegistered is returned when adding a Gate that is already registered.
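
Editor's note: the featuregate tweak is purely cosmetic. Inside a character class, `.` is already literal, so the backslash was redundant and both patterns accept exactly the same IDs. A quick equivalence check:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	escaped := regexp.MustCompile(`^[0-9a-zA-Z\.]*$`)
	plain := regexp.MustCompile(`^[0-9a-zA-Z.]*$`)

	for _, id := range []string{"confmap.enableMergeAppendOption", "bad id", "a.b.c"} {
		// Both forms agree on every input: the dot is literal either way.
		fmt.Println(id, escaped.MatchString(id), plain.MatchString(id))
	}
}
```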
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE b/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile b/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile
deleted file mode 100644
index ded7a36092d..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go
deleted file mode 100644
index a246af4da39..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/attribute.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute"
-
-import (
- "slices"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-const (
- ComponentKindKey = "otelcol.component.kind"
- ComponentIDKey = "otelcol.component.id"
- PipelineIDKey = "otelcol.pipeline.id"
- SignalKey = "otelcol.signal"
- SignalOutputKey = "otelcol.signal.output"
-)
-
-func RemoveAttributes(attrs attribute.Set, fields ...string) attribute.Set {
- attrs, _ = attribute.NewSetWithFiltered(attrs.ToSlice(), func(kv attribute.KeyValue) bool {
- return !slices.Contains(fields, string(kv.Key))
- })
- return attrs
-}
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go
deleted file mode 100644
index bf499ddb03e..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/logger_zap.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute"
-
-import (
- "reflect"
- "time"
-
- "go.opentelemetry.io/contrib/bridges/otelzap"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/log"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-// Interface for Zap cores that support setting and resetting a set of component attributes.
-//
-// There are three wrappers that implement this interface:
-//
-// - [NewConsoleCoreWithAttributes] injects component attributes as Zap fields.
-//
-// This is used for the Collector's console output.
-//
-// - [NewOTelTeeCoreWithAttributes] copies logs to a [log.LoggerProvider] using [otelzap]. For the
-// copied logs, component attributes are injected as instrumentation scope attributes.
-//
-// This is used when service::telemetry::logs::processors is configured.
-//
-// - [NewWrapperCoreWithAttributes] applies a wrapper function to a core, similar to
-// [zap.WrapCore]. It allows setting component attributes on the inner core and reapplying the
-// wrapper function when needed.
-//
-// This is used when adding [zapcore.NewSamplerWithOptions] to our logger stack.
-type coreWithAttributes interface {
- zapcore.Core
- withAttributeSet(attribute.Set) zapcore.Core
-}
-
-// Tries setting the component attribute set for a Zap core.
-//
-// Does nothing if the core does not implement [coreWithAttributes].
-func tryWithAttributeSet(c zapcore.Core, attrs attribute.Set) zapcore.Core {
- if cwa, ok := c.(coreWithAttributes); ok {
- return cwa.withAttributeSet(attrs)
- }
- zap.New(c).Debug("Logger core does not support injecting component attributes")
- return c
-}
-
-type consoleCoreWithAttributes struct {
- zapcore.Core
- from zapcore.Core
-}
-
-var _ coreWithAttributes = (*consoleCoreWithAttributes)(nil)
-
-// NewConsoleCoreWithAttributes wraps a Zap core in order to inject component attributes as Zap fields.
-//
-// This is used for the Collector's console output.
-func NewConsoleCoreWithAttributes(c zapcore.Core, attrs attribute.Set) zapcore.Core {
- var fields []zap.Field
- for _, kv := range attrs.ToSlice() {
- fields = append(fields, zap.String(string(kv.Key), kv.Value.AsString()))
- }
- return &consoleCoreWithAttributes{
- Core: c.With(fields),
- from: c,
- }
-}
-
-func (ccwa *consoleCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core {
- return NewConsoleCoreWithAttributes(ccwa.from, attrs)
-}
-
-type otelTeeCoreWithAttributes struct {
- zapcore.Core
- consoleCore zapcore.Core
- lp log.LoggerProvider
- scopeName string
- level zapcore.Level
-}
-
-var _ coreWithAttributes = (*otelTeeCoreWithAttributes)(nil)
-
-// NewOTelTeeCoreWithAttributes wraps a Zap core in order to copy logs to a [log.LoggerProvider] using [otelzap]. For the copied
-// logs, component attributes are injected as instrumentation scope attributes.
-//
-// This is used when service::telemetry::logs::processors is configured.
-func NewOTelTeeCoreWithAttributes(consoleCore zapcore.Core, lp log.LoggerProvider, scopeName string, level zapcore.Level, attrs attribute.Set) zapcore.Core {
- otelCore, err := zapcore.NewIncreaseLevelCore(otelzap.NewCore(
- scopeName,
- otelzap.WithLoggerProvider(lp),
- otelzap.WithAttributes(attrs.ToSlice()...),
- ), zap.NewAtomicLevelAt(level))
- if err != nil {
- panic(err)
- }
-
- return &otelTeeCoreWithAttributes{
- Core: zapcore.NewTee(consoleCore, otelCore),
- consoleCore: consoleCore,
- lp: lp,
- scopeName: scopeName,
- level: level,
- }
-}
-
-func (ocwa *otelTeeCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core {
- return NewOTelTeeCoreWithAttributes(tryWithAttributeSet(ocwa.consoleCore, attrs), ocwa.lp, ocwa.scopeName, ocwa.level, attrs)
-}
-
-type samplerCoreWithAttributes struct {
- zapcore.Core
- from zapcore.Core
-}
-
-var _ coreWithAttributes = (*samplerCoreWithAttributes)(nil)
-
-func NewSamplerCoreWithAttributes(inner zapcore.Core, tick time.Duration, first int, thereafter int) zapcore.Core {
- return &samplerCoreWithAttributes{
- Core: zapcore.NewSamplerWithOptions(inner, tick, first, thereafter),
- from: inner,
- }
-}
-
-func checkSamplerType(ty reflect.Type) bool {
- if ty.Kind() != reflect.Pointer {
- return false
- }
- ty = ty.Elem()
- if ty.Kind() != reflect.Struct {
- return false
- }
- innerField, ok := ty.FieldByName("Core")
- if !ok {
- return false
- }
- return reflect.TypeFor[zapcore.Core]().AssignableTo(innerField.Type)
-}
-
-func (ssc *samplerCoreWithAttributes) withAttributeSet(attrs attribute.Set) zapcore.Core {
- newInner := tryWithAttributeSet(ssc.from, attrs)
-
- // Relevant Zap code: https://github.com/uber-go/zap/blob/fcf8ee58669e358bbd6460bef5c2ee7a53c0803a/zapcore/sampler.go#L168
- // We need to create a new Zap sampler core with the same settings but with a new inner core,
- // while reusing the very RAM-intensive `counters` data structure.
- // The `With` method does something similar, but it only replaces pre-set fields, not the Core.
- // However, we can use `reflect` to accomplish this.
- // This hack can be removed once Zap supports this use case.
- // Tracking issue: https://github.com/uber-go/zap/issues/1498
- val1 := reflect.ValueOf(ssc.Core)
- if !checkSamplerType(val1.Type()) { // To avoid a more esoteric panic message below
- panic("Unexpected Zap sampler type; see github.com/open-telemetry/opentelemetry-collector/issues/13014")
- }
- val2 := reflect.New(val1.Type().Elem()) // core2 := new(sampler)
- val2.Elem().Set(val1.Elem()) // *core2 = *core1
- val2.Elem().FieldByName("Core").Set(reflect.ValueOf(newInner)) // core2.Core = newInner
- newSampler := val2.Interface().(zapcore.Core)
-
- return samplerCoreWithAttributes{
- Core: newSampler,
- from: newInner,
- }
-}
-
-// ZapLoggerWithAttributes creates a Zap Logger with a new set of injected component attributes.
-func ZapLoggerWithAttributes(logger *zap.Logger, attrs attribute.Set) *zap.Logger {
- return logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
- return tryWithAttributeSet(c, attrs)
- }))
-}
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go
deleted file mode 100644
index d17732dde58..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/meter_provider.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute"
-
-import (
- "slices"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
-)
-
-type meterProviderWithAttributes struct {
- metric.MeterProvider
- attrs []attribute.KeyValue
-}
-
-// MeterProviderWithAttributes creates a MeterProvider with a new set of injected instrumentation scope attributes.
-func MeterProviderWithAttributes(mp metric.MeterProvider, attrs attribute.Set) metric.MeterProvider {
- if mpwa, ok := mp.(meterProviderWithAttributes); ok {
- mp = mpwa.MeterProvider
- }
- return meterProviderWithAttributes{
- MeterProvider: mp,
- attrs: attrs.ToSlice(),
- }
-}
-
-func (mpwa meterProviderWithAttributes) Meter(name string, opts ...metric.MeterOption) metric.Meter {
- conf := metric.NewMeterConfig(opts...)
- attrSet := conf.InstrumentationAttributes()
- // prepend our attributes so they can be overwritten
- newAttrs := append(slices.Clone(mpwa.attrs), attrSet.ToSlice()...)
- // append our attribute set option to overwrite the old one
- opts = append(opts, metric.WithInstrumentationAttributes(newAttrs...))
- return mpwa.MeterProvider.Meter(name, opts...)
-}
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go
deleted file mode 100644
index de77ab0eeda..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/componentattribute/tracer_provider.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package componentattribute // import "go.opentelemetry.io/collector/internal/telemetry/componentattribute"
-
-import (
- "slices"
-
- "go.opentelemetry.io/otel/attribute"
- sdkTrace "go.opentelemetry.io/otel/sdk/trace"
- "go.opentelemetry.io/otel/trace"
-)
-
-type tracerProviderWithAttributes struct {
- trace.TracerProvider
- attrs []attribute.KeyValue
-}
-
-// Necessary for components that use SDK-only methods, such as zpagesextension
-type tracerProviderWithAttributesSdk struct {
- *sdkTrace.TracerProvider
- attrs []attribute.KeyValue
-}
-
-// TracerProviderWithAttributes creates a TracerProvider with a new set of injected instrumentation scope attributes.
-func TracerProviderWithAttributes(tp trace.TracerProvider, attrs attribute.Set) trace.TracerProvider {
- if tpwa, ok := tp.(tracerProviderWithAttributesSdk); ok {
- tp = tpwa.TracerProvider
- } else if tpwa, ok := tp.(tracerProviderWithAttributes); ok {
- tp = tpwa.TracerProvider
- }
- if tpSdk, ok := tp.(*sdkTrace.TracerProvider); ok {
- return tracerProviderWithAttributesSdk{
- TracerProvider: tpSdk,
- attrs: attrs.ToSlice(),
- }
- }
- return tracerProviderWithAttributes{
- TracerProvider: tp,
- attrs: attrs.ToSlice(),
- }
-}
-
-func tracerWithAttributes(tp trace.TracerProvider, attrs []attribute.KeyValue, name string, opts ...trace.TracerOption) trace.Tracer {
- conf := trace.NewTracerConfig(opts...)
- attrSet := conf.InstrumentationAttributes()
- // prepend our attributes so they can be overwritten
- newAttrs := append(slices.Clone(attrs), attrSet.ToSlice()...)
- // append our attribute set option to overwrite the old one
- opts = append(opts, trace.WithInstrumentationAttributes(newAttrs...))
- return tp.Tracer(name, opts...)
-}
-
-func (tpwa tracerProviderWithAttributes) Tracer(name string, options ...trace.TracerOption) trace.Tracer {
- return tracerWithAttributes(tpwa.TracerProvider, tpwa.attrs, name, options...)
-}
-
-func (tpwa tracerProviderWithAttributesSdk) Tracer(name string, options ...trace.TracerOption) trace.Tracer {
- return tracerWithAttributes(tpwa.TracerProvider, tpwa.attrs, name, options...)
-}
diff --git a/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go
deleted file mode 100644
index 5ebe2d55dde..00000000000
--- a/vendor/go.opentelemetry.io/collector/internal/telemetry/telemetry.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package telemetry // import "go.opentelemetry.io/collector/internal/telemetry"
-
-import (
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
-
- "go.opentelemetry.io/collector/featuregate"
- "go.opentelemetry.io/collector/internal/telemetry/componentattribute"
- "go.opentelemetry.io/collector/pdata/pcommon"
-)
-
-var NewPipelineTelemetryGate = featuregate.GlobalRegistry().MustRegister(
- "telemetry.newPipelineTelemetry",
- featuregate.StageAlpha,
- featuregate.WithRegisterFromVersion("v0.123.0"),
- featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/component-universal-telemetry.md"),
- featuregate.WithRegisterDescription("Injects component-identifying scope attributes in internal Collector metrics"),
-)
-
-// IMPORTANT: This struct is reexported as part of the public API of
-// go.opentelemetry.io/collector/component, a stable module.
-// DO NOT MAKE BREAKING CHANGES TO EXPORTED FIELDS.
-type TelemetrySettings struct {
- // Logger that the factory can use during creation and can pass to the created
- // component to be used later as well.
- Logger *zap.Logger
-
- // TracerProvider that the factory can pass to other instrumented third-party libraries.
- TracerProvider trace.TracerProvider
-
- // MeterProvider that the factory can pass to other instrumented third-party libraries.
- MeterProvider metric.MeterProvider
-
- // Resource contains the resource attributes for the collector's telemetry.
- Resource pcommon.Resource
-
- // Extra attributes added to instrumentation scopes
- extraAttributes attribute.Set
-}
-
-// The publicization of this API is tracked in https://github.com/open-telemetry/opentelemetry-collector/issues/12405
-
-func WithoutAttributes(ts TelemetrySettings, fields ...string) TelemetrySettings {
- return WithAttributeSet(ts, componentattribute.RemoveAttributes(ts.extraAttributes, fields...))
-}
-
-func WithAttributeSet(ts TelemetrySettings, attrs attribute.Set) TelemetrySettings {
- ts.extraAttributes = attrs
- ts.Logger = componentattribute.ZapLoggerWithAttributes(ts.Logger, ts.extraAttributes)
- ts.TracerProvider = componentattribute.TracerProviderWithAttributes(ts.TracerProvider, ts.extraAttributes)
- if NewPipelineTelemetryGate.IsEnabled() {
- ts.MeterProvider = componentattribute.MeterProviderWithAttributes(ts.MeterProvider, ts.extraAttributes)
- }
- return ts
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go
new file mode 100644
index 00000000000..dbf368d3d11
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/bytesid.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+// Called by Protobuf JSON deserialization.
+func unmarshalJSON(dst []byte, iter *json.Iterator) {
+ src := iter.ReadStringAsSlice()
+ if len(src) == 0 {
+ return
+ }
+
+ if len(dst) != hex.DecodedLen(len(src)) {
+ iter.ReportError("ID.UnmarshalJSONIter", "length mismatch")
+ return
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ iter.ReportError("ID.UnmarshalJSONIter", err.Error())
+ return
+ }
+}
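
Editor's note: the replacement helper validates length before decoding; an n-byte ID must arrive as exactly hex.EncodedLen(n) hex characters. A sketch extracting that core check from the pdata iterator plumbing, with plain errors standing in for iter.ReportError:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// decodeID mirrors the length-then-decode logic of the new helper.
func decodeID(dst, src []byte) error {
	if len(src) == 0 {
		return nil // an absent ID leaves dst zeroed
	}
	if len(dst) != hex.DecodedLen(len(src)) {
		return fmt.Errorf("length mismatch: want %d hex chars, got %d",
			hex.EncodedLen(len(dst)), len(src))
	}
	_, err := hex.Decode(dst, src)
	return err
}

func main() {
	var traceID [16]byte
	err := decodeID(traceID[:], []byte("0102030405060708090a0b0c0d0e0f10"))
	fmt.Println(traceID, err) // [1 2 3 ... 16] <nil>

	var spanID [8]byte
	fmt.Println(decodeID(spanID[:], []byte("abcd"))) // length mismatch
}
```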
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go
deleted file mode 100644
index ca86912af90..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
-)
-
-// marshalJSON converts trace id into a hex string enclosed in quotes.
-// Called by Protobuf JSON deserialization.
-func marshalJSON(id []byte) ([]byte, error) {
- // Plus 2 quote chars at the start and end.
- hexLen := hex.EncodedLen(len(id)) + 2
-
- b := make([]byte, hexLen)
- hex.Encode(b[1:hexLen-1], id)
- b[0], b[hexLen-1] = '"', '"'
-
- return b, nil
-}
-
-// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-// Called by Protobuf JSON deserialization.
-func unmarshalJSON(dst []byte, src []byte) error {
- if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
- src = src[1 : l-1]
- }
- nLen := len(src)
- if nLen == 0 {
- return nil
- }
-
- if len(dst) != hex.DecodedLen(nLen) {
- return errors.New("invalid length for ID")
- }
-
- _, err := hex.Decode(dst, src)
- if err != nil {
- return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
deleted file mode 100644
index 5b4e6f53ceb..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "errors"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const profileIDSize = 16
-
-var (
- errMarshalProfileID = errors.New("marshal: invalid buffer length for ProfileID")
- errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
-)
-
-// ProfileID is a custom data type that is used for all profile_id fields in OTLP
-// Protobuf messages.
-type ProfileID [profileIDSize]byte
-
-var _ proto.Sizer = (*SpanID)(nil)
-
-// Size returns the size of the data to serialize.
-func (tid ProfileID) Size() int {
- if tid.IsEmpty() {
- return 0
- }
- return profileIDSize
-}
-
-// IsEmpty returns true if id contains at leas one non-zero byte.
-func (tid ProfileID) IsEmpty() bool {
- return tid == [profileIDSize]byte{}
-}
-
-// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization.
-func (tid ProfileID) MarshalTo(data []byte) (n int, err error) {
- if tid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < profileIDSize {
- return 0, errMarshalProfileID
- }
-
- return copy(data, tid[:]), nil
-}
-
-// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization.
-func (tid *ProfileID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *tid = [profileIDSize]byte{}
- return nil
- }
-
- if len(data) != profileIDSize {
- return errUnmarshalProfileID
- }
-
- copy(tid[:], data)
- return nil
-}
-
-// MarshalJSON converts profile id into a hex string enclosed in quotes.
-func (tid ProfileID) MarshalJSON() ([]byte, error) {
- if tid.IsEmpty() {
- return []byte(`""`), nil
- }
- return marshalJSON(tid[:])
-}
-
-// UnmarshalJSON inflates profile id from hex string, possibly enclosed in quotes.
-// Called by Protobuf JSON deserialization.
-func (tid *ProfileID) UnmarshalJSON(data []byte) error {
- *tid = [profileIDSize]byte{}
- return unmarshalJSON(tid[:], data)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go
deleted file mode 100644
index 53b69b07267..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportLogsServiceRequest struct {
- // An array of ResourceLogs.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
-}
-
-func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} }
-func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsServiceRequest) ProtoMessage() {}
-func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{0}
-}
-func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src)
-}
-func (m *ExportLogsServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs {
- if m != nil {
- return m.ResourceLogs
- }
- return nil
-}
-
-type ExportLogsServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_ = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} }
-func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsServiceResponse) ProtoMessage() {}
-func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{1}
-}
-func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src)
-}
-func (m *ExportLogsServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo
-
-func (m *ExportLogsServiceResponse) GetPartialSuccess() ExportLogsPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportLogsPartialSuccess{}
-}
-
-type ExportLogsPartialSuccess struct {
- // The number of rejected log records.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportLogsPartialSuccess) Reset() { *m = ExportLogsPartialSuccess{} }
-func (m *ExportLogsPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportLogsPartialSuccess) ProtoMessage() {}
-func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_8e3bf87aaa43acd4, []int{2}
-}
-func (m *ExportLogsPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportLogsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportLogsPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportLogsPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportLogsPartialSuccess.Merge(m, src)
-}
-func (m *ExportLogsPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportLogsPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportLogsPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportLogsPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 {
- if m != nil {
- return m.RejectedLogRecords
- }
- return 0
-}
-
-func (m *ExportLogsPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest")
- proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse")
- proto.RegisterType((*ExportLogsPartialSuccess)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4)
-}
-
-var fileDescriptor_8e3bf87aaa43acd4 = []byte{
- // 430 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xc1, 0x6e, 0x13, 0x31,
- 0x10, 0x86, 0xd7, 0x2d, 0xaa, 0x84, 0xd3, 0x02, 0xb2, 0x7a, 0x08, 0x39, 0x2c, 0x55, 0x50, 0x51,
- 0xb8, 0x78, 0x49, 0xb8, 0x70, 0x03, 0x05, 0x71, 0x0b, 0x10, 0x6d, 0x11, 0x07, 0x2e, 0xab, 0xc5,
- 0x19, 0x59, 0x5b, 0x6d, 0x77, 0xdc, 0xb1, 0x13, 0xc1, 0x33, 0x20, 0x24, 0x5e, 0x80, 0x17, 0xe0,
- 0x49, 0x7a, 0xe0, 0xd0, 0x23, 0x27, 0x84, 0x92, 0x17, 0x41, 0x5e, 0x97, 0xb0, 0x0b, 0x39, 0x04,
- 0x4e, 0xbb, 0x1e, 0xcf, 0xff, 0xfd, 0xff, 0xd8, 0x32, 0x7f, 0x84, 0x06, 0x2a, 0x07, 0x25, 0x9c,
- 0x81, 0xa3, 0xf7, 0x89, 0x21, 0x74, 0x98, 0x28, 0x2c, 0x4b, 0x50, 0x0e, 0x29, 0x29, 0x51, 0xdb,
- 0x64, 0x31, 0xac, 0xbf, 0x99, 0x05, 0x5a, 0x14, 0x0a, 0x64, 0xdd, 0x24, 0x8e, 0x5b, 0xca, 0x50,
- 0x94, 0x6b, 0xa5, 0xf4, 0x0a, 0xb9, 0x18, 0xf6, 0x0e, 0x35, 0x6a, 0x0c, 0x58, 0xff, 0x17, 0xfa,
- 0x7a, 0xf7, 0x36, 0xd9, 0x36, 0xcd, 0x42, 0x5f, 0xff, 0x94, 0x77, 0x9f, 0xbd, 0x33, 0x48, 0x6e,
- 0x82, 0xda, 0x9e, 0x04, 0xff, 0x14, 0xce, 0xe7, 0x60, 0x9d, 0x78, 0xc1, 0x0f, 0x08, 0x2c, 0xce,
- 0x49, 0x41, 0xe6, 0x25, 0x5d, 0x76, 0xb4, 0x3b, 0xe8, 0x8c, 0xee, 0xcb, 0x4d, 0xc1, 0xae, 0xe2,
- 0xc8, 0xf4, 0x4a, 0xe1, 0x79, 0xe9, 0x3e, 0x35, 0x56, 0xfd, 0x0f, 0x8c, 0xdf, 0xde, 0x60, 0x66,
- 0x0d, 0x56, 0x16, 0x44, 0xc5, 0x6f, 0x9a, 0x9c, 0x5c, 0x91, 0x97, 0x99, 0x9d, 0x2b, 0x05, 0xd6,
- 0xfb, 0xb1, 0x41, 0x67, 0xf4, 0x58, 0x6e, 0x75, 0x10, 0xf2, 0x37, 0x7a, 0x1a, 0x38, 0x27, 0x01,
- 0x33, 0xbe, 0x76, 0xf1, 0xfd, 0x4e, 0x94, 0xde, 0x30, 0xad, 0x6a, 0xff, 0xbc, 0x39, 0x79, 0x5b,
- 0x21, 0x1e, 0xf0, 0x43, 0x82, 0x53, 0x50, 0x0e, 0x66, 0x7e, 0xf2, 0x8c, 0x40, 0x21, 0xcd, 0x42,
- 0xa0, 0xdd, 0x54, 0xfc, 0xda, 0x9b, 0xa0, 0x4e, 0xc3, 0x8e, 0xb8, 0xcb, 0x0f, 0x80, 0x08, 0x29,
- 0x3b, 0x03, 0x6b, 0x73, 0x0d, 0xdd, 0x9d, 0x23, 0x36, 0xb8, 0x9e, 0xee, 0xd7, 0xc5, 0xe7, 0xa1,
- 0x36, 0xfa, 0xcc, 0x78, 0xa7, 0x31, 0xba, 0xf8, 0xc8, 0xf8, 0x5e, 0xc8, 0x20, 0xfe, 0x7d, 0xc8,
- 0xf6, 0x65, 0xf5, 0x9e, 0xfc, 0x3f, 0x20, 0x5c, 0x40, 0x3f, 0x1a, 0x7f, 0x65, 0x17, 0xcb, 0x98,
- 0x5d, 0x2e, 0x63, 0xf6, 0x63, 0x19, 0xb3, 0x4f, 0xab, 0x38, 0xba, 0x5c, 0xc5, 0xd1, 0xb7, 0x55,
- 0x1c, 0xf1, 0x41, 0x81, 0xdb, 0x19, 0x8c, 0x6f, 0x35, 0xd8, 0x53, 0xdf, 0x33, 0x65, 0x6f, 0x26,
- 0xfa, 0x4f, 0x75, 0xd1, 0x7c, 0x04, 0x66, 0x96, 0xbb, 0x3c, 0x29, 0x2a, 0x07, 0x54, 0xe5, 0x65,
- 0x52, 0xaf, 0x6a, 0xbc, 0x86, 0xea, 0xef, 0xb7, 0xf2, 0x65, 0xe7, 0xf8, 0xa5, 0x81, 0xea, 0xd5,
- 0x9a, 0x55, 0xbb, 0xc8, 0xa7, 0xeb, 0x24, 0x3e, 0x80, 0x7c, 0x3d, 0x7c, 0xbb, 0x57, 0x33, 0x1e,
- 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xaf, 0x6c, 0x7d, 0x83, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// LogsServiceClient is the client API for LogsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LogsServiceClient interface {
- Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error)
-}
-
-type logsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient {
- return &logsServiceClient{cc}
-}
-
-func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) {
- out := new(ExportLogsServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// LogsServiceServer is the server API for LogsService service.
-type LogsServiceServer interface {
- Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error)
-}
-
-// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedLogsServiceServer struct {
-}
-
-func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
- s.RegisterService(&_LogsService_serviceDesc, srv)
-}
-
-func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportLogsServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LogsServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _LogsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
- HandlerType: (*LogsServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _LogsService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
-}
-
-func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportLogsPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportLogsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportLogsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintLogsService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedLogRecords != 0 {
- i = encodeVarintLogsService(dAtA, i, uint64(m.RejectedLogRecords))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int {
- offset -= sovLogsService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportLogsServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for _, e := range m.ResourceLogs {
- l = e.Size()
- n += 1 + l + sovLogsService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportLogsServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovLogsService(uint64(l))
- return n
-}
-
-func (m *ExportLogsPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedLogRecords != 0 {
- n += 1 + sovLogsService(uint64(m.RejectedLogRecords))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovLogsService(uint64(l))
- }
- return n
-}
-
-func sovLogsService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozLogsService(x uint64) (n int) {
- return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{})
- if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportLogsPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportLogsPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportLogsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
- }
- m.RejectedLogRecords = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedLogRecords |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogsService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipLogsService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthLogsService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupLogsService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLogsService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group")
-)
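
The deleted logs-service marshalers above all lean on two small helpers: sovLogsService computes how many bytes a value occupies as a base-128 varint, and encodeVarintLogsService writes the varint so it ends just before a given offset, because the generated code fills its buffer back to front. A minimal standalone sketch of the same pair (helper names here are illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovLogsService: the byte width of x as a
// protobuf base-128 varint; the x|1 guard makes zero report one byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors the generated encodeVarintLogsService: it writes v
// so the varint ends just before offset and returns the new start index,
// matching the back-to-front layout used by MarshalToSizedBuffer.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits plus continuation bit
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 8)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("300 -> % x, width %d\n", buf[start:], sov(300)) // 300 -> ac 02, width 2
}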
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go
deleted file mode 100644
index bfdc29395a3..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportMetricsServiceRequest struct {
- // An array of ResourceMetrics.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
-}
-
-func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
-func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceRequest) ProtoMessage() {}
-func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{0}
-}
-func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
-}
-func (m *ExportMetricsServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
- if m != nil {
- return m.ResourceMetrics
- }
- return nil
-}
-
-type ExportMetricsServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_<signal>` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_<signal>` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_<signal> = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
-func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceResponse) ProtoMessage() {}
-func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{1}
-}
-func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
-}
-func (m *ExportMetricsServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceResponse) GetPartialSuccess() ExportMetricsPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportMetricsPartialSuccess{}
-}
-
-type ExportMetricsPartialSuccess struct {
- // The number of rejected data points.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportMetricsPartialSuccess) Reset() { *m = ExportMetricsPartialSuccess{} }
-func (m *ExportMetricsPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsPartialSuccess) ProtoMessage() {}
-func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_75fb6015e6e64798, []int{2}
-}
-func (m *ExportMetricsPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportMetricsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportMetricsPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportMetricsPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsPartialSuccess.Merge(m, src)
-}
-func (m *ExportMetricsPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportMetricsPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
- if m != nil {
- return m.RejectedDataPoints
- }
- return 0
-}
-
-func (m *ExportMetricsPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest")
- proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse")
- proto.RegisterType((*ExportMetricsPartialSuccess)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798)
-}
-
-var fileDescriptor_75fb6015e6e64798 = []byte{
- // 427 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x8e, 0xd3, 0x30,
- 0x18, 0x8f, 0xef, 0xd0, 0x49, 0xf8, 0xe0, 0x0e, 0x99, 0x1b, 0x4e, 0x05, 0x85, 0x53, 0x58, 0x22,
- 0x81, 0x1c, 0x5a, 0x76, 0x86, 0xc2, 0xb1, 0x9d, 0x1a, 0xa5, 0x88, 0xa1, 0x4b, 0x64, 0xdc, 0x4f,
- 0x51, 0x50, 0x1a, 0x1b, 0xdb, 0xad, 0xe8, 0x5b, 0x30, 0xb0, 0xf0, 0x0a, 0x88, 0x07, 0xe9, 0xd8,
- 0xb1, 0x13, 0x42, 0xed, 0x8b, 0xa0, 0xc4, 0x69, 0xc1, 0x25, 0x43, 0xc5, 0x6d, 0xce, 0xcf, 0xdf,
- 0xef, 0x4f, 0x7e, 0xd6, 0x87, 0x5f, 0x09, 0x09, 0xa5, 0x81, 0x02, 0x26, 0x60, 0xd4, 0x3c, 0x92,
- 0x4a, 0x18, 0x11, 0x71, 0x51, 0x14, 0xc0, 0x8d, 0x50, 0x51, 0x85, 0xe6, 0x5c, 0x47, 0xb3, 0xee,
- 0xf6, 0x98, 0x6a, 0x50, 0xb3, 0x9c, 0x03, 0xad, 0x47, 0x49, 0xe8, 0xf0, 0x2d, 0x48, 0x77, 0x7c,
- 0xda, 0x90, 0xe8, 0xac, 0xdb, 0xb9, 0xc8, 0x44, 0x26, 0xac, 0x7e, 0x75, 0xb2, 0xa3, 0x9d, 0xe7,
- 0x6d, 0xfe, 0xff, 0xba, 0xda, 0xe9, 0x60, 0x8e, 0x1f, 0x5d, 0x7f, 0x96, 0x42, 0x99, 0x1b, 0x0b,
- 0x0f, 0x6d, 0x96, 0x04, 0x3e, 0x4d, 0x41, 0x1b, 0x32, 0xc2, 0x0f, 0x14, 0x68, 0x31, 0x55, 0x1c,
- 0xd2, 0x86, 0x78, 0x89, 0xae, 0x8e, 0xc3, 0xd3, 0x5e, 0x44, 0xdb, 0x72, 0xfe, 0x49, 0x47, 0x93,
- 0x86, 0xd7, 0x08, 0x27, 0xe7, 0xca, 0x05, 0x82, 0xaf, 0x08, 0x3f, 0x6e, 0xf7, 0xd6, 0x52, 0x94,
- 0x1a, 0x88, 0xc1, 0xe7, 0x92, 0x29, 0x93, 0xb3, 0x22, 0xd5, 0x53, 0xce, 0x41, 0x57, 0xde, 0x28,
- 0x3c, 0xed, 0x5d, 0xd3, 0x43, 0x3b, 0xa2, 0x8e, 0x41, 0x6c, 0xd5, 0x86, 0x56, 0xac, 0x7f, 0x67,
- 0xf1, 0xf3, 0x89, 0x97, 0x9c, 0x49, 0x07, 0x0d, 0xcc, 0x5e, 0x23, 0x2e, 0x89, 0xbc, 0xc0, 0x17,
- 0x0a, 0x3e, 0x02, 0x37, 0x30, 0x4e, 0xc7, 0xcc, 0xb0, 0x54, 0x8a, 0xbc, 0x34, 0x36, 0xd9, 0x71,
- 0x42, 0xb6, 0x77, 0x6f, 0x98, 0x61, 0x71, 0x7d, 0x43, 0x9e, 0xe2, 0xfb, 0xa0, 0x94, 0x50, 0xe9,
- 0x04, 0xb4, 0x66, 0x19, 0x5c, 0x1e, 0x5d, 0xa1, 0xf0, 0x6e, 0x72, 0xaf, 0x06, 0x6f, 0x2c, 0xd6,
- 0xfb, 0x81, 0xf0, 0x99, 0x5b, 0x03, 0xf9, 0x86, 0xf0, 0x89, 0x4d, 0x42, 0xfe, 0xf7, 0x87, 0xdd,
- 0xd7, 0xec, 0xbc, 0xbd, 0xad, 0x8c, 0x7d, 0x98, 0xc0, 0xeb, 0xaf, 0xd0, 0x62, 0xed, 0xa3, 0xe5,
- 0xda, 0x47, 0xbf, 0xd6, 0x3e, 0xfa, 0xb2, 0xf1, 0xbd, 0xe5, 0xc6, 0xf7, 0x56, 0x1b, 0xdf, 0xc3,
- 0xcf, 0x72, 0x71, 0xb0, 0x4d, 0xff, 0xa1, 0xeb, 0x10, 0x57, 0x93, 0x31, 0x1a, 0x0d, 0xb2, 0x7d,
- 0x8d, 0xfc, 0xef, 0x1d, 0x92, 0x55, 0xf1, 0x51, 0x5e, 0x1a, 0x50, 0x25, 0x2b, 0xa2, 0xfa, 0xab,
- 0x36, 0xc9, 0xa0, 0x6c, 0x5d, 0xb5, 0xef, 0x47, 0xe1, 0x40, 0x42, 0xf9, 0x6e, 0x27, 0x57, 0x1b,
- 0xd1, 0xd7, 0xbb, 0x48, 0x4d, 0x0c, 0xfa, 0xbe, 0xfb, 0xe1, 0xa4, 0x56, 0x7a, 0xf9, 0x3b, 0x00,
- 0x00, 0xff, 0xff, 0x47, 0xf2, 0x5f, 0x42, 0xc8, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// MetricsServiceClient is the client API for MetricsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MetricsServiceClient interface {
- Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
-}
-
-type metricsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
- return &metricsServiceClient{cc}
-}
-
-func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
- out := new(ExportMetricsServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MetricsServiceServer is the server API for MetricsService service.
-type MetricsServiceServer interface {
- Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
-}
-
-// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedMetricsServiceServer struct {
-}
-
-func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
- s.RegisterService(&_MetricsService_serviceDesc, srv)
-}
-
-func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportMetricsServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MetricsServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _MetricsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
- HandlerType: (*MetricsServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _MetricsService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
-}
-
-func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetricsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetricsService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportMetricsPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportMetricsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportMetricsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintMetricsService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedDataPoints != 0 {
- i = encodeVarintMetricsService(dAtA, i, uint64(m.RejectedDataPoints))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int {
- offset -= sovMetricsService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportMetricsServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for _, e := range m.ResourceMetrics {
- l = e.Size()
- n += 1 + l + sovMetricsService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportMetricsServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovMetricsService(uint64(l))
- return n
-}
-
-func (m *ExportMetricsPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedDataPoints != 0 {
- n += 1 + sovMetricsService(uint64(m.RejectedDataPoints))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovMetricsService(uint64(l))
- }
- return n
-}
-
-func sovMetricsService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMetricsService(x uint64) (n int) {
- return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{})
- if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportMetricsPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportMetricsPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportMetricsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
- }
- m.RejectedDataPoints = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedDataPoints |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetricsService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetricsService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetricsService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetricsService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMetricsService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetricsService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMetricsService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMetricsService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetricsService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group")
-)
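
The metrics-service file deleted above follows the same skeleton. Two details worth isolating: every Unmarshal loop splits each decoded key into a field number (wire >> 3) and a wire type (wire & 0x7), and sozMetricsService applies the zigzag transform protobuf uses for sint fields so small negative values stay short. A sketch under those definitions (function names are mine, not from the vendored file):

package main

import "fmt"

// splitTag decodes a protobuf field key the way the generated Unmarshal
// loops do: the low three bits carry the wire type, the rest the field number.
func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

// zigzag is the transform inside the generated sozMetricsService: it maps
// signed values onto unsigned ones (0, -1, 1, -2 -> 0, 1, 2, 3) before
// varint encoding, so values near zero encode in a single byte.
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	f, w := splitTag(0x12) // the tag byte written for error_message above
	fmt.Println(f, w)      // 2 2: field 2, length-delimited
	fmt.Println(zigzag(-1), zigzag(1), zigzag(-2)) // 1 2 3
}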
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go
deleted file mode 100644
index 80eae38f558..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go
+++ /dev/null
@@ -1,897 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto
-
-package v1development
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportProfilesServiceRequest struct {
- // An array of ResourceProfiles.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
- // The reference table containing all data shared by profiles across the message being sent.
- Dictionary v1development.ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
-}
-
-func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} }
-func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesServiceRequest) ProtoMessage() {}
-func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{0}
-}
-func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesServiceRequest.Merge(m, src)
-}
-func (m *ExportProfilesServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo
-
-func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles {
- if m != nil {
- return m.ResourceProfiles
- }
- return nil
-}
-
-func (m *ExportProfilesServiceRequest) GetDictionary() v1development.ProfilesDictionary {
- if m != nil {
- return m.Dictionary
- }
- return v1development.ProfilesDictionary{}
-}
-
-type ExportProfilesServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_<signal>` with the number of items it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_<signal>` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_<signal> = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportProfilesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesServiceResponse{} }
-func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesServiceResponse) ProtoMessage() {}
-func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{1}
-}
-func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesServiceResponse.Merge(m, src)
-}
-func (m *ExportProfilesServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesServiceResponse proto.InternalMessageInfo
-
-func (m *ExportProfilesServiceResponse) GetPartialSuccess() ExportProfilesPartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportProfilesPartialSuccess{}
-}
-
-type ExportProfilesPartialSuccess struct {
- // The number of rejected profiles.
- //
- // A `rejected_` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedProfiles int64 `protobuf:"varint,1,opt,name=rejected_profiles,json=rejectedProfiles,proto3" json:"rejected_profiles,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPartialSuccess{} }
-func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportProfilesPartialSuccess) ProtoMessage() {}
-func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_ad3943ce836e7720, []int{2}
-}
-func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportProfilesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportProfilesPartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportProfilesPartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportProfilesPartialSuccess.Merge(m, src)
-}
-func (m *ExportProfilesPartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportProfilesPartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportProfilesPartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportProfilesPartialSuccess proto.InternalMessageInfo
-
-func (m *ExportProfilesPartialSuccess) GetRejectedProfiles() int64 {
- if m != nil {
- return m.RejectedProfiles
- }
- return 0
-}
-
-func (m *ExportProfilesPartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest")
- proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse")
- proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720)
-}
-
-var fileDescriptor_ad3943ce836e7720 = []byte{
- // 467 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x8a, 0xd3, 0x40,
- 0x18, 0xc7, 0x33, 0xbb, 0xb2, 0xe0, 0xac, 0xba, 0x1a, 0xf6, 0xb0, 0x14, 0x8d, 0x4b, 0xbc, 0x14,
- 0x84, 0x09, 0x5b, 0x17, 0x44, 0x10, 0x0f, 0x75, 0x3d, 0x89, 0x18, 0x52, 0xf1, 0xa0, 0x87, 0x10,
- 0x27, 0x9f, 0x61, 0x24, 0x9d, 0x19, 0x67, 0xa6, 0xc5, 0x1e, 0x7d, 0x03, 0xdf, 0xc1, 0x9b, 0x57,
- 0x1f, 0xc2, 0x1e, 0x7b, 0xf4, 0x24, 0xd2, 0x3e, 0x80, 0x4f, 0x20, 0x48, 0x32, 0x4d, 0x6c, 0x42,
- 0xa5, 0x58, 0x7a, 0xcb, 0x7c, 0xc3, 0xff, 0xf7, 0xff, 0x7f, 0xdf, 0x17, 0x06, 0x3f, 0x15, 0x12,
- 0xb8, 0x81, 0x1c, 0x86, 0x60, 0xd4, 0x24, 0x90, 0x4a, 0x18, 0x11, 0x50, 0x91, 0xe7, 0x40, 0x8d,
- 0x50, 0xc5, 0xf9, 0x2d, 0xcb, 0x41, 0x07, 0xe3, 0xb3, 0x14, 0xc6, 0x90, 0x0b, 0x39, 0x04, 0x6e,
- 0xea, 0x72, 0xac, 0x41, 0x8d, 0x19, 0x05, 0x52, 0xea, 0xdc, 0xf3, 0x06, 0xcc, 0x16, 0x49, 0x0d,
- 0x23, 0x95, 0x8a, 0x34, 0x60, 0x9d, 0xe3, 0x4c, 0x64, 0xc2, 0x1a, 0x17, 0x5f, 0x56, 0xd6, 0x79,
- 0xb0, 0x2e, 0xd8, 0x86, 0x38, 0x56, 0xea, 0xff, 0x42, 0xf8, 0xe6, 0x93, 0x0f, 0x52, 0x28, 0x13,
- 0x2e, 0x2f, 0x06, 0x36, 0x66, 0x04, 0xef, 0x47, 0xa0, 0x8d, 0xcb, 0xf0, 0x0d, 0x05, 0x5a, 0x8c,
- 0x14, 0x85, 0xb8, 0xd2, 0x9e, 0xa0, 0xd3, 0xfd, 0xee, 0x61, 0xef, 0x21, 0x59, 0xd7, 0xc3, 0xfa,
- 0xe4, 0x24, 0x5a, 0x42, 0x2a, 0x9b, 0xe8, 0xba, 0x6a, 0x55, 0xdc, 0x14, 0xe3, 0x94, 0x51, 0xc3,
- 0x04, 0x4f, 0xd4, 0xe4, 0x64, 0xef, 0x14, 0x75, 0x0f, 0x7b, 0x8f, 0xfe, 0xc7, 0xa3, 0x22, 0x5d,
- 0xd4, 0x94, 0xfe, 0xa5, 0xe9, 0x8f, 0xdb, 0x4e, 0xb4, 0xc2, 0xf5, 0x3f, 0x23, 0x7c, 0xeb, 0x1f,
- 0x1d, 0x6b, 0x29, 0xb8, 0x06, 0xf7, 0x23, 0xc2, 0x47, 0x32, 0x51, 0x86, 0x25, 0x79, 0xac, 0x47,
- 0x94, 0x82, 0x2e, 0x3a, 0x2e, 0xd2, 0x44, 0x64, 0x9b, 0xad, 0x91, 0xa6, 0x5d, 0x68, 0xd1, 0x03,
- 0x4b, 0x5e, 0x26, 0xbc, 0x26, 0x1b, 0x55, 0x5f, 0xb6, 0xd7, 0xd2, 0x54, 0xb9, 0x77, 0x8b, 0xb5,
- 0xbc, 0x03, 0x6a, 0x20, 0x5d, 0x5d, 0x0b, 0xea, 0xee, 0x17, 0x83, 0xb5, 0x17, 0xf5, 0x60, 0xef,
- 0xe0, 0xab, 0xa0, 0x94, 0x50, 0xf1, 0x10, 0xb4, 0x4e, 0x32, 0x28, 0x67, 0x7b, 0x39, 0xba, 0x52,
- 0x16, 0x9f, 0xd9, 0x5a, 0xef, 0x1b, 0xc2, 0x47, 0xad, 0x89, 0xb8, 0x5f, 0x11, 0x3e, 0xb0, 0x31,
- 0xdc, 0x9d, 0xb4, 0xde, 0xfc, 0xb7, 0x3a, 0x83, 0x9d, 0x32, 0xed, 0xf6, 0x7c, 0xa7, 0xff, 0x1b,
- 0x4d, 0xe7, 0x1e, 0x9a, 0xcd, 0x3d, 0xf4, 0x73, 0xee, 0xa1, 0x4f, 0x0b, 0xcf, 0x99, 0x2d, 0x3c,
- 0xe7, 0xfb, 0xc2, 0x73, 0xf0, 0x7d, 0x26, 0xb6, 0xf2, 0xec, 0x1f, 0xb7, 0xec, 0xc2, 0x42, 0x16,
- 0xa2, 0x57, 0xaf, 0xb3, 0x36, 0x90, 0x35, 0xde, 0x84, 0x34, 0x31, 0x49, 0xc0, 0xb8, 0x01, 0xc5,
- 0x93, 0x3c, 0x28, 0x4f, 0xa5, 0x63, 0x06, 0x7c, 0xe3, 0xd3, 0xf1, 0x65, 0xef, 0xfc, 0xb9, 0x04,
- 0xfe, 0xa2, 0x46, 0x97, 0xa6, 0xe4, 0x71, 0x9d, 0xb5, 0xca, 0x44, 0x5e, 0x9e, 0x5d, 0xfc, 0x95,
- 0xbd, 0x39, 0x28, 0x1d, 0xee, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb9, 0xb5, 0x6e, 0xb0,
- 0x04, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ProfilesServiceClient is the client API for ProfilesService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ProfilesServiceClient interface {
- Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error)
-}
-
-type profilesServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
- return &profilesServiceClient{cc}
-}
-
-func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) {
- out := new(ExportProfilesServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// ProfilesServiceServer is the server API for ProfilesService service.
-type ProfilesServiceServer interface {
- Export(context.Context, *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error)
-}
-
-// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedProfilesServiceServer struct {
-}
-
-func (*UnimplementedProfilesServiceServer) Export(ctx context.Context, req *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
- s.RegisterService(&_ProfilesService_serviceDesc, srv)
-}
-
-func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportProfilesServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ProfilesServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _ProfilesService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
- HandlerType: (*ProfilesServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _ProfilesService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
-}
-
-func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.ResourceProfiles) > 0 {
- for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportProfilesServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfilesService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportProfilesPartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportProfilesPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportProfilesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintProfilesService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedProfiles != 0 {
- i = encodeVarintProfilesService(dAtA, i, uint64(m.RejectedProfiles))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintProfilesService(dAtA []byte, offset int, v uint64) int {
- offset -= sovProfilesService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ExportProfilesServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceProfiles) > 0 {
- for _, e := range m.ResourceProfiles {
- l = e.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- }
- }
- l = m.Dictionary.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- return n
-}
-
-func (m *ExportProfilesServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovProfilesService(uint64(l))
- return n
-}
-
-func (m *ExportProfilesPartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedProfiles != 0 {
- n += 1 + sovProfilesService(uint64(m.RejectedProfiles))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovProfilesService(uint64(l))
- }
- return n
-}
-
-func sovProfilesService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozProfilesService(x uint64) (n int) {
- return sovProfilesService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{})
- if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportProfilesServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportProfilesPartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportProfilesPartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportProfilesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
- }
- m.RejectedProfiles = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedProfiles |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfilesService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfilesService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfilesService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfilesService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipProfilesService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfilesService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthProfilesService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupProfilesService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthProfilesService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthProfilesService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowProfilesService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupProfilesService = fmt.Errorf("proto: unexpected end of group")
-)
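
A note on the decoding loops that dominate the file removed above: every generated Unmarshal method inlines base-128 varint decoding, then splits the decoded key into a field number (high bits) and a wire type (low 3 bits). A minimal standalone sketch of that pattern, outside this diff (decodeVarint is a hypothetical helper, not part of the vendored code):

package main

import "fmt"

// decodeVarint reads a base-128 varint from b, returning the value and the
// number of bytes consumed. This mirrors the loops inlined at every call
// site in the generated Unmarshal methods above.
func decodeVarint(b []byte) (v uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0 // truncated input
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // continuation bit clear: last byte
			return v, n
		}
	}
	return 0, 0 // overflow: more than 10 bytes
}

func main() {
	v, n := decodeVarint([]byte{0x96, 0x01}) // 0x96 0x01 encodes 150
	fmt.Println(v, n)                        // 150 2
	fmt.Println(0x0a>>3, 0x0a&0x7)           // tag byte 0x0a: field 1, wire type 2
}

The generated code repeats this loop inline rather than calling a shared helper, presumably trading binary size for decode speed.
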
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go
deleted file mode 100644
index 5b547b8a7a9..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go
+++ /dev/null
@@ -1,839 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportTraceServiceRequest struct {
- // An array of ResourceSpans.
- // For data coming from a single resource this array will typically contain one
- // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
- // data from multiple origins typically batch the data before forwarding further and
- // in that case this array will contain multiple elements.
- ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
-}
-
-func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
-func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceRequest) ProtoMessage() {}
-func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{0}
-}
-func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
-}
-func (m *ExportTraceServiceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
-
-func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
- if m != nil {
- return m.ResourceSpans
- }
- return nil
-}
-
-type ExportTraceServiceResponse struct {
- // The details of a partially successful export request.
- //
- // If the request is only partially accepted
- // (i.e. when the server accepts only parts of the data and rejects the rest)
- // the server MUST initialize the `partial_success` field and MUST
- // set the `rejected_spans` with the number of spans it rejected.
- //
- // Servers MAY also make use of the `partial_success` field to convey
- // warnings/suggestions to senders even when the request was fully accepted.
- // In such cases, the `rejected_spans` MUST have a value of `0` and
- // the `error_message` MUST be non-empty.
- //
- // A `partial_success` message with an empty value (rejected_spans = 0 and
- // `error_message` = "") is equivalent to it not being set/present. Senders
- // SHOULD interpret it the same way as in the full success case.
- PartialSuccess ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
-}
-
-func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
-func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceResponse) ProtoMessage() {}
-func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{1}
-}
-func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
-}
-func (m *ExportTraceServiceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
-
-func (m *ExportTraceServiceResponse) GetPartialSuccess() ExportTracePartialSuccess {
- if m != nil {
- return m.PartialSuccess
- }
- return ExportTracePartialSuccess{}
-}
-
-type ExportTracePartialSuccess struct {
- // The number of rejected spans.
- //
- // A `rejected_spans` field holding a `0` value indicates that the
- // request was fully accepted.
- RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
- // A developer-facing human-readable message in English. It should be used
- // either to explain why the server rejected parts of the data during a partial
- // success or to convey warnings/suggestions during a full success. The message
- // should offer guidance on how users can address such issues.
- //
- // error_message is an optional field. An error_message with an empty value
- // is equivalent to it not being set.
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (m *ExportTracePartialSuccess) Reset() { *m = ExportTracePartialSuccess{} }
-func (m *ExportTracePartialSuccess) String() string { return proto.CompactTextString(m) }
-func (*ExportTracePartialSuccess) ProtoMessage() {}
-func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
- return fileDescriptor_192a962890318cf4, []int{2}
-}
-func (m *ExportTracePartialSuccess) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExportTracePartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExportTracePartialSuccess.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExportTracePartialSuccess) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTracePartialSuccess.Merge(m, src)
-}
-func (m *ExportTracePartialSuccess) XXX_Size() int {
- return m.Size()
-}
-func (m *ExportTracePartialSuccess) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTracePartialSuccess.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTracePartialSuccess proto.InternalMessageInfo
-
-func (m *ExportTracePartialSuccess) GetRejectedSpans() int64 {
- if m != nil {
- return m.RejectedSpans
- }
- return 0
-}
-
-func (m *ExportTracePartialSuccess) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest")
- proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse")
- proto.RegisterType((*ExportTracePartialSuccess)(nil), "opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4)
-}
-
-var fileDescriptor_192a962890318cf4 = []byte{
- // 413 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4f, 0xeb, 0xd3, 0x30,
- 0x18, 0x6e, 0x36, 0x19, 0x98, 0xfd, 0x11, 0x8b, 0x87, 0xd9, 0x43, 0x1d, 0x15, 0x47, 0x45, 0x48,
- 0xd9, 0xbc, 0x79, 0xb3, 0xe2, 0x71, 0x38, 0xba, 0xe1, 0xc1, 0xcb, 0x88, 0xdd, 0x4b, 0xa9, 0x74,
- 0x4d, 0x4c, 0xb2, 0xa1, 0x5f, 0x42, 0xf4, 0x2b, 0x78, 0xf4, 0x93, 0xec, 0xb8, 0xa3, 0x27, 0x91,
- 0xed, 0x8b, 0x48, 0x12, 0x2d, 0xad, 0xf4, 0x30, 0x7e, 0xbf, 0x5b, 0xf2, 0xf0, 0x3e, 0x7f, 0xde,
- 0x27, 0x04, 0xbf, 0x60, 0x1c, 0x4a, 0x05, 0x05, 0xec, 0x40, 0x89, 0xcf, 0x11, 0x17, 0x4c, 0xb1,
- 0x28, 0x65, 0x45, 0x01, 0xa9, 0x62, 0x22, 0x52, 0x82, 0xa6, 0x10, 0x1d, 0x66, 0xf6, 0xb0, 0x91,
- 0x20, 0x0e, 0x79, 0x0a, 0xc4, 0x8c, 0xb9, 0xd3, 0x06, 0xd7, 0x82, 0xa4, 0xe2, 0x12, 0x43, 0x21,
- 0x87, 0x99, 0xf7, 0x20, 0x63, 0x19, 0xb3, 0xca, 0xfa, 0x64, 0x07, 0xbd, 0xb0, 0xcd, 0xb9, 0xe9,
- 0x67, 0x27, 0x03, 0x86, 0x1f, 0xbe, 0xfe, 0xc4, 0x99, 0x50, 0x6b, 0x0d, 0xae, 0x6c, 0x86, 0x04,
- 0x3e, 0xee, 0x41, 0x2a, 0x37, 0xc1, 0x23, 0x01, 0x92, 0xed, 0x85, 0x8e, 0xc7, 0x69, 0x29, 0xc7,
- 0x68, 0xd2, 0x0d, 0xfb, 0xf3, 0x67, 0xa4, 0x2d, 0xdd, 0xbf, 0x4c, 0x24, 0xf9, 0xcb, 0x59, 0x69,
- 0x4a, 0x32, 0x14, 0xf5, 0x6b, 0xf0, 0x05, 0x61, 0xaf, 0xcd, 0x51, 0x72, 0x56, 0x4a, 0x70, 0x39,
- 0xbe, 0xc7, 0xa9, 0x50, 0x39, 0x2d, 0x36, 0x72, 0x9f, 0xa6, 0x20, 0xb5, 0x27, 0x0a, 0xfb, 0xf3,
- 0x97, 0xe4, 0xba, 0x46, 0x48, 0x4d, 0x7c, 0x69, 0x95, 0x56, 0x56, 0x28, 0xbe, 0x73, 0xfc, 0xf5,
- 0xc8, 0x49, 0x46, 0xbc, 0x81, 0x06, 0x59, 0xa3, 0x81, 0x26, 0xc5, 0x7d, 0xa2, 0x1b, 0xf8, 0x00,
- 0xa9, 0x82, 0x6d, 0xd5, 0x00, 0x0a, 0xbb, 0x7a, 0x29, 0x8b, 0x9a, 0xa5, 0xdc, 0xc7, 0x78, 0x08,
- 0x42, 0x30, 0xb1, 0xd9, 0x81, 0x94, 0x34, 0x83, 0x71, 0x67, 0x82, 0xc2, 0xbb, 0xc9, 0xc0, 0x80,
- 0x0b, 0x8b, 0xcd, 0xbf, 0x23, 0x3c, 0xa8, 0xef, 0xec, 0x7e, 0x43, 0xb8, 0x67, 0xad, 0xdd, 0x9b,
- 0x6c, 0xd7, 0x7c, 0x2c, 0x2f, 0xbe, 0x8d, 0x84, 0x6d, 0x3f, 0x70, 0xe2, 0x13, 0x3a, 0x9e, 0x7d,
- 0x74, 0x3a, 0xfb, 0xe8, 0xf7, 0xd9, 0x47, 0x5f, 0x2f, 0xbe, 0x73, 0xba, 0xf8, 0xce, 0xcf, 0x8b,
- 0xef, 0xe0, 0xa7, 0x39, 0xbb, 0xd2, 0x22, 0xbe, 0x5f, 0x57, 0x5f, 0xea, 0xa9, 0x25, 0x7a, 0xb7,
- 0xc8, 0xfe, 0xe7, 0xe7, 0xf5, 0xef, 0xc0, 0xb7, 0x54, 0xd1, 0x28, 0x2f, 0x15, 0x88, 0x92, 0x16,
- 0x91, 0xb9, 0x19, 0x83, 0x0c, 0xca, 0x96, 0x5f, 0xf3, 0xa3, 0x33, 0x7d, 0xc3, 0xa1, 0x5c, 0x57,
- 0x62, 0xc6, 0x86, 0xbc, 0xaa, 0xc2, 0x98, 0x08, 0xe4, 0xed, 0xec, 0x7d, 0xcf, 0xa8, 0x3c, 0xff,
- 0x13, 0x00, 0x00, 0xff, 0xff, 0x82, 0xce, 0x78, 0xc7, 0x8f, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// TraceServiceClient is the client API for TraceService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type TraceServiceClient interface {
- Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
-}
-
-type traceServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
- return &traceServiceClient{cc}
-}
-
-func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
- out := new(ExportTraceServiceResponse)
- err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// TraceServiceServer is the server API for TraceService service.
-type TraceServiceServer interface {
- Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
-}
-
-// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedTraceServiceServer struct {
-}
-
-func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
- s.RegisterService(&_TraceService_serviceDesc, srv)
-}
-
-func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExportTraceServiceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(TraceServiceServer).Export(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _TraceService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
- HandlerType: (*TraceServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Export",
- Handler: _TraceService_Export_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
-}
-
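
For context on the service plumbing being deleted here: TraceServiceClient is a plain gogo/gRPC unary client. A hypothetical usage sketch follows; note the package lives under internal/ and is not importable outside the pdata module, and localhost:4317 is only the conventional OTLP/gRPC port, not anything this diff configures.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	tracev1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)

func main() {
	// Assumed endpoint; 4317 is the conventional OTLP/gRPC port.
	conn, err := grpc.NewClient("localhost:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := tracev1.NewTraceServiceClient(conn)
	resp, err := client.Export(context.Background(), &tracev1.ExportTraceServiceRequest{})
	if err != nil {
		log.Fatal(err)
	}
	_ = resp.GetPartialSuccess() // zero value means full success
}
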
-func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTraceService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTraceService(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ExportTracePartialSuccess) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExportTracePartialSuccess) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExportTracePartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ErrorMessage) > 0 {
- i -= len(m.ErrorMessage)
- copy(dAtA[i:], m.ErrorMessage)
- i = encodeVarintTraceService(dAtA, i, uint64(len(m.ErrorMessage)))
- i--
- dAtA[i] = 0x12
- }
- if m.RejectedSpans != 0 {
- i = encodeVarintTraceService(dAtA, i, uint64(m.RejectedSpans))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int {
- offset -= sovTraceService(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
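
encodeVarintTraceService above fills bytes forward starting at offset minus sovTraceService(v), so the length computation matters: the byte length of a uvarint is ceil(bitlen(x)/7), and sozTraceService (defined just below) adds a ZigZag pre-mapping for signed values. A small self-contained check of both, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovTraceService: uvarint byte length is ceil(bitlen(x)/7);
// the x|1 guards the x == 0 case so the answer is never 0.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// zigzag mirrors the mapping inside sozTraceService: small negative ints
// become small unsigned ints (-1 -> 1, 1 -> 2, -2 -> 3, ...).
func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(300)) // 1 1 2 2
	fmt.Println(zigzag(-1), zigzag(1), zigzag(-2))    // 1 2 3
	// 300 = 0b1_0010_1100 encodes as 0xac 0x02: low 7 bits first,
	// continuation bit 0x80 set on every byte except the last.
}
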
-func (m *ExportTraceServiceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for _, e := range m.ResourceSpans {
- l = e.Size()
- n += 1 + l + sovTraceService(uint64(l))
- }
- }
- return n
-}
-
-func (m *ExportTraceServiceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.PartialSuccess.Size()
- n += 1 + l + sovTraceService(uint64(l))
- return n
-}
-
-func (m *ExportTracePartialSuccess) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RejectedSpans != 0 {
- n += 1 + sovTraceService(uint64(m.RejectedSpans))
- }
- l = len(m.ErrorMessage)
- if l > 0 {
- n += 1 + l + sovTraceService(uint64(l))
- }
- return n
-}
-
-func sovTraceService(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTraceService(x uint64) (n int) {
- return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{})
- if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExportTracePartialSuccess) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExportTracePartialSuccess: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExportTracePartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
- }
- m.RejectedSpans = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectedSpans |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTraceService
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTraceService
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ErrorMessage = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTraceService(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTraceService
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTraceService(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTraceService
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTraceService
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTraceService
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTraceService
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group")
-)
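
One more orientation note before the next deleted file: the marshal side of these generated types writes back to front. MarshalToSizedBuffer starts at the end of a presized buffer and prepends each field, so a message's length prefix can be written immediately after its payload size is known, without a second sizing pass. A minimal sketch of the idea with hand-assembled wire bytes (putTail is a hypothetical helper):

package main

import "fmt"

// putTail copies b so that it ends at position i in buf and returns the new
// (smaller) write position — the same back-to-front style the generated
// MarshalToSizedBuffer methods use.
func putTail(buf []byte, i int, b []byte) int {
	i -= len(b)
	copy(buf[i:], b)
	return i
}

func main() {
	// Encode field 2 (string "hi"), then field 1 (varint 7), writing the
	// *last* field first so earlier fields can be prepended freely.
	buf := make([]byte, 16)
	i := len(buf)
	i = putTail(buf, i, []byte{0x12, 0x02, 'h', 'i'}) // tag 0x12 = field 2, wire type 2; length 2
	i = putTail(buf, i, []byte{0x08, 0x07})           // tag 0x08 = field 1, wire type 0; value 7
	fmt.Printf("% x\n", buf[i:]) // 08 07 12 02 68 69: fields in ascending order
}
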
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go
deleted file mode 100644
index 179aa9d5d95..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go
+++ /dev/null
@@ -1,2080 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/common/v1/common.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// AnyValue is used to represent any type of attribute value. AnyValue may contain a
-// primitive value such as a string or integer or it may contain an arbitrary nested
-// object containing arrays, key-value lists and primitives.
-type AnyValue struct {
- // The value is one of the listed fields. It is valid for all values to be unspecified
- // in which case this AnyValue is considered to be "empty".
- //
- // Types that are valid to be assigned to Value:
- // *AnyValue_StringValue
- // *AnyValue_BoolValue
- // *AnyValue_IntValue
- // *AnyValue_DoubleValue
- // *AnyValue_ArrayValue
- // *AnyValue_KvlistValue
- // *AnyValue_BytesValue
- Value isAnyValue_Value `protobuf_oneof:"value"`
-}
-
-func (m *AnyValue) Reset() { *m = AnyValue{} }
-func (m *AnyValue) String() string { return proto.CompactTextString(m) }
-func (*AnyValue) ProtoMessage() {}
-func (*AnyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{0}
-}
-func (m *AnyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AnyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AnyValue.Merge(m, src)
-}
-func (m *AnyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *AnyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_AnyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AnyValue proto.InternalMessageInfo
-
-type isAnyValue_Value interface {
- isAnyValue_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type AnyValue_StringValue struct {
- StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
-}
-type AnyValue_BoolValue struct {
- BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
-}
-type AnyValue_IntValue struct {
- IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"`
-}
-type AnyValue_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"`
-}
-type AnyValue_ArrayValue struct {
- ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"`
-}
-type AnyValue_KvlistValue struct {
- KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"`
-}
-type AnyValue_BytesValue struct {
- BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof" json:"bytes_value,omitempty"`
-}
-
-func (*AnyValue_StringValue) isAnyValue_Value() {}
-func (*AnyValue_BoolValue) isAnyValue_Value() {}
-func (*AnyValue_IntValue) isAnyValue_Value() {}
-func (*AnyValue_DoubleValue) isAnyValue_Value() {}
-func (*AnyValue_ArrayValue) isAnyValue_Value() {}
-func (*AnyValue_KvlistValue) isAnyValue_Value() {}
-func (*AnyValue_BytesValue) isAnyValue_Value() {}
-
-func (m *AnyValue) GetValue() isAnyValue_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *AnyValue) GetStringValue() string {
- if x, ok := m.GetValue().(*AnyValue_StringValue); ok {
- return x.StringValue
- }
- return ""
-}
-
-func (m *AnyValue) GetBoolValue() bool {
- if x, ok := m.GetValue().(*AnyValue_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *AnyValue) GetIntValue() int64 {
- if x, ok := m.GetValue().(*AnyValue_IntValue); ok {
- return x.IntValue
- }
- return 0
-}
-
-func (m *AnyValue) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-func (m *AnyValue) GetArrayValue() *ArrayValue {
- if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
- return x.ArrayValue
- }
- return nil
-}
-
-func (m *AnyValue) GetKvlistValue() *KeyValueList {
- if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
- return x.KvlistValue
- }
- return nil
-}
-
-func (m *AnyValue) GetBytesValue() []byte {
- if x, ok := m.GetValue().(*AnyValue_BytesValue); ok {
- return x.BytesValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AnyValue) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*AnyValue_StringValue)(nil),
- (*AnyValue_BoolValue)(nil),
- (*AnyValue_IntValue)(nil),
- (*AnyValue_DoubleValue)(nil),
- (*AnyValue_ArrayValue)(nil),
- (*AnyValue_KvlistValue)(nil),
- (*AnyValue_BytesValue)(nil),
- }
-}
-
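
The AnyValue oneof above is implemented gogo-style: each variant is a one-field wrapper struct satisfying the unexported isAnyValue_Value interface, and the typed getters type-assert on it. A short illustrative fragment, assuming the surrounding package (so not independently compilable):

// Construct the string variant and read it back two ways.
v := &AnyValue{Value: &AnyValue_StringValue{StringValue: "cortex"}}
if s, ok := v.GetValue().(*AnyValue_StringValue); ok {
	fmt.Println(s.StringValue) // "cortex"
}
fmt.Println(v.GetStringValue()) // same value via the typed getter;
// getters for other variants (e.g. GetBoolValue) return their zero value here.
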
-// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
-// since oneof in AnyValue does not allow repeated fields.
-type ArrayValue struct {
- // Array of values. The array may be empty (contain 0 elements).
- Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
-}
-
-func (m *ArrayValue) Reset() { *m = ArrayValue{} }
-func (m *ArrayValue) String() string { return proto.CompactTextString(m) }
-func (*ArrayValue) ProtoMessage() {}
-func (*ArrayValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{1}
-}
-func (m *ArrayValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ArrayValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ArrayValue.Merge(m, src)
-}
-func (m *ArrayValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ArrayValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ArrayValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ArrayValue proto.InternalMessageInfo
-
-func (m *ArrayValue) GetValues() []AnyValue {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
-// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
-// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
-// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
-// are semantically equivalent.
-type KeyValueList struct {
- // A collection of key/value pairs of key-value pairs. The list may be empty (may
- // contain 0 elements).
- // The keys MUST be unique (it is not allowed to have more than one
- // value with the same key).
- Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
-}
-
-func (m *KeyValueList) Reset() { *m = KeyValueList{} }
-func (m *KeyValueList) String() string { return proto.CompactTextString(m) }
-func (*KeyValueList) ProtoMessage() {}
-func (*KeyValueList) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{2}
-}
-func (m *KeyValueList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValueList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValueList.Merge(m, src)
-}
-func (m *KeyValueList) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValueList) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValueList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValueList proto.InternalMessageInfo
-
-func (m *KeyValueList) GetValues() []KeyValue {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-// KeyValue is a key-value pair that is used to store Span attributes, Link
-// attributes, etc.
-type KeyValue struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
-}
-
-func (m *KeyValue) Reset() { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage() {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{3}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-func (m *KeyValue) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *KeyValue) GetValue() AnyValue {
- if m != nil {
- return m.Value
- }
- return AnyValue{}
-}
-
-// InstrumentationScope is a message representing the instrumentation scope information
-// such as the fully qualified name and version.
-type InstrumentationScope struct {
- // An empty instrumentation scope name means the name is unknown.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- // Additional attributes that describe the scope. [Optional].
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
-}
-
-func (m *InstrumentationScope) Reset() { *m = InstrumentationScope{} }
-func (m *InstrumentationScope) String() string { return proto.CompactTextString(m) }
-func (*InstrumentationScope) ProtoMessage() {}
-func (*InstrumentationScope) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{4}
-}
-func (m *InstrumentationScope) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InstrumentationScope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InstrumentationScope.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InstrumentationScope) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InstrumentationScope.Merge(m, src)
-}
-func (m *InstrumentationScope) XXX_Size() int {
- return m.Size()
-}
-func (m *InstrumentationScope) XXX_DiscardUnknown() {
- xxx_messageInfo_InstrumentationScope.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InstrumentationScope proto.InternalMessageInfo
-
-func (m *InstrumentationScope) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *InstrumentationScope) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-func (m *InstrumentationScope) GetAttributes() []KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *InstrumentationScope) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A reference to an Entity.
-// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs.
-//
-// Status: [Development]
-type EntityRef struct {
- // The Schema URL, if known. This is the identifier of the Schema that the entity data
- // is recorded in. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- //
- // This schema_url applies to the data in this message and to the Resource attributes
- // referenced by id_keys and description_keys.
- // TODO: discuss if we are happy with this somewhat complicated definition of what
- // the schema_url applies to.
- //
- // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs.
- SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
- // Defines the type of the entity. MUST not change during the lifetime of the entity.
- // For example: "service" or "host". This field is required and MUST not be empty
- // for valid entities.
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- // Attribute Keys that identify the entity.
- // MUST not change during the lifetime of the entity. The Id must contain at least one attribute.
- // These keys MUST exist in the containing {message}.attributes.
- IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"`
- // Descriptive (non-identifying) attribute keys of the entity.
- // MAY change over the lifetime of the entity. MAY be empty.
- // These attribute keys are not part of entity's identity.
- // These keys MUST exist in the containing {message}.attributes.
- DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"`
-}
-
-func (m *EntityRef) Reset() { *m = EntityRef{} }
-func (m *EntityRef) String() string { return proto.CompactTextString(m) }
-func (*EntityRef) ProtoMessage() {}
-func (*EntityRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_62ba46dcb97aa817, []int{5}
-}
-func (m *EntityRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EntityRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EntityRef.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EntityRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityRef.Merge(m, src)
-}
-func (m *EntityRef) XXX_Size() int {
- return m.Size()
-}
-func (m *EntityRef) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityRef proto.InternalMessageInfo
-
-func (m *EntityRef) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-func (m *EntityRef) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *EntityRef) GetIdKeys() []string {
- if m != nil {
- return m.IdKeys
- }
- return nil
-}
-
-func (m *EntityRef) GetDescriptionKeys() []string {
- if m != nil {
- return m.DescriptionKeys
- }
- return nil
-}
-
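
EntityRef, whose getters end above, is still marked [Development]; it identifies an entity purely by listing which attribute keys form its identity. A hypothetical instance for a "service" entity, assuming conventional resource attribute keys (the schema URL version is an assumption):

// Hypothetical EntityRef; per the field comments above, the listed keys
// must exist in the containing message's attributes.
ref := &EntityRef{
	SchemaUrl:       "https://opentelemetry.io/schemas/1.34.0", // assumed version
	Type:            "service",
	IdKeys:          []string{"service.name", "service.namespace"},
	DescriptionKeys: []string{"service.version"},
}
_ = ref
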
-func init() {
- proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue")
- proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue")
- proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList")
- proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue")
- proto.RegisterType((*InstrumentationScope)(nil), "opentelemetry.proto.common.v1.InstrumentationScope")
- proto.RegisterType((*EntityRef)(nil), "opentelemetry.proto.common.v1.EntityRef")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817)
-}
-
-var fileDescriptor_62ba46dcb97aa817 = []byte{
- // 608 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41,
- 0x14, 0xdf, 0xa1, 0xa5, 0xed, 0xbe, 0xd6, 0x48, 0x26, 0x44, 0x1b, 0x93, 0x96, 0xb5, 0x1e, 0x5c,
- 0x34, 0x69, 0x03, 0x5e, 0xbc, 0x52, 0x24, 0xa9, 0x01, 0x23, 0x59, 0x84, 0x83, 0x97, 0x66, 0xdb,
- 0x7d, 0xd6, 0x09, 0xdb, 0x99, 0xcd, 0xec, 0xb4, 0xc9, 0x5e, 0xfd, 0x04, 0x7e, 0x0e, 0x2f, 0x7e,
- 0x0d, 0x2e, 0x26, 0x1c, 0x3d, 0x19, 0x02, 0x5f, 0xc4, 0xcc, 0x9f, 0x16, 0xe4, 0x00, 0xc1, 0xdb,
- 0x7b, 0xbf, 0xf7, 0x7b, 0xbf, 0xf7, 0x7e, 0x33, 0x93, 0x81, 0x57, 0x22, 0x43, 0xae, 0x30, 0xc5,
- 0x29, 0x2a, 0x59, 0xf4, 0x32, 0x29, 0x94, 0xe8, 0x8d, 0xc5, 0x74, 0x2a, 0x78, 0x6f, 0xbe, 0xe5,
- 0xa2, 0xae, 0x81, 0x69, 0xeb, 0x1f, 0xae, 0x05, 0xbb, 0x8e, 0x31, 0xdf, 0x7a, 0xb6, 0x3e, 0x11,
- 0x13, 0x61, 0x05, 0x74, 0x64, 0xeb, 0x9d, 0x8b, 0x15, 0xa8, 0xed, 0xf0, 0xe2, 0x24, 0x4e, 0x67,
- 0x48, 0x5f, 0x40, 0x23, 0x57, 0x92, 0xf1, 0xc9, 0x70, 0xae, 0xf3, 0x26, 0x09, 0x48, 0xe8, 0x0f,
- 0xbc, 0xa8, 0x6e, 0x51, 0x4b, 0xda, 0x00, 0x18, 0x09, 0x91, 0x3a, 0xca, 0x4a, 0x40, 0xc2, 0xda,
- 0xc0, 0x8b, 0x7c, 0x8d, 0x59, 0x42, 0x0b, 0x7c, 0xc6, 0x95, 0xab, 0x97, 0x02, 0x12, 0x96, 0x06,
- 0x5e, 0x54, 0x63, 0x5c, 0x2d, 0x87, 0x24, 0x62, 0x36, 0x4a, 0xd1, 0x31, 0xca, 0x01, 0x09, 0x89,
- 0x1e, 0x62, 0x51, 0x4b, 0x3a, 0x80, 0x7a, 0x2c, 0x65, 0x5c, 0x38, 0xce, 0x6a, 0x40, 0xc2, 0xfa,
- 0xf6, 0x66, 0xf7, 0x4e, 0x87, 0xdd, 0x1d, 0xdd, 0x61, 0xfa, 0x07, 0x5e, 0x04, 0xf1, 0x32, 0xa3,
- 0x87, 0xd0, 0x38, 0x9d, 0xa7, 0x2c, 0x5f, 0x2c, 0x55, 0x31, 0x72, 0xaf, 0xef, 0x91, 0xdb, 0x47,
- 0xdb, 0x7e, 0xc0, 0x72, 0xa5, 0xf7, 0xb3, 0x12, 0x56, 0xf1, 0x39, 0xd4, 0x47, 0x85, 0xc2, 0xdc,
- 0x09, 0x56, 0x03, 0x12, 0x36, 0xf4, 0x50, 0x03, 0x1a, 0x4a, 0xbf, 0x0a, 0xab, 0xa6, 0xd8, 0x39,
- 0x02, 0xb8, 0xde, 0x8c, 0xee, 0x41, 0xc5, 0xc0, 0x79, 0x93, 0x04, 0xa5, 0xb0, 0xbe, 0xfd, 0xf2,
- 0x3e, 0x53, 0xee, 0x72, 0xfa, 0xe5, 0xb3, 0x3f, 0x1b, 0x5e, 0xe4, 0x9a, 0x3b, 0xc7, 0xd0, 0xb8,
- 0xb9, 0xdf, 0x83, 0x65, 0x17, 0xcd, 0xb7, 0x64, 0x63, 0xa8, 0x2d, 0x2a, 0x74, 0x0d, 0x4a, 0xa7,
- 0x58, 0xd8, 0x47, 0x10, 0xe9, 0x90, 0xee, 0x3a, 0x4b, 0xe6, 0xd6, 0x1f, 0xbc, 0xba, 0x3b, 0x8e,
- 0x5f, 0x04, 0xd6, 0xdf, 0xf3, 0x5c, 0xc9, 0xd9, 0x14, 0xb9, 0x8a, 0x15, 0x13, 0xfc, 0x68, 0x2c,
- 0x32, 0xa4, 0x14, 0xca, 0x3c, 0x9e, 0xba, 0x57, 0x17, 0x99, 0x98, 0x36, 0xa1, 0x3a, 0x47, 0x99,
- 0x33, 0xc1, 0xcd, 0x4c, 0x3f, 0x5a, 0xa4, 0xf4, 0x03, 0x40, 0xac, 0x94, 0x64, 0xa3, 0x99, 0xc2,
- 0xbc, 0x59, 0xfa, 0x1f, 0xd3, 0x37, 0x04, 0xe8, 0x5b, 0x68, 0x26, 0x52, 0x64, 0x19, 0x26, 0xc3,
- 0x6b, 0x74, 0x38, 0x16, 0x33, 0xae, 0xcc, 0x0b, 0x7d, 0x14, 0x3d, 0x71, 0xf5, 0x9d, 0x65, 0x79,
- 0x57, 0x57, 0x3b, 0xdf, 0x08, 0xf8, 0x7b, 0x5c, 0x31, 0x55, 0x44, 0xf8, 0x85, 0xb6, 0x00, 0xf2,
- 0xf1, 0x57, 0x9c, 0xc6, 0xc3, 0x99, 0x4c, 0x9d, 0x15, 0xdf, 0x22, 0xc7, 0x32, 0xd5, 0x1e, 0x55,
- 0x91, 0xa1, 0x33, 0x63, 0x62, 0xfa, 0x14, 0xaa, 0x2c, 0x19, 0x9e, 0x62, 0x61, 0x6d, 0xf8, 0x51,
- 0x85, 0x25, 0xfb, 0x58, 0xe4, 0x74, 0x13, 0xd6, 0x12, 0xcc, 0xc7, 0x92, 0x65, 0xfa, 0x90, 0x2c,
- 0xa3, 0x6c, 0x18, 0x8f, 0x6f, 0xe0, 0x9a, 0xda, 0xff, 0x49, 0xce, 0x2e, 0xdb, 0xe4, 0xfc, 0xb2,
- 0x4d, 0x2e, 0x2e, 0xdb, 0xe4, 0xfb, 0x55, 0xdb, 0x3b, 0xbf, 0x6a, 0x7b, 0xbf, 0xaf, 0xda, 0x1e,
- 0x04, 0x4c, 0xdc, 0x7d, 0x2c, 0xfd, 0xfa, 0xae, 0x09, 0x0f, 0x35, 0x7c, 0x48, 0x3e, 0xbf, 0x9b,
- 0xdc, 0x6e, 0x60, 0xfa, 0xcf, 0x49, 0x53, 0x1c, 0x2b, 0x21, 0x7b, 0x59, 0x12, 0xab, 0xb8, 0xc7,
- 0xb8, 0x42, 0xc9, 0xe3, 0xb4, 0x67, 0x32, 0xa3, 0x38, 0x41, 0x7e, 0xfd, 0x35, 0xfd, 0x58, 0x69,
- 0x7d, 0xcc, 0x90, 0x7f, 0x5a, 0x6a, 0x18, 0xf5, 0xae, 0x9d, 0xd4, 0x3d, 0xd9, 0x1a, 0x55, 0x4c,
- 0xcf, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x8b, 0xd4, 0x3b, 0xe2, 0x04, 0x00, 0x00,
-}
-
-func (m *AnyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= len(m.StringValue)
- copy(dAtA[i:], m.StringValue)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i--
- if m.BoolValue {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintCommon(dAtA, i, uint64(m.IntValue))
- i--
- dAtA[i] = 0x18
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue))))
- i--
- dAtA[i] = 0x21
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ArrayValue != nil {
- {
- size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.KvlistValue != nil {
- {
- size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- return len(dAtA) - i, nil
-}
-func (m *AnyValue_BytesValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AnyValue_BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.BytesValue != nil {
- i -= len(m.BytesValue)
- copy(dAtA[i:], m.BytesValue)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.BytesValue)))
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *ArrayValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Values) > 0 {
- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KeyValueList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValueList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Values) > 0 {
- for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InstrumentationScope) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InstrumentationScope) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InstrumentationScope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintCommon(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x20
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCommon(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EntityRef) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EntityRef) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EntityRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DescriptionKeys) > 0 {
- for iNdEx := len(m.DescriptionKeys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.DescriptionKeys[iNdEx])
- copy(dAtA[i:], m.DescriptionKeys[iNdEx])
- i = encodeVarintCommon(dAtA, i, uint64(len(m.DescriptionKeys[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.IdKeys) > 0 {
- for iNdEx := len(m.IdKeys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.IdKeys[iNdEx])
- copy(dAtA[i:], m.IdKeys[iNdEx])
- i = encodeVarintCommon(dAtA, i, uint64(len(m.IdKeys[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Type) > 0 {
- i -= len(m.Type)
- copy(dAtA[i:], m.Type)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.Type)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintCommon(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintCommon(dAtA []byte, offset int, v uint64) int {
- offset -= sovCommon(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *AnyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Value != nil {
- n += m.Value.Size()
- }
- return n
-}
-
-func (m *AnyValue_StringValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.StringValue)
- n += 1 + l + sovCommon(uint64(l))
- return n
-}
-func (m *AnyValue_BoolValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 2
- return n
-}
-func (m *AnyValue_IntValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovCommon(uint64(m.IntValue))
- return n
-}
-func (m *AnyValue_DoubleValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *AnyValue_ArrayValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ArrayValue != nil {
- l = m.ArrayValue.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *AnyValue_KvlistValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.KvlistValue != nil {
- l = m.KvlistValue.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *AnyValue_BytesValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.BytesValue != nil {
- l = len(m.BytesValue)
- n += 1 + l + sovCommon(uint64(l))
- }
- return n
-}
-func (m *ArrayValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Values) > 0 {
- for _, e := range m.Values {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func (m *KeyValueList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Values) > 0 {
- for _, e := range m.Values {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func (m *KeyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = m.Value.Size()
- n += 1 + l + sovCommon(uint64(l))
- return n
-}
-
-func (m *InstrumentationScope) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovCommon(uint64(m.DroppedAttributesCount))
- }
- return n
-}
-
-func (m *EntityRef) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- l = len(m.Type)
- if l > 0 {
- n += 1 + l + sovCommon(uint64(l))
- }
- if len(m.IdKeys) > 0 {
- for _, s := range m.IdKeys {
- l = len(s)
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- if len(m.DescriptionKeys) > 0 {
- for _, s := range m.DescriptionKeys {
- l = len(s)
- n += 1 + l + sovCommon(uint64(l))
- }
- }
- return n
-}
-
-func sovCommon(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozCommon(x uint64) (n int) {
- return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *AnyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AnyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])}
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.Value = &AnyValue_BoolValue{b}
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = &AnyValue_IntValue{v}
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))}
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &ArrayValue{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Value = &AnyValue_ArrayValue{v}
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &KeyValueList{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Value = &AnyValue_KvlistValue{v}
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.Value = &AnyValue_BytesValue{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ArrayValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Values = append(m.Values, AnyValue{})
- if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KeyValueList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Values = append(m.Values, KeyValue{})
- if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InstrumentationScope) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InstrumentationScope: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InstrumentationScope: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EntityRef) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EntityRef: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EntityRef: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Type = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IdKeys = append(m.IdKeys, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthCommon
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthCommon
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DescriptionKeys = append(m.DescriptionKeys, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCommon(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCommon
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipCommon(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCommon
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthCommon
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupCommon
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthCommon
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
deleted file mode 100644
index da266d167d2..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go
+++ /dev/null
@@ -1,1834 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/logs/v1/logs.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Possible values for LogRecord.SeverityNumber.
-type SeverityNumber int32
-
-const (
- // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.
- SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0
- SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1
- SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2
- SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3
- SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4
- SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5
- SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6
- SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7
- SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8
- SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9
- SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10
- SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11
- SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12
- SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13
- SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14
- SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15
- SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16
- SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17
- SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18
- SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19
- SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20
- SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21
- SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22
- SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23
- SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24
-)
-
-var SeverityNumber_name = map[int32]string{
- 0: "SEVERITY_NUMBER_UNSPECIFIED",
- 1: "SEVERITY_NUMBER_TRACE",
- 2: "SEVERITY_NUMBER_TRACE2",
- 3: "SEVERITY_NUMBER_TRACE3",
- 4: "SEVERITY_NUMBER_TRACE4",
- 5: "SEVERITY_NUMBER_DEBUG",
- 6: "SEVERITY_NUMBER_DEBUG2",
- 7: "SEVERITY_NUMBER_DEBUG3",
- 8: "SEVERITY_NUMBER_DEBUG4",
- 9: "SEVERITY_NUMBER_INFO",
- 10: "SEVERITY_NUMBER_INFO2",
- 11: "SEVERITY_NUMBER_INFO3",
- 12: "SEVERITY_NUMBER_INFO4",
- 13: "SEVERITY_NUMBER_WARN",
- 14: "SEVERITY_NUMBER_WARN2",
- 15: "SEVERITY_NUMBER_WARN3",
- 16: "SEVERITY_NUMBER_WARN4",
- 17: "SEVERITY_NUMBER_ERROR",
- 18: "SEVERITY_NUMBER_ERROR2",
- 19: "SEVERITY_NUMBER_ERROR3",
- 20: "SEVERITY_NUMBER_ERROR4",
- 21: "SEVERITY_NUMBER_FATAL",
- 22: "SEVERITY_NUMBER_FATAL2",
- 23: "SEVERITY_NUMBER_FATAL3",
- 24: "SEVERITY_NUMBER_FATAL4",
-}
-
-var SeverityNumber_value = map[string]int32{
- "SEVERITY_NUMBER_UNSPECIFIED": 0,
- "SEVERITY_NUMBER_TRACE": 1,
- "SEVERITY_NUMBER_TRACE2": 2,
- "SEVERITY_NUMBER_TRACE3": 3,
- "SEVERITY_NUMBER_TRACE4": 4,
- "SEVERITY_NUMBER_DEBUG": 5,
- "SEVERITY_NUMBER_DEBUG2": 6,
- "SEVERITY_NUMBER_DEBUG3": 7,
- "SEVERITY_NUMBER_DEBUG4": 8,
- "SEVERITY_NUMBER_INFO": 9,
- "SEVERITY_NUMBER_INFO2": 10,
- "SEVERITY_NUMBER_INFO3": 11,
- "SEVERITY_NUMBER_INFO4": 12,
- "SEVERITY_NUMBER_WARN": 13,
- "SEVERITY_NUMBER_WARN2": 14,
- "SEVERITY_NUMBER_WARN3": 15,
- "SEVERITY_NUMBER_WARN4": 16,
- "SEVERITY_NUMBER_ERROR": 17,
- "SEVERITY_NUMBER_ERROR2": 18,
- "SEVERITY_NUMBER_ERROR3": 19,
- "SEVERITY_NUMBER_ERROR4": 20,
- "SEVERITY_NUMBER_FATAL": 21,
- "SEVERITY_NUMBER_FATAL2": 22,
- "SEVERITY_NUMBER_FATAL3": 23,
- "SEVERITY_NUMBER_FATAL4": 24,
-}
-
-func (x SeverityNumber) String() string {
- return proto.EnumName(SeverityNumber_name, int32(x))
-}
-
-func (SeverityNumber) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{0}
-}
-
-// LogRecordFlags represents constants used to interpret the
-// LogRecord.flags field, which is protobuf 'fixed32' type and is to
-// be used as bit-fields. Each non-zero value defined in this enum is
-// a bit-mask. To extract the bit-field, for example, use an
-// expression like:
-//
-// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
-type LogRecordFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0
- // Bits 0-7 are used for trace flags.
- LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255
-)
-
-var LogRecordFlags_name = map[int32]string{
- 0: "LOG_RECORD_FLAGS_DO_NOT_USE",
- 255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK",
-}
-
-var LogRecordFlags_value = map[string]int32{
- "LOG_RECORD_FLAGS_DO_NOT_USE": 0,
- "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255,
-}
-
-func (x LogRecordFlags) String() string {
- return proto.EnumName(LogRecordFlags_name, int32(x))
-}
-
-func (LogRecordFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{1}
-}
-
-// LogsData represents the logs data that can be stored in a persistent storage,
-// OR can be embedded by other protocols that transfer OTLP logs data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type LogsData struct {
- // An array of ResourceLogs.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
-}
-
-func (m *LogsData) Reset() { *m = LogsData{} }
-func (m *LogsData) String() string { return proto.CompactTextString(m) }
-func (*LogsData) ProtoMessage() {}
-func (*LogsData) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{0}
-}
-func (m *LogsData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LogsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LogsData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LogsData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogsData.Merge(m, src)
-}
-func (m *LogsData) XXX_Size() int {
- return m.Size()
-}
-func (m *LogsData) XXX_DiscardUnknown() {
- xxx_messageInfo_LogsData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogsData proto.InternalMessageInfo
-
-func (m *LogsData) GetResourceLogs() []*ResourceLogs {
- if m != nil {
- return m.ResourceLogs
- }
- return nil
-}
-
-// A collection of ScopeLogs from a Resource.
-type ResourceLogs struct {
- DeprecatedScopeLogs []*ScopeLogs `protobuf:"bytes,1000,rep,name=deprecated_scope_logs,json=deprecatedScopeLogs,proto3" json:"deprecated_scope_logs,omitempty"`
- // The resource for the logs in this message.
- // If this field is not set then resource info is unknown.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeLogs that originate from a resource.
- ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_logs" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceLogs) Reset() { *m = ResourceLogs{} }
-func (m *ResourceLogs) String() string { return proto.CompactTextString(m) }
-func (*ResourceLogs) ProtoMessage() {}
-func (*ResourceLogs) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{1}
-}
-func (m *ResourceLogs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceLogs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceLogs.Merge(m, src)
-}
-func (m *ResourceLogs) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceLogs) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceLogs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo
-
-func (m *ResourceLogs) GetDeprecatedScopeLogs() []*ScopeLogs {
- if m != nil {
- return m.DeprecatedScopeLogs
- }
- return nil
-}
-
-func (m *ResourceLogs) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceLogs) GetScopeLogs() []*ScopeLogs {
- if m != nil {
- return m.ScopeLogs
- }
- return nil
-}
-
-func (m *ResourceLogs) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Logs produced by a Scope.
-type ScopeLogs struct {
- // The instrumentation scope information for the logs in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of log records.
- LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the log data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all logs in the "logs" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeLogs) Reset() { *m = ScopeLogs{} }
-func (m *ScopeLogs) String() string { return proto.CompactTextString(m) }
-func (*ScopeLogs) ProtoMessage() {}
-func (*ScopeLogs) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{2}
-}
-func (m *ScopeLogs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeLogs.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeLogs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeLogs.Merge(m, src)
-}
-func (m *ScopeLogs) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeLogs) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeLogs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeLogs proto.InternalMessageInfo
-
-func (m *ScopeLogs) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeLogs) GetLogRecords() []*LogRecord {
- if m != nil {
- return m.LogRecords
- }
- return nil
-}
-
-func (m *ScopeLogs) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A log record according to OpenTelemetry Log Data Model:
-// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
-type LogRecord struct {
- // time_unix_nano is the time when the event occurred.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- // Value of 0 indicates unknown or missing timestamp.
- TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // Time when the event was observed by the collection system.
- // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK)
- // this timestamp is typically set at the generation time and is equal to Timestamp.
- // For events originating externally and collected by OpenTelemetry (e.g. using
- // Collector) this is the time when OpenTelemetry's code observed the event measured
- // by the clock of the OpenTelemetry code. This field MUST be set once the event is
- // observed by OpenTelemetry.
- //
- // For converting OpenTelemetry log data to formats that support only one timestamp or
- // when receiving OpenTelemetry log data by recipients that support only one timestamp
- // internally the following logic is recommended:
- // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- // Value of 0 indicates unknown or missing timestamp.
- ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"`
- // Numerical value of the severity, normalized to values described in Log Data Model.
- // [Optional].
- SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"`
- // The severity text (also known as log level). The original string representation as
- // it is known at the source. [Optional].
- SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"`
- // A value containing the body of the log record. Can be for example a human-readable
- // string message (including multi-line) describing the event in a free form or it can
- // be a structured data composed of arrays and maps of other values. [Optional].
- Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"`
- // Additional attributes that describe the specific event occurrence. [Optional].
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"`
- DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Flags, a bit field. 8 least significant bits are the trace flags as
- // defined in W3C Trace Context specification. 24 most significant bits are reserved
- // and must be set to 0. Readers must not assume that 24 most significant bits
- // will be zero and must correctly mask the bits when reading 8-bit trace flag (use
- // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional].
- Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"`
- // A unique identifier for a trace. All logs from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
- // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is optional.
- //
- // The receivers SHOULD assume that the log record is not associated with a
- // trace if any of the following is true:
- // - the field is not present,
- // - the field contains an invalid value.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
- // other than 8 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is optional. If the sender specifies a valid span_id then it SHOULD also
- // specify a valid trace_id.
- //
- // The receivers SHOULD assume that the log record is not associated with a
- // span if any of the following is true:
- // - the field is not present,
- // - the field contains an invalid value.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // A unique identifier of event category/type.
- // All events with the same event_name are expected to conform to the same
- // schema for both their attributes and their body.
- //
- // Recommended to be fully qualified and short (no longer than 256 characters).
- //
- // Presence of event_name on the log record identifies this record
- // as an event.
- //
- // [Optional].
- EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"`
-}
-
-func (m *LogRecord) Reset() { *m = LogRecord{} }
-func (m *LogRecord) String() string { return proto.CompactTextString(m) }
-func (*LogRecord) ProtoMessage() {}
-func (*LogRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1c030a3ec7e961e, []int{3}
-}
-func (m *LogRecord) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LogRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogRecord.Merge(m, src)
-}
-func (m *LogRecord) XXX_Size() int {
- return m.Size()
-}
-func (m *LogRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_LogRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogRecord proto.InternalMessageInfo
-
-func (m *LogRecord) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *LogRecord) GetObservedTimeUnixNano() uint64 {
- if m != nil {
- return m.ObservedTimeUnixNano
- }
- return 0
-}
-
-func (m *LogRecord) GetSeverityNumber() SeverityNumber {
- if m != nil {
- return m.SeverityNumber
- }
- return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
-}
-
-func (m *LogRecord) GetSeverityText() string {
- if m != nil {
- return m.SeverityText
- }
- return ""
-}
-
-func (m *LogRecord) GetBody() v11.AnyValue {
- if m != nil {
- return m.Body
- }
- return v11.AnyValue{}
-}
-
-func (m *LogRecord) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *LogRecord) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *LogRecord) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *LogRecord) GetEventName() string {
- if m != nil {
- return m.EventName
- }
- return ""
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value)
- proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value)
- proto.RegisterType((*LogsData)(nil), "opentelemetry.proto.logs.v1.LogsData")
- proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs")
- proto.RegisterType((*ScopeLogs)(nil), "opentelemetry.proto.logs.v1.ScopeLogs")
- proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e)
-}
-
-var fileDescriptor_d1c030a3ec7e961e = []byte{
- // 971 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46,
- 0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3,
- 0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90,
- 0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f,
- 0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b,
- 0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f,
- 0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92,
- 0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd,
- 0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a,
- 0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86,
- 0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2,
- 0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f,
- 0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5,
- 0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26,
- 0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b,
- 0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95,
- 0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40,
- 0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d,
- 0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9,
- 0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05,
- 0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9,
- 0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe,
- 0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41,
- 0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca,
- 0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b,
- 0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7,
- 0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b,
- 0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd,
- 0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c,
- 0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d,
- 0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83,
- 0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd,
- 0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8,
- 0x2b, 0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31,
- 0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48,
- 0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18,
- 0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd,
- 0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48,
- 0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f,
- 0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99,
- 0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5,
- 0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57,
- 0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 0xf7, 0x5c,
- 0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7,
- 0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55,
- 0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e,
- 0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07,
- 0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0,
- 0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17,
- 0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19,
- 0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82,
- 0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3,
- 0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa,
- 0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e,
- 0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f,
- 0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9,
- 0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9,
- 0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f,
- 0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b,
- 0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00,
- 0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00,
-}
-
-func (m *LogsData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LogsData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LogsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceLogs) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeLogs) > 0 {
- for iNdEx := len(m.DeprecatedScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeLogs) > 0 {
- for iNdEx := len(m.ScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeLogs) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeLogs) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.LogRecords) > 0 {
- for iNdEx := len(m.LogRecords) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LogRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *LogRecord) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EventName) > 0 {
- i -= len(m.EventName)
- copy(dAtA[i:], m.EventName)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName)))
- i--
- dAtA[i] = 0x62
- }
- if m.ObservedTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano))
- i--
- dAtA[i] = 0x59
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x45
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x38
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- {
- size, err := m.Body.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLogs(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- if len(m.SeverityText) > 0 {
- i -= len(m.SeverityText)
- copy(dAtA[i:], m.SeverityText)
- i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText)))
- i--
- dAtA[i] = 0x1a
- }
- if m.SeverityNumber != 0 {
- i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber))
- i--
- dAtA[i] = 0x10
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintLogs(dAtA []byte, offset int, v uint64) int {
- offset -= sovLogs(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *LogsData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceLogs) > 0 {
- for _, e := range m.ResourceLogs {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceLogs) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.ScopeLogs) > 0 {
- for _, e := range m.ScopeLogs {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- if len(m.DeprecatedScopeLogs) > 0 {
- for _, e := range m.DeprecatedScopeLogs {
- l = e.Size()
- n += 2 + l + sovLogs(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeLogs) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.LogRecords) > 0 {
- for _, e := range m.LogRecords {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- return n
-}
-
-func (m *LogRecord) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.SeverityNumber != 0 {
- n += 1 + sovLogs(uint64(m.SeverityNumber))
- }
- l = len(m.SeverityText)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- l = m.Body.Size()
- n += 1 + l + sovLogs(uint64(l))
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovLogs(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovLogs(uint64(m.DroppedAttributesCount))
- }
- if m.Flags != 0 {
- n += 5
- }
- l = m.TraceId.Size()
- n += 1 + l + sovLogs(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovLogs(uint64(l))
- if m.ObservedTimeUnixNano != 0 {
- n += 9
- }
- l = len(m.EventName)
- if l > 0 {
- n += 1 + l + sovLogs(uint64(l))
- }
- return n
-}
-
-func sovLogs(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozLogs(x uint64) (n int) {
- return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *LogsData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogsData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogsData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceLogs = append(m.ResourceLogs, &ResourceLogs{})
- if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceLogs) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeLogs = append(m.ScopeLogs, &ScopeLogs{})
- if err := m.ScopeLogs[len(m.ScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeLogs = append(m.DeprecatedScopeLogs, &ScopeLogs{})
- if err := m.DeprecatedScopeLogs[len(m.DeprecatedScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeLogs) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeLogs: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeLogs: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LogRecords = append(m.LogRecords, &LogRecord{})
- if err := m.LogRecords[len(m.LogRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LogRecord) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogRecord: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
- }
- m.SeverityNumber = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SeverityNumber |= SeverityNumber(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SeverityText = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
- }
- m.ObservedTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLogs
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthLogs
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EventName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogs(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthLogs
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipLogs(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLogs
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthLogs
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupLogs
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLogs
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go
deleted file mode 100644
index 2371096c745..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go
+++ /dev/null
@@ -1,6655 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/metrics/v1/metrics.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// AggregationTemporality defines how a metric aggregator reports aggregated
-// values. It describes how those values relate to the time interval over
-// which they are aggregated.
-type AggregationTemporality int32
-
-const (
- // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
- AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
- // DELTA is an AggregationTemporality for a metric aggregator which reports
- // changes since last report time. Successive metrics contain aggregation of
- // values from continuous and non-overlapping intervals.
- //
- // The values for a DELTA metric are based only on the time interval
- // associated with one measurement cycle. There is no dependency on
- // previous measurements like is the case for CUMULATIVE metrics.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // DELTA metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0+1 to
- // t_0+2 with a value of 2.
- AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
- // CUMULATIVE is an AggregationTemporality for a metric aggregator which
- // reports changes since a fixed start time. This means that current values
- // of a CUMULATIVE metric depend on all previous measurements since the
- // start time. Because of this, the sender is required to retain this state
- // in some form. If this state is lost or invalidated, the CUMULATIVE metric
- // values MUST be reset and a new fixed start time following the last
- // reported measurement time sent MUST be used.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // CUMULATIVE metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+2 with a value of 5.
- // 9. The system experiences a fault and loses state.
- // 10. The system recovers and resumes receiving at time=t_1.
- // 11. A request is received, the system measures 1 request.
- // 12. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_1 to
- // t_0+1 with a value of 1.
- //
- // Note: Even though, when reporting changes since last report time, using
- // CUMULATIVE is valid, it is not recommended. This may cause problems for
- // systems that do not use start_time to determine when the aggregation
- // value was reset (e.g. Prometheus).
- AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
-)
-
-var AggregationTemporality_name = map[int32]string{
- 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
- 1: "AGGREGATION_TEMPORALITY_DELTA",
- 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
-}
-
-var AggregationTemporality_value = map[string]int32{
- "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
- "AGGREGATION_TEMPORALITY_DELTA": 1,
- "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
-}
-
-func (x AggregationTemporality) String() string {
- return proto.EnumName(AggregationTemporality_name, int32(x))
-}
-
-func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{0}
-}
-
-// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
-// bit-field representing 32 distinct boolean flags. Each flag defined in this
-// enum is a bit-mask. To test the presence of a single flag in the flags of
-// a data point, for example, use an expression like:
-//
-// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
-type DataPointFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0
- // This DataPoint is valid but has no recorded value. This value
- // SHOULD be used to reflect explicitly missing data in a series, as
- // for an equivalent to the Prometheus "staleness marker".
- DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1
-)
-
-var DataPointFlags_name = map[int32]string{
- 0: "DATA_POINT_FLAGS_DO_NOT_USE",
- 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
-}
-
-var DataPointFlags_value = map[string]int32{
- "DATA_POINT_FLAGS_DO_NOT_USE": 0,
- "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1,
-}
-
-func (x DataPointFlags) String() string {
- return proto.EnumName(DataPointFlags_name, int32(x))
-}
-
-func (DataPointFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{1}
-}
-
-// MetricsData represents the metrics data that can be stored in a persistent
-// storage, OR can be embedded by other protocols that transfer OTLP metrics
-// data but do not implement the OTLP protocol.
-//
-// MetricsData
-// └─── ResourceMetrics
-//
-// ├── Resource
-// ├── SchemaURL
-// └── ScopeMetrics
-// ├── Scope
-// ├── SchemaURL
-// └── Metric
-// ├── Name
-// ├── Description
-// ├── Unit
-// └── data
-// ├── Gauge
-// ├── Sum
-// ├── Histogram
-// ├── ExponentialHistogram
-// └── Summary
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type MetricsData struct {
- // An array of ResourceMetrics.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
-}
-
-func (m *MetricsData) Reset() { *m = MetricsData{} }
-func (m *MetricsData) String() string { return proto.CompactTextString(m) }
-func (*MetricsData) ProtoMessage() {}
-func (*MetricsData) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{0}
-}
-func (m *MetricsData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MetricsData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MetricsData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricsData.Merge(m, src)
-}
-func (m *MetricsData) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricsData) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricsData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricsData proto.InternalMessageInfo
-
-func (m *MetricsData) GetResourceMetrics() []*ResourceMetrics {
- if m != nil {
- return m.ResourceMetrics
- }
- return nil
-}
-
-// A collection of ScopeMetrics from a Resource.
-type ResourceMetrics struct {
- DeprecatedScopeMetrics []*ScopeMetrics `protobuf:"bytes,1000,rep,name=deprecated_scope_metrics,json=deprecatedScopeMetrics,proto3" json:"deprecated_scope_metrics,omitempty"`
- // The resource for the metrics in this message.
- // If this field is not set then no resource info is known.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of metrics that originate from a resource.
- ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_metrics" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} }
-func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) }
-func (*ResourceMetrics) ProtoMessage() {}
-func (*ResourceMetrics) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{1}
-}
-func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceMetrics) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetrics.Merge(m, src)
-}
-func (m *ResourceMetrics) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetrics) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo
-
-func (m *ResourceMetrics) GetDeprecatedScopeMetrics() []*ScopeMetrics {
- if m != nil {
- return m.DeprecatedScopeMetrics
- }
- return nil
-}
-
-func (m *ResourceMetrics) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics {
- if m != nil {
- return m.ScopeMetrics
- }
- return nil
-}
-
-func (m *ResourceMetrics) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Metrics produced by an Scope.
-type ScopeMetrics struct {
- // The instrumentation scope information for the metrics in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of metrics that originate from an instrumentation library.
- Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the metric data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all metrics in the "metrics" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeMetrics) Reset() { *m = ScopeMetrics{} }
-func (m *ScopeMetrics) String() string { return proto.CompactTextString(m) }
-func (*ScopeMetrics) ProtoMessage() {}
-func (*ScopeMetrics) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{2}
-}
-func (m *ScopeMetrics) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeMetrics.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeMetrics) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeMetrics.Merge(m, src)
-}
-func (m *ScopeMetrics) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeMetrics) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeMetrics.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeMetrics proto.InternalMessageInfo
-
-func (m *ScopeMetrics) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeMetrics) GetMetrics() []*Metric {
- if m != nil {
- return m.Metrics
- }
- return nil
-}
-
-func (m *ScopeMetrics) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// Defines a Metric which has one or more timeseries. The following is a
-// brief summary of the Metric data model. For more details, see:
-//
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
-//
-// The data model and relation between entities is shown in the
-// diagram below. Here, "DataPoint" is the term used to refer to any
-// one of the specific data point value types, and "points" is the term used
-// to refer to any one of the lists of points contained in the Metric.
-//
-// - Metric is composed of a metadata and data.
-//
-// - Metadata part contains a name, description, unit.
-//
-// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
-//
-// - DataPoint contains timestamps, attributes, and one of the possible value type
-// fields.
-//
-// Metric
-// +------------+
-// |name |
-// |description |
-// |unit | +------------------------------------+
-// |data |---> |Gauge, Sum, Histogram, Summary, ... |
-// +------------+ +------------------------------------+
-//
-// Data [One of Gauge, Sum, Histogram, Summary, ...]
-// +-----------+
-// |... | // Metadata about the Data.
-// |points |--+
-// +-----------+ |
-// | +---------------------------+
-// | |DataPoint 1 |
-// v |+------+------+ +------+ |
-// +-----+ ||label |label |...|label | |
-// | 1 |-->||value1|value2|...|valueN| |
-// +-----+ |+------+------+ +------+ |
-// | . | |+-----+ |
-// | . | ||value| |
-// | . | |+-----+ |
-// | . | +---------------------------+
-// | . | .
-// | . | .
-// | . | .
-// | . | +---------------------------+
-// | . | |DataPoint M |
-// +-----+ |+------+------+ +------+ |
-// | M |-->||label |label |...|label | |
-// +-----+ ||value1|value2|...|valueN| |
-// |+------+------+ +------+ |
-// |+-----+ |
-// ||value| |
-// |+-----+ |
-// +---------------------------+
-//
-// Each distinct type of DataPoint represents the output of a specific
-// aggregation function, the result of applying the DataPoint's
-// associated function of to one or more measurements.
-//
-// All DataPoint types have three common fields:
-// - Attributes includes key-value pairs associated with the data point
-// - TimeUnixNano is required, set to the end time of the aggregation
-// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
-// having an AggregationTemporality field, as discussed below.
-//
-// Both TimeUnixNano and StartTimeUnixNano values are expressed as
-// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
-//
-// # TimeUnixNano
-//
-// This field is required, having consistent interpretation across
-// DataPoint types. TimeUnixNano is the moment corresponding to when
-// the data point's aggregate value was captured.
-//
-// Data points with the 0 value for TimeUnixNano SHOULD be rejected
-// by consumers.
-//
-// # StartTimeUnixNano
-//
-// StartTimeUnixNano in general allows detecting when a sequence of
-// observations is unbroken. This field indicates to consumers the
-// start time for points with cumulative and delta
-// AggregationTemporality, and it should be included whenever possible
-// to support correct rate calculation. Although it may be omitted
-// when the start time is truly unknown, setting StartTimeUnixNano is
-// strongly encouraged.
-type Metric struct {
- // name of the metric.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // description of the metric, which can be used in documentation.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- // unit in which the metric value is reported. Follows the format
- // described by https://unitsofmeasure.org/ucum.html.
- Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
- // Data determines the aggregation type (if any) of the metric, what is the
- // reported value type for the data points, as well as the relatationship to
- // the time interval over which they are reported.
- //
- // Types that are valid to be assigned to Data:
- // *Metric_Gauge
- // *Metric_Sum
- // *Metric_Histogram
- // *Metric_ExponentialHistogram
- // *Metric_Summary
- Data isMetric_Data `protobuf_oneof:"data"`
- // Additional metadata attributes that describe the metric. [Optional].
- // Attributes are non-identifying.
- // Consumers SHOULD NOT need to be aware of these attributes.
- // These attributes MAY be used to encode information allowing
- // for lossless roundtrip translation to / from another data model.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Metadata []v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{3}
-}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return m.Size()
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-type isMetric_Data interface {
- isMetric_Data()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Metric_Gauge struct {
- Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof" json:"gauge,omitempty"`
-}
-type Metric_Sum struct {
- Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type Metric_Histogram struct {
- Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof" json:"histogram,omitempty"`
-}
-type Metric_ExponentialHistogram struct {
- ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof" json:"exponential_histogram,omitempty"`
-}
-type Metric_Summary struct {
- Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof" json:"summary,omitempty"`
-}
-
-func (*Metric_Gauge) isMetric_Data() {}
-func (*Metric_Sum) isMetric_Data() {}
-func (*Metric_Histogram) isMetric_Data() {}
-func (*Metric_ExponentialHistogram) isMetric_Data() {}
-func (*Metric_Summary) isMetric_Data() {}
-
-func (m *Metric) GetData() isMetric_Data {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-func (m *Metric) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Metric) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-func (m *Metric) GetUnit() string {
- if m != nil {
- return m.Unit
- }
- return ""
-}
-
-func (m *Metric) GetGauge() *Gauge {
- if x, ok := m.GetData().(*Metric_Gauge); ok {
- return x.Gauge
- }
- return nil
-}
-
-func (m *Metric) GetSum() *Sum {
- if x, ok := m.GetData().(*Metric_Sum); ok {
- return x.Sum
- }
- return nil
-}
-
-func (m *Metric) GetHistogram() *Histogram {
- if x, ok := m.GetData().(*Metric_Histogram); ok {
- return x.Histogram
- }
- return nil
-}
-
-func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
- if x, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
- return x.ExponentialHistogram
- }
- return nil
-}
-
-func (m *Metric) GetSummary() *Summary {
- if x, ok := m.GetData().(*Metric_Summary); ok {
- return x.Summary
- }
- return nil
-}
-
-func (m *Metric) GetMetadata() []v11.KeyValue {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Metric) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Metric_Gauge)(nil),
- (*Metric_Sum)(nil),
- (*Metric_Histogram)(nil),
- (*Metric_ExponentialHistogram)(nil),
- (*Metric_Summary)(nil),
- }
-}
-
-// Gauge represents the type of a scalar metric that always exports the
-// "current value" for every data point. It should be used for an "unknown"
-// aggregation.
-//
-// A Gauge does not support different aggregation temporalities. Given the
-// aggregation is unknown, points cannot be combined using the same
-// aggregation, regardless of aggregation temporalities. Therefore,
-// AggregationTemporality is not included. Consequently, this also means
-// "StartTimeUnixNano" is ignored for all data points.
-type Gauge struct {
- DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
-}
-
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{4}
-}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
-}
-func (m *Gauge) XXX_Size() int {
- return m.Size()
-}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
-
-func (m *Gauge) GetDataPoints() []*NumberDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-// Sum represents the type of a scalar metric that is calculated as a sum of all
-// reported measurements over a time interval.
-type Sum struct {
- DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
- // If "true" means that the sum is monotonic.
- IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
-}
-
-func (m *Sum) Reset() { *m = Sum{} }
-func (m *Sum) String() string { return proto.CompactTextString(m) }
-func (*Sum) ProtoMessage() {}
-func (*Sum) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{5}
-}
-func (m *Sum) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Sum.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Sum) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sum.Merge(m, src)
-}
-func (m *Sum) XXX_Size() int {
- return m.Size()
-}
-func (m *Sum) XXX_DiscardUnknown() {
- xxx_messageInfo_Sum.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Sum proto.InternalMessageInfo
-
-func (m *Sum) GetDataPoints() []*NumberDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *Sum) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-func (m *Sum) GetIsMonotonic() bool {
- if m != nil {
- return m.IsMonotonic
- }
- return false
-}
-
-// Histogram represents the type of a metric that is calculated by aggregating
-// as a Histogram of all reported measurements over a time interval.
-type Histogram struct {
- DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{6}
-}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return m.Size()
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-func (m *Histogram) GetDataPoints() []*HistogramDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *Histogram) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// ExponentialHistogram represents the type of a metric that is calculated by aggregating
-// as a ExponentialHistogram of all reported double measurements over a time interval.
-type ExponentialHistogram struct {
- DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *ExponentialHistogram) Reset() { *m = ExponentialHistogram{} }
-func (m *ExponentialHistogram) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogram) ProtoMessage() {}
-func (*ExponentialHistogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{7}
-}
-func (m *ExponentialHistogram) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogram.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogram.Merge(m, src)
-}
-func (m *ExponentialHistogram) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogram) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogram proto.InternalMessageInfo
-
-func (m *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// Summary metric data are used to convey quantile summaries,
-// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
-// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
-// data type. These data points cannot always be merged in a meaningful way.
-// While they can be useful in some applications, histogram data points are
-// recommended for new applications.
-// Summary metrics do not have an aggregation temporality field. This is
-// because the count and sum fields of a SummaryDataPoint are assumed to be
-// cumulative values.
-type Summary struct {
- DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
-}
-
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{8}
-}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return m.Size()
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Summary proto.InternalMessageInfo
-
-func (m *Summary) GetDataPoints() []*SummaryDataPoint {
- if m != nil {
- return m.DataPoints
- }
- return nil
-}
-
-// NumberDataPoint is a single data point in a timeseries that describes the
-// time-varying scalar value of a metric.
-type NumberDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries from
- // where this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // The value itself. A point is considered invalid when one of the recognized
- // value fields is not present inside this oneof.
- //
- // Types that are valid to be assigned to Value:
- // *NumberDataPoint_AsDouble
- // *NumberDataPoint_AsInt
- Value isNumberDataPoint_Value `protobuf_oneof:"value"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- Exemplars []Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *NumberDataPoint) Reset() { *m = NumberDataPoint{} }
-func (m *NumberDataPoint) String() string { return proto.CompactTextString(m) }
-func (*NumberDataPoint) ProtoMessage() {}
-func (*NumberDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{9}
-}
-func (m *NumberDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NumberDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_NumberDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *NumberDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NumberDataPoint.Merge(m, src)
-}
-func (m *NumberDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *NumberDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_NumberDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NumberDataPoint proto.InternalMessageInfo
-
-type isNumberDataPoint_Value interface {
- isNumberDataPoint_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type NumberDataPoint_AsDouble struct {
- AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
-}
-type NumberDataPoint_AsInt struct {
- AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
-}
-
-func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
-func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
-
-func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetAsDouble() float64 {
- if x, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
- return x.AsDouble
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetAsInt() int64 {
- if x, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
- return x.AsInt
- }
- return 0
-}
-
-func (m *NumberDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *NumberDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*NumberDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*NumberDataPoint_AsDouble)(nil),
- (*NumberDataPoint_AsInt)(nil),
- }
-}
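
The Value oneof above is worth seeing in use: a point carries either as_double or as_int, the typed getters return 0 when the other case is set, and a nil oneof marks the point invalid. A minimal sketch of consuming the oneof via a type switch — the import path shown is the internal pdata package this vendored file lives in, so treat it as illustrative only (Go does not allow importing an internal package from outside the module):

```go
package main

import (
	"fmt"

	// Illustrative only: this generated package sits under pdata's
	// internal/ tree and is not importable from outside the module.
	metricsv1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)

// describe distinguishes the two oneof cases. A type switch is the
// unambiguous way to tell "as_double set to 0" apart from "as_int set
// to 0", since GetAsDouble/GetAsInt return 0 for the wrong case too.
func describe(dp *metricsv1.NumberDataPoint) string {
	switch v := dp.GetValue().(type) {
	case *metricsv1.NumberDataPoint_AsDouble:
		return fmt.Sprintf("double point: %g", v.AsDouble)
	case *metricsv1.NumberDataPoint_AsInt:
		return fmt.Sprintf("int point: %d", v.AsInt)
	default:
		return "invalid point: no recognized value field set"
	}
}

func main() {
	dp := &metricsv1.NumberDataPoint{
		TimeUnixNano: 1700000000000000000,
		Value:        &metricsv1.NumberDataPoint_AsDouble{AsDouble: 0.25},
	}
	fmt.Println(describe(dp)) // double point: 0.25
}
```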
-
-// HistogramDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Histogram. A Histogram contains summary statistics
-// for a population of values; it may optionally contain the distribution of
-// those values across a set of buckets.
-//
-// If the histogram contains the distribution of values, then both
-// "explicit_bounds" and "bucket counts" fields must be defined.
-// If the histogram does not contain the distribution of values, then both
-// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
-// "sum" are known.
-type HistogramDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be non-negative. This
- // value must be equal to the sum of the "count" fields in buckets if a
- // histogram is provided.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
- //
- // Types that are valid to be assigned to Sum_:
- // *HistogramDataPoint_Sum
- Sum_ isHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
- // bucket_counts is an optional field that contains the count values of the
- // histogram for each bucket.
- //
- // The sum of the bucket_counts must equal the value in the count field.
- //
- // The number of elements in the bucket_counts array must be one greater than
- // the number of elements in the explicit_bounds array. The exception to this rule
- // is when the length of bucket_counts is 0, then the length of explicit_bounds
- // must also be 0.
- BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
- // explicit_bounds specifies buckets with explicitly defined bounds for values.
- //
- // The boundaries for the bucket at index i are:
- //
- // (-infinity, explicit_bounds[i]] for i == 0
- // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
- // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
- //
- // The values in the explicit_bounds array must be strictly increasing.
- //
- // Histogram buckets are inclusive of their upper boundary, except the last
- // bucket where the boundary is at infinity. This format is intentionally
- // compatible with the OpenMetrics histogram definition.
- //
- // If bucket_counts length is 0 then explicit_bounds length must also be 0,
- // otherwise the data point is invalid.
- ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point.
- Exemplars []Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
- // min is the minimum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Min_:
- // *HistogramDataPoint_Min
- Min_ isHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
- // max is the maximum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Max_:
- // *HistogramDataPoint_Max
- Max_ isHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
-}
-
-func (m *HistogramDataPoint) Reset() { *m = HistogramDataPoint{} }
-func (m *HistogramDataPoint) String() string { return proto.CompactTextString(m) }
-func (*HistogramDataPoint) ProtoMessage() {}
-func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{10}
-}
-func (m *HistogramDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HistogramDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HistogramDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HistogramDataPoint.Merge(m, src)
-}
-func (m *HistogramDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *HistogramDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_HistogramDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HistogramDataPoint proto.InternalMessageInfo
-
-type isHistogramDataPoint_Sum_ interface {
- isHistogramDataPoint_Sum_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isHistogramDataPoint_Min_ interface {
- isHistogramDataPoint_Min_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isHistogramDataPoint_Max_ interface {
- isHistogramDataPoint_Max_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type HistogramDataPoint_Sum struct {
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type HistogramDataPoint_Min struct {
- Min float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
-}
-type HistogramDataPoint_Max struct {
- Max float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
-}
-
-func (*HistogramDataPoint_Sum) isHistogramDataPoint_Sum_() {}
-func (*HistogramDataPoint_Min) isHistogramDataPoint_Min_() {}
-func (*HistogramDataPoint_Max) isHistogramDataPoint_Max_() {}
-
-func (m *HistogramDataPoint) GetSum_() isHistogramDataPoint_Sum_ {
- if m != nil {
- return m.Sum_
- }
- return nil
-}
-func (m *HistogramDataPoint) GetMin_() isHistogramDataPoint_Min_ {
- if m != nil {
- return m.Min_
- }
- return nil
-}
-func (m *HistogramDataPoint) GetMax_() isHistogramDataPoint_Max_ {
- if m != nil {
- return m.Max_
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetSum() float64 {
- if x, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
- return x.Sum
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetBucketCounts() []uint64 {
- if m != nil {
- return m.BucketCounts
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetExplicitBounds() []float64 {
- if m != nil {
- return m.ExplicitBounds
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *HistogramDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetMin() float64 {
- if x, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
- return x.Min
- }
- return 0
-}
-
-func (m *HistogramDataPoint) GetMax() float64 {
- if x, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
- return x.Max
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*HistogramDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*HistogramDataPoint_Sum)(nil),
- (*HistogramDataPoint_Min)(nil),
- (*HistogramDataPoint_Max)(nil),
- }
-}
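
The explicit_bounds rules above reduce to "find the first bound >= v", which is exactly what sort.SearchFloat64s computes. A self-contained sketch, deliberately independent of the generated types, assuming strictly increasing bounds:

```go
package main

import (
	"fmt"
	"sort"
)

// bucketIndex maps a value to its bucket per the rules above:
// (-inf, bounds[0]] at index 0, (bounds[i-1], bounds[i]] in between,
// and (bounds[last], +inf) at index len(bounds). Buckets are inclusive
// of their upper boundary, so we want the first bound >= v.
func bucketIndex(bounds []float64, v float64) int {
	return sort.SearchFloat64s(bounds, v)
}

func main() {
	bounds := []float64{0, 5, 10} // 4 buckets, hence len(bounds)+1 counts
	counts := make([]uint64, len(bounds)+1)
	for _, v := range []float64{-1, 0, 3, 5, 7, 42} {
		counts[bucketIndex(bounds, v)]++
	}
	fmt.Println(counts) // [2 2 1 1]
}
```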
-
-// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
-// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
-// contains summary statistics for a population of values; it may optionally contain the
-// distribution of those values across a set of buckets.
-type ExponentialHistogramDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be
- // non-negative. This value must be equal to the sum of the "bucket_counts"
- // values in the positive and negative Buckets plus the "zero_count" field.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
- //
- // Types that are valid to be assigned to Sum_:
- // *ExponentialHistogramDataPoint_Sum
- Sum_ isExponentialHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
- // scale describes the resolution of the histogram. Boundaries are
- // located at powers of the base, where:
- //
- // base = (2^(2^-scale))
- //
- // The histogram bucket identified by `index`, a signed integer,
- // contains values that are greater than (base^index) and
- // less than or equal to (base^(index+1)).
- //
- // The positive and negative ranges of the histogram are expressed
- // separately. Negative values are mapped by their absolute value
- // into the negative range using the same scale as the positive range.
- //
- // scale is not restricted by the protocol, as the permissible
- // values depend on the range of the data.
- Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
- // zero_count is the count of values that are either exactly zero or
- // within the region considered zero by the instrumentation at the
- // tolerated degree of precision. This bucket stores values that
- // cannot be expressed using the standard exponential formula as
- // well as values that have been rounded to zero.
- //
- // Implementations MAY consider the zero bucket to have probability
- // mass equal to (zero_count / count).
- ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
- // positive carries the positive range of exponential bucket counts.
- Positive ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive"`
- // negative carries the negative range of exponential bucket counts.
- Negative ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point.
- Exemplars []Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars"`
- // min is the minimum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Min_:
- // *ExponentialHistogramDataPoint_Min
- Min_ isExponentialHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
- // max is the maximum value over (start_time, end_time].
- //
- // Types that are valid to be assigned to Max_:
- // *ExponentialHistogramDataPoint_Max
- Max_ isExponentialHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
- // ZeroThreshold may be optionally set to convey the width of the zero
- // region, where the zero region is defined as the closed interval
- // [-ZeroThreshold, ZeroThreshold].
- // When ZeroThreshold is 0, the zero_count bucket stores values that cannot be
- // expressed using the standard exponential formula as well as values that
- // have been rounded to zero.
- ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
-}
-
-func (m *ExponentialHistogramDataPoint) Reset() { *m = ExponentialHistogramDataPoint{} }
-func (m *ExponentialHistogramDataPoint) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogramDataPoint) ProtoMessage() {}
-func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{11}
-}
-func (m *ExponentialHistogramDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogramDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogramDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogramDataPoint.Merge(m, src)
-}
-func (m *ExponentialHistogramDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogramDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogramDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogramDataPoint proto.InternalMessageInfo
-
-type isExponentialHistogramDataPoint_Sum_ interface {
- isExponentialHistogramDataPoint_Sum_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isExponentialHistogramDataPoint_Min_ interface {
- isExponentialHistogramDataPoint_Min_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isExponentialHistogramDataPoint_Max_ interface {
- isExponentialHistogramDataPoint_Max_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type ExponentialHistogramDataPoint_Sum struct {
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
-}
-type ExponentialHistogramDataPoint_Min struct {
- Min float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
-}
-type ExponentialHistogramDataPoint_Max struct {
- Max float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
-}
-
-func (*ExponentialHistogramDataPoint_Sum) isExponentialHistogramDataPoint_Sum_() {}
-func (*ExponentialHistogramDataPoint_Min) isExponentialHistogramDataPoint_Min_() {}
-func (*ExponentialHistogramDataPoint_Max) isExponentialHistogramDataPoint_Max_() {}
-
-func (m *ExponentialHistogramDataPoint) GetSum_() isExponentialHistogramDataPoint_Sum_ {
- if m != nil {
- return m.Sum_
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) GetMin_() isExponentialHistogramDataPoint_Min_ {
- if m != nil {
- return m.Min_
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) GetMax_() isExponentialHistogramDataPoint_Max_ {
- if m != nil {
- return m.Max_
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetSum() float64 {
- if x, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
- return x.Sum
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetScale() int32 {
- if m != nil {
- return m.Scale
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetZeroCount() uint64 {
- if m != nil {
- return m.ZeroCount
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetPositive() ExponentialHistogramDataPoint_Buckets {
- if m != nil {
- return m.Positive
- }
- return ExponentialHistogramDataPoint_Buckets{}
-}
-
-func (m *ExponentialHistogramDataPoint) GetNegative() ExponentialHistogramDataPoint_Buckets {
- if m != nil {
- return m.Negative
- }
- return ExponentialHistogramDataPoint_Buckets{}
-}
-
-func (m *ExponentialHistogramDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetExemplars() []Exemplar {
- if m != nil {
- return m.Exemplars
- }
- return nil
-}
-
-func (m *ExponentialHistogramDataPoint) GetMin() float64 {
- if x, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
- return x.Min
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetMax() float64 {
- if x, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
- return x.Max
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint) GetZeroThreshold() float64 {
- if m != nil {
- return m.ZeroThreshold
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ExponentialHistogramDataPoint) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ExponentialHistogramDataPoint_Sum)(nil),
- (*ExponentialHistogramDataPoint_Min)(nil),
- (*ExponentialHistogramDataPoint_Max)(nil),
- }
-}
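
The scale comment above pins down the bucket mapping: with base = 2^(2^-scale), the bucket at `index` covers (base^index, base^(index+1)], so for positive v, index = ceil(log_base(v)) - 1 = ceil(log2(v) * 2^scale) - 1. A floating-point sketch of that formula — real implementations tend to use exact integer math near bucket boundaries, where log2 rounding can be off by one:

```go
package main

import (
	"fmt"
	"math"
)

// expBucketIndex returns the exponential-histogram bucket index of a
// positive value v at the given scale: the bucket at `index` holds
// values in (base^index, base^(index+1)] with base = 2^(2^-scale).
func expBucketIndex(scale int32, v float64) int64 {
	scaled := math.Log2(v) * math.Ldexp(1, int(scale)) // log2(v) * 2^scale
	return int64(math.Ceil(scaled)) - 1
}

func main() {
	// scale 0 => base 2: 4 sits on a boundary and lands in (2, 4].
	fmt.Println(expBucketIndex(0, 4)) // 1
	// scale 1 => base sqrt(2): 3 lands in (2^1.5, 2^2] ≈ (2.83, 4].
	fmt.Println(expBucketIndex(1, 3)) // 3
}
```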
-
-// Buckets are a set of bucket counts, encoded in a contiguous array
-// of counts.
-type ExponentialHistogramDataPoint_Buckets struct {
- // Offset is the bucket index of the first entry in the bucket_counts array.
- //
- // Note: This uses a varint encoding as a simple form of compression.
- Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
- // bucket_counts is an array of count values, where bucket_counts[i] carries
- // the count of the bucket at index (offset+i). bucket_counts[i] is the count
- // of values greater than base^(offset+i) and less than or equal to
- // base^(offset+i+1).
- //
- // Note: By contrast, the explicit HistogramDataPoint uses
- // fixed64. This field is expected to have many buckets,
- // especially zeros, so uint64 has been selected to ensure
- // varint encoding.
- BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) Reset() { *m = ExponentialHistogramDataPoint_Buckets{} }
-func (m *ExponentialHistogramDataPoint_Buckets) String() string { return proto.CompactTextString(m) }
-func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
-func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{11, 0}
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Merge(m, src)
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_Size() int {
- return m.Size()
-}
-func (m *ExponentialHistogramDataPoint_Buckets) XXX_DiscardUnknown() {
- xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExponentialHistogramDataPoint_Buckets proto.InternalMessageInfo
-
-func (m *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
- if m != nil {
- return m.Offset
- }
- return 0
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
- if m != nil {
- return m.BucketCounts
- }
- return nil
-}
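
The offset/dense-array encoding above keeps sparse histograms compact: bucket_counts[i] belongs to absolute bucket index offset+i, and interior zeros exist only to keep the array contiguous. A small sketch expanding it back out:

```go
package main

import "fmt"

// expandBuckets maps a Buckets-style dense array back to absolute
// bucket indexes: counts[i] is the count for index offset+i.
func expandBuckets(offset int32, counts []uint64) map[int32]uint64 {
	out := make(map[int32]uint64, len(counts))
	for i, c := range counts {
		if c != 0 { // interior zeros only pad the contiguous array
			out[offset+int32(i)] = c
		}
	}
	return out
}

func main() {
	// Counts for bucket indexes -2..1, with index 0 empty.
	fmt.Println(expandBuckets(-2, []uint64{3, 1, 0, 7}))
	// map[-2:3 -1:1 1:7]
}
```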
-
-// SummaryDataPoint is a single data point in a timeseries that describes the
-// time-varying values of a Summary metric. The count and sum fields represent
-// cumulative values.
-type SummaryDataPoint struct {
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be non-negative.
- Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
- Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
- // (Optional) list of values at different quantiles of the distribution calculated
- // from the current snapshot. The quantiles must be strictly increasing.
- QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *SummaryDataPoint) Reset() { *m = SummaryDataPoint{} }
-func (m *SummaryDataPoint) String() string { return proto.CompactTextString(m) }
-func (*SummaryDataPoint) ProtoMessage() {}
-func (*SummaryDataPoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{12}
-}
-func (m *SummaryDataPoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SummaryDataPoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SummaryDataPoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryDataPoint.Merge(m, src)
-}
-func (m *SummaryDataPoint) XXX_Size() int {
- return m.Size()
-}
-func (m *SummaryDataPoint) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryDataPoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryDataPoint proto.InternalMessageInfo
-
-func (m *SummaryDataPoint) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *SummaryDataPoint) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetCount() uint64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetSum() float64 {
- if m != nil {
- return m.Sum
- }
- return 0
-}
-
-func (m *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile {
- if m != nil {
- return m.QuantileValues
- }
- return nil
-}
-
-func (m *SummaryDataPoint) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// Represents the value at a given quantile of a distribution.
-//
-// To record Min and Max values, the following conventions are used:
-// - The 1.0 quantile is equivalent to the maximum value observed.
-// - The 0.0 quantile is equivalent to the minimum value observed.
-//
-// See the following issue for more context:
-// https://github.com/open-telemetry/opentelemetry-proto/issues/125
-type SummaryDataPoint_ValueAtQuantile struct {
- // The quantile of a distribution. Must be in the interval
- // [0.0, 1.0].
- Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
- // The value at the given quantile of a distribution.
- //
- // Quantile values must NOT be negative.
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Reset() { *m = SummaryDataPoint_ValueAtQuantile{} }
-func (m *SummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) }
-func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
-func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{12, 0}
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Merge(m, src)
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_Size() int {
- return m.Size()
-}
-func (m *SummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo
-
-func (m *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 {
- if m != nil {
- return m.Quantile
- }
- return 0
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
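
Given the 0.0/1.0 convention above, min and max travel inside quantile_values rather than as dedicated fields. A sketch of recovering them, using a stand-in struct rather than the generated type:

```go
package main

import "fmt"

// valueAtQuantile is a stand-in for the generated
// SummaryDataPoint_ValueAtQuantile pair.
type valueAtQuantile struct {
	Quantile, Value float64
}

// minMax recovers the observed extremes via the convention above:
// quantile 0.0 carries the minimum and quantile 1.0 the maximum.
// ok reports whether both endpoints were present.
func minMax(qs []valueAtQuantile) (lo, hi float64, ok bool) {
	var haveLo, haveHi bool
	for _, q := range qs {
		switch q.Quantile {
		case 0.0:
			lo, haveLo = q.Value, true
		case 1.0:
			hi, haveHi = q.Value, true
		}
	}
	return lo, hi, haveLo && haveHi
}

func main() {
	qs := []valueAtQuantile{{0.0, 0.01}, {0.5, 0.23}, {1.0, 9.5}}
	fmt.Println(minMax(qs)) // 0.01 9.5 true
}
```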
-
-// A representation of an exemplar, which is a sample input measurement.
-// Exemplars also hold information about the environment when the measurement
-// was recorded, for example the span and trace ID of the active span when the
-// exemplar was recorded.
-type Exemplar struct {
- // The set of key/value pairs that were filtered out by the aggregator, but
- // recorded alongside the original measurement. Only key/value pairs that were
- // filtered out by the aggregator should be included.
- FilteredAttributes []v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes"`
- // time_unix_nano is the exact time when this exemplar was recorded.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // The value of the measurement that was recorded. An exemplar is
- // considered invalid when one of the recognized value fields is not present
- // inside this oneof.
- //
- // Types that are valid to be assigned to Value:
- // *Exemplar_AsDouble
- // *Exemplar_AsInt
- Value isExemplar_Value `protobuf_oneof:"value"`
- // (Optional) Span ID of the exemplar trace.
- // span_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // (Optional) Trace ID of the exemplar trace.
- // trace_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
-}
-
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_3c3112f9fa006917, []int{13}
-}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
- return m.Size()
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-type isExemplar_Value interface {
- isExemplar_Value()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Exemplar_AsDouble struct {
- AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
-}
-type Exemplar_AsInt struct {
- AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
-}
-
-func (*Exemplar_AsDouble) isExemplar_Value() {}
-func (*Exemplar_AsInt) isExemplar_Value() {}
-
-func (m *Exemplar) GetValue() isExemplar_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Exemplar) GetFilteredAttributes() []v11.KeyValue {
- if m != nil {
- return m.FilteredAttributes
- }
- return nil
-}
-
-func (m *Exemplar) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *Exemplar) GetAsDouble() float64 {
- if x, ok := m.GetValue().(*Exemplar_AsDouble); ok {
- return x.AsDouble
- }
- return 0
-}
-
-func (m *Exemplar) GetAsInt() int64 {
- if x, ok := m.GetValue().(*Exemplar_AsInt); ok {
- return x.AsInt
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Exemplar) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Exemplar_AsDouble)(nil),
- (*Exemplar_AsInt)(nil),
- }
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
- proto.RegisterEnum("opentelemetry.proto.metrics.v1.DataPointFlags", DataPointFlags_name, DataPointFlags_value)
- proto.RegisterType((*MetricsData)(nil), "opentelemetry.proto.metrics.v1.MetricsData")
- proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics")
- proto.RegisterType((*ScopeMetrics)(nil), "opentelemetry.proto.metrics.v1.ScopeMetrics")
- proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric")
- proto.RegisterType((*Gauge)(nil), "opentelemetry.proto.metrics.v1.Gauge")
- proto.RegisterType((*Sum)(nil), "opentelemetry.proto.metrics.v1.Sum")
- proto.RegisterType((*Histogram)(nil), "opentelemetry.proto.metrics.v1.Histogram")
- proto.RegisterType((*ExponentialHistogram)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogram")
- proto.RegisterType((*Summary)(nil), "opentelemetry.proto.metrics.v1.Summary")
- proto.RegisterType((*NumberDataPoint)(nil), "opentelemetry.proto.metrics.v1.NumberDataPoint")
- proto.RegisterType((*HistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.HistogramDataPoint")
- proto.RegisterType((*ExponentialHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint")
- proto.RegisterType((*ExponentialHistogramDataPoint_Buckets)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets")
- proto.RegisterType((*SummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint")
- proto.RegisterType((*SummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile")
- proto.RegisterType((*Exemplar)(nil), "opentelemetry.proto.metrics.v1.Exemplar")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917)
-}
-
-var fileDescriptor_3c3112f9fa006917 = []byte{
- // 1568 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x4f, 0x1b, 0x49,
- 0x16, 0x77, 0xfb, 0xdb, 0xcf, 0x06, 0x9c, 0x5a, 0x96, 0xb4, 0x58, 0xe1, 0x38, 0xce, 0x26, 0xb0,
- 0xd9, 0xc8, 0x5e, 0xc8, 0x6a, 0x3f, 0x0e, 0x91, 0x62, 0x63, 0x03, 0x26, 0x80, 0x49, 0xd9, 0x20,
- 0x25, 0x8a, 0xd2, 0x2a, 0xec, 0xc2, 0xb4, 0xd2, 0xdd, 0xe5, 0xed, 0xae, 0x46, 0xb0, 0xff, 0xc1,
- 0x4a, 0x7b, 0xc8, 0xdf, 0xb1, 0xca, 0x6d, 0x4f, 0x73, 0x9b, 0x63, 0x8e, 0x99, 0xdb, 0x68, 0x34,
- 0x8a, 0x46, 0xe4, 0x30, 0x23, 0xcd, 0x3f, 0x31, 0xaa, 0xea, 0x6e, 0xfc, 0x81, 0x89, 0xc9, 0xc7,
- 0x21, 0x39, 0xb9, 0xea, 0xd5, 0x7b, 0xbf, 0x7a, 0xaf, 0xde, 0xef, 0xd5, 0x2b, 0x37, 0xdc, 0x63,
- 0x3d, 0x6a, 0x71, 0x6a, 0x50, 0x93, 0x72, 0xfb, 0xb4, 0xd4, 0xb3, 0x19, 0x67, 0x25, 0x31, 0xd6,
- 0xdb, 0x4e, 0xe9, 0x78, 0x39, 0x18, 0x16, 0xe5, 0x02, 0xca, 0x0d, 0x69, 0x7b, 0xc2, 0x62, 0xa0,
- 0x72, 0xbc, 0x3c, 0x3f, 0xdb, 0x65, 0x5d, 0xe6, 0x61, 0x88, 0x91, 0xa7, 0x30, 0x7f, 0x77, 0xdc,
- 0x1e, 0x6d, 0x66, 0x9a, 0xcc, 0x12, 0x5b, 0x78, 0x23, 0x5f, 0xb7, 0x38, 0x4e, 0xd7, 0xa6, 0x0e,
- 0x73, 0xed, 0x36, 0x15, 0xda, 0xc1, 0xd8, 0xd3, 0x2f, 0xe8, 0x90, 0xde, 0xf6, 0xf6, 0xaf, 0x12,
- 0x4e, 0xd0, 0x53, 0xc8, 0x06, 0x0a, 0x9a, 0xef, 0x97, 0xaa, 0xe4, 0x23, 0x4b, 0xe9, 0x95, 0x52,
- 0xf1, 0xfd, 0xbe, 0x17, 0xb1, 0x6f, 0xe7, 0xc3, 0xe1, 0x19, 0x7b, 0x58, 0x50, 0xf8, 0x26, 0x0c,
- 0x33, 0x23, 0x4a, 0xa8, 0x0b, 0x6a, 0x87, 0xf6, 0x6c, 0xda, 0x26, 0x9c, 0x76, 0x34, 0xa7, 0xcd,
- 0x7a, 0xfd, 0x7d, 0x7f, 0x49, 0xc8, 0x8d, 0xef, 0x4d, 0xda, 0xb8, 0x29, 0xac, 0x82, 0x5d, 0xe7,
- 0xfa, 0x70, 0x83, 0x72, 0xf4, 0x08, 0x92, 0x81, 0x3f, 0xaa, 0x92, 0x57, 0x96, 0xd2, 0x2b, 0x7f,
- 0x1a, 0x8b, 0x7b, 0x7e, 0x3c, 0x03, 0x11, 0x55, 0xa2, 0xaf, 0xdf, 0xde, 0x08, 0xe1, 0x73, 0x00,
- 0xf4, 0x18, 0xa6, 0x86, 0x5d, 0x0d, 0x7f, 0x84, 0xa7, 0x19, 0x67, 0xd0, 0xbf, 0x05, 0x00, 0xa7,
- 0x7d, 0x44, 0x4d, 0xa2, 0xb9, 0xb6, 0xa1, 0x46, 0xf2, 0xca, 0x52, 0x0a, 0xa7, 0x3c, 0xc9, 0x9e,
- 0x6d, 0x14, 0xbe, 0x55, 0x20, 0x33, 0x14, 0x4f, 0x03, 0x62, 0xd2, 0xde, 0x0f, 0xe6, 0xfe, 0xd8,
- 0xad, 0x7d, 0x66, 0x1c, 0x2f, 0x17, 0xeb, 0x96, 0xc3, 0x6d, 0xd7, 0xa4, 0x16, 0x27, 0x5c, 0x67,
- 0x96, 0x84, 0xf2, 0xc3, 0xf2, 0x70, 0xd0, 0x43, 0x48, 0x0c, 0x47, 0x73, 0x67, 0x52, 0x34, 0x9e,
- 0x2b, 0x38, 0x30, 0x9b, 0x14, 0xc2, 0xab, 0x28, 0xc4, 0x3d, 0x13, 0x84, 0x20, 0x6a, 0x11, 0xd3,
- 0xf3, 0x3d, 0x85, 0xe5, 0x18, 0xe5, 0x21, 0xdd, 0xa1, 0x4e, 0xdb, 0xd6, 0x7b, 0xc2, 0x41, 0x35,
- 0x2c, 0x97, 0x06, 0x45, 0xc2, 0xca, 0xb5, 0x74, 0xee, 0x23, 0xcb, 0x31, 0x7a, 0x00, 0xb1, 0x2e,
- 0x71, 0xbb, 0x54, 0x8d, 0xc9, 0x63, 0xb8, 0x3d, 0xc9, 0xe7, 0x75, 0xa1, 0xbc, 0x11, 0xc2, 0x9e,
- 0x15, 0xfa, 0x3b, 0x44, 0x1c, 0xd7, 0x54, 0x13, 0xd2, 0xf8, 0xd6, 0xc4, 0xf4, 0xb9, 0xe6, 0x46,
- 0x08, 0x0b, 0x0b, 0x54, 0x87, 0xd4, 0x91, 0xee, 0x70, 0xd6, 0xb5, 0x89, 0xa9, 0xa6, 0xde, 0xc3,
- 0xa7, 0x01, 0xf3, 0x8d, 0xc0, 0x60, 0x23, 0x84, 0xfb, 0xd6, 0xe8, 0x05, 0xfc, 0x9e, 0x9e, 0xf4,
- 0x98, 0x45, 0x2d, 0xae, 0x13, 0x43, 0xeb, 0xc3, 0x82, 0x84, 0xfd, 0xeb, 0x24, 0xd8, 0x5a, 0xdf,
- 0x78, 0x70, 0x87, 0x59, 0x3a, 0x46, 0x8e, 0x56, 0x21, 0xe1, 0xb8, 0xa6, 0x49, 0xec, 0x53, 0x35,
- 0x2d, 0xe1, 0x17, 0xaf, 0x10, 0xb4, 0x50, 0xdf, 0x08, 0xe1, 0xc0, 0x12, 0xd5, 0x21, 0x69, 0x52,
- 0x4e, 0x3a, 0x84, 0x13, 0x35, 0x23, 0xb9, 0xb2, 0x38, 0x81, 0x7e, 0x8f, 0xe8, 0xe9, 0x3e, 0x31,
- 0xdc, 0xf3, 0x4a, 0x0a, 0xcc, 0x2b, 0x71, 0x88, 0x8a, 0xdf, 0xcd, 0x68, 0x32, 0x9a, 0x8d, 0x6d,
- 0x46, 0x93, 0xf1, 0x6c, 0x62, 0x33, 0x9a, 0x4c, 0x66, 0x53, 0x85, 0x27, 0x10, 0x93, 0xc9, 0x42,
- 0xbb, 0x90, 0x16, 0x2a, 0x5a, 0x8f, 0xe9, 0x16, 0xbf, 0xf2, 0x6d, 0xb4, 0xe3, 0x9a, 0x07, 0xd4,
- 0x16, 0x77, 0xda, 0xae, 0xb0, 0xc3, 0xd0, 0x09, 0x86, 0x4e, 0xe1, 0x57, 0x05, 0x22, 0x4d, 0xd7,
- 0xfc, 0xfc, 0xc8, 0x88, 0xc1, 0x75, 0xd2, 0xed, 0xda, 0xb4, 0x2b, 0xab, 0x4c, 0xe3, 0xd4, 0xec,
- 0x31, 0x9b, 0x18, 0x3a, 0x3f, 0x95, 0x84, 0x9e, 0x5e, 0xf9, 0xdb, 0x24, 0xf4, 0x72, 0xdf, 0xbc,
- 0xd5, 0xb7, 0xc6, 0x73, 0x64, 0xac, 0x1c, 0xdd, 0x84, 0x8c, 0xee, 0x68, 0x26, 0xb3, 0x18, 0x67,
- 0x96, 0xde, 0x96, 0xb5, 0x91, 0xc4, 0x69, 0xdd, 0xd9, 0x0e, 0x44, 0x85, 0xef, 0x14, 0x48, 0xf5,
- 0x09, 0xd0, 0x1c, 0x17, 0xf3, 0xca, 0x95, 0xa9, 0xfb, 0x65, 0x84, 0x5d, 0xf8, 0x59, 0x81, 0xd9,
- 0x71, 0xbc, 0x47, 0xcf, 0xc7, 0x85, 0xf7, 0xe0, 0x63, 0x4a, 0xe8, 0x0b, 0x89, 0xf4, 0x19, 0x24,
- 0xfc, 0x0a, 0x44, 0x8f, 0xc7, 0xc5, 0xf6, 0x97, 0x2b, 0xd6, 0xef, 0xf8, 0x4a, 0x38, 0x0b, 0xc3,
- 0xcc, 0x08, 0x9f, 0xd1, 0x36, 0x00, 0xe1, 0xdc, 0xd6, 0x0f, 0x5c, 0x4e, 0x1d, 0x35, 0xf1, 0x31,
- 0xf5, 0x3d, 0x00, 0x80, 0x4a, 0x30, 0xeb, 0x70, 0x62, 0x73, 0x8d, 0xeb, 0x26, 0xd5, 0x5c, 0x4b,
- 0x3f, 0xd1, 0x2c, 0x62, 0x31, 0x79, 0x5c, 0x71, 0x7c, 0x4d, 0xae, 0xb5, 0x74, 0x93, 0xee, 0x59,
- 0xfa, 0xc9, 0x0e, 0xb1, 0x18, 0xfa, 0x23, 0x4c, 0x8f, 0xa8, 0x46, 0xa4, 0x6a, 0x86, 0x0f, 0x6a,
- 0x2d, 0x40, 0x8a, 0x38, 0x5a, 0x87, 0xb9, 0x07, 0x06, 0x55, 0xa3, 0x79, 0x65, 0x49, 0xd9, 0x08,
- 0xe1, 0x24, 0x71, 0xaa, 0x52, 0x82, 0xae, 0x43, 0x9c, 0x38, 0x9a, 0x6e, 0x71, 0x35, 0x9e, 0x57,
- 0x96, 0xb2, 0xe2, 0xc6, 0x27, 0x4e, 0xdd, 0xe2, 0x68, 0x0b, 0x52, 0xf4, 0x84, 0x9a, 0x3d, 0x83,
- 0xd8, 0x8e, 0x1a, 0x93, 0xc1, 0x2d, 0x4d, 0xa6, 0x87, 0x67, 0xe0, 0x47, 0xd7, 0x07, 0x40, 0xb3,
- 0x10, 0x3b, 0x34, 0x48, 0xd7, 0x51, 0x93, 0x79, 0x65, 0x69, 0x0a, 0x7b, 0x93, 0x4a, 0x02, 0x62,
- 0xc7, 0xe2, 0x34, 0x36, 0xa3, 0x49, 0x25, 0x1b, 0x2e, 0xfc, 0x18, 0x01, 0x74, 0x91, 0x56, 0x23,
- 0xe7, 0x9c, 0xfa, 0x42, 0xcf, 0x79, 0x16, 0x62, 0x6d, 0xe6, 0x5a, 0x5c, 0x9e, 0x71, 0x1c, 0x7b,
- 0x13, 0x84, 0xbc, 0xbe, 0x19, 0xf3, 0xcf, 0x5d, 0xb6, 0xc4, 0x5b, 0x30, 0x75, 0xe0, 0xb6, 0x5f,
- 0x50, 0xae, 0x49, 0x1d, 0x47, 0x8d, 0xe7, 0x23, 0x02, 0xce, 0x13, 0xae, 0x4a, 0x19, 0x5a, 0x84,
- 0x19, 0x7a, 0xd2, 0x33, 0xf4, 0xb6, 0xce, 0xb5, 0x03, 0xe6, 0x5a, 0x1d, 0x8f, 0x61, 0x0a, 0x9e,
- 0x0e, 0xc4, 0x15, 0x29, 0x1d, 0xce, 0x53, 0xf2, 0xb3, 0xe5, 0x09, 0x06, 0xf2, 0x24, 0xa2, 0x30,
- 0x75, 0x4b, 0x36, 0x42, 0x65, 0x43, 0xc1, 0x62, 0x22, 0x65, 0xe4, 0x44, 0xcd, 0x48, 0x59, 0x18,
- 0x8b, 0x89, 0x68, 0x52, 0x8e, 0x6b, 0x6a, 0xe2, 0xd7, 0xd4, 0x2d, 0xef, 0x97, 0x9c, 0x68, 0x7e,
- 0x7a, 0xff, 0x13, 0x87, 0x85, 0xf7, 0x5e, 0x20, 0x23, 0x99, 0x56, 0xbe, 0xfa, 0x4c, 0xcf, 0x8a,
- 0xb7, 0x27, 0x31, 0xa8, 0xac, 0xad, 0x6b, 0xd8, 0x9b, 0x88, 0xe7, 0xdf, 0xbf, 0xa9, 0xcd, 0xbc,
- 0xec, 0xcb, 0x27, 0x55, 0x1c, 0xa7, 0x84, 0x44, 0xa6, 0x1e, 0x75, 0x21, 0xd9, 0x63, 0x8e, 0xce,
- 0xf5, 0x63, 0x2a, 0xab, 0x25, 0xbd, 0x52, 0xfb, 0xa4, 0x6b, 0xb9, 0x58, 0x91, 0xbc, 0x72, 0x82,
- 0x27, 0x45, 0x00, 0x2e, 0x36, 0xb2, 0xe4, 0x45, 0x7a, 0x4c, 0xfd, 0x97, 0xd9, 0xe7, 0xdd, 0x28,
- 0x00, 0xbf, 0x84, 0x54, 0x43, 0xc4, 0x4d, 0x7f, 0x2a, 0x71, 0x7d, 0x8a, 0x66, 0xc6, 0x50, 0x74,
- 0x6a, 0x80, 0xa2, 0xe8, 0x36, 0x4c, 0xcb, 0xc3, 0xe7, 0x47, 0x36, 0x75, 0x8e, 0x98, 0xd1, 0x51,
- 0xa7, 0xc5, 0x32, 0x9e, 0x12, 0xd2, 0x56, 0x20, 0x9c, 0x5f, 0x83, 0x84, 0x1f, 0x0d, 0x9a, 0x83,
- 0x38, 0x3b, 0x3c, 0x74, 0x28, 0x97, 0xaf, 0xf0, 0x6b, 0xd8, 0x9f, 0x5d, 0x2c, 0x63, 0xf1, 0x6f,
- 0x20, 0x3a, 0x5c, 0xc6, 0x97, 0x55, 0x44, 0xe1, 0x55, 0x04, 0xb2, 0xa3, 0x0d, 0xe7, 0x2b, 0x69,
- 0x28, 0xe3, 0xe9, 0x9f, 0x1d, 0xa0, 0xbf, 0x47, 0x7e, 0x1d, 0x66, 0xfe, 0xe5, 0x12, 0x8b, 0xeb,
- 0x06, 0xd5, 0xe4, 0x2d, 0xef, 0x5d, 0x74, 0xe9, 0x95, 0x87, 0x1f, 0xda, 0x89, 0x8b, 0x32, 0xc2,
- 0x32, 0x7f, 0xec, 0xc3, 0xe1, 0xe9, 0x00, 0x58, 0x2e, 0x5c, 0xd2, 0x5d, 0xe6, 0x57, 0x61, 0x66,
- 0xc4, 0x10, 0xcd, 0x43, 0x32, 0x30, 0x95, 0xd9, 0x54, 0xf0, 0xf9, 0x5c, 0x80, 0x48, 0x37, 0xe5,
- 0xf9, 0x28, 0x78, 0xa8, 0x33, 0xbd, 0x8c, 0x40, 0x32, 0xe0, 0x1e, 0x7a, 0x0e, 0xbf, 0x3b, 0xd4,
- 0x0d, 0x4e, 0x6d, 0xda, 0xd1, 0x3e, 0x35, 0x5f, 0x28, 0x40, 0x2a, 0xf7, 0xf3, 0x76, 0x31, 0x0d,
- 0xe1, 0x49, 0x7d, 0x3d, 0x72, 0xf5, 0xbe, 0xfe, 0x04, 0x12, 0x4e, 0x8f, 0x58, 0x9a, 0xde, 0x91,
- 0x09, 0xcc, 0x54, 0x1e, 0x0a, 0x47, 0x7e, 0x78, 0x7b, 0xe3, 0x1f, 0x5d, 0x36, 0xe2, 0xbb, 0xce,
- 0x4a, 0x6d, 0x66, 0x18, 0xb4, 0xcd, 0x99, 0x5d, 0xea, 0x89, 0xd7, 0x50, 0x49, 0xb7, 0x38, 0xb5,
- 0x2d, 0x62, 0x94, 0xc4, 0xac, 0xd8, 0xec, 0x11, 0xab, 0x5e, 0xc5, 0x71, 0x01, 0x58, 0xef, 0xa0,
- 0x67, 0x90, 0xe4, 0x36, 0x69, 0x53, 0x81, 0x1d, 0x93, 0xd8, 0x65, 0x1f, 0xfb, 0x9f, 0x1f, 0x8e,
- 0xdd, 0x12, 0x48, 0xf5, 0x2a, 0x4e, 0x48, 0xc8, 0x7a, 0x67, 0xe4, 0xb1, 0x70, 0xf7, 0xbf, 0x0a,
- 0xcc, 0x8d, 0x7f, 0x22, 0xa2, 0x45, 0xb8, 0x55, 0x5e, 0x5f, 0xc7, 0xb5, 0xf5, 0x72, 0xab, 0xde,
- 0xd8, 0xd1, 0x5a, 0xb5, 0xed, 0xdd, 0x06, 0x2e, 0x6f, 0xd5, 0x5b, 0x4f, 0xb4, 0xbd, 0x9d, 0xe6,
- 0x6e, 0x6d, 0xb5, 0xbe, 0x56, 0xaf, 0x55, 0xb3, 0x21, 0x74, 0x13, 0x16, 0x2e, 0x53, 0xac, 0xd6,
- 0xb6, 0x5a, 0xe5, 0xac, 0x82, 0xee, 0x40, 0xe1, 0x32, 0x95, 0xd5, 0xbd, 0xed, 0xbd, 0xad, 0x72,
- 0xab, 0xbe, 0x5f, 0xcb, 0x86, 0xef, 0x3e, 0x87, 0xe9, 0x73, 0xbe, 0xae, 0xc9, 0xfb, 0xed, 0x06,
- 0xfc, 0xa1, 0x5a, 0x6e, 0x95, 0xb5, 0xdd, 0x46, 0x7d, 0xa7, 0xa5, 0xad, 0x6d, 0x95, 0xd7, 0x9b,
- 0x5a, 0xb5, 0xa1, 0xed, 0x34, 0x5a, 0xda, 0x5e, 0xb3, 0x96, 0x0d, 0xa1, 0x3f, 0xc3, 0xe2, 0x05,
- 0x85, 0x9d, 0x86, 0x86, 0x6b, 0xab, 0x0d, 0x5c, 0xad, 0x55, 0xb5, 0xfd, 0xf2, 0xd6, 0x5e, 0x4d,
- 0xdb, 0x2e, 0x37, 0x1f, 0x65, 0x95, 0xca, 0xff, 0x95, 0xd7, 0x67, 0x39, 0xe5, 0xcd, 0x59, 0x4e,
- 0xf9, 0xe9, 0x2c, 0xa7, 0xbc, 0x7c, 0x97, 0x0b, 0xbd, 0x79, 0x97, 0x0b, 0x7d, 0xff, 0x2e, 0x17,
- 0x82, 0x9b, 0x3a, 0x9b, 0x50, 0x51, 0x95, 0x8c, 0xff, 0x35, 0x64, 0x57, 0x2c, 0xec, 0x2a, 0x4f,
- 0x6b, 0x1f, 0x9c, 0x0f, 0xef, 0x03, 0x59, 0x97, 0x5a, 0x03, 0xdf, 0xec, 0xfe, 0x17, 0xce, 0x35,
- 0x7a, 0xd4, 0x6a, 0x9d, 0x83, 0x48, 0x78, 0xff, 0x73, 0x87, 0x53, 0xdc, 0x5f, 0x3e, 0x88, 0x4b,
- 0xab, 0xfb, 0xbf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x00, 0xa3, 0x78, 0x2c, 0xfd, 0x13, 0x00, 0x00,
-}
-
-func (m *MetricsData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MetricsData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MetricsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
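
Every MarshalToSizedBuffer in this file follows the same gogo-proto shape: start at the end of a buffer already sized by Size(), iterate fields in descending field number, and for each length-delimited field write the payload, then its length varint, then the key byte (0xa here is field 1, wire type 2), so the finished bytes read front-to-back in ascending field order. A minimal sketch of that back-to-front technique, mirroring what the generated encodeVarintMetrics helper does:

```go
package main

import "fmt"

// varintLen reports how many bytes v occupies as a protobuf varint.
func varintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// putVarintBackward writes v so that it ends just before position i
// and returns the new position, the same contract as the generated
// encodeVarintMetrics helper.
func putVarintBackward(buf []byte, i int, v uint64) int {
	n := varintLen(v)
	i -= n
	for j := 0; j < n-1; j++ {
		buf[i+j] = byte(v) | 0x80 // continuation bit on all but the last byte
		v >>= 7
	}
	buf[i+n-1] = byte(v)
	return i
}

func main() {
	// Emit field 1 (length-delimited) holding "hi", back to front:
	// payload first, then its length, then the key byte 0xa.
	payload := []byte("hi")
	buf := make([]byte, 8)
	i := len(buf)

	i -= len(payload)
	copy(buf[i:], payload)
	i = putVarintBackward(buf, i, uint64(len(payload)))
	i--
	buf[i] = 0xa // field 1 << 3 | wire type 2

	fmt.Printf("%x\n", buf[i:]) // 0a026869
}
```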
-
-func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeMetrics) > 0 {
- for iNdEx := len(m.DeprecatedScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeMetrics) > 0 {
- for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
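
The 0x3e/0xc2 pair above is the one place in this file where a field key needs two bytes: DeprecatedScopeMetrics is field 1000, so its key is 1000<<3|2 = 8002, which varint-encodes to 0xc2 0x3e; since the buffer is filled back to front, 0x3e is written first. A quick check of the arithmetic:

```go
package main

import "fmt"

func main() {
	// Key for DeprecatedScopeMetrics: field number 1000, wire type 2.
	key := uint64(1000)<<3 | 2   // 8002, too large for a one-byte key
	b0 := byte(key&0x7f) | 0x80  // low 7 bits + continuation bit => 0xc2
	b1 := byte(key >> 7)         // remaining bits => 0x3e
	fmt.Printf("%d -> %x %x\n", key, b0, b1) // 8002 -> c2 3e
}
```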
-
-func (m *ScopeMetrics) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeMetrics) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Metrics) > 0 {
- for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Metric) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Metadata) > 0 {
- for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x62
- }
- }
- if m.Data != nil {
- {
- size := m.Data.Size()
- i -= size
- if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Unit) > 0 {
- i -= len(m.Unit)
- copy(dAtA[i:], m.Unit)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Description) > 0 {
- i -= len(m.Description)
- copy(dAtA[i:], m.Description)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Metric_Gauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Gauge != nil {
- {
- size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Sum != nil {
- {
- size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Histogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Histogram != nil {
- {
- size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ExponentialHistogram != nil {
- {
- size, err := m.ExponentialHistogram.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- }
- return len(dAtA) - i, nil
-}
-func (m *Metric_Summary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metric_Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Summary != nil {
- {
- size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- return len(dAtA) - i, nil
-}
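
The wrapper methods above show how the Metric data oneof is serialized: the outer Metric writes no key of its own for the oneof; each case writes its own key byte instead, matching the proto field numbers (gauge=5, sum=7, histogram=9, exponential_histogram=10, summary=11), all length-delimited. A sketch confirming the constants:

```go
package main

import "fmt"

func main() {
	// key byte = field_number<<3 | wire_type (2 = length-delimited);
	// these reproduce the constants in the Metric_* wrappers above.
	cases := []struct {
		name  string
		field uint64
	}{
		{"gauge", 5},                  // 0x2a
		{"sum", 7},                    // 0x3a
		{"histogram", 9},              // 0x4a
		{"exponential_histogram", 10}, // 0x52
		{"summary", 11},               // 0x5a
	}
	for _, c := range cases {
		fmt.Printf("%-22s -> 0x%02x\n", c.name, c.field<<3|2)
	}
}
```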
-func (m *Gauge) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Gauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sum) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.IsMonotonic {
- i--
- if m.IsMonotonic {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
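
Sum adds two scalar fields to the shared data-points loop, and both follow proto3's zero-omission rule, which is why each write is guarded: is_monotonic (field 3, varint key 0x18) is emitted only when true, and aggregation_temporality (field 2, varint key 0x10) only when nonzero. A sketch of the bool encoding:

```go
package main

import "fmt"

func main() {
	// is_monotonic = true serializes as key 3<<3|0 = 0x18 followed by
	// a single 0x01 byte; false is simply absent from the wire.
	isMonotonic := true
	var out []byte
	if isMonotonic {
		out = append(out, 3<<3|0, 1)
	}
	fmt.Printf("%x\n", out) // 1801
}
```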
-
-func (m *Histogram) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExponentialHistogram) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x10
- }
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Summary) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Summary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *NumberDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NumberDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x40
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *NumberDataPoint_AsDouble) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
- i--
- dAtA[i] = 0x21
- return len(dAtA) - i, nil
-}
-func (m *NumberDataPoint_AsInt) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NumberDataPoint_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
- i--
- dAtA[i] = 0x31
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Max_ != nil {
- {
- size := m.Max_.Size()
- i -= size
- if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Min_ != nil {
- {
- size := m.Min_.Size()
- i -= size
- if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- }
- if len(m.ExplicitBounds) > 0 {
- for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- {
- f8 := math.Float64bits(float64(m.ExplicitBounds[iNdEx]))
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f8))
- }
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8))
- i--
- dAtA[i] = 0x3a
- }
- if len(m.BucketCounts) > 0 {
- for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx]))
- }
- i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8))
- i--
- dAtA[i] = 0x32
- }
- if m.Sum_ != nil {
- {
- size := m.Sum_.Size()
- i -= size
- if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
- i--
- dAtA[i] = 0x59
- return len(dAtA) - i, nil
-}
-func (m *HistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
- i--
- dAtA[i] = 0x61
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ZeroThreshold != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
- i--
- dAtA[i] = 0x71
- }
- if m.Max_ != nil {
- {
- size := m.Max_.Size()
- i -= size
- if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Min_ != nil {
- {
- size := m.Min_.Size()
- i -= size
- if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x50
- }
- {
- size, err := m.Negative.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- {
- size, err := m.Positive.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- if m.ZeroCount != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount))
- i--
- dAtA[i] = 0x39
- }
- if m.Scale != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31))))
- i--
- dAtA[i] = 0x30
- }
- if m.Sum_ != nil {
- {
- size := m.Sum_.Size()
- i -= size
- if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
- i--
- dAtA[i] = 0x61
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
- i--
- dAtA[i] = 0x69
- return len(dAtA) - i, nil
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.BucketCounts) > 0 {
- dAtA12 := make([]byte, len(m.BucketCounts)*10)
- var j11 int
- for _, num := range m.BucketCounts {
- for num >= 1<<7 {
- dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j11++
- }
- dAtA12[j11] = uint8(num)
- j11++
- }
- i -= j11
- copy(dAtA[i:], dAtA12[:j11])
- i = encodeVarintMetrics(dAtA, i, uint64(j11))
- i--
- dAtA[i] = 0x12
- }
- if m.Offset != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SummaryDataPoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SummaryDataPoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
- i--
- dAtA[i] = 0x40
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.QuantileValues) > 0 {
- for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if m.Sum != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x29
- }
- if m.Count != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
- i--
- dAtA[i] = 0x21
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x19
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Value != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
- i--
- dAtA[i] = 0x11
- }
- if m.Quantile != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile))))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Exemplar) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.FilteredAttributes) > 0 {
- for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if m.Value != nil {
- {
- size := m.Value.Size()
- i -= size
- if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintMetrics(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x11
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Exemplar_AsDouble) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
- i--
- dAtA[i] = 0x19
- return len(dAtA) - i, nil
-}
-func (m *Exemplar_AsInt) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Exemplar_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
- i--
- dAtA[i] = 0x31
- return len(dAtA) - i, nil
-}
-func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
- offset -= sovMetrics(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *MetricsData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceMetrics) > 0 {
- for _, e := range m.ResourceMetrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceMetrics) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.ScopeMetrics) > 0 {
- for _, e := range m.ScopeMetrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if len(m.DeprecatedScopeMetrics) > 0 {
- for _, e := range m.DeprecatedScopeMetrics {
- l = e.Size()
- n += 2 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeMetrics) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.Metrics) > 0 {
- for _, e := range m.Metrics {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-
-func (m *Metric) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Description)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- l = len(m.Unit)
- if l > 0 {
- n += 1 + l + sovMetrics(uint64(l))
- }
- if m.Data != nil {
- n += m.Data.Size()
- }
- if len(m.Metadata) > 0 {
- for _, e := range m.Metadata {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Metric_Gauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Gauge != nil {
- l = m.Gauge.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Sum != nil {
- l = m.Sum.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Histogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Histogram != nil {
- l = m.Histogram.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_ExponentialHistogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ExponentialHistogram != nil {
- l = m.ExponentialHistogram.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Metric_Summary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Summary != nil {
- l = m.Summary.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- return n
-}
-func (m *Gauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- if m.IsMonotonic {
- n += 2
- }
- return n
-}
-
-func (m *Histogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *ExponentialHistogram) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovMetrics(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *Summary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DataPoints) > 0 {
- for _, e := range m.DataPoints {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *NumberDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Value != nil {
- n += m.Value.Size()
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- return n
-}
-
-func (m *NumberDataPoint_AsDouble) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *NumberDataPoint_AsInt) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum_ != nil {
- n += m.Sum_.Size()
- }
- if len(m.BucketCounts) > 0 {
- n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8
- }
- if len(m.ExplicitBounds) > 0 {
- n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- if m.Min_ != nil {
- n += m.Min_.Size()
- }
- if m.Max_ != nil {
- n += m.Max_.Size()
- }
- return n
-}
-
-func (m *HistogramDataPoint_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint_Min) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *HistogramDataPoint_Max) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum_ != nil {
- n += m.Sum_.Size()
- }
- if m.Scale != 0 {
- n += 1 + sozMetrics(uint64(m.Scale))
- }
- if m.ZeroCount != 0 {
- n += 9
- }
- l = m.Positive.Size()
- n += 1 + l + sovMetrics(uint64(l))
- l = m.Negative.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Min_ != nil {
- n += m.Min_.Size()
- }
- if m.Max_ != nil {
- n += m.Max_.Size()
- }
- if m.ZeroThreshold != 0 {
- n += 9
- }
- return n
-}
-
-func (m *ExponentialHistogramDataPoint_Sum) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Min) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Max) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Offset != 0 {
- n += 1 + sozMetrics(uint64(m.Offset))
- }
- if len(m.BucketCounts) > 0 {
- l = 0
- for _, e := range m.BucketCounts {
- l += sovMetrics(uint64(e))
- }
- n += 1 + sovMetrics(uint64(l)) + l
- }
- return n
-}
-
-func (m *SummaryDataPoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Count != 0 {
- n += 9
- }
- if m.Sum != 0 {
- n += 9
- }
- if len(m.QuantileValues) > 0 {
- for _, e := range m.QuantileValues {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- if m.Flags != 0 {
- n += 1 + sovMetrics(uint64(m.Flags))
- }
- return n
-}
-
-func (m *SummaryDataPoint_ValueAtQuantile) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Quantile != 0 {
- n += 9
- }
- if m.Value != 0 {
- n += 9
- }
- return n
-}
-
-func (m *Exemplar) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- if m.Value != nil {
- n += m.Value.Size()
- }
- l = m.SpanId.Size()
- n += 1 + l + sovMetrics(uint64(l))
- l = m.TraceId.Size()
- n += 1 + l + sovMetrics(uint64(l))
- if len(m.FilteredAttributes) > 0 {
- for _, e := range m.FilteredAttributes {
- l = e.Size()
- n += 1 + l + sovMetrics(uint64(l))
- }
- }
- return n
-}
-
-func (m *Exemplar_AsDouble) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *Exemplar_AsInt) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
-}
-
-func sovMetrics(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMetrics(x uint64) (n int) {
- return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *MetricsData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MetricsData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
- if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceMetrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{})
- if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeMetrics = append(m.DeprecatedScopeMetrics, &ScopeMetrics{})
- if err := m.DeprecatedScopeMetrics[len(m.DeprecatedScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeMetrics) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Metrics = append(m.Metrics, &Metric{})
- if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Metric) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metric: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Description = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Unit = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Gauge{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Gauge{v}
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Sum{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Sum{v}
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Histogram{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Histogram{v}
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &ExponentialHistogram{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_ExponentialHistogram{v}
- iNdEx = postIndex
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Summary{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Data = &Metric_Summary{v}
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Metadata = append(m.Metadata, v11.KeyValue{})
- if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Gauge) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Sum) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Sum: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsMonotonic = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Histogram) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &HistogramDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogram) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Summary) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Summary: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DataPoints = append(m.DataPoints, &SummaryDataPoint{})
- if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *NumberDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &NumberDataPoint_AsDouble{float64(math.Float64frombits(v))}
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
- }
- var v int64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &NumberDataPoint_AsInt{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HistogramDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum_ = &HistogramDataPoint_Sum{float64(math.Float64frombits(v))}
- case 6:
- if wireType == 1 {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.BucketCounts = append(m.BucketCounts, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- elementCount = packedLen / 8
- if elementCount != 0 && len(m.BucketCounts) == 0 {
- m.BucketCounts = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.BucketCounts = append(m.BucketCounts, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- case 7:
- if wireType == 1 {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- v2 := float64(math.Float64frombits(v))
- m.ExplicitBounds = append(m.ExplicitBounds, v2)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- elementCount = packedLen / 8
- if elementCount != 0 && len(m.ExplicitBounds) == 0 {
- m.ExplicitBounds = make([]float64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- v2 := float64(math.Float64frombits(v))
- m.ExplicitBounds = append(m.ExplicitBounds, v2)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Min_ = &HistogramDataPoint_Min{float64(math.Float64frombits(v))}
- case 12:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Max_ = &HistogramDataPoint_Max{float64(math.Float64frombits(v))}
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum_ = &ExponentialHistogramDataPoint_Sum{float64(math.Float64frombits(v))}
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
- m.Scale = v
- case 7:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
- }
- m.ZeroCount = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.ZeroCount = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Positive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Negative.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exemplars = append(m.Exemplars, Exemplar{})
- if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Min_ = &ExponentialHistogramDataPoint_Min{float64(math.Float64frombits(v))}
- case 13:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Max_ = &ExponentialHistogramDataPoint_Max{float64(math.Float64frombits(v))}
- case 14:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.ZeroThreshold = float64(math.Float64frombits(v))
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExponentialHistogramDataPoint_Buckets) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Buckets: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Buckets: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
- m.Offset = v
- case 2:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.BucketCounts = append(m.BucketCounts, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.BucketCounts) == 0 {
- m.BucketCounts = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.BucketCounts = append(m.BucketCounts, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SummaryDataPoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 4:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 5:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Sum = float64(math.Float64frombits(v))
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{})
- if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Flags |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Quantile = float64(math.Float64frombits(v))
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = float64(math.Float64frombits(v))
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Exemplar) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 3:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
- }
- var v uint64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &Exemplar_AsDouble{float64(math.Float64frombits(v))}
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
- }
- var v int64
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- m.Value = &Exemplar_AsInt{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMetrics
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMetrics
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FilteredAttributes = append(m.FilteredAttributes, v11.KeyValue{})
- if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMetrics(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMetrics
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMetrics(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMetrics
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMetrics
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMetrics
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
deleted file mode 100644
index 23e4ca9fb8c..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go
+++ /dev/null
@@ -1,5512 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/profiles/v1development/profiles.proto
-
-package v1development
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Specifies the method of aggregating metric values, either DELTA (change since last report)
-// or CUMULATIVE (total since a fixed start time).
-type AggregationTemporality int32
-
-const (
- // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
- AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
- //* DELTA is an AggregationTemporality for a profiler which reports
- //changes since last report time. Successive metrics contain aggregation of
- //values from continuous and non-overlapping intervals.
- //
- //The values for a DELTA metric are based only on the time interval
- //associated with one measurement cycle. There is no dependency on
- //previous measurements like is the case for CUMULATIVE metrics.
- //
- //For example, consider a system measuring the number of requests that
- //it receives and reports the sum of these requests every second as a
- //DELTA metric:
- //
- //1. The system starts receiving at time=t_0.
- //2. A request is received, the system measures 1 request.
- //3. A request is received, the system measures 1 request.
- //4. A request is received, the system measures 1 request.
- //5. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+1 with a value of 3.
- //6. A request is received, the system measures 1 request.
- //7. A request is received, the system measures 1 request.
- //8. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0+1 to
- //t_0+2 with a value of 2.
- AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
- //* CUMULATIVE is an AggregationTemporality for a profiler which
- //reports changes since a fixed start time. This means that current values
- //of a CUMULATIVE metric depend on all previous measurements since the
- //start time. Because of this, the sender is required to retain this state
- //in some form. If this state is lost or invalidated, the CUMULATIVE metric
- //values MUST be reset and a new fixed start time following the last
- //reported measurement time sent MUST be used.
- //
- //For example, consider a system measuring the number of requests that
- //it receives and reports the sum of these requests every second as a
- //CUMULATIVE metric:
- //
- //1. The system starts receiving at time=t_0.
- //2. A request is received, the system measures 1 request.
- //3. A request is received, the system measures 1 request.
- //4. A request is received, the system measures 1 request.
- //5. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+1 with a value of 3.
- //6. A request is received, the system measures 1 request.
- //7. A request is received, the system measures 1 request.
- //8. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_0 to
- //t_0+2 with a value of 5.
- //9. The system experiences a fault and loses state.
- //10. The system recovers and resumes receiving at time=t_1.
- //11. A request is received, the system measures 1 request.
- //12. The 1 second collection cycle ends. A metric is exported for the
- //number of requests received over the interval of time t_1 to
- //t_1+1 with a value of 1.
- //
- //Note: Even though, when reporting changes since last report time, using
- //CUMULATIVE is valid, it is not recommended.
- AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
-)
-
-var AggregationTemporality_name = map[int32]string{
- 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
- 1: "AGGREGATION_TEMPORALITY_DELTA",
- 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
-}
-
-var AggregationTemporality_value = map[string]int32{
- "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
- "AGGREGATION_TEMPORALITY_DELTA": 1,
- "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
-}
-
-func (x AggregationTemporality) String() string {
- return proto.EnumName(AggregationTemporality_name, int32(x))
-}
-
-func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
-
-// ProfilesDictionary represents the profiles data shared across the
-// entire message being sent.
-type ProfilesDictionary struct {
- // Mappings from address ranges to the image/binary/library mapped
- // into that address range referenced by locations via Location.mapping_index.
- MappingTable []*Mapping `protobuf:"bytes,1,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"`
- // Locations referenced by samples via Profile.location_indices.
- LocationTable []*Location `protobuf:"bytes,2,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"`
- // Functions referenced by locations via Line.function_index.
- FunctionTable []*Function `protobuf:"bytes,3,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"`
- // Links referenced by samples via Sample.link_index.
- LinkTable []*Link `protobuf:"bytes,4,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"`
- // A common table for strings referenced by various messages.
- // string_table[0] must always be "".
- StringTable []string `protobuf:"bytes,5,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"`
- // A common table for attributes referenced by various messages.
- AttributeTable []v1.KeyValue `protobuf:"bytes,6,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"`
- // Represents a mapping between Attribute Keys and Units.
- AttributeUnits []*AttributeUnit `protobuf:"bytes,7,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units,omitempty"`
-}
-
-func (m *ProfilesDictionary) Reset() { *m = ProfilesDictionary{} }
-func (m *ProfilesDictionary) String() string { return proto.CompactTextString(m) }
-func (*ProfilesDictionary) ProtoMessage() {}
-func (*ProfilesDictionary) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{0}
-}
-func (m *ProfilesDictionary) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProfilesDictionary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProfilesDictionary.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProfilesDictionary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProfilesDictionary.Merge(m, src)
-}
-func (m *ProfilesDictionary) XXX_Size() int {
- return m.Size()
-}
-func (m *ProfilesDictionary) XXX_DiscardUnknown() {
- xxx_messageInfo_ProfilesDictionary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesDictionary proto.InternalMessageInfo
-
-func (m *ProfilesDictionary) GetMappingTable() []*Mapping {
- if m != nil {
- return m.MappingTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetLocationTable() []*Location {
- if m != nil {
- return m.LocationTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetFunctionTable() []*Function {
- if m != nil {
- return m.FunctionTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetLinkTable() []*Link {
- if m != nil {
- return m.LinkTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetStringTable() []string {
- if m != nil {
- return m.StringTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetAttributeTable() []v1.KeyValue {
- if m != nil {
- return m.AttributeTable
- }
- return nil
-}
-
-func (m *ProfilesDictionary) GetAttributeUnits() []*AttributeUnit {
- if m != nil {
- return m.AttributeUnits
- }
- return nil
-}
-
-// ProfilesData represents the profiles data that can be stored in persistent storage,
-// OR can be embedded by other protocols that transfer OTLP profiles data but do not
-// implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type ProfilesData struct {
- // An array of ResourceProfiles.
- // For data coming from an SDK profiler, this array will typically contain one
- // element. Host-level profilers will usually create one ResourceProfile per
- // container, as well as one additional ResourceProfile grouping all samples
- // from non-containerized processes.
- // Other resource groupings are possible as well and clarified via
- // Resource.attributes and semantic conventions.
- ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
- // One instance of ProfilesDictionary
- Dictionary ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
-}
-
-func (m *ProfilesData) Reset() { *m = ProfilesData{} }
-func (m *ProfilesData) String() string { return proto.CompactTextString(m) }
-func (*ProfilesData) ProtoMessage() {}
-func (*ProfilesData) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{1}
-}
-func (m *ProfilesData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProfilesData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProfilesData.Merge(m, src)
-}
-func (m *ProfilesData) XXX_Size() int {
- return m.Size()
-}
-func (m *ProfilesData) XXX_DiscardUnknown() {
- xxx_messageInfo_ProfilesData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProfilesData proto.InternalMessageInfo
-
-func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles {
- if m != nil {
- return m.ResourceProfiles
- }
- return nil
-}
-
-func (m *ProfilesData) GetDictionary() ProfilesDictionary {
- if m != nil {
- return m.Dictionary
- }
- return ProfilesDictionary{}
-}
-
-// A collection of ScopeProfiles from a Resource.
-type ResourceProfiles struct {
- // The resource for the profiles in this message.
- // If this field is not set then no resource info is known.
- Resource v11.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeProfiles that originate from a resource.
- ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_profiles" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} }
-func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) }
-func (*ResourceProfiles) ProtoMessage() {}
-func (*ResourceProfiles) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{2}
-}
-func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceProfiles) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceProfiles.Merge(m, src)
-}
-func (m *ResourceProfiles) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceProfiles) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceProfiles.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo
-
-func (m *ResourceProfiles) GetResource() v11.Resource {
- if m != nil {
- return m.Resource
- }
- return v11.Resource{}
-}
-
-func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles {
- if m != nil {
- return m.ScopeProfiles
- }
- return nil
-}
-
-func (m *ResourceProfiles) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Profiles produced by an InstrumentationScope.
-type ScopeProfiles struct {
- // The instrumentation scope information for the profiles in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v1.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of Profiles that originate from an instrumentation scope.
- Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the profile data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all profiles in the "profiles" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} }
-func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) }
-func (*ScopeProfiles) ProtoMessage() {}
-func (*ScopeProfiles) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{3}
-}
-func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeProfiles) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeProfiles.Merge(m, src)
-}
-func (m *ScopeProfiles) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeProfiles) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeProfiles.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo
-
-func (m *ScopeProfiles) GetScope() v1.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v1.InstrumentationScope{}
-}
-
-func (m *ScopeProfiles) GetProfiles() []*Profile {
- if m != nil {
- return m.Profiles
- }
- return nil
-}
-
-func (m *ScopeProfiles) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// Represents a complete profile, including sample types, samples,
-// mappings to binaries, locations, functions, string table, and additional metadata.
-// It modifies and annotates pprof Profile with OpenTelemetry specific fields.
-//
-// Note that whilst fields in this message retain the name and field id from pprof in most cases
-// for ease of understanding data migration, it is not intended that pprof:Profile and
-// OpenTelemetry:Profile encoding be wire compatible.
-type Profile struct {
- // A description of the samples associated with each Sample.value.
- // For a cpu profile this might be:
- // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]]
- // For a heap profile, this might be:
- // [["allocations","count"], ["space","bytes"]],
- // If one of the values represents the number of events represented
- // by the sample, by convention it should be at index 0 and use
- // sample_type.unit == "count".
- SampleType []*ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type,omitempty"`
- // The set of samples recorded in this profile.
- Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"`
- // References to locations in ProfilesDictionary.location_table.
- LocationIndices []int32 `protobuf:"varint,3,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"`
- // Time of collection (UTC) represented as nanoseconds past the epoch.
- TimeNanos int64 `protobuf:"varint,4,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"`
- // Duration of the profile, if a duration makes sense.
- DurationNanos int64 `protobuf:"varint,5,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"`
- // The kind of events between sampled occurrences.
- // e.g [ "cpu","cycles" ] or [ "heap","bytes" ]
- PeriodType ValueType `protobuf:"bytes,6,opt,name=period_type,json=periodType,proto3" json:"period_type"`
- // The number of events between sampled occurrences.
- Period int64 `protobuf:"varint,7,opt,name=period,proto3" json:"period,omitempty"`
- // Free-form text associated with the profile. The text is displayed as is
- // to the user by the tools that read profiles (e.g. by pprof). This field
- // should not be used to store any machine-readable information, it is only
- // for human-friendly content. The profile must stay functional if this field
- // is cleaned.
- CommentStrindices []int32 `protobuf:"varint,8,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"`
- // Index into the sample_type array to the default sample type.
- DefaultSampleTypeIndex int32 `protobuf:"varint,9,opt,name=default_sample_type_index,json=defaultSampleTypeIndex,proto3" json:"default_sample_type_index,omitempty"`
- // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with
- // all zeroes is considered invalid.
- //
- // This field is required.
- ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,10,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"`
- // dropped_attributes_count is the number of attributes that were discarded. Attributes
- // can be discarded because their keys are too long or because there are too many
- // attributes. If this value is 0, then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,11,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present]
- OriginalPayloadFormat string `protobuf:"bytes,12,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"`
- // Original payload can be stored in this field. This can be useful for users who want to get the original payload.
- // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec.
- // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload.
- // If the original payload is in pprof format, it SHOULD not be included in this field.
- // The field is optional, however if it is present then equivalent converted data should be populated in other fields
- // of this message as far as is practicable.
- OriginalPayload []byte `protobuf:"bytes,13,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"`
- // References to attributes in attribute_table. [optional]
- // It is a collection of key/value pairs. Note, global attributes
- // like server name can be set using the resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "abc.com/myattribute": true
- // "abc.com/score": 10.239
- //
- // The OpenTelemetry API specification further restricts the allowed value types:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- AttributeIndices []int32 `protobuf:"varint,14,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
-}
-
-func (m *Profile) Reset() { *m = Profile{} }
-func (m *Profile) String() string { return proto.CompactTextString(m) }
-func (*Profile) ProtoMessage() {}
-func (*Profile) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{4}
-}
-func (m *Profile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Profile.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Profile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Profile.Merge(m, src)
-}
-func (m *Profile) XXX_Size() int {
- return m.Size()
-}
-func (m *Profile) XXX_DiscardUnknown() {
- xxx_messageInfo_Profile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Profile proto.InternalMessageInfo
-
-func (m *Profile) GetSampleType() []*ValueType {
- if m != nil {
- return m.SampleType
- }
- return nil
-}
-
-func (m *Profile) GetSample() []*Sample {
- if m != nil {
- return m.Sample
- }
- return nil
-}
-
-func (m *Profile) GetLocationIndices() []int32 {
- if m != nil {
- return m.LocationIndices
- }
- return nil
-}
-
-func (m *Profile) GetTimeNanos() int64 {
- if m != nil {
- return m.TimeNanos
- }
- return 0
-}
-
-func (m *Profile) GetDurationNanos() int64 {
- if m != nil {
- return m.DurationNanos
- }
- return 0
-}
-
-func (m *Profile) GetPeriodType() ValueType {
- if m != nil {
- return m.PeriodType
- }
- return ValueType{}
-}
-
-func (m *Profile) GetPeriod() int64 {
- if m != nil {
- return m.Period
- }
- return 0
-}
-
-func (m *Profile) GetCommentStrindices() []int32 {
- if m != nil {
- return m.CommentStrindices
- }
- return nil
-}
-
-func (m *Profile) GetDefaultSampleTypeIndex() int32 {
- if m != nil {
- return m.DefaultSampleTypeIndex
- }
- return 0
-}
-
-func (m *Profile) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Profile) GetOriginalPayloadFormat() string {
- if m != nil {
- return m.OriginalPayloadFormat
- }
- return ""
-}
-
-func (m *Profile) GetOriginalPayload() []byte {
- if m != nil {
- return m.OriginalPayload
- }
- return nil
-}
-
-func (m *Profile) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-// Represents a mapping between Attribute Keys and Units.
-type AttributeUnit struct {
- // Index into string table.
- AttributeKeyStrindex int32 `protobuf:"varint,1,opt,name=attribute_key_strindex,json=attributeKeyStrindex,proto3" json:"attribute_key_strindex,omitempty"`
- // Index into string table.
- UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
-}
-
-func (m *AttributeUnit) Reset() { *m = AttributeUnit{} }
-func (m *AttributeUnit) String() string { return proto.CompactTextString(m) }
-func (*AttributeUnit) ProtoMessage() {}
-func (*AttributeUnit) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{5}
-}
-func (m *AttributeUnit) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AttributeUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AttributeUnit.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AttributeUnit) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AttributeUnit.Merge(m, src)
-}
-func (m *AttributeUnit) XXX_Size() int {
- return m.Size()
-}
-func (m *AttributeUnit) XXX_DiscardUnknown() {
- xxx_messageInfo_AttributeUnit.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo
-
-func (m *AttributeUnit) GetAttributeKeyStrindex() int32 {
- if m != nil {
- return m.AttributeKeyStrindex
- }
- return 0
-}
-
-func (m *AttributeUnit) GetUnitStrindex() int32 {
- if m != nil {
- return m.UnitStrindex
- }
- return 0
-}
-
-// A pointer from a profile Sample to a trace Span.
-// Connects a profile sample to a trace span, identified by unique trace and span IDs.
-type Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
-}
-
-func (m *Link) Reset() { *m = Link{} }
-func (m *Link) String() string { return proto.CompactTextString(m) }
-func (*Link) ProtoMessage() {}
-func (*Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{6}
-}
-func (m *Link) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Link.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Link.Merge(m, src)
-}
-func (m *Link) XXX_Size() int {
- return m.Size()
-}
-func (m *Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Link proto.InternalMessageInfo
-
-// ValueType describes the type and units of a value, with an optional aggregation temporality.
-type ValueType struct {
- TypeStrindex int32 `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"`
- UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
- AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"`
-}
-
-func (m *ValueType) Reset() { *m = ValueType{} }
-func (m *ValueType) String() string { return proto.CompactTextString(m) }
-func (*ValueType) ProtoMessage() {}
-func (*ValueType) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{7}
-}
-func (m *ValueType) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ValueType.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ValueType) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValueType.Merge(m, src)
-}
-func (m *ValueType) XXX_Size() int {
- return m.Size()
-}
-func (m *ValueType) XXX_DiscardUnknown() {
- xxx_messageInfo_ValueType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValueType proto.InternalMessageInfo
-
-func (m *ValueType) GetTypeStrindex() int32 {
- if m != nil {
- return m.TypeStrindex
- }
- return 0
-}
-
-func (m *ValueType) GetUnitStrindex() int32 {
- if m != nil {
- return m.UnitStrindex
- }
- return 0
-}
-
-func (m *ValueType) GetAggregationTemporality() AggregationTemporality {
- if m != nil {
- return m.AggregationTemporality
- }
- return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
-}
-
-// Each Sample records values encountered in some program
-// context. The program context is typically a stack trace, perhaps
-// augmented with auxiliary information like the thread-id, some
-// indicator of a higher level request being handled etc.
-type Sample struct {
- // locations_start_index along with locations_length refers to a slice of locations in Profile.location_indices.
- LocationsStartIndex int32 `protobuf:"varint,1,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"`
- // locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices.
- // Supersedes location_index.
- LocationsLength int32 `protobuf:"varint,2,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"`
- // The type and unit of each value is defined by the corresponding
- // entry in Profile.sample_type. All samples must have the same
- // number of values, the same as the length of Profile.sample_type.
- // When aggregating multiple samples into a single sample, the
- // result has a list of values that is the element-wise sum of the
- // lists of the originals.
- Value []int64 `protobuf:"varint,3,rep,packed,name=value,proto3" json:"value,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
- // Reference to link in ProfilesDictionary.link_table. [optional]
- //
- // Types that are valid to be assigned to LinkIndex_:
- // *Sample_LinkIndex
- LinkIndex_ isSample_LinkIndex_ `protobuf_oneof:"link_index_"`
- // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected
- // to fall within the Profile's time range. [optional]
- TimestampsUnixNano []uint64 `protobuf:"varint,6,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"`
-}
-
-func (m *Sample) Reset() { *m = Sample{} }
-func (m *Sample) String() string { return proto.CompactTextString(m) }
-func (*Sample) ProtoMessage() {}
-func (*Sample) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{8}
-}
-func (m *Sample) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Sample) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sample.Merge(m, src)
-}
-func (m *Sample) XXX_Size() int {
- return m.Size()
-}
-func (m *Sample) XXX_DiscardUnknown() {
- xxx_messageInfo_Sample.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Sample proto.InternalMessageInfo
-
-type isSample_LinkIndex_ interface {
- isSample_LinkIndex_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Sample_LinkIndex struct {
- LinkIndex int32 `protobuf:"varint,5,opt,name=link_index,json=linkIndex,proto3,oneof" json:"link_index,omitempty"`
-}
-
-func (*Sample_LinkIndex) isSample_LinkIndex_() {}
-
-func (m *Sample) GetLinkIndex_() isSample_LinkIndex_ {
- if m != nil {
- return m.LinkIndex_
- }
- return nil
-}
-
-func (m *Sample) GetLocationsStartIndex() int32 {
- if m != nil {
- return m.LocationsStartIndex
- }
- return 0
-}
-
-func (m *Sample) GetLocationsLength() int32 {
- if m != nil {
- return m.LocationsLength
- }
- return 0
-}
-
-func (m *Sample) GetValue() []int64 {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Sample) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-func (m *Sample) GetLinkIndex() int32 {
- if x, ok := m.GetLinkIndex_().(*Sample_LinkIndex); ok {
- return x.LinkIndex
- }
- return 0
-}
-
-func (m *Sample) GetTimestampsUnixNano() []uint64 {
- if m != nil {
- return m.TimestampsUnixNano
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Sample) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Sample_LinkIndex)(nil),
- }
-}
-
-// Describes the mapping of a binary in memory, including its address range,
-// file offset, and metadata like build ID
-type Mapping struct {
- // Address at which the binary (or DLL) is loaded into memory.
- MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"`
- // The limit of the address range occupied by this mapping.
- MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"`
- // Offset in the binary that corresponds to the first mapped address.
- FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"`
- // The object this entry is loaded from. This can be a filename on
- // disk for the main binary and shared libraries, or virtual
- // abstractions like "[vdso]".
- FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
- // The following fields indicate the resolution of symbolic info.
- HasFunctions bool `protobuf:"varint,6,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"`
- HasFilenames bool `protobuf:"varint,7,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"`
- HasLineNumbers bool `protobuf:"varint,8,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"`
- HasInlineFrames bool `protobuf:"varint,9,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"`
-}
-
-func (m *Mapping) Reset() { *m = Mapping{} }
-func (m *Mapping) String() string { return proto.CompactTextString(m) }
-func (*Mapping) ProtoMessage() {}
-func (*Mapping) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{9}
-}
-func (m *Mapping) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Mapping.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Mapping) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Mapping.Merge(m, src)
-}
-func (m *Mapping) XXX_Size() int {
- return m.Size()
-}
-func (m *Mapping) XXX_DiscardUnknown() {
- xxx_messageInfo_Mapping.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Mapping proto.InternalMessageInfo
-
-func (m *Mapping) GetMemoryStart() uint64 {
- if m != nil {
- return m.MemoryStart
- }
- return 0
-}
-
-func (m *Mapping) GetMemoryLimit() uint64 {
- if m != nil {
- return m.MemoryLimit
- }
- return 0
-}
-
-func (m *Mapping) GetFileOffset() uint64 {
- if m != nil {
- return m.FileOffset
- }
- return 0
-}
-
-func (m *Mapping) GetFilenameStrindex() int32 {
- if m != nil {
- return m.FilenameStrindex
- }
- return 0
-}
-
-func (m *Mapping) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-func (m *Mapping) GetHasFunctions() bool {
- if m != nil {
- return m.HasFunctions
- }
- return false
-}
-
-func (m *Mapping) GetHasFilenames() bool {
- if m != nil {
- return m.HasFilenames
- }
- return false
-}
-
-func (m *Mapping) GetHasLineNumbers() bool {
- if m != nil {
- return m.HasLineNumbers
- }
- return false
-}
-
-func (m *Mapping) GetHasInlineFrames() bool {
- if m != nil {
- return m.HasInlineFrames
- }
- return false
-}
-
-// Describes function and line table debug information.
-type Location struct {
- // Reference to mapping in ProfilesDictionary.mapping_table.
- // It can be unset if the mapping is unknown or not applicable for
- // this profile type.
- //
- // Types that are valid to be assigned to MappingIndex_:
- // *Location_MappingIndex
- MappingIndex_ isLocation_MappingIndex_ `protobuf_oneof:"mapping_index_"`
- // The instruction address for this location, if available. It
- // should be within [Mapping.memory_start...Mapping.memory_limit]
- // for the corresponding mapping. A non-leaf address may be in the
- // middle of a call instruction. It is up to display tools to find
- // the beginning of the instruction if necessary.
- Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"`
- // Multiple line indicates this location has inlined functions,
- // where the last entry represents the caller into which the
- // preceding entries were inlined.
- //
- // E.g., if memcpy() is inlined into printf:
- // line[0].function_name == "memcpy"
- // line[1].function_name == "printf"
- Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"`
- // Provides an indication that multiple symbols map to this location's
- // address, for example due to identical code folding by the linker. In that
- // case the line information above represents one of the multiple
- // symbols. This field must be recomputed when the symbolization state of the
- // profile changes.
- IsFolded bool `protobuf:"varint,4,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"`
- // References to attributes in ProfilesDictionary.attribute_table. [optional]
- AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
-}
-
-func (m *Location) Reset() { *m = Location{} }
-func (m *Location) String() string { return proto.CompactTextString(m) }
-func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{10}
-}
-func (m *Location) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Location.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Location) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Location.Merge(m, src)
-}
-func (m *Location) XXX_Size() int {
- return m.Size()
-}
-func (m *Location) XXX_DiscardUnknown() {
- xxx_messageInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Location proto.InternalMessageInfo
-
-type isLocation_MappingIndex_ interface {
- isLocation_MappingIndex_()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Location_MappingIndex struct {
- MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3,oneof" json:"mapping_index,omitempty"`
-}
-
-func (*Location_MappingIndex) isLocation_MappingIndex_() {}
-
-func (m *Location) GetMappingIndex_() isLocation_MappingIndex_ {
- if m != nil {
- return m.MappingIndex_
- }
- return nil
-}
-
-func (m *Location) GetMappingIndex() int32 {
- if x, ok := m.GetMappingIndex_().(*Location_MappingIndex); ok {
- return x.MappingIndex
- }
- return 0
-}
-
-func (m *Location) GetAddress() uint64 {
- if m != nil {
- return m.Address
- }
- return 0
-}
-
-func (m *Location) GetLine() []*Line {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *Location) GetIsFolded() bool {
- if m != nil {
- return m.IsFolded
- }
- return false
-}
-
-func (m *Location) GetAttributeIndices() []int32 {
- if m != nil {
- return m.AttributeIndices
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Location) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Location_MappingIndex)(nil),
- }
-}
-
- // Details a specific line in source code, linked to a function.
-type Line struct {
- // Reference to function in ProfilesDictionary.function_table.
- FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"`
- // Line number in source code. 0 means unset.
- Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"`
- // Column number in source code. 0 means unset.
- Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"`
-}
-
-func (m *Line) Reset() { *m = Line{} }
-func (m *Line) String() string { return proto.CompactTextString(m) }
-func (*Line) ProtoMessage() {}
-func (*Line) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{11}
-}
-func (m *Line) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Line.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Line) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Line.Merge(m, src)
-}
-func (m *Line) XXX_Size() int {
- return m.Size()
-}
-func (m *Line) XXX_DiscardUnknown() {
- xxx_messageInfo_Line.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Line proto.InternalMessageInfo
-
-func (m *Line) GetFunctionIndex() int32 {
- if m != nil {
- return m.FunctionIndex
- }
- return 0
-}
-
-func (m *Line) GetLine() int64 {
- if m != nil {
- return m.Line
- }
- return 0
-}
-
-func (m *Line) GetColumn() int64 {
- if m != nil {
- return m.Column
- }
- return 0
-}
-
-// Describes a function, including its human-readable name, system name,
-// source file, and starting line number in the source.
-type Function struct {
- // Function name. Empty string if not available.
- NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"`
- // Function name, as identified by the system. For instance,
- // it can be a C++ mangled name. Empty string if not available.
- SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"`
- // Source file containing the function. Empty string if not available.
- FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
- // Line number in source file. 0 means unset.
- StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"`
-}
-
-func (m *Function) Reset() { *m = Function{} }
-func (m *Function) String() string { return proto.CompactTextString(m) }
-func (*Function) ProtoMessage() {}
-func (*Function) Descriptor() ([]byte, []int) {
- return fileDescriptor_ddd0cf081a2fe76f, []int{12}
-}
-func (m *Function) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Function.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Function) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Function.Merge(m, src)
-}
-func (m *Function) XXX_Size() int {
- return m.Size()
-}
-func (m *Function) XXX_DiscardUnknown() {
- xxx_messageInfo_Function.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Function proto.InternalMessageInfo
-
-func (m *Function) GetNameStrindex() int32 {
- if m != nil {
- return m.NameStrindex
- }
- return 0
-}
-
-func (m *Function) GetSystemNameStrindex() int32 {
- if m != nil {
- return m.SystemNameStrindex
- }
- return 0
-}
-
-func (m *Function) GetFilenameStrindex() int32 {
- if m != nil {
- return m.FilenameStrindex
- }
- return 0
-}
-
-func (m *Function) GetStartLine() int64 {
- if m != nil {
- return m.StartLine
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
- proto.RegisterType((*ProfilesDictionary)(nil), "opentelemetry.proto.profiles.v1development.ProfilesDictionary")
- proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData")
- proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles")
- proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles")
- proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile")
- proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1development.AttributeUnit")
- proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link")
- proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType")
- proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample")
- proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping")
- proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location")
- proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line")
- proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f)
-}
-
-var fileDescriptor_ddd0cf081a2fe76f = []byte{
- // 1617 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x4f, 0x1c, 0xc9,
- 0x15, 0xa6, 0xe7, 0x3e, 0x67, 0x2e, 0x0c, 0x15, 0x96, 0x9d, 0x6c, 0xb4, 0x30, 0x3b, 0xd6, 0x66,
- 0x67, 0x89, 0x16, 0x0c, 0x6c, 0xa2, 0x45, 0x89, 0xa2, 0x00, 0x03, 0xde, 0xb1, 0x31, 0x90, 0x62,
- 0x40, 0x71, 0x62, 0xa9, 0x53, 0x4c, 0xd7, 0x0c, 0x1d, 0xf7, 0x4d, 0x5d, 0x35, 0x88, 0x51, 0xfe,
- 0x82, 0x1f, 0xf2, 0x3b, 0xa2, 0xe4, 0x37, 0xe4, 0xd5, 0x8f, 0x56, 0x9e, 0xac, 0x3c, 0x38, 0x91,
- 0xfd, 0xe2, 0x44, 0xca, 0x7f, 0x88, 0xea, 0xd2, 0x3d, 0x17, 0x0f, 0x72, 0xda, 0x2f, 0x68, 0xea,
- 0x9c, 0xaf, 0xbe, 0x53, 0xe7, 0x52, 0xa7, 0x4e, 0x03, 0xbb, 0x7e, 0x40, 0x3d, 0x4e, 0x1d, 0xea,
- 0x52, 0x1e, 0x8e, 0x36, 0x83, 0xd0, 0xe7, 0xbe, 0xf8, 0xdb, 0xb7, 0x1d, 0xca, 0x36, 0x6f, 0xb6,
- 0x2c, 0x7a, 0x43, 0x1d, 0x3f, 0x70, 0xa9, 0xc7, 0x63, 0xf1, 0x86, 0x44, 0xa1, 0xf5, 0xa9, 0xad,
- 0x4a, 0xb8, 0x11, 0x63, 0xa6, 0xb6, 0x7e, 0xb6, 0x3c, 0xf0, 0x07, 0xbe, 0x22, 0x17, 0xbf, 0x14,
- 0xf8, 0xb3, 0xf5, 0x79, 0xc6, 0x7b, 0xbe, 0xeb, 0xfa, 0xde, 0xe6, 0xcd, 0x96, 0xfe, 0xa5, 0xb1,
- 0x1b, 0xf3, 0xb0, 0x21, 0x65, 0xfe, 0x30, 0xec, 0x51, 0x81, 0x8e, 0x7e, 0x2b, 0x7c, 0xf3, 0x55,
- 0x06, 0xd0, 0x99, 0x3e, 0x4c, 0xdb, 0xee, 0x71, 0xdb, 0xf7, 0x48, 0x38, 0x42, 0xbf, 0x81, 0x8a,
- 0x4b, 0x82, 0xc0, 0xf6, 0x06, 0x26, 0x27, 0x57, 0x0e, 0xad, 0x1b, 0x8d, 0x74, 0xab, 0xb4, 0xbd,
- 0xb3, 0xf1, 0xff, 0x3b, 0xb3, 0xf1, 0x58, 0x11, 0xe0, 0xb2, 0x66, 0xea, 0x0a, 0x22, 0xf4, 0x3b,
- 0xa8, 0x3a, 0x7e, 0x8f, 0x08, 0x43, 0x9a, 0x3a, 0x25, 0xa9, 0xbf, 0x4d, 0x42, 0x7d, 0xac, 0x19,
- 0x70, 0x25, 0xe2, 0x8a, 0xc9, 0xfb, 0x43, 0xaf, 0x37, 0x41, 0x9e, 0x4e, 0x4e, 0x7e, 0xa4, 0x19,
- 0x70, 0x25, 0xe2, 0x52, 0xe4, 0xa7, 0x00, 0x8e, 0xed, 0x3d, 0xd3, 0xc4, 0x19, 0x49, 0x7c, 0x3f,
- 0xd1, 0xa9, 0x6d, 0xef, 0x19, 0x2e, 0x0a, 0x0e, 0x45, 0xf8, 0x05, 0x94, 0x19, 0x0f, 0xc7, 0x31,
- 0xce, 0x36, 0xd2, 0xad, 0x22, 0x2e, 0x29, 0x99, 0x82, 0x5c, 0xc2, 0x22, 0xe1, 0x3c, 0xb4, 0xaf,
- 0x86, 0x9c, 0x6a, 0x54, 0x4e, 0x1a, 0xfe, 0x6a, 0xae, 0x61, 0x5d, 0x0a, 0x37, 0x5b, 0x1b, 0x8f,
- 0xe8, 0xe8, 0x92, 0x38, 0x43, 0xba, 0x9f, 0x79, 0xf1, 0x7a, 0x6d, 0x01, 0x57, 0x63, 0x16, 0xc5,
- 0x7b, 0x35, 0xc9, 0x3b, 0xf4, 0x6c, 0xce, 0xea, 0x79, 0xc9, 0xbb, 0x9b, 0xc4, 0xa1, 0xbd, 0x88,
- 0xe2, 0xc2, 0xb3, 0xf9, 0x84, 0x0d, 0xb1, 0x64, 0xcd, 0x7f, 0x1a, 0x50, 0x8e, 0x4b, 0x8b, 0x70,
- 0x82, 0x6c, 0x58, 0x8a, 0xaa, 0xcf, 0x8c, 0x18, 0x75, 0x61, 0xfd, 0x22, 0x89, 0x59, 0xac, 0x49,
- 0x22, 0x72, 0x5c, 0x0b, 0x67, 0x24, 0xc8, 0x02, 0xb0, 0xe2, 0x6a, 0xae, 0xa7, 0x1a, 0x46, 0xab,
- 0xb4, 0xfd, 0xcb, 0x24, 0x36, 0xde, 0xbf, 0x13, 0x3a, 0x92, 0x13, 0xbc, 0xcd, 0x77, 0x06, 0xd4,
- 0x66, 0x0f, 0x83, 0x1e, 0x41, 0x21, 0x3a, 0x4e, 0xdd, 0x90, 0x86, 0xbf, 0x9e, 0x6b, 0x38, 0xbe,
- 0x88, 0x37, 0x5b, 0xb1, 0x47, 0xda, 0x46, 0x4c, 0x80, 0x7e, 0x0f, 0x55, 0xd6, 0xf3, 0x83, 0x89,
- 0x78, 0xa5, 0x92, 0xa7, 0xe9, 0x5c, 0x30, 0xc4, 0xc1, 0xaa, 0xb0, 0xc9, 0x25, 0xfa, 0x1c, 0x80,
- 0xf5, 0xae, 0xa9, 0x4b, 0xcc, 0x61, 0xe8, 0xd4, 0xd3, 0x0d, 0xa3, 0x55, 0xc4, 0x45, 0x25, 0xb9,
- 0x08, 0x9d, 0x87, 0xb9, 0xc2, 0xbb, 0x7c, 0xed, 0xdf, 0xf9, 0xe6, 0x4b, 0x03, 0x2a, 0x53, 0x3c,
- 0xe8, 0x14, 0xb2, 0x92, 0x49, 0x3b, 0xb9, 0xf3, 0x81, 0x82, 0xec, 0x78, 0x8c, 0x87, 0x43, 0x71,
- 0x1e, 0x79, 0x5f, 0x25, 0x97, 0x76, 0x57, 0xf1, 0xa0, 0x53, 0x28, 0xcc, 0x78, 0xb9, 0xf3, 0x11,
- 0x19, 0xc3, 0x31, 0xc9, 0x07, 0x5c, 0x6b, 0xfe, 0x25, 0x07, 0x79, 0xbd, 0x09, 0x5d, 0x42, 0x89,
- 0x11, 0x37, 0x70, 0xa8, 0xc9, 0x47, 0x41, 0xd4, 0xed, 0x7e, 0x9a, 0xc4, 0xbc, 0xbc, 0x6d, 0xdd,
- 0x51, 0x40, 0x31, 0x28, 0x26, 0xf1, 0x1b, 0x3d, 0x84, 0x9c, 0x5a, 0x69, 0x8f, 0xb6, 0x13, 0xe5,
- 0x4d, 0xee, 0xc4, 0x9a, 0x01, 0x7d, 0x0d, 0xb5, 0xb8, 0x73, 0xda, 0x9e, 0x65, 0xf7, 0x28, 0x93,
- 0xed, 0x2d, 0x8b, 0x17, 0x23, 0x79, 0x47, 0x89, 0x85, 0xe7, 0xdc, 0x76, 0xa9, 0xe9, 0x11, 0xcf,
- 0x67, 0xf5, 0x4c, 0xc3, 0x68, 0xa5, 0x71, 0x51, 0x48, 0x4e, 0x84, 0x00, 0x7d, 0x09, 0x55, 0x6b,
- 0x18, 0x2a, 0x26, 0x05, 0xc9, 0x4a, 0x48, 0x25, 0x92, 0x2a, 0xd8, 0x53, 0x28, 0x05, 0x34, 0xb4,
- 0x7d, 0x4b, 0x05, 0x25, 0x27, 0xf3, 0xfc, 0x71, 0x41, 0x89, 0x2e, 0x8f, 0xe2, 0x93, 0xa1, 0x59,
- 0x81, 0x9c, 0x5a, 0xd5, 0xf3, 0xd2, 0xb8, 0x5e, 0xa1, 0x6f, 0x00, 0x89, 0xaa, 0xa1, 0x1e, 0x37,
- 0x65, 0x27, 0x54, 0x8e, 0x16, 0xa4, 0xa3, 0x4b, 0x5a, 0x73, 0x1e, 0x2b, 0xd0, 0x2e, 0xfc, 0xd0,
- 0xa2, 0x7d, 0x32, 0x74, 0xb8, 0x39, 0x91, 0x41, 0x11, 0x20, 0x7a, 0x5b, 0x2f, 0x36, 0x8c, 0x56,
- 0x16, 0xaf, 0x68, 0xc0, 0x79, 0x9c, 0x97, 0x8e, 0xd0, 0xa2, 0x2b, 0x00, 0x7d, 0x6e, 0xd3, 0xb6,
- 0xea, 0xd0, 0x30, 0x5a, 0xe5, 0xfd, 0x03, 0x71, 0xce, 0x7f, 0xbc, 0x5e, 0xfb, 0xf9, 0xc0, 0x9f,
- 0x71, 0xd4, 0x16, 0x6f, 0xae, 0xe3, 0xd0, 0x1e, 0xf7, 0xc3, 0xcd, 0xc0, 0x22, 0x9c, 0x6c, 0xda,
- 0x1e, 0xa7, 0xa1, 0x47, 0x9c, 0x4d, 0xb1, 0x8a, 0x0a, 0xb0, 0xd3, 0xc6, 0x45, 0x4d, 0xdb, 0xb1,
- 0xd0, 0x77, 0x50, 0xb7, 0x42, 0x3f, 0x08, 0xa8, 0x65, 0xc6, 0xed, 0x91, 0x99, 0x3d, 0x7f, 0xe8,
- 0xf1, 0x7a, 0xa9, 0x61, 0xb4, 0x2a, 0x78, 0x45, 0xeb, 0xe3, 0x66, 0xca, 0x0e, 0x84, 0x16, 0xfd,
- 0x0c, 0x3e, 0xf5, 0x43, 0x7b, 0x60, 0x7b, 0xc4, 0x31, 0x03, 0x32, 0x72, 0x7c, 0x62, 0x99, 0x7d,
- 0x3f, 0x74, 0x09, 0xaf, 0x97, 0x65, 0x29, 0x7f, 0x12, 0xa9, 0xcf, 0x94, 0xf6, 0x48, 0x2a, 0x45,
- 0x99, 0xcc, 0xee, 0xab, 0x57, 0x84, 0x6f, 0x78, 0x71, 0x66, 0x03, 0xfa, 0x09, 0x2c, 0x8d, 0x5f,
- 0x81, 0x28, 0xd2, 0x55, 0x19, 0xe9, 0x5a, 0xac, 0xd0, 0x35, 0xd5, 0xfc, 0x03, 0x54, 0xa6, 0xfa,
- 0x3d, 0xfa, 0x16, 0x56, 0xc6, 0xbb, 0x9f, 0xd1, 0x91, 0x4e, 0x17, 0xbd, 0x95, 0x1d, 0x21, 0x8b,
- 0x97, 0x63, 0xed, 0x23, 0x3a, 0x3a, 0xd7, 0x3a, 0x74, 0x0f, 0x2a, 0xe2, 0xbd, 0x19, 0x83, 0x53,
- 0x12, 0x5c, 0x16, 0xc2, 0x08, 0xd4, 0xfc, 0x9b, 0x01, 0x19, 0xf1, 0x5a, 0xa2, 0xa7, 0x50, 0xe0,
- 0x21, 0xe9, 0xc9, 0x04, 0x19, 0x32, 0x41, 0x7b, 0x3a, 0x41, 0xbb, 0xc9, 0x13, 0xd4, 0x15, 0x4c,
- 0x9d, 0x36, 0xce, 0x4b, 0xca, 0x8e, 0x85, 0x9e, 0x40, 0x9e, 0x05, 0xc4, 0x13, 0xe4, 0x29, 0x49,
- 0xfe, 0x2b, 0x4d, 0xfe, 0x5d, 0x72, 0xf2, 0xf3, 0x80, 0x78, 0x9d, 0x36, 0xce, 0x09, 0xc2, 0x8e,
- 0xd5, 0xfc, 0xbb, 0x01, 0xc5, 0xb8, 0xfa, 0x85, 0xd3, 0xb2, 0x2a, 0x67, 0x22, 0x54, 0x16, 0xc2,
- 0x44, 0x91, 0x41, 0x7f, 0x84, 0x4f, 0xc9, 0x60, 0x10, 0xd2, 0x81, 0x9e, 0xa0, 0xa8, 0x1b, 0xf8,
- 0x21, 0x71, 0x6c, 0x3e, 0x92, 0x0d, 0xae, 0xba, 0xbd, 0x9f, 0xe8, 0x01, 0x1f, 0x53, 0x75, 0xc7,
- 0x4c, 0x78, 0x85, 0xcc, 0x95, 0x37, 0x9f, 0xa7, 0x20, 0xa7, 0x2e, 0x11, 0xda, 0x86, 0x4f, 0xa2,
- 0xa6, 0xc3, 0x4c, 0xc6, 0x49, 0xc8, 0xcd, 0x49, 0xcf, 0x7e, 0x10, 0x2b, 0xcf, 0x85, 0x4e, 0xdd,
- 0xb7, 0x89, 0x06, 0xc6, 0x4c, 0x87, 0x7a, 0x03, 0x7e, 0xad, 0x7d, 0x8c, 0x1b, 0x18, 0x3b, 0x96,
- 0x62, 0xb4, 0x0c, 0xd9, 0x1b, 0x11, 0x3d, 0xd9, 0xe0, 0xd2, 0x58, 0x2d, 0xe6, 0xd7, 0x6b, 0x66,
- 0x7e, 0xbd, 0xa2, 0x35, 0x3d, 0xae, 0xa9, 0x63, 0x89, 0x06, 0x97, 0xfd, 0x7e, 0x41, 0x8d, 0x5f,
- 0xea, 0x38, 0xf7, 0x61, 0x59, 0xb4, 0x44, 0xc6, 0x89, 0x1b, 0x30, 0x31, 0x04, 0xdd, 0xca, 0x66,
- 0x28, 0x07, 0xac, 0x0c, 0x46, 0x63, 0xdd, 0x85, 0x67, 0xdf, 0x8a, 0x8e, 0xb8, 0x5f, 0x81, 0xd2,
- 0x98, 0xd2, 0x6c, 0xfe, 0x27, 0x05, 0x79, 0x3d, 0xe4, 0x8a, 0x59, 0xce, 0xa5, 0xae, 0x1f, 0x8e,
- 0x54, 0x30, 0x64, 0x18, 0x32, 0xb8, 0xa4, 0x64, 0x32, 0x06, 0x13, 0x10, 0xc7, 0x76, 0x6d, 0x2e,
- 0x5d, 0x8f, 0x21, 0xc7, 0x42, 0x84, 0xd6, 0xa0, 0x24, 0xdb, 0x91, 0xdf, 0xef, 0x33, 0xca, 0x65,
- 0x46, 0x33, 0x18, 0x84, 0xe8, 0x54, 0x4a, 0x44, 0x04, 0xc4, 0xca, 0x23, 0xee, 0x44, 0x31, 0x65,
- 0x64, 0x0c, 0x6b, 0x91, 0x22, 0xae, 0x95, 0xb9, 0xe1, 0xca, 0xde, 0x11, 0xae, 0x7b, 0x50, 0xb9,
- 0x26, 0xcc, 0x8c, 0x46, 0x5e, 0x26, 0xdb, 0x7d, 0x01, 0x97, 0xaf, 0x09, 0x8b, 0x06, 0xe2, 0x31,
- 0x48, 0x5b, 0x62, 0xb2, 0x75, 0x6b, 0x50, 0x24, 0x43, 0x2d, 0xa8, 0x09, 0x90, 0x63, 0x7b, 0xd4,
- 0xf4, 0x86, 0xee, 0x15, 0x0d, 0x45, 0xfb, 0x16, 0xb8, 0xea, 0x35, 0x61, 0xc7, 0xb6, 0x47, 0x4f,
- 0x94, 0x14, 0xad, 0xc3, 0x92, 0x40, 0xda, 0x9e, 0xc4, 0xf6, 0x43, 0x49, 0x59, 0x94, 0xd0, 0xc5,
- 0x6b, 0xc2, 0x3a, 0x52, 0x7e, 0x24, 0xc5, 0xcd, 0xff, 0x1a, 0x50, 0x88, 0xc6, 0x7e, 0xf4, 0xe5,
- 0xf8, 0xf3, 0x64, 0xa2, 0xea, 0xbe, 0x5f, 0x88, 0xbf, 0x35, 0x54, 0x86, 0xeb, 0x90, 0x27, 0x96,
- 0x15, 0x52, 0xc6, 0x74, 0xb0, 0xa3, 0x25, 0x6a, 0x43, 0x46, 0x70, 0xeb, 0xcf, 0x83, 0xa4, 0x53,
- 0x3c, 0xc5, 0x72, 0x37, 0xfa, 0x11, 0x14, 0x6d, 0x66, 0xf6, 0x7d, 0xc7, 0xa2, 0x96, 0xcc, 0x42,
- 0x01, 0x17, 0x6c, 0x76, 0x24, 0xd7, 0x89, 0xa2, 0xbf, 0x5f, 0x83, 0xea, 0x94, 0x43, 0x66, 0xf3,
- 0x89, 0xec, 0x80, 0x54, 0xbc, 0xd5, 0xf1, 0x27, 0xcd, 0xe4, 0x0d, 0x8b, 0x3f, 0x4e, 0x94, 0xab,
- 0x48, 0x3b, 0x94, 0x92, 0x6f, 0xa9, 0x3a, 0xde, 0x0a, 0xe4, 0x7a, 0xbe, 0x33, 0x74, 0x3d, 0x59,
- 0x48, 0x69, 0xac, 0x57, 0xcd, 0xbf, 0x1a, 0x50, 0x88, 0x72, 0x2a, 0x52, 0x3a, 0x5d, 0x4d, 0xba,
- 0x35, 0x4d, 0x55, 0xd2, 0x7d, 0x58, 0x66, 0x23, 0xc6, 0xa9, 0x6b, 0x4e, 0x63, 0xd5, 0xed, 0x45,
- 0x4a, 0x77, 0x32, 0x53, 0x7b, 0xef, 0x17, 0x6a, 0xfa, 0x8e, 0x42, 0x15, 0x83, 0x9a, 0x6c, 0x21,
- 0xd2, 0x05, 0x3d, 0xae, 0x48, 0x89, 0x08, 0xc1, 0xfa, 0x73, 0x03, 0x56, 0xe6, 0x77, 0x2a, 0xf4,
- 0x15, 0xdc, 0xdb, 0x7b, 0xf0, 0x00, 0x1f, 0x3e, 0xd8, 0xeb, 0x76, 0x4e, 0x4f, 0xcc, 0xee, 0xe1,
- 0xe3, 0xb3, 0x53, 0xbc, 0x77, 0xdc, 0xe9, 0x3e, 0x31, 0x2f, 0x4e, 0xce, 0xcf, 0x0e, 0x0f, 0x3a,
- 0x47, 0x9d, 0xc3, 0x76, 0x6d, 0x01, 0x7d, 0x01, 0x9f, 0xdf, 0x05, 0x6c, 0x1f, 0x1e, 0x77, 0xf7,
- 0x6a, 0x06, 0xfa, 0x31, 0x34, 0xef, 0x82, 0x1c, 0x5c, 0x3c, 0xbe, 0x38, 0xde, 0xeb, 0x76, 0x2e,
- 0x0f, 0x6b, 0xa9, 0xfd, 0x57, 0xc6, 0x8b, 0x37, 0xab, 0xc6, 0xcb, 0x37, 0xab, 0xc6, 0xbf, 0xde,
- 0xac, 0x1a, 0x7f, 0x7a, 0xbb, 0xba, 0xf0, 0xf2, 0xed, 0xea, 0xc2, 0xab, 0xb7, 0xab, 0x0b, 0xf0,
- 0x8d, 0xed, 0x27, 0x28, 0xa5, 0xfd, 0x4a, 0x34, 0x4c, 0x9f, 0x09, 0xd4, 0x99, 0xf1, 0xdb, 0x5f,
- 0x27, 0x7e, 0x77, 0xd4, 0x17, 0xfe, 0x80, 0x7a, 0x77, 0xfc, 0x37, 0xe2, 0xcf, 0xa9, 0xf5, 0xd3,
- 0x80, 0x7a, 0xdd, 0x98, 0x50, 0x9a, 0x8a, 0x3f, 0x6f, 0x36, 0x2e, 0xb7, 0xda, 0x63, 0xf0, 0x55,
- 0x4e, 0xb2, 0xed, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x89, 0x14, 0x57, 0x2d, 0xef, 0x10, 0x00,
- 0x00,
-}
-
-func (m *ProfilesDictionary) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProfilesDictionary) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProfilesDictionary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeUnits) > 0 {
- for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.AttributeTable) > 0 {
- for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- }
- if len(m.StringTable) > 0 {
- for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.StringTable[iNdEx])
- copy(dAtA[i:], m.StringTable[iNdEx])
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.StringTable[iNdEx])))
- i--
- dAtA[i] = 0x2a
- }
- }
- if len(m.LinkTable) > 0 {
- for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.FunctionTable) > 0 {
- for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.LocationTable) > 0 {
- for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.MappingTable) > 0 {
- for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ProfilesData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if len(m.ResourceProfiles) > 0 {
- for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeProfiles) > 0 {
- for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Profiles) > 0 {
- for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Profile) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Profile) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeIndices) > 0 {
- dAtA5 := make([]byte, len(m.AttributeIndices)*10)
- var j4 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j4++
- }
- dAtA5[j4] = uint8(num)
- j4++
- }
- i -= j4
- copy(dAtA[i:], dAtA5[:j4])
- i = encodeVarintProfiles(dAtA, i, uint64(j4))
- i--
- dAtA[i] = 0x72
- }
- if len(m.OriginalPayload) > 0 {
- i -= len(m.OriginalPayload)
- copy(dAtA[i:], m.OriginalPayload)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload)))
- i--
- dAtA[i] = 0x6a
- }
- if len(m.OriginalPayloadFormat) > 0 {
- i -= len(m.OriginalPayloadFormat)
- copy(dAtA[i:], m.OriginalPayloadFormat)
- i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat)))
- i--
- dAtA[i] = 0x62
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x58
- }
- {
- size := m.ProfileId.Size()
- i -= size
- if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- if m.DefaultSampleTypeIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DefaultSampleTypeIndex))
- i--
- dAtA[i] = 0x48
- }
- if len(m.CommentStrindices) > 0 {
- dAtA7 := make([]byte, len(m.CommentStrindices)*10)
- var j6 int
- for _, num1 := range m.CommentStrindices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j6++
- }
- dAtA7[j6] = uint8(num)
- j6++
- }
- i -= j6
- copy(dAtA[i:], dAtA7[:j6])
- i = encodeVarintProfiles(dAtA, i, uint64(j6))
- i--
- dAtA[i] = 0x42
- }
- if m.Period != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Period))
- i--
- dAtA[i] = 0x38
- }
- {
- size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- if m.DurationNanos != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNanos))
- i--
- dAtA[i] = 0x28
- }
- if m.TimeNanos != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.TimeNanos))
- i--
- dAtA[i] = 0x20
- }
- if len(m.LocationIndices) > 0 {
- dAtA10 := make([]byte, len(m.LocationIndices)*10)
- var j9 int
- for _, num1 := range m.LocationIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA10[j9] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j9++
- }
- dAtA10[j9] = uint8(num)
- j9++
- }
- i -= j9
- copy(dAtA[i:], dAtA10[:j9])
- i = encodeVarintProfiles(dAtA, i, uint64(j9))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Sample) > 0 {
- for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.SampleType) > 0 {
- for iNdEx := len(m.SampleType) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.SampleType[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AttributeUnit) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AttributeUnit) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.UnitStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.AttributeKeyStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.AttributeKeyStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Link) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Link) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ValueType) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ValueType) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AggregationTemporality != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality))
- i--
- dAtA[i] = 0x18
- }
- if m.UnitStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.TypeStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sample) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.TimestampsUnixNano) > 0 {
- dAtA12 := make([]byte, len(m.TimestampsUnixNano)*10)
- var j11 int
- for _, num := range m.TimestampsUnixNano {
- for num >= 1<<7 {
- dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j11++
- }
- dAtA12[j11] = uint8(num)
- j11++
- }
- i -= j11
- copy(dAtA[i:], dAtA12[:j11])
- i = encodeVarintProfiles(dAtA, i, uint64(j11))
- i--
- dAtA[i] = 0x32
- }
- if m.LinkIndex_ != nil {
- {
- size := m.LinkIndex_.Size()
- i -= size
- if _, err := m.LinkIndex_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.AttributeIndices) > 0 {
- dAtA14 := make([]byte, len(m.AttributeIndices)*10)
- var j13 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA14[j13] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j13++
- }
- dAtA14[j13] = uint8(num)
- j13++
- }
- i -= j13
- copy(dAtA[i:], dAtA14[:j13])
- i = encodeVarintProfiles(dAtA, i, uint64(j13))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Value) > 0 {
- dAtA16 := make([]byte, len(m.Value)*10)
- var j15 int
- for _, num1 := range m.Value {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA16[j15] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j15++
- }
- dAtA16[j15] = uint8(num)
- j15++
- }
- i -= j15
- copy(dAtA[i:], dAtA16[:j15])
- i = encodeVarintProfiles(dAtA, i, uint64(j15))
- i--
- dAtA[i] = 0x1a
- }
- if m.LocationsLength != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsLength))
- i--
- dAtA[i] = 0x10
- }
- if m.LocationsStartIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsStartIndex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Sample_LinkIndex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample_LinkIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex))
- i--
- dAtA[i] = 0x28
- return len(dAtA) - i, nil
-}
-func (m *Mapping) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Mapping) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.HasInlineFrames {
- i--
- if m.HasInlineFrames {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x48
- }
- if m.HasLineNumbers {
- i--
- if m.HasLineNumbers {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.HasFilenames {
- i--
- if m.HasFilenames {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if m.HasFunctions {
- i--
- if m.HasFunctions {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if len(m.AttributeIndices) > 0 {
- dAtA18 := make([]byte, len(m.AttributeIndices)*10)
- var j17 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA18[j17] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j17++
- }
- dAtA18[j17] = uint8(num)
- j17++
- }
- i -= j17
- copy(dAtA[i:], dAtA18[:j17])
- i = encodeVarintProfiles(dAtA, i, uint64(j17))
- i--
- dAtA[i] = 0x2a
- }
- if m.FilenameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
- i--
- dAtA[i] = 0x20
- }
- if m.FileOffset != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset))
- i--
- dAtA[i] = 0x18
- }
- if m.MemoryLimit != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit))
- i--
- dAtA[i] = 0x10
- }
- if m.MemoryStart != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Location) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Location) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.AttributeIndices) > 0 {
- dAtA20 := make([]byte, len(m.AttributeIndices)*10)
- var j19 int
- for _, num1 := range m.AttributeIndices {
- num := uint64(num1)
- for num >= 1<<7 {
- dAtA20[j19] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j19++
- }
- dAtA20[j19] = uint8(num)
- j19++
- }
- i -= j19
- copy(dAtA[i:], dAtA20[:j19])
- i = encodeVarintProfiles(dAtA, i, uint64(j19))
- i--
- dAtA[i] = 0x2a
- }
- if m.IsFolded {
- i--
- if m.IsFolded {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if len(m.Line) > 0 {
- for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintProfiles(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Address != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Address))
- i--
- dAtA[i] = 0x10
- }
- if m.MappingIndex_ != nil {
- {
- size := m.MappingIndex_.Size()
- i -= size
- if _, err := m.MappingIndex_.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Location_MappingIndex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Location_MappingIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-func (m *Line) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Line) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Column != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Column))
- i--
- dAtA[i] = 0x18
- }
- if m.Line != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.Line))
- i--
- dAtA[i] = 0x10
- }
- if m.FunctionIndex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Function) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Function) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.StartLine != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine))
- i--
- dAtA[i] = 0x20
- }
- if m.FilenameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
- i--
- dAtA[i] = 0x18
- }
- if m.SystemNameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex))
- i--
- dAtA[i] = 0x10
- }
- if m.NameStrindex != 0 {
- i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int {
- offset -= sovProfiles(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ProfilesDictionary) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.MappingTable) > 0 {
- for _, e := range m.MappingTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LocationTable) > 0 {
- for _, e := range m.LocationTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.FunctionTable) > 0 {
- for _, e := range m.FunctionTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LinkTable) > 0 {
- for _, e := range m.LinkTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.StringTable) > 0 {
- for _, s := range m.StringTable {
- l = len(s)
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.AttributeTable) > 0 {
- for _, e := range m.AttributeTable {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.AttributeUnits) > 0 {
- for _, e := range m.AttributeUnits {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- return n
-}
-
-func (m *ProfilesData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceProfiles) > 0 {
- for _, e := range m.ResourceProfiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = m.Dictionary.Size()
- n += 1 + l + sovProfiles(uint64(l))
- return n
-}
-
-func (m *ResourceProfiles) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if len(m.ScopeProfiles) > 0 {
- for _, e := range m.ScopeProfiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- return n
-}
-
-func (m *ScopeProfiles) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if len(m.Profiles) > 0 {
- for _, e := range m.Profiles {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- return n
-}
-
-func (m *Profile) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.SampleType) > 0 {
- for _, e := range m.SampleType {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.Sample) > 0 {
- for _, e := range m.Sample {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if len(m.LocationIndices) > 0 {
- l = 0
- for _, e := range m.LocationIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.TimeNanos != 0 {
- n += 1 + sovProfiles(uint64(m.TimeNanos))
- }
- if m.DurationNanos != 0 {
- n += 1 + sovProfiles(uint64(m.DurationNanos))
- }
- l = m.PeriodType.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if m.Period != 0 {
- n += 1 + sovProfiles(uint64(m.Period))
- }
- if len(m.CommentStrindices) > 0 {
- l = 0
- for _, e := range m.CommentStrindices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.DefaultSampleTypeIndex != 0 {
- n += 1 + sovProfiles(uint64(m.DefaultSampleTypeIndex))
- }
- l = m.ProfileId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovProfiles(uint64(m.DroppedAttributesCount))
- }
- l = len(m.OriginalPayloadFormat)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- l = len(m.OriginalPayload)
- if l > 0 {
- n += 1 + l + sovProfiles(uint64(l))
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *AttributeUnit) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.AttributeKeyStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.AttributeKeyStrindex))
- }
- if m.UnitStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.UnitStrindex))
- }
- return n
-}
-
-func (m *Link) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovProfiles(uint64(l))
- return n
-}
-
-func (m *ValueType) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TypeStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.TypeStrindex))
- }
- if m.UnitStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.UnitStrindex))
- }
- if m.AggregationTemporality != 0 {
- n += 1 + sovProfiles(uint64(m.AggregationTemporality))
- }
- return n
-}
-
-func (m *Sample) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.LocationsStartIndex != 0 {
- n += 1 + sovProfiles(uint64(m.LocationsStartIndex))
- }
- if m.LocationsLength != 0 {
- n += 1 + sovProfiles(uint64(m.LocationsLength))
- }
- if len(m.Value) > 0 {
- l = 0
- for _, e := range m.Value {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.LinkIndex_ != nil {
- n += m.LinkIndex_.Size()
- }
- if len(m.TimestampsUnixNano) > 0 {
- l = 0
- for _, e := range m.TimestampsUnixNano {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *Sample_LinkIndex) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovProfiles(uint64(m.LinkIndex))
- return n
-}
-func (m *Mapping) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MemoryStart != 0 {
- n += 1 + sovProfiles(uint64(m.MemoryStart))
- }
- if m.MemoryLimit != 0 {
- n += 1 + sovProfiles(uint64(m.MemoryLimit))
- }
- if m.FileOffset != 0 {
- n += 1 + sovProfiles(uint64(m.FileOffset))
- }
- if m.FilenameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.FilenameStrindex))
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- if m.HasFunctions {
- n += 2
- }
- if m.HasFilenames {
- n += 2
- }
- if m.HasLineNumbers {
- n += 2
- }
- if m.HasInlineFrames {
- n += 2
- }
- return n
-}
-
-func (m *Location) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MappingIndex_ != nil {
- n += m.MappingIndex_.Size()
- }
- if m.Address != 0 {
- n += 1 + sovProfiles(uint64(m.Address))
- }
- if len(m.Line) > 0 {
- for _, e := range m.Line {
- l = e.Size()
- n += 1 + l + sovProfiles(uint64(l))
- }
- }
- if m.IsFolded {
- n += 2
- }
- if len(m.AttributeIndices) > 0 {
- l = 0
- for _, e := range m.AttributeIndices {
- l += sovProfiles(uint64(e))
- }
- n += 1 + sovProfiles(uint64(l)) + l
- }
- return n
-}
-
-func (m *Location_MappingIndex) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovProfiles(uint64(m.MappingIndex))
- return n
-}
-func (m *Line) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.FunctionIndex != 0 {
- n += 1 + sovProfiles(uint64(m.FunctionIndex))
- }
- if m.Line != 0 {
- n += 1 + sovProfiles(uint64(m.Line))
- }
- if m.Column != 0 {
- n += 1 + sovProfiles(uint64(m.Column))
- }
- return n
-}
-
-func (m *Function) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.NameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.NameStrindex))
- }
- if m.SystemNameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.SystemNameStrindex))
- }
- if m.FilenameStrindex != 0 {
- n += 1 + sovProfiles(uint64(m.FilenameStrindex))
- }
- if m.StartLine != 0 {
- n += 1 + sovProfiles(uint64(m.StartLine))
- }
- return n
-}
-
-func sovProfiles(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozProfiles(x uint64) (n int) {
- return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ProfilesDictionary) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProfilesDictionary: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProfilesDictionary: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.MappingTable = append(m.MappingTable, &Mapping{})
- if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LocationTable = append(m.LocationTable, &Location{})
- if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.FunctionTable = append(m.FunctionTable, &Function{})
- if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LinkTable = append(m.LinkTable, &Link{})
- if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AttributeTable = append(m.AttributeTable, v1.KeyValue{})
- if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AttributeUnits = append(m.AttributeUnits, &AttributeUnit{})
- if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ProfilesData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{})
- if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceProfiles) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{})
- if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeProfiles) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Profiles = append(m.Profiles, &Profile{})
- if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Profile) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Profile: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SampleType = append(m.SampleType, &ValueType{})
- if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Sample = append(m.Sample, &Sample{})
- if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LocationIndices = append(m.LocationIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.LocationIndices) == 0 {
- m.LocationIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LocationIndices = append(m.LocationIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType)
- }
- m.TimeNanos = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TimeNanos |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType)
- }
- m.DurationNanos = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DurationNanos |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
- }
- m.Period = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Period |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CommentStrindices = append(m.CommentStrindices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.CommentStrindices) == 0 {
- m.CommentStrindices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CommentStrindices = append(m.CommentStrindices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleTypeIndex", wireType)
- }
- m.DefaultSampleTypeIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DefaultSampleTypeIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...)
- if m.OriginalPayload == nil {
- m.OriginalPayload = []byte{}
- }
- iNdEx = postIndex
- case 14:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AttributeUnit) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AttributeUnit: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AttributeUnit: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeKeyStrindex", wireType)
- }
- m.AttributeKeyStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AttributeKeyStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
- }
- m.UnitStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UnitStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Link) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Link: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ValueType) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ValueType: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
- }
- m.TypeStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TypeStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
- }
- m.UnitStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UnitStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
- }
- m.AggregationTemporality = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Sample) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Sample: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType)
- }
- m.LocationsStartIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LocationsStartIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType)
- }
- m.LocationsLength = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LocationsLength |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType == 0 {
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = append(m.Value, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Value) == 0 {
- m.Value = make([]int64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Value = append(m.Value, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- case 4:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.LinkIndex_ = &Sample_LinkIndex{v}
- case 6:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.TimestampsUnixNano) == 0 {
- m.TimestampsUnixNano = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Mapping) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Mapping: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
- }
- m.MemoryStart = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemoryStart |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
- }
- m.MemoryLimit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemoryLimit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
- }
- m.FileOffset = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FileOffset |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
- }
- m.FilenameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FilenameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasFunctions = bool(v != 0)
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasFilenames = bool(v != 0)
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasLineNumbers = bool(v != 0)
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasInlineFrames = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Location) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Location: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.MappingIndex_ = &Location_MappingIndex{v}
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
- }
- m.Address = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Address |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Line = append(m.Line, &Line{})
- if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsFolded = bool(v != 0)
- case 5:
- if wireType == 0 {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthProfiles
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthProfiles
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.AttributeIndices) == 0 {
- m.AttributeIndices = make([]int32, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.AttributeIndices = append(m.AttributeIndices, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Line) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Line: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
- }
- m.FunctionIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FunctionIndex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
- }
- m.Line = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Line |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
- }
- m.Column = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Column |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Function) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Function: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
- }
- m.NameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
- }
- m.SystemNameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SystemNameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
- }
- m.FilenameStrindex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.FilenameStrindex |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
- }
- m.StartLine = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartLine |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipProfiles(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthProfiles
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipProfiles(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowProfiles
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthProfiles
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupProfiles
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthProfiles
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go
deleted file mode 100644
index eedc2c0a40e..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go
+++ /dev/null
@@ -1,450 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/resource/v1/resource.proto
-
-package v1
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// Resource information.
-type Resource struct {
- // Set of attributes that describe the resource.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
- // no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Set of entities that participate in this Resource.
- //
- // Note: keys in the references MUST exist in attributes of this message.
- //
- // Status: [Development]
- EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"`
-}
-
-func (m *Resource) Reset() { *m = Resource{} }
-func (m *Resource) String() string { return proto.CompactTextString(m) }
-func (*Resource) ProtoMessage() {}
-func (*Resource) Descriptor() ([]byte, []int) {
- return fileDescriptor_446f73eacf88f3f5, []int{0}
-}
-func (m *Resource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Resource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Resource.Merge(m, src)
-}
-func (m *Resource) XXX_Size() int {
- return m.Size()
-}
-func (m *Resource) XXX_DiscardUnknown() {
- xxx_messageInfo_Resource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Resource proto.InternalMessageInfo
-
-func (m *Resource) GetAttributes() []v1.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Resource) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Resource) GetEntityRefs() []*v1.EntityRef {
- if m != nil {
- return m.EntityRefs
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5)
-}
-
-var fileDescriptor_446f73eacf88f3f5 = []byte{
- // 334 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
- 0x10, 0xc6, 0xb3, 0xfa, 0xe7, 0x4f, 0x59, 0xf1, 0x12, 0x4a, 0x09, 0x1e, 0xa2, 0x78, 0xa9, 0xf4,
- 0xb0, 0x21, 0xed, 0xa5, 0xd7, 0x5a, 0x5a, 0x28, 0xa5, 0x54, 0x42, 0xf1, 0xd0, 0x8b, 0xc4, 0x38,
- 0x86, 0x40, 0xdc, 0x09, 0x9b, 0x89, 0xe0, 0x5b, 0xf4, 0x39, 0xfa, 0x02, 0x7d, 0x05, 0x8f, 0x1e,
- 0x7b, 0x92, 0xa2, 0x2f, 0x52, 0xb2, 0x31, 0xa9, 0x2d, 0x82, 0xb7, 0x6f, 0xe7, 0xfb, 0xe6, 0x37,
- 0xc3, 0x2c, 0x17, 0x98, 0x80, 0x24, 0x88, 0x61, 0x06, 0xa4, 0x16, 0x4e, 0xa2, 0x90, 0xd0, 0x51,
- 0x90, 0x62, 0xa6, 0x02, 0x70, 0xe6, 0x6e, 0xa5, 0x85, 0xb6, 0xcc, 0xf6, 0xaf, 0x7c, 0x51, 0x14,
- 0x55, 0x66, 0xee, 0xb6, 0x4e, 0x43, 0x0c, 0xb1, 0xc0, 0xe4, 0xaa, 0x48, 0xb4, 0x2e, 0x0e, 0x8d,
- 0x09, 0x70, 0x36, 0x43, 0x99, 0x0f, 0x29, 0x54, 0x91, 0xed, 0xae, 0x19, 0x3f, 0xf1, 0x76, 0x44,
- 0xf3, 0x89, 0x73, 0x9f, 0x48, 0x45, 0xe3, 0x8c, 0x20, 0xb5, 0x58, 0xa7, 0xde, 0x6b, 0x5c, 0x9e,
- 0x8b, 0x43, 0x4b, 0xec, 0x18, 0x73, 0x57, 0x3c, 0xc2, 0x62, 0xe8, 0xc7, 0x19, 0xf4, 0xff, 0x2d,
- 0xd7, 0x6d, 0xc3, 0xdb, 0x03, 0x98, 0xd7, 0xdc, 0x9a, 0x28, 0x4c, 0x12, 0x98, 0x8c, 0x7e, 0xaa,
- 0xa3, 0x00, 0x33, 0x49, 0x56, 0xad, 0xc3, 0x7a, 0x4d, 0xef, 0x6c, 0xe7, 0xdf, 0x54, 0xf6, 0x6d,
- 0xee, 0x9a, 0x0f, 0xbc, 0x01, 0x92, 0x22, 0x5a, 0x8c, 0x14, 0x4c, 0x53, 0xab, 0xae, 0x37, 0xe9,
- 0x1d, 0xd9, 0xe4, 0x4e, 0x77, 0x78, 0x30, 0xf5, 0x38, 0x94, 0x32, 0xed, 0x7f, 0xb0, 0xe5, 0xc6,
- 0x66, 0xab, 0x8d, 0xcd, 0xbe, 0x36, 0x36, 0x7b, 0xdb, 0xda, 0xc6, 0x6a, 0x6b, 0x1b, 0x9f, 0x5b,
- 0xdb, 0xe0, 0xdd, 0x08, 0xc5, 0x91, 0x0b, 0xf7, 0x9b, 0xe5, 0x71, 0x06, 0xb9, 0x35, 0x60, 0xaf,
- 0xf7, 0xe1, 0xdf, 0xa6, 0x28, 0x3f, 0x6e, 0x1c, 0x43, 0x40, 0xa8, 0x9c, 0x64, 0xe2, 0x93, 0xef,
- 0x44, 0x92, 0x40, 0x49, 0x3f, 0x76, 0xf4, 0x4b, 0x53, 0x43, 0x90, 0xfb, 0x5f, 0xfd, 0x5e, 0x6b,
- 0x3f, 0x27, 0x20, 0x5f, 0x2a, 0x8a, 0xe6, 0x8b, 0x72, 0x9a, 0x18, 0xba, 0xe3, 0xff, 0xba, 0xef,
- 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x10, 0xa9, 0xec, 0x36, 0x02, 0x00, 0x00,
-}
-
-func (m *Resource) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Resource) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EntityRefs) > 0 {
- for iNdEx := len(m.EntityRefs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.EntityRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintResource(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintResource(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintResource(dAtA []byte, offset int, v uint64) int {
- offset -= sovResource(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Resource) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovResource(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovResource(uint64(m.DroppedAttributesCount))
- }
- if len(m.EntityRefs) > 0 {
- for _, e := range m.EntityRefs {
- l = e.Size()
- n += 1 + l + sovResource(uint64(l))
- }
- }
- return n
-}
-
-func sovResource(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozResource(x uint64) (n int) {
- return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Resource) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Resource: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthResource
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthResource
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v1.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowResource
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthResource
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthResource
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EntityRefs = append(m.EntityRefs, &v1.EntityRef{})
- if err := m.EntityRefs[len(m.EntityRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipResource(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthResource
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipResource(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowResource
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthResource
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupResource
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthResource
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowResource = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go
deleted file mode 100644
index b0bddfb985f..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go
+++ /dev/null
@@ -1,3045 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: opentelemetry/proto/trace/v1/trace.proto
-
-package v1
-
-import (
- encoding_binary "encoding/binary"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
-
- go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
- v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
- v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// SpanFlags represents constants used to interpret the
-// Span.flags field, which is protobuf 'fixed32' type and is to
-// be used as bit-fields. Each non-zero value defined in this enum is
-// a bit-mask. To extract the bit-field, for example, use an
-// expression like:
-//
-// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
-//
-// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
-//
-// Note that Span flags were introduced in version 1.1 of the
-// OpenTelemetry protocol. Older Span producers do not set this
-// field, consequently consumers should not rely on the absence of a
-// particular flag bit to indicate the presence of a particular feature.
-type SpanFlags int32
-
-const (
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
- // Bits 0-7 are used for trace flags.
- SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
- // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
- // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
- // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
- SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
- SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
-)
-
-var SpanFlags_name = map[int32]string{
- 0: "SPAN_FLAGS_DO_NOT_USE",
- 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
- 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
- 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
-}
-
-var SpanFlags_value = map[string]int32{
- "SPAN_FLAGS_DO_NOT_USE": 0,
- "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
- "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
- "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
-}
-
-func (x SpanFlags) String() string {
- return proto.EnumName(SpanFlags_name, int32(x))
-}
-
-func (SpanFlags) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{0}
-}
-
-// SpanKind is the type of span. Can be used to specify additional relationships between spans
-// in addition to a parent/child relationship.
-type Span_SpanKind int32
-
-const (
- // Unspecified. Do NOT use as default.
- // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
- Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
- // Indicates that the span represents an internal operation within an application,
- // as opposed to an operation happening at the boundaries. Default value.
- Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
- Span_SPAN_KIND_SERVER Span_SpanKind = 2
- // Indicates that the span describes a request to some remote service.
- Span_SPAN_KIND_CLIENT Span_SpanKind = 3
- // Indicates that the span describes a producer sending a message to a broker.
- // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
- // between producer and consumer spans. A PRODUCER span ends when the message was accepted
- // by the broker while the logical processing of the message might span a much longer time.
- Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
- // Indicates that the span describes consumer receiving a message from a broker.
- // Like the PRODUCER kind, there is often no direct critical path latency relationship
- // between producer and consumer spans.
- Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
-)
-
-var Span_SpanKind_name = map[int32]string{
- 0: "SPAN_KIND_UNSPECIFIED",
- 1: "SPAN_KIND_INTERNAL",
- 2: "SPAN_KIND_SERVER",
- 3: "SPAN_KIND_CLIENT",
- 4: "SPAN_KIND_PRODUCER",
- 5: "SPAN_KIND_CONSUMER",
-}
-
-var Span_SpanKind_value = map[string]int32{
- "SPAN_KIND_UNSPECIFIED": 0,
- "SPAN_KIND_INTERNAL": 1,
- "SPAN_KIND_SERVER": 2,
- "SPAN_KIND_CLIENT": 3,
- "SPAN_KIND_PRODUCER": 4,
- "SPAN_KIND_CONSUMER": 5,
-}
-
-func (x Span_SpanKind) String() string {
- return proto.EnumName(Span_SpanKind_name, int32(x))
-}
-
-func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 0}
-}
-
-// For the semantics of status codes see
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
-type Status_StatusCode int32
-
-const (
- // The default status.
- Status_STATUS_CODE_UNSET Status_StatusCode = 0
- // The Span has been validated by an Application developer or Operator to
- // have completed successfully.
- Status_STATUS_CODE_OK Status_StatusCode = 1
- // The Span contains an error.
- Status_STATUS_CODE_ERROR Status_StatusCode = 2
-)
-
-var Status_StatusCode_name = map[int32]string{
- 0: "STATUS_CODE_UNSET",
- 1: "STATUS_CODE_OK",
- 2: "STATUS_CODE_ERROR",
-}
-
-var Status_StatusCode_value = map[string]int32{
- "STATUS_CODE_UNSET": 0,
- "STATUS_CODE_OK": 1,
- "STATUS_CODE_ERROR": 2,
-}
-
-func (x Status_StatusCode) String() string {
- return proto.EnumName(Status_StatusCode_name, int32(x))
-}
-
-func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{4, 0}
-}
-
-// TracesData represents the traces data that can be stored in a persistent storage,
-// OR can be embedded by other protocols that transfer OTLP traces data but do
-// not implement the OTLP protocol.
-//
-// The main difference between this message and collector protocol is that
-// in this message there will not be any "control" or "metadata" specific to
-// OTLP protocol.
-//
-// When new fields are added into this message, the OTLP request MUST be updated
-// as well.
-type TracesData struct {
- // An array of ResourceSpans.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
-}
-
-func (m *TracesData) Reset() { *m = TracesData{} }
-func (m *TracesData) String() string { return proto.CompactTextString(m) }
-func (*TracesData) ProtoMessage() {}
-func (*TracesData) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{0}
-}
-func (m *TracesData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TracesData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TracesData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TracesData.Merge(m, src)
-}
-func (m *TracesData) XXX_Size() int {
- return m.Size()
-}
-func (m *TracesData) XXX_DiscardUnknown() {
- xxx_messageInfo_TracesData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TracesData proto.InternalMessageInfo
-
-func (m *TracesData) GetResourceSpans() []*ResourceSpans {
- if m != nil {
- return m.ResourceSpans
- }
- return nil
-}
-
-// A collection of ScopeSpans from a Resource.
-type ResourceSpans struct {
- DeprecatedScopeSpans []*ScopeSpans `protobuf:"bytes,1000,rep,name=deprecated_scope_spans,json=deprecatedScopeSpans,proto3" json:"deprecated_scope_spans,omitempty"`
- // The resource for the spans in this message.
- // If this field is not set then no resource info is known.
- Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
- // A list of ScopeSpans that originate from a resource.
- ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_spans" field which have their own schema_url field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ResourceSpans) Reset() { *m = ResourceSpans{} }
-func (m *ResourceSpans) String() string { return proto.CompactTextString(m) }
-func (*ResourceSpans) ProtoMessage() {}
-func (*ResourceSpans) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{1}
-}
-func (m *ResourceSpans) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResourceSpans) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSpans.Merge(m, src)
-}
-func (m *ResourceSpans) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSpans) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSpans.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo
-
-func (m *ResourceSpans) GetDeprecatedScopeSpans() []*ScopeSpans {
- if m != nil {
- return m.DeprecatedScopeSpans
- }
- return nil
-}
-
-func (m *ResourceSpans) GetResource() v1.Resource {
- if m != nil {
- return m.Resource
- }
- return v1.Resource{}
-}
-
-func (m *ResourceSpans) GetScopeSpans() []*ScopeSpans {
- if m != nil {
- return m.ScopeSpans
- }
- return nil
-}
-
-func (m *ResourceSpans) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A collection of Spans produced by an InstrumentationScope.
-type ScopeSpans struct {
- // The instrumentation scope information for the spans in this message.
- // Semantically when InstrumentationScope isn't set, it is equivalent with
- // an empty instrumentation scope name (unknown).
- Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
- // A list of Spans that originate from an instrumentation scope.
- Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
- // The Schema URL, if known. This is the identifier of the Schema that the span data
- // is recorded in. Notably, the last part of the URL path is the version number of the
- // schema: http[s]://server[:port]/path/. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all spans and span events in the "spans" field.
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
-}
-
-func (m *ScopeSpans) Reset() { *m = ScopeSpans{} }
-func (m *ScopeSpans) String() string { return proto.CompactTextString(m) }
-func (*ScopeSpans) ProtoMessage() {}
-func (*ScopeSpans) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{2}
-}
-func (m *ScopeSpans) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ScopeSpans.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ScopeSpans) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeSpans.Merge(m, src)
-}
-func (m *ScopeSpans) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeSpans) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeSpans.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScopeSpans proto.InternalMessageInfo
-
-func (m *ScopeSpans) GetScope() v11.InstrumentationScope {
- if m != nil {
- return m.Scope
- }
- return v11.InstrumentationScope{}
-}
-
-func (m *ScopeSpans) GetSpans() []*Span {
- if m != nil {
- return m.Spans
- }
- return nil
-}
-
-func (m *ScopeSpans) GetSchemaUrl() string {
- if m != nil {
- return m.SchemaUrl
- }
- return ""
-}
-
-// A Span represents a single operation performed by a single component of the system.
-//
-// The next available field id is 17.
-type Span struct {
- // A unique identifier for a trace. All spans from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
- // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is required.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
- // other than 8 bytes is considered invalid (empty string in OTLP/JSON
- // is zero-length and thus is also invalid).
- //
- // This field is required.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // trace_state conveys information about request position in multiple distributed tracing graphs.
- // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
- // See also https://github.com/w3c/distributed-tracing for more details about this field.
- TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
- // The `span_id` of this span's parent span. If this is a root span, then this
- // field must be empty. The ID is an 8-byte array.
- ParentSpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"parent_span_id"`
- // Flags, a bit field.
- //
- // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
- // Context specification. To read the 8-bit W3C trace flag, use
- // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
- //
- // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
- //
- // Bits 8 and 9 represent the 3 states of whether a span's parent
- // is remote. The states are (unknown, is not remote, is remote).
- // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
- // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
- //
- // When creating span messages, if the message is logically forwarded from another source
- // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
- // be copied as-is. If creating from a source that does not have an equivalent flags field
- // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
- // be set to zero.
- // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
- //
- // [Optional].
- Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
- // A description of the span's operation.
- //
- // For example, the name can be a qualified method name or a file name
- // and a line number where the operation is called. A best practice is to use
- // the same display name at the same call point in an application.
- // This makes it easier to correlate spans in different traces.
- //
- // This field is semantically required to be set to non-empty string.
- // Empty value is equivalent to an unknown span name.
- //
- // This field is required.
- Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
- // Distinguishes between spans generated in a particular context. For example,
- // two spans with the same name may be distinguished using `CLIENT` (caller)
- // and `SERVER` (callee) to identify queueing latency associated with the span.
- Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
- // start_time_unix_nano is the start time of the span. On the client side, this is the time
- // kept by the local machine where the span execution starts. On the server side, this
- // is the time when the server's application handler starts running.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- //
- // This field is semantically required and it is expected that end_time >= start_time.
- StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // end_time_unix_nano is the end time of the span. On the client side, this is the time
- // kept by the local machine where the span execution ends. On the server side, this
- // is the time when the server application handler stops running.
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- //
- // This field is semantically required and it is expected that end_time >= start_time.
- EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
- // attributes is a collection of key/value pairs. Note, global attributes
- // like server name can be set using the resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "example.com/myattribute": true
- // "example.com/score": 10.239
- //
- // The OpenTelemetry API specification further restricts the allowed value types:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of attributes that were discarded. Attributes
- // can be discarded because their keys are too long or because there are too many
- // attributes. If this value is 0, then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // events is a collection of Event items.
- Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
- // dropped_events_count is the number of dropped events. If the value is 0, then no
- // events were dropped.
- DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
- // links is a collection of Links, which are references from this span to a span
- // in the same or different trace.
- Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
- // dropped_links_count is the number of dropped links after the maximum size was
- // enforced. If this value is 0, then no links were dropped.
- DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
- // An optional final status for this span. Semantically when Status isn't set, it means
- // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
- Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"`
-}
-
-func (m *Span) Reset() { *m = Span{} }
-func (m *Span) String() string { return proto.CompactTextString(m) }
-func (*Span) ProtoMessage() {}
-func (*Span) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3}
-}
-func (m *Span) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span.Merge(m, src)
-}
-func (m *Span) XXX_Size() int {
- return m.Size()
-}
-func (m *Span) XXX_DiscardUnknown() {
- xxx_messageInfo_Span.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span proto.InternalMessageInfo
-
-func (m *Span) GetTraceState() string {
- if m != nil {
- return m.TraceState
- }
- return ""
-}
-
-func (m *Span) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func (m *Span) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Span) GetKind() Span_SpanKind {
- if m != nil {
- return m.Kind
- }
- return Span_SPAN_KIND_UNSPECIFIED
-}
-
-func (m *Span) GetStartTimeUnixNano() uint64 {
- if m != nil {
- return m.StartTimeUnixNano
- }
- return 0
-}
-
-func (m *Span) GetEndTimeUnixNano() uint64 {
- if m != nil {
- return m.EndTimeUnixNano
- }
- return 0
-}
-
-func (m *Span) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Span) GetEvents() []*Span_Event {
- if m != nil {
- return m.Events
- }
- return nil
-}
-
-func (m *Span) GetDroppedEventsCount() uint32 {
- if m != nil {
- return m.DroppedEventsCount
- }
- return 0
-}
-
-func (m *Span) GetLinks() []*Span_Link {
- if m != nil {
- return m.Links
- }
- return nil
-}
-
-func (m *Span) GetDroppedLinksCount() uint32 {
- if m != nil {
- return m.DroppedLinksCount
- }
- return 0
-}
-
-func (m *Span) GetStatus() Status {
- if m != nil {
- return m.Status
- }
- return Status{}
-}
-
-// Event is a time-stamped annotation of the span, consisting of user-supplied
-// text description and key-value pairs.
-type Span_Event struct {
- // time_unix_nano is the time the event occurred.
- TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // name of the event.
- // This field is semantically required to be set to non-empty string.
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // attributes is a collection of attribute key/value pairs on the event.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
- // then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
-}
-
-func (m *Span_Event) Reset() { *m = Span_Event{} }
-func (m *Span_Event) String() string { return proto.CompactTextString(m) }
-func (*Span_Event) ProtoMessage() {}
-func (*Span_Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 0}
-}
-func (m *Span_Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span_Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Event.Merge(m, src)
-}
-func (m *Span_Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Span_Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Event proto.InternalMessageInfo
-
-func (m *Span_Event) GetTimeUnixNano() uint64 {
- if m != nil {
- return m.TimeUnixNano
- }
- return 0
-}
-
-func (m *Span_Event) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Span_Event) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span_Event) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
-type Span_Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
- // The trace_state associated with the link.
- TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
- // attributes is a collection of attribute key/value pairs on the link.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
- // then no attributes were dropped.
- DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // Flags, a bit field.
- //
- // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
- // Context specification. To read the 8-bit W3C trace flag, use
- // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
- //
- // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
- //
- // Bits 8 and 9 represent the 3 states of whether the link is remote.
- // The states are (unknown, is not remote, is remote).
- // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
- // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
- //
- // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
- // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
- //
- // [Optional].
- Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *Span_Link) Reset() { *m = Span_Link{} }
-func (m *Span_Link) String() string { return proto.CompactTextString(m) }
-func (*Span_Link) ProtoMessage() {}
-func (*Span_Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{3, 1}
-}
-func (m *Span_Link) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Span_Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Link.Merge(m, src)
-}
-func (m *Span_Link) XXX_Size() int {
- return m.Size()
-}
-func (m *Span_Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Link proto.InternalMessageInfo
-
-func (m *Span_Link) GetTraceState() string {
- if m != nil {
- return m.TraceState
- }
- return ""
-}
-
-func (m *Span_Link) GetAttributes() []v11.KeyValue {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span_Link) GetDroppedAttributesCount() uint32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-func (m *Span_Link) GetFlags() uint32 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-// The Status type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs.
-type Status struct {
- // A developer-facing human readable error message.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- // The status code.
- Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
-}
-
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_5c407ac9c675a601, []int{4}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return m.Size()
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *Status) GetCode() Status_StatusCode {
- if m != nil {
- return m.Code
- }
- return Status_STATUS_CODE_UNSET
-}
-
-func init() {
- proto.RegisterEnum("opentelemetry.proto.trace.v1.SpanFlags", SpanFlags_name, SpanFlags_value)
- proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
- proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value)
- proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData")
- proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans")
- proto.RegisterType((*ScopeSpans)(nil), "opentelemetry.proto.trace.v1.ScopeSpans")
- proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span")
- proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event")
- proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link")
- proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status")
-}
-
-func init() {
- proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601)
-}
-
-var fileDescriptor_5c407ac9c675a601 = []byte{
- // 1112 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
- 0x14, 0xf6, 0x3a, 0x6b, 0x3b, 0x79, 0x49, 0xdc, 0xed, 0xe0, 0x56, 0x4b, 0x28, 0x8e, 0xb1, 0x0a,
- 0x98, 0x56, 0xb2, 0x49, 0x7b, 0x29, 0x07, 0x44, 0x1d, 0x7b, 0x03, 0x8b, 0x13, 0x3b, 0x9a, 0x5d,
- 0x47, 0x80, 0x90, 0x96, 0xad, 0x77, 0x6a, 0x56, 0xb1, 0x67, 0xad, 0xdd, 0x71, 0xd4, 0xde, 0xf8,
- 0x13, 0xb8, 0x22, 0x71, 0x47, 0x02, 0xce, 0xdc, 0xb8, 0x57, 0x9c, 0x7a, 0x44, 0x1c, 0x2a, 0x94,
- 0x5c, 0xf8, 0x2f, 0x8a, 0x66, 0x66, 0xd7, 0x5e, 0x47, 0x91, 0xd3, 0x48, 0xf4, 0xc2, 0x25, 0x99,
- 0x79, 0x3f, 0xbe, 0xef, 0x7b, 0x6f, 0xde, 0x8c, 0x17, 0x6a, 0xc1, 0x84, 0x50, 0x46, 0x46, 0x64,
- 0x4c, 0x58, 0xf8, 0xb4, 0x31, 0x09, 0x03, 0x16, 0x34, 0x58, 0xe8, 0x0e, 0x48, 0xe3, 0x64, 0x47,
- 0x2e, 0xea, 0xc2, 0x88, 0x6e, 0x2d, 0x44, 0x4a, 0x63, 0x5d, 0x06, 0x9c, 0xec, 0x6c, 0x95, 0x86,
- 0xc1, 0x30, 0x90, 0xd9, 0x7c, 0x25, 0xdd, 0x5b, 0x77, 0x2e, 0x42, 0x1f, 0x04, 0xe3, 0x71, 0x40,
- 0x39, 0xbc, 0x5c, 0xc5, 0xb1, 0xf5, 0x8b, 0x62, 0x43, 0x12, 0x05, 0xd3, 0x50, 0x8a, 0x49, 0xd6,
- 0x32, 0xbe, 0xfa, 0x0d, 0x80, 0xcd, 0xd9, 0xa3, 0xb6, 0xcb, 0x5c, 0x84, 0xa1, 0x98, 0xf8, 0x9d,
- 0x68, 0xe2, 0xd2, 0x48, 0x57, 0x2a, 0x2b, 0xb5, 0xf5, 0x7b, 0x77, 0xeb, 0xcb, 0x64, 0xd7, 0x71,
- 0x9c, 0x63, 0xf1, 0x14, 0xbc, 0x19, 0xa6, 0xb7, 0xd5, 0x9f, 0xb2, 0xb0, 0xb9, 0x10, 0x80, 0x1c,
- 0xb8, 0xe9, 0x91, 0x49, 0x48, 0x06, 0x2e, 0x23, 0x9e, 0x13, 0x0d, 0x82, 0x49, 0xc2, 0xf6, 0x4f,
- 0x41, 0xd0, 0xd5, 0x96, 0xd3, 0x59, 0x3c, 0x43, 0x72, 0x95, 0xe6, 0x40, 0x73, 0x2b, 0xea, 0xc0,
- 0x6a, 0xa2, 0x41, 0x57, 0x2a, 0x4a, 0x6d, 0xfd, 0xde, 0x07, 0x17, 0x22, 0xce, 0x7a, 0x91, 0xaa,
- 0x61, 0x57, 0x7d, 0xf6, 0x62, 0x3b, 0x83, 0x67, 0x00, 0xc8, 0x84, 0xf5, 0xb4, 0xc4, 0xec, 0x15,
- 0x15, 0x42, 0x34, 0xd7, 0xf5, 0x36, 0x40, 0x34, 0xf8, 0x96, 0x8c, 0x5d, 0x67, 0x1a, 0x8e, 0xf4,
- 0x95, 0x8a, 0x52, 0x5b, 0xc3, 0x6b, 0xd2, 0xd2, 0x0f, 0x47, 0xd5, 0xdf, 0x14, 0x80, 0x54, 0x15,
- 0x3d, 0xc8, 0x89, 0xdc, 0xb8, 0x84, 0xfb, 0x17, 0x52, 0xc6, 0x87, 0x7f, 0xb2, 0x53, 0x37, 0x69,
- 0xc4, 0xc2, 0xe9, 0x98, 0x50, 0xe6, 0x32, 0x3f, 0xa0, 0x02, 0x28, 0x2e, 0x46, 0xe2, 0xa0, 0x07,
- 0x90, 0x4b, 0xd7, 0x50, 0xbd, 0xa4, 0x86, 0x89, 0x4b, 0xb1, 0x4c, 0xb8, 0x4c, 0xf8, 0xaf, 0x9b,
- 0xa0, 0xf2, 0x70, 0xf4, 0x35, 0xac, 0x8a, 0x7c, 0xc7, 0xf7, 0x84, 0xea, 0x8d, 0xdd, 0x26, 0x17,
- 0xf0, 0xd7, 0x8b, 0xed, 0x8f, 0x86, 0xc1, 0x39, 0x3a, 0x9f, 0xcf, 0xf0, 0x68, 0x44, 0x06, 0x2c,
- 0x08, 0x1b, 0x13, 0xcf, 0x65, 0x6e, 0xc3, 0xa7, 0x8c, 0x84, 0xd4, 0x1d, 0x35, 0xf8, 0xae, 0x2e,
- 0xe6, 0xd2, 0x6c, 0xe3, 0x82, 0x80, 0x34, 0x3d, 0xf4, 0x25, 0x14, 0xb8, 0x1c, 0x0e, 0x9e, 0x15,
- 0xe0, 0x0f, 0x63, 0xf0, 0x07, 0x57, 0x07, 0xe7, 0x72, 0xcd, 0x36, 0xce, 0x73, 0x40, 0xd3, 0x43,
- 0xdb, 0xb0, 0x2e, 0x85, 0x47, 0xcc, 0x65, 0x24, 0xae, 0x10, 0x84, 0xc9, 0xe2, 0x16, 0xf4, 0x18,
- 0x8a, 0x13, 0x37, 0x24, 0x94, 0x39, 0x89, 0x04, 0xf5, 0x3f, 0x92, 0xb0, 0x21, 0x71, 0x2d, 0x29,
- 0xa4, 0x04, 0xb9, 0xc7, 0x23, 0x77, 0x18, 0xe9, 0x5a, 0x45, 0xa9, 0x15, 0xb0, 0xdc, 0x20, 0x04,
- 0x2a, 0x75, 0xc7, 0x44, 0xcf, 0x09, 0x5d, 0x62, 0x8d, 0x3e, 0x01, 0xf5, 0xd8, 0xa7, 0x9e, 0x9e,
- 0xaf, 0x28, 0xb5, 0xe2, 0x65, 0x37, 0x94, 0xa3, 0x8b, 0x3f, 0x1d, 0x9f, 0x7a, 0x58, 0x24, 0xa2,
- 0x06, 0x94, 0x22, 0xe6, 0x86, 0xcc, 0x61, 0xfe, 0x98, 0x38, 0x53, 0xea, 0x3f, 0x71, 0xa8, 0x4b,
- 0x03, 0xbd, 0x50, 0x51, 0x6a, 0x79, 0x7c, 0x5d, 0xf8, 0x6c, 0x7f, 0x4c, 0xfa, 0xd4, 0x7f, 0xd2,
- 0x75, 0x69, 0x80, 0xee, 0x02, 0x22, 0xd4, 0x3b, 0x1f, 0xbe, 0x2a, 0xc2, 0xaf, 0x11, 0xea, 0x2d,
- 0x04, 0x1f, 0x00, 0xb8, 0x8c, 0x85, 0xfe, 0xa3, 0x29, 0x23, 0x91, 0xbe, 0x26, 0x26, 0xee, 0xfd,
- 0x4b, 0x46, 0xb8, 0x43, 0x9e, 0x1e, 0xb9, 0xa3, 0x69, 0x32, 0xb6, 0x29, 0x00, 0xf4, 0x00, 0x74,
- 0x2f, 0x0c, 0x26, 0x13, 0xe2, 0x39, 0x73, 0xab, 0x33, 0x08, 0xa6, 0x94, 0xe9, 0x50, 0x51, 0x6a,
- 0x9b, 0xf8, 0x66, 0xec, 0x6f, 0xce, 0xdc, 0x2d, 0xee, 0x45, 0x0f, 0x21, 0x4f, 0x4e, 0x08, 0x65,
- 0x91, 0xbe, 0xfe, 0x4a, 0x57, 0x97, 0x77, 0xca, 0xe0, 0x09, 0x38, 0xce, 0x43, 0x1f, 0x42, 0x29,
- 0xe1, 0x96, 0x96, 0x98, 0x77, 0x43, 0xf0, 0xa2, 0xd8, 0x27, 0x72, 0x62, 0xce, 0x8f, 0x21, 0x37,
- 0xf2, 0xe9, 0x71, 0xa4, 0x6f, 0x2e, 0xa9, 0x7b, 0x91, 0x72, 0xdf, 0xa7, 0xc7, 0x58, 0x66, 0xa1,
- 0x3a, 0xbc, 0x91, 0x10, 0x0a, 0x43, 0xcc, 0x57, 0x14, 0x7c, 0xd7, 0x63, 0x17, 0x4f, 0x88, 0xe9,
- 0x76, 0x21, 0xcf, 0xe7, 0x76, 0x1a, 0xe9, 0xd7, 0xc4, 0x53, 0x71, 0xfb, 0x12, 0x3e, 0x11, 0x1b,
- 0x37, 0x39, 0xce, 0xdc, 0xfa, 0x43, 0x81, 0x9c, 0x28, 0x01, 0xdd, 0x86, 0xe2, 0xb9, 0x23, 0x56,
- 0xc4, 0x11, 0x6f, 0xb0, 0xf4, 0xf9, 0x26, 0x23, 0x99, 0x4d, 0x8d, 0xe4, 0xe2, 0x99, 0xaf, 0xbc,
- 0xce, 0x33, 0x57, 0x97, 0x9d, 0xf9, 0xd6, 0xcb, 0x2c, 0xa8, 0xbc, 0x3f, 0xff, 0xe3, 0x07, 0x69,
- 0xb1, 0xd7, 0xea, 0xeb, 0xec, 0x75, 0x6e, 0xe9, 0xfd, 0x9a, 0xbd, 0x58, 0xf9, 0xd4, 0x8b, 0x55,
- 0xfd, 0x41, 0x81, 0xd5, 0xe4, 0xbd, 0x41, 0x6f, 0xc2, 0x0d, 0xeb, 0xb0, 0xd9, 0x75, 0x3a, 0x66,
- 0xb7, 0xed, 0xf4, 0xbb, 0xd6, 0xa1, 0xd1, 0x32, 0xf7, 0x4c, 0xa3, 0xad, 0x65, 0xd0, 0x4d, 0x40,
- 0x73, 0x97, 0xd9, 0xb5, 0x0d, 0xdc, 0x6d, 0xee, 0x6b, 0x0a, 0x2a, 0x81, 0x36, 0xb7, 0x5b, 0x06,
- 0x3e, 0x32, 0xb0, 0x96, 0x5d, 0xb4, 0xb6, 0xf6, 0x4d, 0xa3, 0x6b, 0x6b, 0x2b, 0x8b, 0x18, 0x87,
- 0xb8, 0xd7, 0xee, 0xb7, 0x0c, 0xac, 0xa9, 0x8b, 0xf6, 0x56, 0xaf, 0x6b, 0xf5, 0x0f, 0x0c, 0xac,
- 0xe5, 0xaa, 0xbf, 0x2b, 0x90, 0x97, 0x77, 0x00, 0xe9, 0x50, 0x18, 0x93, 0x28, 0x72, 0x87, 0xc9,
- 0x20, 0x27, 0x5b, 0xd4, 0x02, 0x75, 0x10, 0x78, 0xb2, 0xf3, 0xc5, 0x7b, 0x8d, 0x57, 0xb9, 0x51,
- 0xf1, 0xbf, 0x56, 0xe0, 0x11, 0x2c, 0x92, 0xab, 0x5d, 0x80, 0xb9, 0x0d, 0xdd, 0x80, 0xeb, 0x96,
- 0xdd, 0xb4, 0xfb, 0x96, 0xd3, 0xea, 0xb5, 0x0d, 0xde, 0x08, 0xc3, 0xd6, 0x32, 0x08, 0x41, 0x31,
- 0x6d, 0xee, 0x75, 0x34, 0xe5, 0x7c, 0xa8, 0x81, 0x71, 0x0f, 0x6b, 0xd9, 0xcf, 0xd5, 0x55, 0x45,
- 0xcb, 0xde, 0xf9, 0x51, 0x81, 0x35, 0xde, 0xdb, 0x3d, 0xf1, 0xdb, 0x90, 0x34, 0x77, 0x6f, 0xbf,
- 0xf9, 0xa9, 0xe5, 0xb4, 0x7b, 0x4e, 0xb7, 0x67, 0x3b, 0x7d, 0xcb, 0xd0, 0x32, 0xa8, 0x02, 0x6f,
- 0xa5, 0x5c, 0x36, 0x6e, 0xb6, 0x8c, 0x78, 0x7d, 0xd0, 0xb4, 0x3a, 0xda, 0x4b, 0x05, 0xdd, 0x81,
- 0x77, 0x53, 0x11, 0xad, 0x5e, 0xd7, 0x36, 0xbe, 0xb0, 0x9d, 0xcf, 0x9a, 0x96, 0x63, 0x5a, 0x0e,
- 0x36, 0x0e, 0x7a, 0xb6, 0x21, 0x63, 0xbf, 0xcb, 0xa2, 0xf7, 0xe0, 0x9d, 0x0b, 0x62, 0xcf, 0xc7,
- 0xa9, 0xbb, 0xbf, 0x28, 0xcf, 0x4e, 0xcb, 0xca, 0xf3, 0xd3, 0xb2, 0xf2, 0xf7, 0x69, 0x59, 0xf9,
- 0xfe, 0xac, 0x9c, 0x79, 0x7e, 0x56, 0xce, 0xfc, 0x79, 0x56, 0xce, 0xc0, 0xb6, 0x1f, 0x2c, 0x6d,
- 0xe4, 0xae, 0xfc, 0x18, 0x3d, 0xe4, 0xc6, 0x43, 0xe5, 0xab, 0xd6, 0x95, 0xaf, 0x91, 0xfc, 0xe0,
- 0x1d, 0x12, 0x3a, 0xfb, 0xfa, 0xfe, 0x39, 0x7b, 0xab, 0x37, 0x21, 0xd4, 0x9e, 0x41, 0x08, 0x70,
- 0x79, 0x97, 0xeb, 0x47, 0x3b, 0x8f, 0xf2, 0x22, 0xe3, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0xfd, 0xbe, 0x84, 0xc3, 0xc3, 0x0b, 0x00, 0x00,
-}
-
-func (m *TracesData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TracesData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResourceSpans) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DeprecatedScopeSpans) > 0 {
- for iNdEx := len(m.DeprecatedScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DeprecatedScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- }
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.ScopeSpans) > 0 {
- for iNdEx := len(m.ScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ScopeSpans) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ScopeSpans) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ScopeSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.SchemaUrl) > 0 {
- i -= len(m.SchemaUrl)
- copy(dAtA[i:], m.SchemaUrl)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Spans) > 0 {
- for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Span) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x85
- }
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x7a
- if m.DroppedLinksCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount))
- i--
- dAtA[i] = 0x70
- }
- if len(m.Links) > 0 {
- for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x6a
- }
- }
- if m.DroppedEventsCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount))
- i--
- dAtA[i] = 0x60
- }
- if len(m.Events) > 0 {
- for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x50
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- }
- if m.EndTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano))
- i--
- dAtA[i] = 0x41
- }
- if m.StartTimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
- i--
- dAtA[i] = 0x39
- }
- if m.Kind != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.Kind))
- i--
- dAtA[i] = 0x30
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x2a
- }
- {
- size := m.ParentSpanId.Size()
- i -= size
- if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- if len(m.TraceState) > 0 {
- i -= len(m.TraceState)
- copy(dAtA[i:], m.TraceState)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
- i--
- dAtA[i] = 0x1a
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Span_Event) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x20
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x12
- }
- if m.TimeUnixNano != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
- i--
- dAtA[i] = 0x9
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Span_Link) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Flags != 0 {
- i -= 4
- encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
- i--
- dAtA[i] = 0x35
- }
- if m.DroppedAttributesCount != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
- i--
- dAtA[i] = 0x28
- }
- if len(m.Attributes) > 0 {
- for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.TraceState) > 0 {
- i -= len(m.TraceState)
- copy(dAtA[i:], m.TraceState)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
- i--
- dAtA[i] = 0x1a
- }
- {
- size := m.SpanId.Size()
- i -= size
- if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size := m.TraceId.Size()
- i -= size
- if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintTrace(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Status) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Status) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Code != 0 {
- i = encodeVarintTrace(dAtA, i, uint64(m.Code))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Message) > 0 {
- i -= len(m.Message)
- copy(dAtA[i:], m.Message)
- i = encodeVarintTrace(dAtA, i, uint64(len(m.Message)))
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTrace(dAtA []byte, offset int, v uint64) int {
- offset -= sovTrace(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *TracesData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceSpans) > 0 {
- for _, e := range m.ResourceSpans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceSpans) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovTrace(uint64(l))
- if len(m.ScopeSpans) > 0 {
- for _, e := range m.ScopeSpans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.DeprecatedScopeSpans) > 0 {
- for _, e := range m.DeprecatedScopeSpans {
- l = e.Size()
- n += 2 + l + sovTrace(uint64(l))
- }
- }
- return n
-}
-
-func (m *ScopeSpans) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Scope.Size()
- n += 1 + l + sovTrace(uint64(l))
- if len(m.Spans) > 0 {
- for _, e := range m.Spans {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- l = len(m.SchemaUrl)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- return n
-}
-
-func (m *Span) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.TraceState)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- l = m.ParentSpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if m.Kind != 0 {
- n += 1 + sovTrace(uint64(m.Kind))
- }
- if m.StartTimeUnixNano != 0 {
- n += 9
- }
- if m.EndTimeUnixNano != 0 {
- n += 9
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- if len(m.Events) > 0 {
- for _, e := range m.Events {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedEventsCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedEventsCount))
- }
- if len(m.Links) > 0 {
- for _, e := range m.Links {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedLinksCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedLinksCount))
- }
- l = m.Status.Size()
- n += 1 + l + sovTrace(uint64(l))
- if m.Flags != 0 {
- n += 6
- }
- return n
-}
-
-func (m *Span_Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TimeUnixNano != 0 {
- n += 9
- }
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- return n
-}
-
-func (m *Span_Link) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.TraceId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = m.SpanId.Size()
- n += 1 + l + sovTrace(uint64(l))
- l = len(m.TraceState)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if len(m.Attributes) > 0 {
- for _, e := range m.Attributes {
- l = e.Size()
- n += 1 + l + sovTrace(uint64(l))
- }
- }
- if m.DroppedAttributesCount != 0 {
- n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
- }
- if m.Flags != 0 {
- n += 5
- }
- return n
-}
-
-func (m *Status) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Message)
- if l > 0 {
- n += 1 + l + sovTrace(uint64(l))
- }
- if m.Code != 0 {
- n += 1 + sovTrace(uint64(m.Code))
- }
- return n
-}
-
-func sovTrace(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTrace(x uint64) (n int) {
- return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *TracesData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TracesData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceSpans = append(m.ResourceSpans, &ResourceSpans{})
- if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResourceSpans) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ScopeSpans = append(m.ScopeSpans, &ScopeSpans{})
- if err := m.ScopeSpans[len(m.ScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DeprecatedScopeSpans = append(m.DeprecatedScopeSpans, &ScopeSpans{})
- if err := m.DeprecatedScopeSpans[len(m.DeprecatedScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ScopeSpans) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ScopeSpans: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ScopeSpans: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Spans = append(m.Spans, &Span{})
- if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SchemaUrl = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Span: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TraceState = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
- }
- m.Kind = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Kind |= Span_SpanKind(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
- }
- m.StartTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 8:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
- }
- m.EndTimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Events = append(m.Events, &Span_Event{})
- if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
- }
- m.DroppedEventsCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedEventsCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Links = append(m.Links, &Span_Link{})
- if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
- }
- m.DroppedLinksCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedLinksCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 15:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 16:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span_Event) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Event: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
- }
- m.TimeUnixNano = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
- iNdEx += 8
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Span_Link) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Link: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TraceState = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Attributes = append(m.Attributes, v11.KeyValue{})
- if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
- }
- m.DroppedAttributesCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DroppedAttributesCount |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 5 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 4) > l {
- return io.ErrUnexpectedEOF
- }
- m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
- iNdEx += 4
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Status) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Status: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTrace
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTrace
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Message = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
- }
- m.Code = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Code |= Status_StatusCode(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTrace(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTrace
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTrace(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTrace
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTrace
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTrace
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTrace
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group")
-)
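
Reviewer note on the generated code above: gogoproto marshals by sizing each message first and then back-filling the buffer from the end, which is why every MarshalToSizedBuffer walks fields in reverse and writes each field's tag after its payload, and why the unmarshal loops decode varints seven bits at a time. A minimal, self-contained sketch of the varint helpers this relies on — the names are illustrative stand-ins mirroring the deleted sovTrace/encodeVarintTrace, not part of any package:

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the protobuf varint encoding of v occupies (1-10).
func sov(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// encodeVarint writes v immediately before offset in buf and returns the new
// start position, mirroring how the generated code back-fills its buffer.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// decodeVarint mirrors the unmarshal loops above: low seven bits arrive first,
// and the high bit of each byte marks continuation.
func decodeVarint(buf []byte) (v uint64, n int) {
	for i, b := range buf {
		v |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("%% x\n", buf[start:]) // ac 02
	v, n := decodeVarint(buf[start:])
	fmt.Println(v, n) // 300 2
}
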
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
deleted file mode 100644
index 25110f8b445..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "errors"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const spanIDSize = 8
-
-var (
- errMarshalSpanID = errors.New("marshal: invalid buffer length for SpanID")
- errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
-)
-
-// SpanID is a custom data type that is used for all span_id fields in OTLP
-// Protobuf messages.
-type SpanID [spanIDSize]byte
-
-var _ proto.Sizer = (*SpanID)(nil)
-
-// Size returns the size of the data to serialize.
-func (sid SpanID) Size() int {
- if sid.IsEmpty() {
- return 0
- }
- return spanIDSize
-}
-
-// IsEmpty returns true if id doesn't contain at least one non-zero byte.
-func (sid SpanID) IsEmpty() bool {
- return sid == [spanIDSize]byte{}
-}
-
-// MarshalTo converts span ID into a binary representation. Called by Protobuf serialization.
-func (sid SpanID) MarshalTo(data []byte) (n int, err error) {
- if sid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < spanIDSize {
- return 0, errMarshalSpanID
- }
-
- return copy(data, sid[:]), nil
-}
-
-// Unmarshal inflates this span ID from binary representation. Called by Protobuf serialization.
-func (sid *SpanID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *sid = [spanIDSize]byte{}
- return nil
- }
-
- if len(data) != spanIDSize {
- return errUnmarshalSpanID
- }
-
- copy(sid[:], data)
- return nil
-}
-
-// MarshalJSON converts SpanID into a hex string enclosed in quotes.
-func (sid SpanID) MarshalJSON() ([]byte, error) {
- if sid.IsEmpty() {
- return []byte(`""`), nil
- }
- return marshalJSON(sid[:])
-}
-
-// UnmarshalJSON decodes SpanID from hex string, possibly enclosed in quotes.
-// Called by Protobuf JSON deserialization.
-func (sid *SpanID) UnmarshalJSON(data []byte) error {
- *sid = [spanIDSize]byte{}
- return unmarshalJSON(sid[:], data)
-}
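
The deleted SpanID file (and its TraceID twin below) implements gogoproto's custom-type contract: Size reports 0 for an all-zero ID so empty IDs are omitted from the wire, MarshalTo copies into a caller-sized buffer, and Unmarshal accepts either an empty payload or exactly the fixed length. A hedged sketch of that contract using a local 8-byte stand-in type (none of these names are the vendored API):

package main

import (
	"errors"
	"fmt"
)

const idSize = 8

// id mimics the fixed-size array type used for SpanID.
type id [idSize]byte

// isEmpty reports whether every byte is zero.
func (x id) isEmpty() bool { return x == id{} }

// size returns 0 for empty IDs so the field is skipped entirely on the wire.
func (x id) size() int {
	if x.isEmpty() {
		return 0
	}
	return idSize
}

// marshalTo copies the ID into data, failing on a too-short buffer.
func (x id) marshalTo(data []byte) (int, error) {
	if x.isEmpty() {
		return 0, nil
	}
	if len(data) < idSize {
		return 0, errors.New("marshal: invalid buffer length")
	}
	return copy(data, x[:]), nil
}

// unmarshal accepts an empty payload or exactly idSize bytes, nothing else.
func (x *id) unmarshal(data []byte) error {
	if len(data) == 0 {
		*x = id{}
		return nil
	}
	if len(data) != idSize {
		return errors.New("unmarshal: invalid length")
	}
	copy(x[:], data)
	return nil
}

func main() {
	src := id{1, 2, 3, 4, 5, 6, 7, 8}
	buf := make([]byte, src.size())
	if _, err := src.marshalTo(buf); err != nil {
		panic(err)
	}
	var dst id
	if err := dst.unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Println(dst == src) // true
}
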
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
deleted file mode 100644
index 4828ee02bd0..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package data // import "go.opentelemetry.io/collector/pdata/internal/data"
-
-import (
- "errors"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const traceIDSize = 16
-
-var (
- errMarshalTraceID = errors.New("marshal: invalid buffer length for TraceID")
- errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
-)
-
-// TraceID is a custom data type that is used for all trace_id fields in OTLP
-// Protobuf messages.
-type TraceID [traceIDSize]byte
-
-var _ proto.Sizer = (*TraceID)(nil)
-
-// Size returns the size of the data to serialize.
-func (tid TraceID) Size() int {
- if tid.IsEmpty() {
- return 0
- }
- return traceIDSize
-}
-
-// IsEmpty returns true if id doesn't contain at least one non-zero byte.
-func (tid TraceID) IsEmpty() bool {
- return tid == [traceIDSize]byte{}
-}
-
-// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization.
-func (tid TraceID) MarshalTo(data []byte) (n int, err error) {
- if tid.IsEmpty() {
- return 0, nil
- }
-
- if len(data) < traceIDSize {
- return 0, errMarshalTraceID
- }
-
- return copy(data, tid[:]), nil
-}
-
-// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization.
-func (tid *TraceID) Unmarshal(data []byte) error {
- if len(data) == 0 {
- *tid = [traceIDSize]byte{}
- return nil
- }
-
- if len(data) != traceIDSize {
- return errUnmarshalTraceID
- }
-
- copy(tid[:], data)
- return nil
-}
-
-// MarshalJSON converts TraceID into a hex string enclosed in quotes.
-func (tid TraceID) MarshalJSON() ([]byte, error) {
- if tid.IsEmpty() {
- return []byte(`""`), nil
- }
- return marshalJSON(tid[:])
-}
-
-// UnmarshalJSON decodes TraceID from hex string, possibly enclosed in quotes.
-// Called by Protobuf JSON deserialization.
-func (tid *TraceID) UnmarshalJSON(data []byte) error {
- *tid = [traceIDSize]byte{}
- return unmarshalJSON(tid[:], data)
-}
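
Both ID types also share the JSON convention used across pdata: an empty ID marshals to "" and a non-empty one to a quoted hex string (the package's unexported marshalJSON/unmarshalJSON helpers do the hex work). A small stand-alone sketch of the same convention, assuming only the standard library:

package main

import (
	"encoding/hex"
	"fmt"
)

// marshalIDJSON renders an ID as a quoted hex string; empty IDs become "".
func marshalIDJSON(id []byte) []byte {
	allZero := true
	for _, b := range id {
		if b != 0 {
			allZero = false
			break
		}
	}
	if allZero {
		return []byte(`""`)
	}
	out := make([]byte, 0, hex.EncodedLen(len(id))+2)
	out = append(out, '"')
	out = append(out, hex.EncodeToString(id)...)
	out = append(out, '"')
	return out
}

func main() {
	var tid [16]byte
	tid[0], tid[15] = 0x0a, 0xff
	fmt.Println(string(marshalIDJSON(tid[:])))           // "0a0000000000000000000000000000ff"
	fmt.Println(string(marshalIDJSON(make([]byte, 16)))) // ""
}
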
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
new file mode 100644
index 00000000000..6c3a7208dbc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_aggregationtemporality.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = AggregationTemporality(0)
+ AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = AggregationTemporality(1)
+ AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = AggregationTemporality(2)
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated values.
+// It describes how those values relate to the time interval over which they are aggregated.
+type AggregationTemporality int32
+
+var AggregationTemporality_name = map[int32]string{
+ 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+ 1: "AGGREGATION_TEMPORALITY_DELTA",
+ 2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+}
+
+var AggregationTemporality_value = map[string]int32{
+ "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+ "AGGREGATION_TEMPORALITY_DELTA": 1,
+ "AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
new file mode 100644
index 00000000000..4ad9a5b29db
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_severitynumber.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED = SeverityNumber(0)
+ SeverityNumber_SEVERITY_NUMBER_TRACE = SeverityNumber(1)
+ SeverityNumber_SEVERITY_NUMBER_TRACE2 = SeverityNumber(2)
+ SeverityNumber_SEVERITY_NUMBER_TRACE3 = SeverityNumber(3)
+ SeverityNumber_SEVERITY_NUMBER_TRACE4 = SeverityNumber(4)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG = SeverityNumber(5)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG2 = SeverityNumber(6)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG3 = SeverityNumber(7)
+ SeverityNumber_SEVERITY_NUMBER_DEBUG4 = SeverityNumber(8)
+ SeverityNumber_SEVERITY_NUMBER_INFO = SeverityNumber(9)
+ SeverityNumber_SEVERITY_NUMBER_INFO2 = SeverityNumber(10)
+ SeverityNumber_SEVERITY_NUMBER_INFO3 = SeverityNumber(11)
+ SeverityNumber_SEVERITY_NUMBER_INFO4 = SeverityNumber(12)
+ SeverityNumber_SEVERITY_NUMBER_WARN = SeverityNumber(13)
+ SeverityNumber_SEVERITY_NUMBER_WARN2 = SeverityNumber(14)
+ SeverityNumber_SEVERITY_NUMBER_WARN3 = SeverityNumber(15)
+ SeverityNumber_SEVERITY_NUMBER_WARN4 = SeverityNumber(16)
+ SeverityNumber_SEVERITY_NUMBER_ERROR = SeverityNumber(17)
+ SeverityNumber_SEVERITY_NUMBER_ERROR2 = SeverityNumber(18)
+ SeverityNumber_SEVERITY_NUMBER_ERROR3 = SeverityNumber(19)
+ SeverityNumber_SEVERITY_NUMBER_ERROR4 = SeverityNumber(20)
+ SeverityNumber_SEVERITY_NUMBER_FATAL = SeverityNumber(21)
+ SeverityNumber_SEVERITY_NUMBER_FATAL2 = SeverityNumber(22)
+ SeverityNumber_SEVERITY_NUMBER_FATAL3 = SeverityNumber(23)
+ SeverityNumber_SEVERITY_NUMBER_FATAL4 = SeverityNumber(24)
+)
+
+// SeverityNumber represents the possible values for LogRecord.SeverityNumber.
+type SeverityNumber int32
+
+var SeverityNumber_name = map[int32]string{
+ 0: "SEVERITY_NUMBER_UNSPECIFIED",
+ 1: "SEVERITY_NUMBER_TRACE ",
+ 2: "SEVERITY_NUMBER_TRACE2",
+ 3: "SEVERITY_NUMBER_TRACE3",
+ 4: "SEVERITY_NUMBER_TRACE4",
+ 5: "SEVERITY_NUMBER_DEBUG",
+ 6: "SEVERITY_NUMBER_DEBUG2",
+ 7: "SEVERITY_NUMBER_DEBUG3",
+ 8: "SEVERITY_NUMBER_DEBUG4",
+ 9: "SEVERITY_NUMBER_INFO",
+ 10: "SEVERITY_NUMBER_INFO2",
+ 11: "SEVERITY_NUMBER_INFO3",
+ 12: "SEVERITY_NUMBER_INFO4",
+ 13: "SEVERITY_NUMBER_WARN",
+ 14: "SEVERITY_NUMBER_WARN2",
+ 15: "SEVERITY_NUMBER_WARN3",
+ 16: "SEVERITY_NUMBER_WARN4",
+ 17: "SEVERITY_NUMBER_ERROR",
+ 18: "SEVERITY_NUMBER_ERROR2",
+ 19: "SEVERITY_NUMBER_ERROR3",
+ 20: "SEVERITY_NUMBER_ERROR4",
+ 21: "SEVERITY_NUMBER_FATAL",
+ 22: "SEVERITY_NUMBER_FATAL2",
+ 23: "SEVERITY_NUMBER_FATAL3",
+ 24: "SEVERITY_NUMBER_FATAL4",
+}
+
+var SeverityNumber_value = map[string]int32{
+ "SEVERITY_NUMBER_UNSPECIFIED": 0,
+ "SEVERITY_NUMBER_TRACE ": 1,
+ "SEVERITY_NUMBER_TRACE2": 2,
+ "SEVERITY_NUMBER_TRACE3": 3,
+ "SEVERITY_NUMBER_TRACE4": 4,
+ "SEVERITY_NUMBER_DEBUG": 5,
+ "SEVERITY_NUMBER_DEBUG2": 6,
+ "SEVERITY_NUMBER_DEBUG3": 7,
+ "SEVERITY_NUMBER_DEBUG4": 8,
+ "SEVERITY_NUMBER_INFO": 9,
+ "SEVERITY_NUMBER_INFO2": 10,
+ "SEVERITY_NUMBER_INFO3": 11,
+ "SEVERITY_NUMBER_INFO4": 12,
+ "SEVERITY_NUMBER_WARN": 13,
+ "SEVERITY_NUMBER_WARN2": 14,
+ "SEVERITY_NUMBER_WARN3": 15,
+ "SEVERITY_NUMBER_WARN4": 16,
+ "SEVERITY_NUMBER_ERROR": 17,
+ "SEVERITY_NUMBER_ERROR2": 18,
+ "SEVERITY_NUMBER_ERROR3": 19,
+ "SEVERITY_NUMBER_ERROR4": 20,
+ "SEVERITY_NUMBER_FATAL": 21,
+ "SEVERITY_NUMBER_FATAL2": 22,
+ "SEVERITY_NUMBER_FATAL3": 23,
+ "SEVERITY_NUMBER_FATAL4": 24,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
new file mode 100644
index 00000000000..1632aadde2a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_spankind.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ SpanKind_SPAN_KIND_UNSPECIFIED = SpanKind(0)
+ SpanKind_SPAN_KIND_INTERNAL = SpanKind(1)
+ SpanKind_SPAN_KIND_SERVER = SpanKind(2)
+ SpanKind_SPAN_KIND_CLIENT = SpanKind(3)
+ SpanKind_SPAN_KIND_PRODUCER = SpanKind(4)
+ SpanKind_SPAN_KIND_CONSUMER = SpanKind(5)
+)
+
+// SpanKind is the type of span.
+// Can be used to specify additional relationships between spans in addition to a parent/child relationship.
+type SpanKind int32
+
+var SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SPAN_KIND_INTERNAL",
+ 2: "SPAN_KIND_SERVER",
+ 3: "SPAN_KIND_CLIENT",
+ 4: "SPAN_KIND_PRODUCER",
+ 5: "SPAN_KIND_CONSUMER",
+}
+
+var SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SPAN_KIND_INTERNAL": 1,
+ "SPAN_KIND_SERVER": 2,
+ "SPAN_KIND_CLIENT": 3,
+ "SPAN_KIND_PRODUCER": 4,
+ "SPAN_KIND_CONSUMER": 5,
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
new file mode 100644
index 00000000000..ac5aab75934
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_enum_statuscode.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+const (
+ StatusCode_STATUS_CODE_UNSET = StatusCode(0)
+ StatusCode_STATUS_CODE_OK = StatusCode(1)
+ StatusCode_STATUS_CODE_ERROR = StatusCode(2)
+)
+
+// StatusCode is the status of the span; for the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+var StatusCode_name = map[int32]string{
+ 0: "STATUS_CODE_UNSET",
+ 1: "STATUS_CODE_OK",
+ 2: "STATUS_CODE_ERROR",
+}
+
+var StatusCode_value = map[string]int32{
+ "STATUS_CODE_UNSET": 0,
+ "STATUS_CODE_OK": 1,
+ "STATUS_CODE_ERROR": 2,
+}
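
The four generated enum files above share one shape: a typed int32, const values, and parallel name/value maps for string round-trips without reflection. A quick illustrative use of that pattern (local stand-ins, not the vendored identifiers):

package main

import "fmt"

type statusCode int32

var statusCodeName = map[int32]string{
	0: "STATUS_CODE_UNSET",
	1: "STATUS_CODE_OK",
	2: "STATUS_CODE_ERROR",
}

var statusCodeValue = map[string]int32{
	"STATUS_CODE_UNSET": 0,
	"STATUS_CODE_OK":    1,
	"STATUS_CODE_ERROR": 2,
}

// String maps a code to its proto enum name, falling back to the raw number
// for unknown values — the usual convention for generated enums.
func (c statusCode) String() string {
	if s, ok := statusCodeName[int32(c)]; ok {
		return s
	}
	return fmt.Sprintf("statusCode(%d)", int32(c))
}

func main() {
	c := statusCode(statusCodeValue["STATUS_CODE_OK"])
	fmt.Println(c)              // STATUS_CODE_OK
	fmt.Println(statusCode(42)) // statusCode(42)
}
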
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
new file mode 100644
index 00000000000..ddc86601cd8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_anyvalue.go
@@ -0,0 +1,770 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *AnyValue) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type AnyValue_StringValue struct {
+ StringValue string
+}
+
+func (m *AnyValue) GetStringValue() string {
+ if v, ok := m.GetValue().(*AnyValue_StringValue); ok {
+ return v.StringValue
+ }
+ return ""
+}
+
+type AnyValue_BoolValue struct {
+ BoolValue bool
+}
+
+func (m *AnyValue) GetBoolValue() bool {
+ if v, ok := m.GetValue().(*AnyValue_BoolValue); ok {
+ return v.BoolValue
+ }
+ return false
+}
+
+type AnyValue_IntValue struct {
+ IntValue int64
+}
+
+func (m *AnyValue) GetIntValue() int64 {
+ if v, ok := m.GetValue().(*AnyValue_IntValue); ok {
+ return v.IntValue
+ }
+ return int64(0)
+}
+
+type AnyValue_DoubleValue struct {
+ DoubleValue float64
+}
+
+func (m *AnyValue) GetDoubleValue() float64 {
+ if v, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
+ return v.DoubleValue
+ }
+ return float64(0)
+}
+
+type AnyValue_ArrayValue struct {
+ ArrayValue *ArrayValue
+}
+
+func (m *AnyValue) GetArrayValue() *ArrayValue {
+ if v, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
+ return v.ArrayValue
+ }
+ return nil
+}
+
+type AnyValue_KvlistValue struct {
+ KvlistValue *KeyValueList
+}
+
+func (m *AnyValue) GetKvlistValue() *KeyValueList {
+ if v, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
+ return v.KvlistValue
+ }
+ return nil
+}
+
+type AnyValue_BytesValue struct {
+ BytesValue []byte
+}
+
+func (m *AnyValue) GetBytesValue() []byte {
+ if v, ok := m.GetValue().(*AnyValue_BytesValue); ok {
+ return v.BytesValue
+ }
+ return nil
+}
+
+type AnyValue struct {
+ Value any
+}
+
+var (
+ protoPoolAnyValue = sync.Pool{
+ New: func() any {
+ return &AnyValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_StringValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_StringValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_BoolValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_BoolValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_IntValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_IntValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_DoubleValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_DoubleValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_ArrayValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_ArrayValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_KvlistValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_KvlistValue{}
+ },
+ }
+
+ ProtoPoolAnyValue_BytesValue = sync.Pool{
+ New: func() any {
+ return &AnyValue_BytesValue{}
+ },
+ }
+)
+
+func NewAnyValue() *AnyValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue{}
+ }
+ return protoPoolAnyValue.Get().(*AnyValue)
+}
+
+func DeleteAnyValue(orig *AnyValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ switch ov := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.StringValue = ""
+ ProtoPoolAnyValue_StringValue.Put(ov)
+ }
+ case *AnyValue_BoolValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.BoolValue = false
+ ProtoPoolAnyValue_BoolValue.Put(ov)
+ }
+ case *AnyValue_IntValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.IntValue = int64(0)
+ ProtoPoolAnyValue_IntValue.Put(ov)
+ }
+ case *AnyValue_DoubleValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.DoubleValue = float64(0)
+ ProtoPoolAnyValue_DoubleValue.Put(ov)
+ }
+ case *AnyValue_ArrayValue:
+ DeleteArrayValue(ov.ArrayValue, true)
+ ov.ArrayValue = nil
+ ProtoPoolAnyValue_ArrayValue.Put(ov)
+ case *AnyValue_KvlistValue:
+ DeleteKeyValueList(ov.KvlistValue, true)
+ ov.KvlistValue = nil
+ ProtoPoolAnyValue_KvlistValue.Put(ov)
+ case *AnyValue_BytesValue:
+ if UseProtoPooling.IsEnabled() {
+ ov.BytesValue = nil
+ ProtoPoolAnyValue_BytesValue.Put(ov)
+ }
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolAnyValue.Put(orig)
+ }
+}
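
NewAnyValue/DeleteAnyValue above gate sync.Pool reuse behind the UseProtoPooling flag, resetting objects before they go back so no stale state leaks into the next Get. A minimal sketch of that gated-pool idiom, assuming a plain boolean flag in place of the feature gate (all names local to the example):

package main

import (
	"fmt"
	"sync"
)

var usePooling = true // stand-in for the UseProtoPooling feature gate

type value struct{ s string }

var valuePool = sync.Pool{New: func() any { return &value{} }}

// newValue allocates directly unless pooling is enabled.
func newValue() *value {
	if !usePooling {
		return &value{}
	}
	return valuePool.Get().(*value)
}

// deleteValue resets the object and, when pooling, returns it to the pool.
func deleteValue(v *value) {
	if v == nil {
		return
	}
	*v = value{} // reset before reuse so no stale state leaks
	if usePooling {
		valuePool.Put(v)
	}
}

func main() {
	v := newValue()
	v.s = "hello"
	deleteValue(v)
	// true either way: pooled objects are reset, fresh ones start zeroed.
	fmt.Println(newValue().s == "")
}
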
+
+func CopyAnyValue(dest, src *AnyValue) *AnyValue {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewAnyValue()
+ }
+ switch t := src.Value.(type) {
+ case *AnyValue_StringValue:
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = t.StringValue
+ dest.Value = ov
+ case *AnyValue_BoolValue:
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = t.BoolValue
+ dest.Value = ov
+ case *AnyValue_IntValue:
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = t.IntValue
+ dest.Value = ov
+ case *AnyValue_DoubleValue:
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = t.DoubleValue
+ dest.Value = ov
+ case *AnyValue_ArrayValue:
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ CopyArrayValue(ov.ArrayValue, t.ArrayValue)
+ dest.Value = ov
+
+ case *AnyValue_KvlistValue:
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ CopyKeyValueList(ov.KvlistValue, t.KvlistValue)
+ dest.Value = ov
+
+ case *AnyValue_BytesValue:
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ ov.BytesValue = t.BytesValue
+ dest.Value = ov
+ default:
+ dest.Value = nil
+ }
+
+ return dest
+}
+
+func CopyAnyValueSlice(dest, src []AnyValue) []AnyValue {
+ var newDest []AnyValue
+ if cap(dest) < len(src) {
+ newDest = make([]AnyValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteAnyValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyAnyValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyAnyValuePtrSlice(dest, src []*AnyValue) []*AnyValue {
+ var newDest []*AnyValue
+ if cap(dest) < len(src) {
+ newDest = make([]*AnyValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewAnyValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteAnyValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewAnyValue()
+ }
+ }
+ for i := range src {
+ CopyAnyValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *AnyValue) Reset() {
+ *orig = AnyValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *AnyValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ switch orig := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ dest.WriteObjectField("stringValue")
+ dest.WriteString(orig.StringValue)
+ case *AnyValue_BoolValue:
+ dest.WriteObjectField("boolValue")
+ dest.WriteBool(orig.BoolValue)
+ case *AnyValue_IntValue:
+ dest.WriteObjectField("intValue")
+ dest.WriteInt64(orig.IntValue)
+ case *AnyValue_DoubleValue:
+ dest.WriteObjectField("doubleValue")
+ dest.WriteFloat64(orig.DoubleValue)
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ dest.WriteObjectField("arrayValue")
+ orig.ArrayValue.MarshalJSON(dest)
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ dest.WriteObjectField("kvlistValue")
+ orig.KvlistValue.MarshalJSON(dest)
+ }
+ case *AnyValue_BytesValue:
+
+ dest.WriteObjectField("bytesValue")
+ dest.WriteBytes(orig.BytesValue)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *AnyValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+
+ case "stringValue", "string_value":
+ {
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = iter.ReadString()
+ orig.Value = ov
+ }
+
+ case "boolValue", "bool_value":
+ {
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = iter.ReadBool()
+ orig.Value = ov
+ }
+
+ case "intValue", "int_value":
+ {
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = iter.ReadInt64()
+ orig.Value = ov
+ }
+
+ case "doubleValue", "double_value":
+ {
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = iter.ReadFloat64()
+ orig.Value = ov
+ }
+
+ case "arrayValue", "array_value":
+ {
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ ov.ArrayValue.UnmarshalJSON(iter)
+ orig.Value = ov
+ }
+
+ case "kvlistValue", "kvlist_value":
+ {
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ ov.KvlistValue.UnmarshalJSON(iter)
+ orig.Value = ov
+ }
+
+ case "bytesValue", "bytes_value":
+ {
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ ov.BytesValue = iter.ReadBytes()
+ orig.Value = ov
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *AnyValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ switch orig := orig.Value.(type) {
+ case nil:
+ _ = orig
+ break
+ case *AnyValue_StringValue:
+ l = len(orig.StringValue)
+ n += 1 + proto.Sov(uint64(l)) + l
+ case *AnyValue_BoolValue:
+ n += 2
+ case *AnyValue_IntValue:
+ n += 1 + proto.Sov(uint64(orig.IntValue))
+ case *AnyValue_DoubleValue:
+ n += 9
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ l = orig.ArrayValue.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ l = orig.KvlistValue.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *AnyValue_BytesValue:
+ l = len(orig.BytesValue)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *AnyValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ switch orig := orig.Value.(type) {
+ case *AnyValue_StringValue:
+ l = len(orig.StringValue)
+ pos -= l
+ copy(buf[pos:], orig.StringValue)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ case *AnyValue_BoolValue:
+ pos--
+ if orig.BoolValue {
+ buf[pos] = 1
+ } else {
+ buf[pos] = 0
+ }
+ pos--
+ buf[pos] = 0x10
+
+ case *AnyValue_IntValue:
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue))
+ pos--
+ buf[pos] = 0x18
+
+ case *AnyValue_DoubleValue:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue))
+ pos--
+ buf[pos] = 0x21
+
+ case *AnyValue_ArrayValue:
+ if orig.ArrayValue != nil {
+ l = orig.ArrayValue.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *AnyValue_KvlistValue:
+ if orig.KvlistValue != nil {
+ l = orig.KvlistValue.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ case *AnyValue_BytesValue:
+ l = len(orig.BytesValue)
+ pos -= l
+ copy(buf[pos:], orig.BytesValue)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+
+ }
+ return len(buf) - pos
+}
+
+func (orig *AnyValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_StringValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_StringValue{}
+ } else {
+ ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
+ }
+ ov.StringValue = string(buf[startPos:pos])
+ orig.Value = ov
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_BoolValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BoolValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+ }
+ ov.BoolValue = num != 0
+ orig.Value = ov
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_IntValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_IntValue{}
+ } else {
+ ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
+ }
+ ov.IntValue = int64(num)
+ orig.Value = ov
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *AnyValue_DoubleValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_DoubleValue{}
+ } else {
+ ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+ }
+ ov.DoubleValue = math.Float64frombits(num)
+ orig.Value = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_ArrayValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_ArrayValue{}
+ } else {
+ ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+ }
+ ov.ArrayValue = NewArrayValue()
+ err = ov.ArrayValue.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Value = ov
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_KvlistValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_KvlistValue{}
+ } else {
+ ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
+ }
+ ov.KvlistValue = NewKeyValueList()
+ err = ov.KvlistValue.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Value = ov
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *AnyValue_BytesValue
+ if !UseProtoPooling.IsEnabled() {
+ ov = &AnyValue_BytesValue{}
+ } else {
+ ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+ }
+ if length != 0 {
+ ov.BytesValue = make([]byte, length)
+ copy(ov.BytesValue, buf[startPos:pos])
+ }
+ orig.Value = ov
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestAnyValue() *AnyValue {
+ orig := NewAnyValue()
+ orig.Value = &AnyValue_StringValue{StringValue: "test_stringvalue"}
+ return orig
+}
+
+func GenTestAnyValuePtrSlice() []*AnyValue {
+ orig := make([]*AnyValue, 5)
+ orig[0] = NewAnyValue()
+ orig[1] = GenTestAnyValue()
+ orig[2] = NewAnyValue()
+ orig[3] = GenTestAnyValue()
+ orig[4] = NewAnyValue()
+ return orig
+}
+
+func GenTestAnyValueSlice() []AnyValue {
+ orig := make([]AnyValue, 5)
+ orig[1] = *GenTestAnyValue()
+ orig[3] = *GenTestAnyValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go
new file mode 100644
index 00000000000..a4367eac263
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_arrayvalue.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+ Values []AnyValue
+}
+
+var (
+ protoPoolArrayValue = sync.Pool{
+ New: func() any {
+ return &ArrayValue{}
+ },
+ }
+)
+
+func NewArrayValue() *ArrayValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &ArrayValue{}
+ }
+ return protoPoolArrayValue.Get().(*ArrayValue)
+}
+
+func DeleteArrayValue(orig *ArrayValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Values {
+ DeleteAnyValue(&orig.Values[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolArrayValue.Put(orig)
+ }
+}
+
+func CopyArrayValue(dest, src *ArrayValue) *ArrayValue {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewArrayValue()
+ }
+ dest.Values = CopyAnyValueSlice(dest.Values, src.Values)
+
+ return dest
+}
+
+func CopyArrayValueSlice(dest, src []ArrayValue) []ArrayValue {
+ var newDest []ArrayValue
+ if cap(dest) < len(src) {
+ newDest = make([]ArrayValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteArrayValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyArrayValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyArrayValuePtrSlice(dest, src []*ArrayValue) []*ArrayValue {
+ var newDest []*ArrayValue
+ if cap(dest) < len(src) {
+ newDest = make([]*ArrayValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewArrayValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteArrayValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewArrayValue()
+ }
+ }
+ for i := range src {
+ CopyArrayValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ArrayValue) Reset() {
+ *orig = ArrayValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ArrayValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ orig.Values[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ orig.Values[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ArrayValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, AnyValue{})
+ orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ArrayValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Values {
+ l = orig.Values[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ArrayValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Values) - 1; i >= 0; i-- {
+ l = orig.Values[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ArrayValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Values = append(orig.Values, AnyValue{})
+ err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestArrayValue() *ArrayValue {
+ orig := NewArrayValue()
+ orig.Values = []AnyValue{{}, *GenTestAnyValue()}
+ return orig
+}
+
+func GenTestArrayValuePtrSlice() []*ArrayValue {
+ orig := make([]*ArrayValue, 5)
+ orig[0] = NewArrayValue()
+ orig[1] = GenTestArrayValue()
+ orig[2] = NewArrayValue()
+ orig[3] = GenTestArrayValue()
+ orig[4] = NewArrayValue()
+ return orig
+}
+
+func GenTestArrayValueSlice() []ArrayValue {
+ orig := make([]ArrayValue, 5)
+ orig[1] = *GenTestArrayValue()
+ orig[3] = *GenTestArrayValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go
new file mode 100644
index 00000000000..47c562e6813
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_entityref.go
@@ -0,0 +1,346 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type EntityRef struct {
+ SchemaUrl string
+ Type string
+ IdKeys []string
+ DescriptionKeys []string
+}
+
+var (
+ protoPoolEntityRef = sync.Pool{
+ New: func() any {
+ return &EntityRef{}
+ },
+ }
+)
+
+func NewEntityRef() *EntityRef {
+ if !UseProtoPooling.IsEnabled() {
+ return &EntityRef{}
+ }
+ return protoPoolEntityRef.Get().(*EntityRef)
+}
+
+func DeleteEntityRef(orig *EntityRef, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolEntityRef.Put(orig)
+ }
+}
+
+func CopyEntityRef(dest, src *EntityRef) *EntityRef {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewEntityRef()
+ }
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.Type = src.Type
+
+ dest.IdKeys = append(dest.IdKeys[:0], src.IdKeys...)
+ dest.DescriptionKeys = append(dest.DescriptionKeys[:0], src.DescriptionKeys...)
+
+ return dest
+}
+
+func CopyEntityRefSlice(dest, src []EntityRef) []EntityRef {
+ var newDest []EntityRef
+ if cap(dest) < len(src) {
+ newDest = make([]EntityRef, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteEntityRef(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyEntityRef(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyEntityRefPtrSlice(dest, src []*EntityRef) []*EntityRef {
+ var newDest []*EntityRef
+ if cap(dest) < len(src) {
+ newDest = make([]*EntityRef, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewEntityRef()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteEntityRef(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewEntityRef()
+ }
+ }
+ for i := range src {
+ CopyEntityRef(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *EntityRef) Reset() {
+ *orig = EntityRef{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *EntityRef) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if orig.Type != "" {
+ dest.WriteObjectField("type")
+ dest.WriteString(orig.Type)
+ }
+ if len(orig.IdKeys) > 0 {
+ dest.WriteObjectField("idKeys")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.IdKeys[0])
+ for i := 1; i < len(orig.IdKeys); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.IdKeys[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.DescriptionKeys) > 0 {
+ dest.WriteObjectField("descriptionKeys")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.DescriptionKeys[0])
+ for i := 1; i < len(orig.DescriptionKeys); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.DescriptionKeys[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *EntityRef) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "type":
+ orig.Type = iter.ReadString()
+ case "idKeys", "id_keys":
+ for iter.ReadArray() {
+ orig.IdKeys = append(orig.IdKeys, iter.ReadString())
+ }
+
+ case "descriptionKeys", "description_keys":
+ for iter.ReadArray() {
+ orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *EntityRef) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Type)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.IdKeys {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.DescriptionKeys {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *EntityRef) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Type)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Type)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.IdKeys) - 1; i >= 0; i-- {
+ l = len(orig.IdKeys[i])
+ pos -= l
+ copy(buf[pos:], orig.IdKeys[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- {
+ l = len(orig.DescriptionKeys[i])
+ pos -= l
+ copy(buf[pos:], orig.DescriptionKeys[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ return len(buf) - pos
+}
+
+func (orig *EntityRef) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Type = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos]))
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos]))
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestEntityRef() *EntityRef {
+ orig := NewEntityRef()
+ orig.SchemaUrl = "test_schemaurl"
+ orig.Type = "test_type"
+ orig.IdKeys = []string{"", "test_idkeys"}
+ orig.DescriptionKeys = []string{"", "test_descriptionkeys"}
+ return orig
+}
+
+func GenTestEntityRefPtrSlice() []*EntityRef {
+ orig := make([]*EntityRef, 5)
+ orig[0] = NewEntityRef()
+ orig[1] = GenTestEntityRef()
+ orig[2] = NewEntityRef()
+ orig[3] = GenTestEntityRef()
+ orig[4] = NewEntityRef()
+ return orig
+}
+
+func GenTestEntityRefSlice() []EntityRef {
+ orig := make([]EntityRef, 5)
+ orig[1] = *GenTestEntityRef()
+ orig[3] = *GenTestEntityRef()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go
new file mode 100644
index 00000000000..29223128d0d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exemplar.go
@@ -0,0 +1,523 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *Exemplar) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Exemplar_AsDouble struct {
+ AsDouble float64
+}
+
+func (m *Exemplar) GetAsDouble() float64 {
+ if v, ok := m.GetValue().(*Exemplar_AsDouble); ok {
+ return v.AsDouble
+ }
+ return float64(0)
+}
+
+type Exemplar_AsInt struct {
+ AsInt int64
+}
+
+func (m *Exemplar) GetAsInt() int64 {
+ if v, ok := m.GetValue().(*Exemplar_AsInt); ok {
+ return v.AsInt
+ }
+ return int64(0)
+}
+
+// Exemplar is a sample input double measurement.
+//
+// Exemplars also hold information about the environment when the measurement was recorded,
+// for example the span and trace ID of the active span when the exemplar was recorded.
+type Exemplar struct {
+ FilteredAttributes []KeyValue
+ TimeUnixNano uint64
+ Value any
+ TraceId TraceID
+ SpanId SpanID
+}
+
+var (
+ protoPoolExemplar = sync.Pool{
+ New: func() any {
+ return &Exemplar{}
+ },
+ }
+
+ ProtoPoolExemplar_AsDouble = sync.Pool{
+ New: func() any {
+ return &Exemplar_AsDouble{}
+ },
+ }
+
+ ProtoPoolExemplar_AsInt = sync.Pool{
+ New: func() any {
+ return &Exemplar_AsInt{}
+ },
+ }
+)
+
+func NewExemplar() *Exemplar {
+ if !UseProtoPooling.IsEnabled() {
+ return &Exemplar{}
+ }
+ return protoPoolExemplar.Get().(*Exemplar)
+}
+
+func DeleteExemplar(orig *Exemplar, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.FilteredAttributes {
+ DeleteKeyValue(&orig.FilteredAttributes[i], false)
+ }
+ switch ov := orig.Value.(type) {
+ case *Exemplar_AsDouble:
+ if UseProtoPooling.IsEnabled() {
+ ov.AsDouble = float64(0)
+ ProtoPoolExemplar_AsDouble.Put(ov)
+ }
+ case *Exemplar_AsInt:
+ if UseProtoPooling.IsEnabled() {
+ ov.AsInt = int64(0)
+ ProtoPoolExemplar_AsInt.Put(ov)
+ }
+
+ }
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExemplar.Put(orig)
+ }
+}
+
+func CopyExemplar(dest, src *Exemplar) *Exemplar {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExemplar()
+ }
+ dest.FilteredAttributes = CopyKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes)
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ switch t := src.Value.(type) {
+ case *Exemplar_AsDouble:
+ var ov *Exemplar_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsDouble{}
+ } else {
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
+ }
+ ov.AsDouble = t.AsDouble
+ dest.Value = ov
+ case *Exemplar_AsInt:
+ var ov *Exemplar_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsInt{}
+ } else {
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
+ }
+ ov.AsInt = t.AsInt
+ dest.Value = ov
+ default:
+ dest.Value = nil
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ return dest
+}
+
+func CopyExemplarSlice(dest, src []Exemplar) []Exemplar {
+ var newDest []Exemplar
+ if cap(dest) < len(src) {
+ newDest = make([]Exemplar, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExemplar(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExemplar(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExemplarPtrSlice(dest, src []*Exemplar) []*Exemplar {
+ var newDest []*Exemplar
+ if cap(dest) < len(src) {
+ newDest = make([]*Exemplar, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExemplar()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExemplar(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExemplar()
+ }
+ }
+ for i := range src {
+ CopyExemplar(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Exemplar) Reset() {
+ *orig = Exemplar{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Exemplar) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.FilteredAttributes) > 0 {
+ dest.WriteObjectField("filteredAttributes")
+ dest.WriteArrayStart()
+ orig.FilteredAttributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.FilteredAttributes); i++ {
+ dest.WriteMore()
+ orig.FilteredAttributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ switch orig := orig.Value.(type) {
+ case *Exemplar_AsDouble:
+ dest.WriteObjectField("asDouble")
+ dest.WriteFloat64(orig.AsDouble)
+ case *Exemplar_AsInt:
+ dest.WriteObjectField("asInt")
+ dest.WriteInt64(orig.AsInt)
+ }
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Exemplar) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "filteredAttributes", "filtered_attributes":
+ for iter.ReadArray() {
+ orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
+ orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+
+ case "asDouble", "as_double":
+ {
+ var ov *Exemplar_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsDouble{}
+ } else {
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
+ }
+ ov.AsDouble = iter.ReadFloat64()
+ orig.Value = ov
+ }
+
+ case "asInt", "as_int":
+ {
+ var ov *Exemplar_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsInt{}
+ } else {
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
+ }
+ ov.AsInt = iter.ReadInt64()
+ orig.Value = ov
+ }
+
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Exemplar) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.FilteredAttributes {
+ l = orig.FilteredAttributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ switch orig := orig.Value.(type) {
+ case nil:
+ _ = orig
+ break
+ case *Exemplar_AsDouble:
+ n += 9
+ case *Exemplar_AsInt:
+ n += 9
+ }
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *Exemplar) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- {
+ l = orig.FilteredAttributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x11
+ }
+ switch orig := orig.Value.(type) {
+ case *Exemplar_AsDouble:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
+ pos--
+ buf[pos] = 0x19
+
+ case *Exemplar_AsInt:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
+ pos--
+ buf[pos] = 0x31
+
+ }
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+
+ return len(buf) - pos
+}
+
+func (orig *Exemplar) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
+ err = orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *Exemplar_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsDouble{}
+ } else {
+ ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
+ }
+ ov.AsDouble = math.Float64frombits(num)
+ orig.Value = ov
+
+ case 6:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *Exemplar_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Exemplar_AsInt{}
+ } else {
+ ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
+ }
+ ov.AsInt = int64(num)
+ orig.Value = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExemplar() *Exemplar {
+ orig := NewExemplar()
+ orig.FilteredAttributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.TimeUnixNano = uint64(13)
+ orig.Value = &Exemplar_AsDouble{AsDouble: float64(3.1415926)}
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ return orig
+}
+
+func GenTestExemplarPtrSlice() []*Exemplar {
+ orig := make([]*Exemplar, 5)
+ orig[0] = NewExemplar()
+ orig[1] = GenTestExemplar()
+ orig[2] = NewExemplar()
+ orig[3] = GenTestExemplar()
+ orig[4] = NewExemplar()
+ return orig
+}
+
+func GenTestExemplarSlice() []Exemplar {
+ orig := make([]Exemplar, 5)
+ orig[1] = *GenTestExemplar()
+ orig[3] = *GenTestExemplar()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go
new file mode 100644
index 00000000000..926816346a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogram.go
@@ -0,0 +1,277 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// all reported double measurements over a time interval into an ExponentialHistogram.
+type ExponentialHistogram struct {
+ DataPoints []*ExponentialHistogramDataPoint
+ AggregationTemporality AggregationTemporality
+}
+
+var (
+ protoPoolExponentialHistogram = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogram{}
+ },
+ }
+)
+
+func NewExponentialHistogram() *ExponentialHistogram {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExponentialHistogram{}
+ }
+ return protoPoolExponentialHistogram.Get().(*ExponentialHistogram)
+}
+
+func DeleteExponentialHistogram(orig *ExponentialHistogram, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExponentialHistogram.Put(orig)
+ }
+}
+
+func CopyExponentialHistogram(dest, src *ExponentialHistogram) *ExponentialHistogram {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogram()
+ }
+ dest.DataPoints = CopyExponentialHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
+}
+
+func CopyExponentialHistogramSlice(dest, src []ExponentialHistogram) []ExponentialHistogram {
+ var newDest []ExponentialHistogram
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogram, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogram(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogram(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExponentialHistogramPtrSlice(dest, src []*ExponentialHistogram) []*ExponentialHistogram {
+ var newDest []*ExponentialHistogram
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogram, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogram()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogram(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogram()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogram(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogram) Reset() {
+ *orig = ExponentialHistogram{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogram) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogram) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExponentialHistogram) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ return n
+}
+
+func (orig *ExponentialHistogram) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x10
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExponentialHistogram) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExponentialHistogram() *ExponentialHistogram {
+ orig := NewExponentialHistogram()
+ orig.DataPoints = []*ExponentialHistogramDataPoint{{}, GenTestExponentialHistogramDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ return orig
+}
+
+func GenTestExponentialHistogramPtrSlice() []*ExponentialHistogram {
+ orig := make([]*ExponentialHistogram, 5)
+ orig[0] = NewExponentialHistogram()
+ orig[1] = GenTestExponentialHistogram()
+ orig[2] = NewExponentialHistogram()
+ orig[3] = GenTestExponentialHistogram()
+ orig[4] = NewExponentialHistogram()
+ return orig
+}
+
+func GenTestExponentialHistogramSlice() []ExponentialHistogram {
+ orig := make([]ExponentialHistogram, 5)
+ orig[1] = *GenTestExponentialHistogram()
+ orig[3] = *GenTestExponentialHistogram()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go
new file mode 100644
index 00000000000..d16c0120ebd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapoint.go
@@ -0,0 +1,858 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *ExponentialHistogramDataPoint) GetSum_() any {
+ if m != nil {
+ return m.Sum_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Sum struct {
+ Sum float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetSum() float64 {
+ if v, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
+ return v.Sum
+ }
+ return float64(0)
+}
+
+func (m *ExponentialHistogramDataPoint) GetMin_() any {
+ if m != nil {
+ return m.Min_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Min struct {
+ Min float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetMin() float64 {
+ if v, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
+ return v.Min
+ }
+ return float64(0)
+}
+
+func (m *ExponentialHistogramDataPoint) GetMax_() any {
+ if m != nil {
+ return m.Max_
+ }
+ return nil
+}
+
+type ExponentialHistogramDataPoint_Max struct {
+ Max float64
+}
+
+func (m *ExponentialHistogramDataPoint) GetMax() float64 {
+ if v, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
+ return v.Max
+ }
+ return float64(0)
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+// summary statistics for a population of values; it may optionally contain the
+// distribution of those values across a set of buckets.
+type ExponentialHistogramDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum_ any
+ Scale int32
+ ZeroCount uint64
+ Positive ExponentialHistogramDataPointBuckets
+ Negative ExponentialHistogramDataPointBuckets
+ Flags uint32
+ Exemplars []Exemplar
+ Min_ any
+ Max_ any
+ ZeroThreshold float64
+}
+
+var (
+ protoPoolExponentialHistogramDataPoint = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPoint{}
+ },
+ }
+ ProtoPoolExponentialHistogramDataPoint_Sum = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPoint_Sum{}
+ },
+ }
+
+ ProtoPoolExponentialHistogramDataPoint_Min = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPoint_Min{}
+ },
+ }
+
+ ProtoPoolExponentialHistogramDataPoint_Max = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPoint_Max{}
+ },
+ }
+)
+
+func NewExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExponentialHistogramDataPoint{}
+ }
+ return protoPoolExponentialHistogramDataPoint.Get().(*ExponentialHistogramDataPoint)
+}
+
+func DeleteExponentialHistogramDataPoint(orig *ExponentialHistogramDataPoint, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ switch ov := orig.Sum_.(type) {
+ case *ExponentialHistogramDataPoint_Sum:
+ if UseProtoPooling.IsEnabled() {
+ ov.Sum = float64(0)
+ ProtoPoolExponentialHistogramDataPoint_Sum.Put(ov)
+ }
+
+ }
+ DeleteExponentialHistogramDataPointBuckets(&orig.Positive, false)
+ DeleteExponentialHistogramDataPointBuckets(&orig.Negative, false)
+ for i := range orig.Exemplars {
+ DeleteExemplar(&orig.Exemplars[i], false)
+ }
+ switch ov := orig.Min_.(type) {
+ case *ExponentialHistogramDataPoint_Min:
+ if UseProtoPooling.IsEnabled() {
+ ov.Min = float64(0)
+ ProtoPoolExponentialHistogramDataPoint_Min.Put(ov)
+ }
+
+ }
+ switch ov := orig.Max_.(type) {
+ case *ExponentialHistogramDataPoint_Max:
+ if UseProtoPooling.IsEnabled() {
+ ov.Max = float64(0)
+ ProtoPoolExponentialHistogramDataPoint_Max.Put(ov)
+ }
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExponentialHistogramDataPoint.Put(orig)
+ }
+}
+
+func CopyExponentialHistogramDataPoint(dest, src *ExponentialHistogramDataPoint) *ExponentialHistogramDataPoint {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogramDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.StartTimeUnixNano = src.StartTimeUnixNano
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.Count = src.Count
+
+ switch t := src.Sum_.(type) {
+ case *ExponentialHistogramDataPoint_Sum:
+ var ov *ExponentialHistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
+ }
+ ov.Sum = t.Sum
+ dest.Sum_ = ov
+ default:
+ dest.Sum_ = nil
+ }
+
+ dest.Scale = src.Scale
+
+ dest.ZeroCount = src.ZeroCount
+
+ CopyExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive)
+
+ CopyExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative)
+
+ dest.Flags = src.Flags
+
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
+ switch t := src.Min_.(type) {
+ case *ExponentialHistogramDataPoint_Min:
+ var ov *ExponentialHistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
+ }
+ ov.Min = t.Min
+ dest.Min_ = ov
+ default:
+ dest.Min_ = nil
+ }
+
+ switch t := src.Max_.(type) {
+ case *ExponentialHistogramDataPoint_Max:
+ var ov *ExponentialHistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
+ }
+ ov.Max = t.Max
+ dest.Max_ = ov
+ default:
+ dest.Max_ = nil
+ }
+
+ dest.ZeroThreshold = src.ZeroThreshold
+
+ return dest
+}
+
+func CopyExponentialHistogramDataPointSlice(dest, src []ExponentialHistogramDataPoint) []ExponentialHistogramDataPoint {
+ var newDest []ExponentialHistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogramDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExponentialHistogramDataPointPtrSlice(dest, src []*ExponentialHistogramDataPoint) []*ExponentialHistogramDataPoint {
+ var newDest []*ExponentialHistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogramDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPoint()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogramDataPoint) Reset() {
+ *orig = ExponentialHistogramDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogramDataPoint) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.StartTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("startTimeUnixNano")
+ dest.WriteUint64(orig.StartTimeUnixNano)
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.Count != uint64(0) {
+ dest.WriteObjectField("count")
+ dest.WriteUint64(orig.Count)
+ }
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
+ dest.WriteObjectField("sum")
+ dest.WriteFloat64(orig.Sum)
+ }
+ if orig.Scale != int32(0) {
+ dest.WriteObjectField("scale")
+ dest.WriteInt32(orig.Scale)
+ }
+ if orig.ZeroCount != uint64(0) {
+ dest.WriteObjectField("zeroCount")
+ dest.WriteUint64(orig.ZeroCount)
+ }
+ dest.WriteObjectField("positive")
+ orig.Positive.MarshalJSON(dest)
+ dest.WriteObjectField("negative")
+ orig.Negative.MarshalJSON(dest)
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ if len(orig.Exemplars) > 0 {
+ dest.WriteObjectField("exemplars")
+ dest.WriteArrayStart()
+ orig.Exemplars[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Exemplars); i++ {
+ dest.WriteMore()
+ orig.Exemplars[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
+ dest.WriteObjectField("min")
+ dest.WriteFloat64(orig.Min)
+ }
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
+ dest.WriteObjectField("max")
+ dest.WriteFloat64(orig.Max)
+ }
+ if orig.ZeroThreshold != float64(0) {
+ dest.WriteObjectField("zeroThreshold")
+ dest.WriteFloat64(orig.ZeroThreshold)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "startTimeUnixNano", "start_time_unix_nano":
+ orig.StartTimeUnixNano = iter.ReadUint64()
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "count":
+ orig.Count = iter.ReadUint64()
+ case "sum":
+ {
+ var ov *ExponentialHistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
+ }
+ ov.Sum = iter.ReadFloat64()
+ orig.Sum_ = ov
+ }
+
+ case "scale":
+ orig.Scale = iter.ReadInt32()
+ case "zeroCount", "zero_count":
+ orig.ZeroCount = iter.ReadUint64()
+ case "positive":
+
+ orig.Positive.UnmarshalJSON(iter)
+ case "negative":
+
+ orig.Negative.UnmarshalJSON(iter)
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ case "exemplars":
+ for iter.ReadArray() {
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
+ }
+
+ case "min":
+ {
+ var ov *ExponentialHistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
+ }
+ ov.Min = iter.ReadFloat64()
+ orig.Min_ = ov
+ }
+
+ case "max":
+ {
+ var ov *ExponentialHistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
+ }
+ ov.Max = iter.ReadFloat64()
+ orig.Max_ = ov
+ }
+
+ case "zeroThreshold", "zero_threshold":
+ orig.ZeroThreshold = iter.ReadFloat64()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExponentialHistogramDataPoint) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.StartTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.Count != 0 {
+ n += 9
+ }
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
+ _ = orig
+ n += 9
+ }
+ if orig.Scale != 0 {
+ n += 1 + proto.Soz(uint64(orig.Scale))
+ }
+ if orig.ZeroCount != 0 {
+ n += 9
+ }
+ l = orig.Positive.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.Negative.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.Flags != 0 {
+ n += 1 + proto.Sov(uint64(orig.Flags))
+ }
+ for i := range orig.Exemplars {
+ l = orig.Exemplars[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
+ _ = orig
+ n += 9
+ }
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
+ _ = orig
+ n += 9
+ }
+ if orig.ZeroThreshold != 0 {
+ n += 9
+ }
+ return n
+}
+
+func (orig *ExponentialHistogramDataPoint) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.StartTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
+ pos--
+ buf[pos] = 0x11
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ if orig.Count != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
+ pos--
+ buf[pos] = 0x21
+ }
+ if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
+ pos--
+ buf[pos] = 0x29
+ }
+ if orig.Scale != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Scale)<<1)^uint32(orig.Scale>>31)))
+ pos--
+ buf[pos] = 0x30
+ }
+ if orig.ZeroCount != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ZeroCount))
+ pos--
+ buf[pos] = 0x39
+ }
+ l = orig.Positive.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x42
+
+ l = orig.Negative.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+
+ if orig.Flags != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
+ pos--
+ buf[pos] = 0x50
+ }
+ for i := len(orig.Exemplars) - 1; i >= 0; i-- {
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x5a
+ }
+ if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
+ pos--
+ buf[pos] = 0x61
+ }
+ if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
+ pos--
+ buf[pos] = 0x69
+ }
+ if orig.ZeroThreshold != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ZeroThreshold))
+ pos--
+ buf[pos] = 0x71
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExponentialHistogramDataPoint) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartTimeUnixNano = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Count = uint64(num)
+
+ case 5:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *ExponentialHistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
+ }
+ ov.Sum = math.Float64frombits(num)
+ orig.Sum_ = ov
+
+ case 6:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Scale = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
+
+ case 7:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.ZeroCount = uint64(num)
+
+ case 8:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Positive.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Negative.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 10:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+
+ case 11:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 12:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *ExponentialHistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
+ }
+ ov.Min = math.Float64frombits(num)
+ orig.Min_ = ov
+
+ case 13:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *ExponentialHistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &ExponentialHistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
+ }
+ ov.Max = math.Float64frombits(num)
+ orig.Max_ = ov
+
+ case 14:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.ZeroThreshold = math.Float64frombits(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
+ orig := NewExponentialHistogramDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum_ = &ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)}
+ orig.Scale = int32(13)
+ orig.ZeroCount = uint64(13)
+ orig.Positive = *GenTestExponentialHistogramDataPointBuckets()
+ orig.Negative = *GenTestExponentialHistogramDataPointBuckets()
+ orig.Flags = uint32(13)
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Min_ = &ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)}
+ orig.Max_ = &ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)}
+ orig.ZeroThreshold = float64(3.1415926)
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointPtrSlice() []*ExponentialHistogramDataPoint {
+ orig := make([]*ExponentialHistogramDataPoint, 5)
+ orig[0] = NewExponentialHistogramDataPoint()
+ orig[1] = GenTestExponentialHistogramDataPoint()
+ orig[2] = NewExponentialHistogramDataPoint()
+ orig[3] = GenTestExponentialHistogramDataPoint()
+ orig[4] = NewExponentialHistogramDataPoint()
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointSlice() []ExponentialHistogramDataPoint {
+ orig := make([]ExponentialHistogramDataPoint, 5)
+ orig[1] = *GenTestExponentialHistogramDataPoint()
+ orig[3] = *GenTestExponentialHistogramDataPoint()
+ return orig
+}
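
Note on the generated wire encoding above: SizeProto computes the exact encoded length up front, and MarshalProto then fills the caller-supplied buffer back to front, returning the number of bytes written at the tail. A minimal caller sketch, assuming the same internal package; the helper name is illustrative, not part of the generated API:

    // marshalDataPoint drives the generated size/marshal pair.
    func marshalDataPoint(dp *ExponentialHistogramDataPoint) []byte {
        buf := make([]byte, dp.SizeProto()) // exact size, so no reallocation
        n := dp.MarshalProto(buf)           // fills buf from the end toward the front
        return buf[len(buf)-n:]             // encoded bytes sit at the tail; here n should equal len(buf)
    }

Writing back to front lets each length-delimited field be emitted before its varint length prefix without buffering every submessage separately.
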
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go
new file mode 100644
index 00000000000..459dfe0c38b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exponentialhistogramdatapointbuckets.go
@@ -0,0 +1,290 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExponentialHistogramDataPointBuckets is a set of bucket counts, encoded in a contiguous array of counts.
+type ExponentialHistogramDataPointBuckets struct {
+ Offset int32
+ BucketCounts []uint64
+}
+
+var (
+ protoPoolExponentialHistogramDataPointBuckets = sync.Pool{
+ New: func() any {
+ return &ExponentialHistogramDataPointBuckets{}
+ },
+ }
+)
+
+func NewExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExponentialHistogramDataPointBuckets{}
+ }
+ return protoPoolExponentialHistogramDataPointBuckets.Get().(*ExponentialHistogramDataPointBuckets)
+}
+
+func DeleteExponentialHistogramDataPointBuckets(orig *ExponentialHistogramDataPointBuckets, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExponentialHistogramDataPointBuckets.Put(orig)
+ }
+}
+
+func CopyExponentialHistogramDataPointBuckets(dest, src *ExponentialHistogramDataPointBuckets) *ExponentialHistogramDataPointBuckets {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExponentialHistogramDataPointBuckets()
+ }
+ dest.Offset = src.Offset
+
+ dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
+
+ return dest
+}
+
+func CopyExponentialHistogramDataPointBucketsSlice(dest, src []ExponentialHistogramDataPointBuckets) []ExponentialHistogramDataPointBuckets {
+ var newDest []ExponentialHistogramDataPointBuckets
+ if cap(dest) < len(src) {
+ newDest = make([]ExponentialHistogramDataPointBuckets, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPointBuckets(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPointBuckets(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExponentialHistogramDataPointBucketsPtrSlice(dest, src []*ExponentialHistogramDataPointBuckets) []*ExponentialHistogramDataPointBuckets {
+ var newDest []*ExponentialHistogramDataPointBuckets
+ if cap(dest) < len(src) {
+ newDest = make([]*ExponentialHistogramDataPointBuckets, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPointBuckets()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExponentialHistogramDataPointBuckets(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExponentialHistogramDataPointBuckets()
+ }
+ }
+ for i := range src {
+ CopyExponentialHistogramDataPointBuckets(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) Reset() {
+ *orig = ExponentialHistogramDataPointBuckets{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExponentialHistogramDataPointBuckets) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Offset != int32(0) {
+ dest.WriteObjectField("offset")
+ dest.WriteInt32(orig.Offset)
+ }
+ if len(orig.BucketCounts) > 0 {
+ dest.WriteObjectField("bucketCounts")
+ dest.WriteArrayStart()
+ dest.WriteUint64(orig.BucketCounts[0])
+ for i := 1; i < len(orig.BucketCounts); i++ {
+ dest.WriteMore()
+ dest.WriteUint64(orig.BucketCounts[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExponentialHistogramDataPointBuckets) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "offset":
+ orig.Offset = iter.ReadInt32()
+ case "bucketCounts", "bucket_counts":
+ for iter.ReadArray() {
+ orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.Offset != 0 {
+ n += 1 + proto.Soz(uint64(orig.Offset))
+ }
+ if len(orig.BucketCounts) > 0 {
+ l = 0
+ for _, e := range orig.BucketCounts {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.Offset != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31)))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.BucketCounts)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExponentialHistogramDataPointBuckets) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
+ case 2:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
+ orig := NewExponentialHistogramDataPointBuckets()
+ orig.Offset = int32(13)
+ orig.BucketCounts = []uint64{uint64(0), uint64(13)}
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointBucketsPtrSlice() []*ExponentialHistogramDataPointBuckets {
+ orig := make([]*ExponentialHistogramDataPointBuckets, 5)
+ orig[0] = NewExponentialHistogramDataPointBuckets()
+ orig[1] = GenTestExponentialHistogramDataPointBuckets()
+ orig[2] = NewExponentialHistogramDataPointBuckets()
+ orig[3] = GenTestExponentialHistogramDataPointBuckets()
+ orig[4] = NewExponentialHistogramDataPointBuckets()
+ return orig
+}
+
+func GenTestExponentialHistogramDataPointBucketsSlice() []ExponentialHistogramDataPointBuckets {
+ orig := make([]ExponentialHistogramDataPointBuckets, 5)
+ orig[1] = *GenTestExponentialHistogramDataPointBuckets()
+ orig[3] = *GenTestExponentialHistogramDataPointBuckets()
+ return orig
+}
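
Two encoding details in this file are worth calling out. The sint32 Offset field (and Scale in the previous file) is ZigZag-encoded so small negative values stay small varints; the helpers below restate the inline bit twiddling from MarshalProto and UnmarshalProto, with names invented for illustration:

    func zigzagEncode32(v int32) uint64 {
        return uint64((uint32(v) << 1) ^ uint32(v>>31)) // 0->0, -1->1, 1->2, -2->3, ...
    }

    func zigzagDecode32(u uint64) int32 {
        return int32(uint32(u>>1) ^ uint32(int32((u&1)<<31)>>31))
    }

Second, the BucketCounts case in UnmarshalProto accepts both WireTypeLen (packed, the proto3 default that MarshalProto emits) and WireTypeVarint (one element per tag), keeping the parser compatible with either encoding of a repeated varint field.
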
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go
new file mode 100644
index 00000000000..f93525d3455
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogspartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportLogsPartialSuccess represents the details of a partially successful export request.
+type ExportLogsPartialSuccess struct {
+ RejectedLogRecords int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportLogsPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportLogsPartialSuccess{}
+ },
+ }
+)
+
+func NewExportLogsPartialSuccess() *ExportLogsPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsPartialSuccess{}
+ }
+ return protoPoolExportLogsPartialSuccess.Get().(*ExportLogsPartialSuccess)
+}
+
+func DeleteExportLogsPartialSuccess(orig *ExportLogsPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportLogsPartialSuccess(dest, src *ExportLogsPartialSuccess) *ExportLogsPartialSuccess {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsPartialSuccess()
+ }
+ dest.RejectedLogRecords = src.RejectedLogRecords
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportLogsPartialSuccessSlice(dest, src []ExportLogsPartialSuccess) []ExportLogsPartialSuccess {
+ var newDest []ExportLogsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsPartialSuccessPtrSlice(dest, src []*ExportLogsPartialSuccess) []*ExportLogsPartialSuccess {
+ var newDest []*ExportLogsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportLogsPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsPartialSuccess) Reset() {
+ *orig = ExportLogsPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedLogRecords != int64(0) {
+ dest.WriteObjectField("rejectedLogRecords")
+ dest.WriteInt64(orig.RejectedLogRecords)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedLogRecords", "rejected_log_records":
+ orig.RejectedLogRecords = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedLogRecords != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedLogRecords))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportLogsPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedLogRecords != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedLogRecords = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsPartialSuccess() *ExportLogsPartialSuccess {
+ orig := NewExportLogsPartialSuccess()
+ orig.RejectedLogRecords = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportLogsPartialSuccessPtrSlice() []*ExportLogsPartialSuccess {
+ orig := make([]*ExportLogsPartialSuccess, 5)
+ orig[0] = NewExportLogsPartialSuccess()
+ orig[1] = GenTestExportLogsPartialSuccess()
+ orig[2] = NewExportLogsPartialSuccess()
+ orig[3] = GenTestExportLogsPartialSuccess()
+ orig[4] = NewExportLogsPartialSuccess()
+ return orig
+}
+
+func GenTestExportLogsPartialSuccessSlice() []ExportLogsPartialSuccess {
+ orig := make([]ExportLogsPartialSuccess, 5)
+ orig[1] = *GenTestExportLogsPartialSuccess()
+ orig[3] = *GenTestExportLogsPartialSuccess()
+ return orig
+}
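
A sketch of the object lifecycle implied by the constructors and destructors above, under the UseProtoPooling feature gate: New* draws from a sync.Pool when the gate is enabled, and Delete* resets the message and, when nullable is true, returns it to the pool. Embedded struct fields are deleted with nullable=false, since only heap-allocated pointers can be recycled. The caller below is illustrative, not generated code:

    func exampleLifecycle() {
        ps := NewExportLogsPartialSuccess() // pooled allocation when the gate is on
        ps.RejectedLogRecords = 7           // example values only
        ps.ErrorMessage = "queue full"
        // ... use ps ...
        DeleteExportLogsPartialSuccess(ps, true) // reset and return to the pool
    }
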
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go
new file mode 100644
index 00000000000..1c849ec22db
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportLogsServiceRequest is the top-level struct that is propagated through the logs pipeline.
+// Use NewExportLogsServiceRequest to create a new instance; a zero-initialized instance is not valid for use.
+type ExportLogsServiceRequest struct {
+ ResourceLogs []*ResourceLogs
+}
+
+var (
+ protoPoolExportLogsServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportLogsServiceRequest{}
+ },
+ }
+)
+
+func NewExportLogsServiceRequest() *ExportLogsServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsServiceRequest{}
+ }
+ return protoPoolExportLogsServiceRequest.Get().(*ExportLogsServiceRequest)
+}
+
+func DeleteExportLogsServiceRequest(orig *ExportLogsServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceLogs {
+ DeleteResourceLogs(orig.ResourceLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportLogsServiceRequest(dest, src *ExportLogsServiceRequest) *ExportLogsServiceRequest {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsServiceRequest()
+ }
+ dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
+
+ return dest
+}
+
+func CopyExportLogsServiceRequestSlice(dest, src []ExportLogsServiceRequest) []ExportLogsServiceRequest {
+ var newDest []ExportLogsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsServiceRequestPtrSlice(dest, src []*ExportLogsServiceRequest) []*ExportLogsServiceRequest {
+ var newDest []*ExportLogsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsServiceRequest) Reset() {
+ *orig = ExportLogsServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceLogs) > 0 {
+ dest.WriteObjectField("resourceLogs")
+ dest.WriteArrayStart()
+ orig.ResourceLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceLogs); i++ {
+ dest.WriteMore()
+ orig.ResourceLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceLogs", "resource_logs":
+ for iter.ReadArray() {
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceLogs {
+ l = orig.ResourceLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportLogsServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
+ l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsServiceRequest() *ExportLogsServiceRequest {
+ orig := NewExportLogsServiceRequest()
+ orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
+ return orig
+}
+
+func GenTestExportLogsServiceRequestPtrSlice() []*ExportLogsServiceRequest {
+ orig := make([]*ExportLogsServiceRequest, 5)
+ orig[0] = NewExportLogsServiceRequest()
+ orig[1] = GenTestExportLogsServiceRequest()
+ orig[2] = NewExportLogsServiceRequest()
+ orig[3] = GenTestExportLogsServiceRequest()
+ orig[4] = NewExportLogsServiceRequest()
+ return orig
+}
+
+func GenTestExportLogsServiceRequestSlice() []ExportLogsServiceRequest {
+ orig := make([]ExportLogsServiceRequest, 5)
+ orig[1] = *GenTestExportLogsServiceRequest()
+ orig[3] = *GenTestExportLogsServiceRequest()
+ return orig
+}
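
The request types pair naturally with the buffer contract noted earlier; a round-trip sketch (an assumed test-style helper, not part of the package):

    func roundTripLogsRequest(src *ExportLogsServiceRequest) (*ExportLogsServiceRequest, error) {
        buf := make([]byte, src.SizeProto())
        n := src.MarshalProto(buf)
        dst := NewExportLogsServiceRequest()
        if err := dst.UnmarshalProto(buf[len(buf)-n:]); err != nil {
            DeleteExportLogsServiceRequest(dst, true)
            return nil, err
        }
        return dst, nil
    }
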
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go
new file mode 100644
index 00000000000..7b78c73604d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportlogsserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportLogsServiceResponse represents the response for gRPC/HTTP client/server.
+type ExportLogsServiceResponse struct {
+ PartialSuccess ExportLogsPartialSuccess
+}
+
+var (
+ protoPoolExportLogsServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportLogsServiceResponse{}
+ },
+ }
+)
+
+func NewExportLogsServiceResponse() *ExportLogsServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportLogsServiceResponse{}
+ }
+ return protoPoolExportLogsServiceResponse.Get().(*ExportLogsServiceResponse)
+}
+
+func DeleteExportLogsServiceResponse(orig *ExportLogsServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportLogsPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportLogsServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportLogsServiceResponse(dest, src *ExportLogsServiceResponse) *ExportLogsServiceResponse {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportLogsServiceResponse()
+ }
+ CopyExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportLogsServiceResponseSlice(dest, src []ExportLogsServiceResponse) []ExportLogsServiceResponse {
+ var newDest []ExportLogsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportLogsServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportLogsServiceResponsePtrSlice(dest, src []*ExportLogsServiceResponse) []*ExportLogsServiceResponse {
+ var newDest []*ExportLogsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportLogsServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportLogsServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportLogsServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportLogsServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportLogsServiceResponse) Reset() {
+ *orig = ExportLogsServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportLogsServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportLogsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportLogsServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportLogsServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportLogsServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportLogsServiceResponse() *ExportLogsServiceResponse {
+ orig := NewExportLogsServiceResponse()
+ orig.PartialSuccess = *GenTestExportLogsPartialSuccess()
+ return orig
+}
+
+func GenTestExportLogsServiceResponsePtrSlice() []*ExportLogsServiceResponse {
+ orig := make([]*ExportLogsServiceResponse, 5)
+ orig[0] = NewExportLogsServiceResponse()
+ orig[1] = GenTestExportLogsServiceResponse()
+ orig[2] = NewExportLogsServiceResponse()
+ orig[3] = GenTestExportLogsServiceResponse()
+ orig[4] = NewExportLogsServiceResponse()
+ return orig
+}
+
+func GenTestExportLogsServiceResponseSlice() []ExportLogsServiceResponse {
+ orig := make([]ExportLogsServiceResponse, 5)
+ orig[1] = *GenTestExportLogsServiceResponse()
+ orig[3] = *GenTestExportLogsServiceResponse()
+ return orig
+}
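
The Copy*Slice helpers in these files reuse the destination's backing array whenever it has capacity, delete any surplus elements so the GC can reclaim what they referenced, and may return a different slice header than they were given. Callers must therefore always assign the result back; a one-line illustrative wrapper:

    func syncResponses(dst, src []ExportLogsServiceResponse) []ExportLogsServiceResponse {
        // Use the returned header: the array is reused when cap(dst) >= len(src),
        // and reallocated otherwise.
        return CopyExportLogsServiceResponseSlice(dst, src)
    }
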
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go
new file mode 100644
index 00000000000..ff18f954c52
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricspartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportMetricsPartialSuccess represents the details of a partially successful export request.
+type ExportMetricsPartialSuccess struct {
+ RejectedDataPoints int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportMetricsPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportMetricsPartialSuccess{}
+ },
+ }
+)
+
+func NewExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsPartialSuccess{}
+ }
+ return protoPoolExportMetricsPartialSuccess.Get().(*ExportMetricsPartialSuccess)
+}
+
+func DeleteExportMetricsPartialSuccess(orig *ExportMetricsPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportMetricsPartialSuccess(dest, src *ExportMetricsPartialSuccess) *ExportMetricsPartialSuccess {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsPartialSuccess()
+ }
+ dest.RejectedDataPoints = src.RejectedDataPoints
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportMetricsPartialSuccessSlice(dest, src []ExportMetricsPartialSuccess) []ExportMetricsPartialSuccess {
+ var newDest []ExportMetricsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsPartialSuccessPtrSlice(dest, src []*ExportMetricsPartialSuccess) []*ExportMetricsPartialSuccess {
+ var newDest []*ExportMetricsPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsPartialSuccess) Reset() {
+ *orig = ExportMetricsPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedDataPoints != int64(0) {
+ dest.WriteObjectField("rejectedDataPoints")
+ dest.WriteInt64(orig.RejectedDataPoints)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedDataPoints", "rejected_data_points":
+ orig.RejectedDataPoints = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedDataPoints != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedDataPoints))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportMetricsPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedDataPoints != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedDataPoints = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
+ orig := NewExportMetricsPartialSuccess()
+ orig.RejectedDataPoints = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportMetricsPartialSuccessPtrSlice() []*ExportMetricsPartialSuccess {
+ orig := make([]*ExportMetricsPartialSuccess, 5)
+ orig[0] = NewExportMetricsPartialSuccess()
+ orig[1] = GenTestExportMetricsPartialSuccess()
+ orig[2] = NewExportMetricsPartialSuccess()
+ orig[3] = GenTestExportMetricsPartialSuccess()
+ orig[4] = NewExportMetricsPartialSuccess()
+ return orig
+}
+
+func GenTestExportMetricsPartialSuccessSlice() []ExportMetricsPartialSuccess {
+ orig := make([]ExportMetricsPartialSuccess, 5)
+ orig[1] = *GenTestExportMetricsPartialSuccess()
+ orig[3] = *GenTestExportMetricsPartialSuccess()
+ return orig
+}
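
The single-byte tag literals used throughout these marshalers (0x8 for field 1, 0x12 for field 2, up to 0x71 for field 14 earlier in this diff) encode (fieldNumber << 3) | wireType. A hand-check helper, illustrative only; fields above 15 would need a multi-byte varint tag, which none of these messages have:

    func protoTag(fieldNum int, wireType byte) byte {
        return byte(fieldNum)<<3 | wireType // (2<<3)|2 == 0x12 for ErrorMessage
    }
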
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go
new file mode 100644
index 00000000000..f4a4a0fa7b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportMetricsServiceRequest is the top-level struct that is propagated through the metrics pipeline.
+// Use NewExportMetricsServiceRequest to create a new instance; a zero-initialized instance is not valid for use.
+type ExportMetricsServiceRequest struct {
+ ResourceMetrics []*ResourceMetrics
+}
+
+var (
+ protoPoolExportMetricsServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportMetricsServiceRequest{}
+ },
+ }
+)
+
+func NewExportMetricsServiceRequest() *ExportMetricsServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsServiceRequest{}
+ }
+ return protoPoolExportMetricsServiceRequest.Get().(*ExportMetricsServiceRequest)
+}
+
+func DeleteExportMetricsServiceRequest(orig *ExportMetricsServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceMetrics {
+ DeleteResourceMetrics(orig.ResourceMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportMetricsServiceRequest(dest, src *ExportMetricsServiceRequest) *ExportMetricsServiceRequest {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsServiceRequest()
+ }
+ dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
+
+ return dest
+}
+
+func CopyExportMetricsServiceRequestSlice(dest, src []ExportMetricsServiceRequest) []ExportMetricsServiceRequest {
+ var newDest []ExportMetricsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsServiceRequestPtrSlice(dest, src []*ExportMetricsServiceRequest) []*ExportMetricsServiceRequest {
+ var newDest []*ExportMetricsServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsServiceRequest) Reset() {
+ *orig = ExportMetricsServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceMetrics) > 0 {
+ dest.WriteObjectField("resourceMetrics")
+ dest.WriteArrayStart()
+ orig.ResourceMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceMetrics); i++ {
+ dest.WriteMore()
+ orig.ResourceMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceMetrics", "resource_metrics":
+ for iter.ReadArray() {
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceMetrics {
+ l = orig.ResourceMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportMetricsServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
+ l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsServiceRequest() *ExportMetricsServiceRequest {
+ orig := NewExportMetricsServiceRequest()
+ orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
+ return orig
+}
+
+func GenTestExportMetricsServiceRequestPtrSlice() []*ExportMetricsServiceRequest {
+ orig := make([]*ExportMetricsServiceRequest, 5)
+ orig[0] = NewExportMetricsServiceRequest()
+ orig[1] = GenTestExportMetricsServiceRequest()
+ orig[2] = NewExportMetricsServiceRequest()
+ orig[3] = GenTestExportMetricsServiceRequest()
+ orig[4] = NewExportMetricsServiceRequest()
+ return orig
+}
+
+func GenTestExportMetricsServiceRequestSlice() []ExportMetricsServiceRequest {
+ orig := make([]ExportMetricsServiceRequest, 5)
+ orig[1] = *GenTestExportMetricsServiceRequest()
+ orig[3] = *GenTestExportMetricsServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go
new file mode 100644
index 00000000000..19e69384ed6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportmetricsserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportMetricsServiceResponse struct {
+ PartialSuccess ExportMetricsPartialSuccess
+}
+
+var (
+ protoPoolExportMetricsServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportMetricsServiceResponse{}
+ },
+ }
+)
+
+func NewExportMetricsServiceResponse() *ExportMetricsServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportMetricsServiceResponse{}
+ }
+ return protoPoolExportMetricsServiceResponse.Get().(*ExportMetricsServiceResponse)
+}
+
+func DeleteExportMetricsServiceResponse(orig *ExportMetricsServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportMetricsPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportMetricsServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportMetricsServiceResponse(dest, src *ExportMetricsServiceResponse) *ExportMetricsServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportMetricsServiceResponse()
+ }
+ CopyExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportMetricsServiceResponseSlice(dest, src []ExportMetricsServiceResponse) []ExportMetricsServiceResponse {
+ var newDest []ExportMetricsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportMetricsServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportMetricsServiceResponsePtrSlice(dest, src []*ExportMetricsServiceResponse) []*ExportMetricsServiceResponse {
+ var newDest []*ExportMetricsServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportMetricsServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportMetricsServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportMetricsServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportMetricsServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportMetricsServiceResponse) Reset() {
+ *orig = ExportMetricsServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportMetricsServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportMetricsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportMetricsServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportMetricsServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportMetricsServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportMetricsServiceResponse() *ExportMetricsServiceResponse {
+ orig := NewExportMetricsServiceResponse()
+ orig.PartialSuccess = *GenTestExportMetricsPartialSuccess()
+ return orig
+}
+
+func GenTestExportMetricsServiceResponsePtrSlice() []*ExportMetricsServiceResponse {
+ orig := make([]*ExportMetricsServiceResponse, 5)
+ orig[0] = NewExportMetricsServiceResponse()
+ orig[1] = GenTestExportMetricsServiceResponse()
+ orig[2] = NewExportMetricsServiceResponse()
+ orig[3] = GenTestExportMetricsServiceResponse()
+ orig[4] = NewExportMetricsServiceResponse()
+ return orig
+}
+
+func GenTestExportMetricsServiceResponseSlice() []ExportMetricsServiceResponse {
+ orig := make([]ExportMetricsServiceResponse, 5)
+ orig[1] = *GenTestExportMetricsServiceResponse()
+ orig[3] = *GenTestExportMetricsServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go
new file mode 100644
index 00000000000..03a20a62483
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilespartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportProfilesPartialSuccess struct {
+ RejectedProfiles int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportProfilesPartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportProfilesPartialSuccess{}
+ },
+ }
+)
+
+func NewExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesPartialSuccess{}
+ }
+ return protoPoolExportProfilesPartialSuccess.Get().(*ExportProfilesPartialSuccess)
+}
+
+func DeleteExportProfilesPartialSuccess(orig *ExportProfilesPartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesPartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportProfilesPartialSuccess(dest, src *ExportProfilesPartialSuccess) *ExportProfilesPartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesPartialSuccess()
+ }
+ dest.RejectedProfiles = src.RejectedProfiles
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportProfilesPartialSuccessSlice(dest, src []ExportProfilesPartialSuccess) []ExportProfilesPartialSuccess {
+ var newDest []ExportProfilesPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesPartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesPartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesPartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesPartialSuccessPtrSlice(dest, src []*ExportProfilesPartialSuccess) []*ExportProfilesPartialSuccess {
+ var newDest []*ExportProfilesPartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportProfilesPartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesPartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesPartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesPartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportProfilesPartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportProfilesPartialSuccess) Reset() {
+ *orig = ExportProfilesPartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesPartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedProfiles != int64(0) {
+ dest.WriteObjectField("rejectedProfiles")
+ dest.WriteInt64(orig.RejectedProfiles)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedProfiles", "rejected_profiles":
+ orig.RejectedProfiles = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesPartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedProfiles != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedProfiles))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportProfilesPartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedProfiles != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesPartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedProfiles = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
+ orig := NewExportProfilesPartialSuccess()
+ orig.RejectedProfiles = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportProfilesPartialSuccessPtrSlice() []*ExportProfilesPartialSuccess {
+ orig := make([]*ExportProfilesPartialSuccess, 5)
+ orig[0] = NewExportProfilesPartialSuccess()
+ orig[1] = GenTestExportProfilesPartialSuccess()
+ orig[2] = NewExportProfilesPartialSuccess()
+ orig[3] = GenTestExportProfilesPartialSuccess()
+ orig[4] = NewExportProfilesPartialSuccess()
+ return orig
+}
+
+func GenTestExportProfilesPartialSuccessSlice() []ExportProfilesPartialSuccess {
+ orig := make([]ExportProfilesPartialSuccess, 5)
+ orig[1] = *GenTestExportProfilesPartialSuccess()
+ orig[3] = *GenTestExportProfilesPartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go
new file mode 100644
index 00000000000..5906ce52093
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesservicerequest.go
@@ -0,0 +1,280 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Profiles is the top-level struct that is propagated through the profiles pipeline.
+// Use NewProfiles to create a new instance; a zero-initialized instance is not valid for use.
+type ExportProfilesServiceRequest struct {
+ ResourceProfiles []*ResourceProfiles
+ Dictionary ProfilesDictionary
+}
+
+var (
+ protoPoolExportProfilesServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportProfilesServiceRequest{}
+ },
+ }
+)
+
+func NewExportProfilesServiceRequest() *ExportProfilesServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesServiceRequest{}
+ }
+ return protoPoolExportProfilesServiceRequest.Get().(*ExportProfilesServiceRequest)
+}
+
+func DeleteExportProfilesServiceRequest(orig *ExportProfilesServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceProfiles {
+ DeleteResourceProfiles(orig.ResourceProfiles[i], true)
+ }
+ DeleteProfilesDictionary(&orig.Dictionary, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportProfilesServiceRequest(dest, src *ExportProfilesServiceRequest) *ExportProfilesServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesServiceRequest()
+ }
+ dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
+
+ CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
+
+ return dest
+}
+
+func CopyExportProfilesServiceRequestSlice(dest, src []ExportProfilesServiceRequest) []ExportProfilesServiceRequest {
+ var newDest []ExportProfilesServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesServiceRequestPtrSlice(dest, src []*ExportProfilesServiceRequest) []*ExportProfilesServiceRequest {
+ var newDest []*ExportProfilesServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportProfilesServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportProfilesServiceRequest) Reset() {
+ *orig = ExportProfilesServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceProfiles) > 0 {
+ dest.WriteObjectField("resourceProfiles")
+ dest.WriteArrayStart()
+ orig.ResourceProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceProfiles); i++ {
+ dest.WriteMore()
+ orig.ResourceProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectField("dictionary")
+ orig.Dictionary.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceProfiles", "resource_profiles":
+ for iter.ReadArray() {
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "dictionary":
+
+ orig.Dictionary.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceProfiles {
+ l = orig.ResourceProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Dictionary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportProfilesServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
+ l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Dictionary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesServiceRequest() *ExportProfilesServiceRequest {
+ orig := NewExportProfilesServiceRequest()
+ orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
+ orig.Dictionary = *GenTestProfilesDictionary()
+ return orig
+}
+
+func GenTestExportProfilesServiceRequestPtrSlice() []*ExportProfilesServiceRequest {
+ orig := make([]*ExportProfilesServiceRequest, 5)
+ orig[0] = NewExportProfilesServiceRequest()
+ orig[1] = GenTestExportProfilesServiceRequest()
+ orig[2] = NewExportProfilesServiceRequest()
+ orig[3] = GenTestExportProfilesServiceRequest()
+ orig[4] = NewExportProfilesServiceRequest()
+ return orig
+}
+
+func GenTestExportProfilesServiceRequestSlice() []ExportProfilesServiceRequest {
+ orig := make([]ExportProfilesServiceRequest, 5)
+ orig[1] = *GenTestExportProfilesServiceRequest()
+ orig[3] = *GenTestExportProfilesServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go
new file mode 100644
index 00000000000..0153c060994
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exportprofilesserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportProfilesServiceResponse struct {
+ PartialSuccess ExportProfilesPartialSuccess
+}
+
+var (
+ protoPoolExportProfilesServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportProfilesServiceResponse{}
+ },
+ }
+)
+
+func NewExportProfilesServiceResponse() *ExportProfilesServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportProfilesServiceResponse{}
+ }
+ return protoPoolExportProfilesServiceResponse.Get().(*ExportProfilesServiceResponse)
+}
+
+func DeleteExportProfilesServiceResponse(orig *ExportProfilesServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportProfilesPartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportProfilesServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportProfilesServiceResponse(dest, src *ExportProfilesServiceResponse) *ExportProfilesServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportProfilesServiceResponse()
+ }
+ CopyExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportProfilesServiceResponseSlice(dest, src []ExportProfilesServiceResponse) []ExportProfilesServiceResponse {
+ var newDest []ExportProfilesServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportProfilesServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportProfilesServiceResponsePtrSlice(dest, src []*ExportProfilesServiceResponse) []*ExportProfilesServiceResponse {
+ var newDest []*ExportProfilesServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportProfilesServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportProfilesServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportProfilesServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportProfilesServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportProfilesServiceResponse) Reset() {
+ *orig = ExportProfilesServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportProfilesServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportProfilesServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportProfilesServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportProfilesServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportProfilesServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportProfilesServiceResponse() *ExportProfilesServiceResponse {
+ orig := NewExportProfilesServiceResponse()
+ orig.PartialSuccess = *GenTestExportProfilesPartialSuccess()
+ return orig
+}
+
+func GenTestExportProfilesServiceResponsePtrSlice() []*ExportProfilesServiceResponse {
+ orig := make([]*ExportProfilesServiceResponse, 5)
+ orig[0] = NewExportProfilesServiceResponse()
+ orig[1] = GenTestExportProfilesServiceResponse()
+ orig[2] = NewExportProfilesServiceResponse()
+ orig[3] = GenTestExportProfilesServiceResponse()
+ orig[4] = NewExportProfilesServiceResponse()
+ return orig
+}
+
+func GenTestExportProfilesServiceResponseSlice() []ExportProfilesServiceResponse {
+ orig := make([]ExportProfilesServiceResponse, 5)
+ orig[1] = *GenTestExportProfilesServiceResponse()
+ orig[3] = *GenTestExportProfilesServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go
new file mode 100644
index 00000000000..df8025572b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttracepartialsuccess.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportPartialSuccess represents the details of a partially successful export request.
+type ExportTracePartialSuccess struct {
+ RejectedSpans int64
+ ErrorMessage string
+}
+
+var (
+ protoPoolExportTracePartialSuccess = sync.Pool{
+ New: func() any {
+ return &ExportTracePartialSuccess{}
+ },
+ }
+)
+
+func NewExportTracePartialSuccess() *ExportTracePartialSuccess {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTracePartialSuccess{}
+ }
+ return protoPoolExportTracePartialSuccess.Get().(*ExportTracePartialSuccess)
+}
+
+func DeleteExportTracePartialSuccess(orig *ExportTracePartialSuccess, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTracePartialSuccess.Put(orig)
+ }
+}
+
+func CopyExportTracePartialSuccess(dest, src *ExportTracePartialSuccess) *ExportTracePartialSuccess {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTracePartialSuccess()
+ }
+ dest.RejectedSpans = src.RejectedSpans
+
+ dest.ErrorMessage = src.ErrorMessage
+
+ return dest
+}
+
+func CopyExportTracePartialSuccessSlice(dest, src []ExportTracePartialSuccess) []ExportTracePartialSuccess {
+ var newDest []ExportTracePartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTracePartialSuccess, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTracePartialSuccess(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTracePartialSuccess(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTracePartialSuccessPtrSlice(dest, src []*ExportTracePartialSuccess) []*ExportTracePartialSuccess {
+ var newDest []*ExportTracePartialSuccess
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportTracePartialSuccess, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTracePartialSuccess()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTracePartialSuccess(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTracePartialSuccess()
+ }
+ }
+ for i := range src {
+ CopyExportTracePartialSuccess(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportTracePartialSuccess) Reset() {
+ *orig = ExportTracePartialSuccess{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTracePartialSuccess) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RejectedSpans != int64(0) {
+ dest.WriteObjectField("rejectedSpans")
+ dest.WriteInt64(orig.RejectedSpans)
+ }
+ if orig.ErrorMessage != "" {
+ dest.WriteObjectField("errorMessage")
+ dest.WriteString(orig.ErrorMessage)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTracePartialSuccess) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "rejectedSpans", "rejected_spans":
+ orig.RejectedSpans = iter.ReadInt64()
+ case "errorMessage", "error_message":
+ orig.ErrorMessage = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTracePartialSuccess) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RejectedSpans != 0 {
+ n += 1 + proto.Sov(uint64(orig.RejectedSpans))
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportTracePartialSuccess) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RejectedSpans != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.ErrorMessage)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.ErrorMessage)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportTracePartialSuccess) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.RejectedSpans = int64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ErrorMessage = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTracePartialSuccess() *ExportTracePartialSuccess {
+ orig := NewExportTracePartialSuccess()
+ orig.RejectedSpans = int64(13)
+ orig.ErrorMessage = "test_errormessage"
+ return orig
+}
+
+func GenTestExportTracePartialSuccessPtrSlice() []*ExportTracePartialSuccess {
+ orig := make([]*ExportTracePartialSuccess, 5)
+ orig[0] = NewExportTracePartialSuccess()
+ orig[1] = GenTestExportTracePartialSuccess()
+ orig[2] = NewExportTracePartialSuccess()
+ orig[3] = GenTestExportTracePartialSuccess()
+ orig[4] = NewExportTracePartialSuccess()
+ return orig
+}
+
+func GenTestExportTracePartialSuccessSlice() []ExportTracePartialSuccess {
+ orig := make([]ExportTracePartialSuccess, 5)
+ orig[1] = *GenTestExportTracePartialSuccess()
+ orig[3] = *GenTestExportTracePartialSuccess()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go
new file mode 100644
index 00000000000..e64316f8f3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceservicerequest.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Traces is the top-level struct that is propagated through the traces pipeline.
+// Use NewTraces to create a new instance; a zero-initialized instance is not valid for use.
+type ExportTraceServiceRequest struct {
+ ResourceSpans []*ResourceSpans
+}
+
+var (
+ protoPoolExportTraceServiceRequest = sync.Pool{
+ New: func() any {
+ return &ExportTraceServiceRequest{}
+ },
+ }
+)
+
+func NewExportTraceServiceRequest() *ExportTraceServiceRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTraceServiceRequest{}
+ }
+ return protoPoolExportTraceServiceRequest.Get().(*ExportTraceServiceRequest)
+}
+
+func DeleteExportTraceServiceRequest(orig *ExportTraceServiceRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceSpans {
+ DeleteResourceSpans(orig.ResourceSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTraceServiceRequest.Put(orig)
+ }
+}
+
+func CopyExportTraceServiceRequest(dest, src *ExportTraceServiceRequest) *ExportTraceServiceRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTraceServiceRequest()
+ }
+ dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
+
+ return dest
+}
+
+func CopyExportTraceServiceRequestSlice(dest, src []ExportTraceServiceRequest) []ExportTraceServiceRequest {
+ var newDest []ExportTraceServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTraceServiceRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTraceServiceRequestPtrSlice(dest, src []*ExportTraceServiceRequest) []*ExportTraceServiceRequest {
+ var newDest []*ExportTraceServiceRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportTraceServiceRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceRequest()
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportTraceServiceRequest) Reset() {
+ *orig = ExportTraceServiceRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTraceServiceRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceSpans) > 0 {
+ dest.WriteObjectField("resourceSpans")
+ dest.WriteArrayStart()
+ orig.ResourceSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceSpans); i++ {
+ dest.WriteMore()
+ orig.ResourceSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTraceServiceRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceSpans", "resource_spans":
+ for iter.ReadArray() {
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTraceServiceRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceSpans {
+ l = orig.ResourceSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ExportTraceServiceRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
+ l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *ExportTraceServiceRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTraceServiceRequest() *ExportTraceServiceRequest {
+ orig := NewExportTraceServiceRequest()
+ orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
+ return orig
+}
+
+func GenTestExportTraceServiceRequestPtrSlice() []*ExportTraceServiceRequest {
+ orig := make([]*ExportTraceServiceRequest, 5)
+ orig[0] = NewExportTraceServiceRequest()
+ orig[1] = GenTestExportTraceServiceRequest()
+ orig[2] = NewExportTraceServiceRequest()
+ orig[3] = GenTestExportTraceServiceRequest()
+ orig[4] = NewExportTraceServiceRequest()
+ return orig
+}
+
+func GenTestExportTraceServiceRequestSlice() []ExportTraceServiceRequest {
+ orig := make([]ExportTraceServiceRequest, 5)
+ orig[1] = *GenTestExportTraceServiceRequest()
+ orig[3] = *GenTestExportTraceServiceRequest()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go
new file mode 100644
index 00000000000..86be1a0fdaf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_exporttraceserviceresponse.go
@@ -0,0 +1,229 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+type ExportTraceServiceResponse struct {
+ PartialSuccess ExportTracePartialSuccess
+}
+
+var (
+ protoPoolExportTraceServiceResponse = sync.Pool{
+ New: func() any {
+ return &ExportTraceServiceResponse{}
+ },
+ }
+)
+
+func NewExportTraceServiceResponse() *ExportTraceServiceResponse {
+ if !UseProtoPooling.IsEnabled() {
+ return &ExportTraceServiceResponse{}
+ }
+ return protoPoolExportTraceServiceResponse.Get().(*ExportTraceServiceResponse)
+}
+
+func DeleteExportTraceServiceResponse(orig *ExportTraceServiceResponse, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteExportTracePartialSuccess(&orig.PartialSuccess, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolExportTraceServiceResponse.Put(orig)
+ }
+}
+
+func CopyExportTraceServiceResponse(dest, src *ExportTraceServiceResponse) *ExportTraceServiceResponse {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewExportTraceServiceResponse()
+ }
+ CopyExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
+
+ return dest
+}
+
+func CopyExportTraceServiceResponseSlice(dest, src []ExportTraceServiceResponse) []ExportTraceServiceResponse {
+ var newDest []ExportTraceServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]ExportTraceServiceResponse, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceResponse(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceResponse(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyExportTraceServiceResponsePtrSlice(dest, src []*ExportTraceServiceResponse) []*ExportTraceServiceResponse {
+ var newDest []*ExportTraceServiceResponse
+ if cap(dest) < len(src) {
+ newDest = make([]*ExportTraceServiceResponse, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(srt).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceResponse()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteExportTraceServiceResponse(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewExportTraceServiceResponse()
+ }
+ }
+ for i := range src {
+ CopyExportTraceServiceResponse(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ExportTraceServiceResponse) Reset() {
+ *orig = ExportTraceServiceResponse{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ExportTraceServiceResponse) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("partialSuccess")
+ orig.PartialSuccess.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ExportTraceServiceResponse) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "partialSuccess", "partial_success":
+
+ orig.PartialSuccess.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ExportTraceServiceResponse) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.PartialSuccess.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ExportTraceServiceResponse) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.PartialSuccess.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ return len(buf) - pos
+}
+
+func (orig *ExportTraceServiceResponse) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestExportTraceServiceResponse() *ExportTraceServiceResponse {
+ orig := NewExportTraceServiceResponse()
+ orig.PartialSuccess = *GenTestExportTracePartialSuccess()
+ return orig
+}
+
+func GenTestExportTraceServiceResponsePtrSlice() []*ExportTraceServiceResponse {
+ orig := make([]*ExportTraceServiceResponse, 5)
+ orig[0] = NewExportTraceServiceResponse()
+ orig[1] = GenTestExportTraceServiceResponse()
+ orig[2] = NewExportTraceServiceResponse()
+ orig[3] = GenTestExportTraceServiceResponse()
+ orig[4] = NewExportTraceServiceResponse()
+ return orig
+}
+
+func GenTestExportTraceServiceResponseSlice() []ExportTraceServiceResponse {
+ orig := make([]ExportTraceServiceResponse, 5)
+ orig[1] = *GenTestExportTraceServiceResponse()
+ orig[3] = *GenTestExportTraceServiceResponse()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go
new file mode 100644
index 00000000000..4e822539092
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_function.go
@@ -0,0 +1,314 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
+type Function struct {
+ NameStrindex int32
+ SystemNameStrindex int32
+ FilenameStrindex int32
+ StartLine int64
+}
+
+var (
+ protoPoolFunction = sync.Pool{
+ New: func() any {
+ return &Function{}
+ },
+ }
+)
+
+func NewFunction() *Function {
+ if !UseProtoPooling.IsEnabled() {
+ return &Function{}
+ }
+ return protoPoolFunction.Get().(*Function)
+}
+
+func DeleteFunction(orig *Function, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolFunction.Put(orig)
+ }
+}
+
+func CopyFunction(dest, src *Function) *Function {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewFunction()
+ }
+ dest.NameStrindex = src.NameStrindex
+
+ dest.SystemNameStrindex = src.SystemNameStrindex
+
+ dest.FilenameStrindex = src.FilenameStrindex
+
+ dest.StartLine = src.StartLine
+
+ return dest
+}
+
+func CopyFunctionSlice(dest, src []Function) []Function {
+ var newDest []Function
+ if cap(dest) < len(src) {
+ newDest = make([]Function, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteFunction(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyFunction(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyFunctionPtrSlice(dest, src []*Function) []*Function {
+ var newDest []*Function
+ if cap(dest) < len(src) {
+ newDest = make([]*Function, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewFunction()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteFunction(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewFunction()
+ }
+ }
+ for i := range src {
+ CopyFunction(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Function) Reset() {
+ *orig = Function{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Function) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.NameStrindex != int32(0) {
+ dest.WriteObjectField("nameStrindex")
+ dest.WriteInt32(orig.NameStrindex)
+ }
+ if orig.SystemNameStrindex != int32(0) {
+ dest.WriteObjectField("systemNameStrindex")
+ dest.WriteInt32(orig.SystemNameStrindex)
+ }
+ if orig.FilenameStrindex != int32(0) {
+ dest.WriteObjectField("filenameStrindex")
+ dest.WriteInt32(orig.FilenameStrindex)
+ }
+ if orig.StartLine != int64(0) {
+ dest.WriteObjectField("startLine")
+ dest.WriteInt64(orig.StartLine)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Function) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "nameStrindex", "name_strindex":
+ orig.NameStrindex = iter.ReadInt32()
+ case "systemNameStrindex", "system_name_strindex":
+ orig.SystemNameStrindex = iter.ReadInt32()
+ case "filenameStrindex", "filename_strindex":
+ orig.FilenameStrindex = iter.ReadInt32()
+ case "startLine", "start_line":
+ orig.StartLine = iter.ReadInt64()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Function) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.NameStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.NameStrindex))
+ }
+ if orig.SystemNameStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.SystemNameStrindex))
+ }
+ if orig.FilenameStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
+ }
+ if orig.StartLine != 0 {
+ n += 1 + proto.Sov(uint64(orig.StartLine))
+ }
+ return n
+}
+
+func (orig *Function) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.NameStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.NameStrindex))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.SystemNameStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.SystemNameStrindex))
+ pos--
+ buf[pos] = 0x10
+ }
+ if orig.FilenameStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
+ pos--
+ buf[pos] = 0x18
+ }
+ if orig.StartLine != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.StartLine))
+ pos--
+ buf[pos] = 0x20
+ }
+ return len(buf) - pos
+}
+
+func (orig *Function) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.NameStrindex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.SystemNameStrindex = int32(num)
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FilenameStrindex = int32(num)
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartLine = int64(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestFunction() *Function {
+ orig := NewFunction()
+ orig.NameStrindex = int32(13)
+ orig.SystemNameStrindex = int32(13)
+ orig.FilenameStrindex = int32(13)
+ orig.StartLine = int64(13)
+ return orig
+}
+
+func GenTestFunctionPtrSlice() []*Function {
+ orig := make([]*Function, 5)
+ orig[0] = NewFunction()
+ orig[1] = GenTestFunction()
+ orig[2] = NewFunction()
+ orig[3] = GenTestFunction()
+ orig[4] = NewFunction()
+ return orig
+}
+
+func GenTestFunctionSlice() []Function {
+ orig := make([]Function, 5)
+ orig[1] = *GenTestFunction()
+ orig[3] = *GenTestFunction()
+ return orig
+}
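
A note for reviewers of the vendored file above: SizeProto charges 1 + proto.Sov(value) per set scalar field, one tag byte plus a varint payload at 7 bits per byte. The internal proto.Sov is not importable from outside pdata, so the sketch below uses a hypothetical stand-in sov with the same contract.

package main

import "fmt"

// sov mirrors the internal proto.Sov: the number of bytes needed to encode x
// as a protobuf varint, at 7 payload bits per byte.
func sov(x uint64) int {
    n := 1
    for x >= 0x80 {
        x >>= 7
        n++
    }
    return n
}

func main() {
    // A Function with NameStrindex = 300 (field 1, varint) serializes to one
    // tag byte plus sov(300) payload bytes, 3 bytes in total.
    fmt.Println(1 + sov(300)) // 3
}
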
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go
new file mode 100644
index 00000000000..e39eb2e7958
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_gauge.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
+type Gauge struct {
+ DataPoints []*NumberDataPoint
+}
+
+var (
+ protoPoolGauge = sync.Pool{
+ New: func() any {
+ return &Gauge{}
+ },
+ }
+)
+
+func NewGauge() *Gauge {
+ if !UseProtoPooling.IsEnabled() {
+ return &Gauge{}
+ }
+ return protoPoolGauge.Get().(*Gauge)
+}
+
+func DeleteGauge(orig *Gauge, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteNumberDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolGauge.Put(orig)
+ }
+}
+
+func CopyGauge(dest, src *Gauge) *Gauge {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewGauge()
+ }
+ dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ return dest
+}
+
+func CopyGaugeSlice(dest, src []Gauge) []Gauge {
+ var newDest []Gauge
+ if cap(dest) < len(src) {
+ newDest = make([]Gauge, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteGauge(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyGauge(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyGaugePtrSlice(dest, src []*Gauge) []*Gauge {
+ var newDest []*Gauge
+ if cap(dest) < len(src) {
+ newDest = make([]*Gauge, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewGauge()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteGauge(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewGauge()
+ }
+ }
+ for i := range src {
+ CopyGauge(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Gauge) Reset() {
+ *orig = Gauge{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Gauge) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Gauge) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Gauge) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Gauge) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Gauge) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestGauge() *Gauge {
+ orig := NewGauge()
+ orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
+ return orig
+}
+
+func GenTestGaugePtrSlice() []*Gauge {
+ orig := make([]*Gauge, 5)
+ orig[0] = NewGauge()
+ orig[1] = GenTestGauge()
+ orig[2] = NewGauge()
+ orig[3] = GenTestGauge()
+ orig[4] = NewGauge()
+ return orig
+}
+
+func GenTestGaugeSlice() []Gauge {
+ orig := make([]Gauge, 5)
+ orig[1] = *GenTestGauge()
+ orig[3] = *GenTestGauge()
+ return orig
+}
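
The MarshalProto methods in these generated files fill the buffer from the end toward the front, so the byte length of each embedded message is already known when its length prefix and tag byte are written, and no second per-field sizing pass is needed. Below is a minimal sketch of that pattern; sov and encodeVarint are stand-ins for the internal proto helpers, not the vendored API.

package main

import "fmt"

// sov is the varint size helper, as in the previous sketch.
func sov(x uint64) int {
    n := 1
    for x >= 0x80 {
        x >>= 7
        n++
    }
    return n
}

// encodeVarint writes v so that its last byte lands at pos-1 and returns the
// new, smaller position, matching how the generated code calls
// proto.EncodeVarint.
func encodeVarint(buf []byte, pos int, v uint64) int {
    pos -= sov(v)
    for j := pos; ; j++ {
        if v < 0x80 {
            buf[j] = byte(v)
            break
        }
        buf[j] = byte(v&0x7f | 0x80)
        v >>= 7
    }
    return pos
}

func main() {
    // Write field 1 (wire type LEN) holding the 3-byte payload "abc",
    // right to left: payload first, then its length, then tag byte 0xa.
    buf := make([]byte, 16)
    pos := len(buf)
    pos -= 3
    copy(buf[pos:], "abc")
    pos = encodeVarint(buf, pos, 3)
    pos--
    buf[pos] = 0xa
    fmt.Printf("%% x\n", buf[pos:]) // 0a 03 61 62 63
}
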
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go
new file mode 100644
index 00000000000..b35b328f913
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogram.go
@@ -0,0 +1,276 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Histogram represents the type of a metric that is calculated by aggregating all reported measurements over a time interval into a histogram.
+type Histogram struct {
+ DataPoints []*HistogramDataPoint
+ AggregationTemporality AggregationTemporality
+}
+
+var (
+ protoPoolHistogram = sync.Pool{
+ New: func() any {
+ return &Histogram{}
+ },
+ }
+)
+
+func NewHistogram() *Histogram {
+ if !UseProtoPooling.IsEnabled() {
+ return &Histogram{}
+ }
+ return protoPoolHistogram.Get().(*Histogram)
+}
+
+func DeleteHistogram(orig *Histogram, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteHistogramDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolHistogram.Put(orig)
+ }
+}
+
+func CopyHistogram(dest, src *Histogram) *Histogram {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewHistogram()
+ }
+ dest.DataPoints = CopyHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
+}
+
+func CopyHistogramSlice(dest, src []Histogram) []Histogram {
+ var newDest []Histogram
+ if cap(dest) < len(src) {
+ newDest = make([]Histogram, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogram(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyHistogram(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyHistogramPtrSlice(dest, src []*Histogram) []*Histogram {
+ var newDest []*Histogram
+ if cap(dest) < len(src) {
+ newDest = make([]*Histogram, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogram()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogram(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogram()
+ }
+ }
+ for i := range src {
+ CopyHistogram(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Histogram) Reset() {
+ *orig = Histogram{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Histogram) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Histogram) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Histogram) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ return n
+}
+
+func (orig *Histogram) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x10
+ }
+ return len(buf) - pos
+}
+
+func (orig *Histogram) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestHistogram() *Histogram {
+ orig := NewHistogram()
+ orig.DataPoints = []*HistogramDataPoint{{}, GenTestHistogramDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ return orig
+}
+
+func GenTestHistogramPtrSlice() []*Histogram {
+ orig := make([]*Histogram, 5)
+ orig[0] = NewHistogram()
+ orig[1] = GenTestHistogram()
+ orig[2] = NewHistogram()
+ orig[3] = GenTestHistogram()
+ orig[4] = NewHistogram()
+ return orig
+}
+
+func GenTestHistogramSlice() []Histogram {
+ orig := make([]Histogram, 5)
+ orig[1] = *GenTestHistogram()
+ orig[3] = *GenTestHistogram()
+ return orig
+}
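
On the decode side, every UnmarshalProto loop above dispatches on the tag that proto.ConsumeTag splits into a field number and a wire type. A self-contained sketch of that split follows; consumeTag is a hypothetical stand-in, not the vendored helper.

package main

import "fmt"

// consumeTag decodes a protobuf tag varint: the low three bits are the wire
// type, the remaining bits the field number.
func consumeTag(buf []byte, pos int) (fieldNum int32, wireType int, newPos int, err error) {
    var v uint64
    for shift := uint(0); ; shift += 7 {
        if pos >= len(buf) {
            return 0, 0, 0, fmt.Errorf("truncated tag")
        }
        b := buf[pos]
        pos++
        v |= uint64(b&0x7f) << shift
        if b < 0x80 {
            break
        }
    }
    return int32(v >> 3), int(v & 0x7), pos, nil
}

func main() {
    // 0x10 is the tag Histogram.MarshalProto writes before
    // AggregationTemporality: field 2, wire type 0 (varint).
    f, w, _, _ := consumeTag([]byte{0x10, 0x0d}, 0)
    fmt.Println(f, w) // 2 0
}
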
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go
new file mode 100644
index 00000000000..42953b3f91d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_histogramdatapoint.go
@@ -0,0 +1,825 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *HistogramDataPoint) GetSum_() any {
+ if m != nil {
+ return m.Sum_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Sum struct {
+ Sum float64
+}
+
+func (m *HistogramDataPoint) GetSum() float64 {
+ if v, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
+ return v.Sum
+ }
+ return float64(0)
+}
+
+func (m *HistogramDataPoint) GetMin_() any {
+ if m != nil {
+ return m.Min_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Min struct {
+ Min float64
+}
+
+func (m *HistogramDataPoint) GetMin() float64 {
+ if v, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
+ return v.Min
+ }
+ return float64(0)
+}
+
+func (m *HistogramDataPoint) GetMax_() any {
+ if m != nil {
+ return m.Max_
+ }
+ return nil
+}
+
+type HistogramDataPoint_Max struct {
+ Max float64
+}
+
+func (m *HistogramDataPoint) GetMax() float64 {
+ if v, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
+ return v.Max
+ }
+ return float64(0)
+}
+
+// HistogramDataPoint is a single data point in a time series that describes the time-varying values of a Histogram.
+type HistogramDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum_ any
+ BucketCounts []uint64
+ ExplicitBounds []float64
+ Exemplars []Exemplar
+ Flags uint32
+ Min_ any
+ Max_ any
+}
+
+var (
+ protoPoolHistogramDataPoint = sync.Pool{
+ New: func() any {
+ return &HistogramDataPoint{}
+ },
+ }
+ ProtoPoolHistogramDataPoint_Sum = sync.Pool{
+ New: func() any {
+ return &HistogramDataPoint_Sum{}
+ },
+ }
+
+ ProtoPoolHistogramDataPoint_Min = sync.Pool{
+ New: func() any {
+ return &HistogramDataPoint_Min{}
+ },
+ }
+
+ ProtoPoolHistogramDataPoint_Max = sync.Pool{
+ New: func() any {
+ return &HistogramDataPoint_Max{}
+ },
+ }
+)
+
+func NewHistogramDataPoint() *HistogramDataPoint {
+ if !UseProtoPooling.IsEnabled() {
+ return &HistogramDataPoint{}
+ }
+ return protoPoolHistogramDataPoint.Get().(*HistogramDataPoint)
+}
+
+func DeleteHistogramDataPoint(orig *HistogramDataPoint, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ switch ov := orig.Sum_.(type) {
+ case *HistogramDataPoint_Sum:
+ if UseProtoPooling.IsEnabled() {
+ ov.Sum = float64(0)
+ ProtoPoolHistogramDataPoint_Sum.Put(ov)
+ }
+
+ }
+ for i := range orig.Exemplars {
+ DeleteExemplar(&orig.Exemplars[i], false)
+ }
+ switch ov := orig.Min_.(type) {
+ case *HistogramDataPoint_Min:
+ if UseProtoPooling.IsEnabled() {
+ ov.Min = float64(0)
+ ProtoPoolHistogramDataPoint_Min.Put(ov)
+ }
+
+ }
+ switch ov := orig.Max_.(type) {
+ case *HistogramDataPoint_Max:
+ if UseProtoPooling.IsEnabled() {
+ ov.Max = float64(0)
+ ProtoPoolHistogramDataPoint_Max.Put(ov)
+ }
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolHistogramDataPoint.Put(orig)
+ }
+}
+
+func CopyHistogramDataPoint(dest, src *HistogramDataPoint) *HistogramDataPoint {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewHistogramDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.StartTimeUnixNano = src.StartTimeUnixNano
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.Count = src.Count
+
+ switch t := src.Sum_.(type) {
+ case *HistogramDataPoint_Sum:
+ var ov *HistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
+ }
+ ov.Sum = t.Sum
+ dest.Sum_ = ov
+ default:
+ dest.Sum_ = nil
+ }
+
+ dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
+ dest.ExplicitBounds = append(dest.ExplicitBounds[:0], src.ExplicitBounds...)
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
+ dest.Flags = src.Flags
+
+ switch t := src.Min_.(type) {
+ case *HistogramDataPoint_Min:
+ var ov *HistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
+ }
+ ov.Min = t.Min
+ dest.Min_ = ov
+ default:
+ dest.Min_ = nil
+ }
+
+ switch t := src.Max_.(type) {
+ case *HistogramDataPoint_Max:
+ var ov *HistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
+ }
+ ov.Max = t.Max
+ dest.Max_ = ov
+ default:
+ dest.Max_ = nil
+ }
+
+ return dest
+}
+
+func CopyHistogramDataPointSlice(dest, src []HistogramDataPoint) []HistogramDataPoint {
+ var newDest []HistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]HistogramDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogramDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyHistogramDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyHistogramDataPointPtrSlice(dest, src []*HistogramDataPoint) []*HistogramDataPoint {
+ var newDest []*HistogramDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*HistogramDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogramDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteHistogramDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewHistogramDataPoint()
+ }
+ }
+ for i := range src {
+ CopyHistogramDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *HistogramDataPoint) Reset() {
+ *orig = HistogramDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *HistogramDataPoint) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.StartTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("startTimeUnixNano")
+ dest.WriteUint64(orig.StartTimeUnixNano)
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.Count != uint64(0) {
+ dest.WriteObjectField("count")
+ dest.WriteUint64(orig.Count)
+ }
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
+ dest.WriteObjectField("sum")
+ dest.WriteFloat64(orig.Sum)
+ }
+ if len(orig.BucketCounts) > 0 {
+ dest.WriteObjectField("bucketCounts")
+ dest.WriteArrayStart()
+ dest.WriteUint64(orig.BucketCounts[0])
+ for i := 1; i < len(orig.BucketCounts); i++ {
+ dest.WriteMore()
+ dest.WriteUint64(orig.BucketCounts[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.ExplicitBounds) > 0 {
+ dest.WriteObjectField("explicitBounds")
+ dest.WriteArrayStart()
+ dest.WriteFloat64(orig.ExplicitBounds[0])
+ for i := 1; i < len(orig.ExplicitBounds); i++ {
+ dest.WriteMore()
+ dest.WriteFloat64(orig.ExplicitBounds[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.Exemplars) > 0 {
+ dest.WriteObjectField("exemplars")
+ dest.WriteArrayStart()
+ orig.Exemplars[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Exemplars); i++ {
+ dest.WriteMore()
+ orig.Exemplars[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
+ dest.WriteObjectField("min")
+ dest.WriteFloat64(orig.Min)
+ }
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
+ dest.WriteObjectField("max")
+ dest.WriteFloat64(orig.Max)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *HistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "startTimeUnixNano", "start_time_unix_nano":
+ orig.StartTimeUnixNano = iter.ReadUint64()
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "count":
+ orig.Count = iter.ReadUint64()
+ case "sum":
+ {
+ var ov *HistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
+ }
+ ov.Sum = iter.ReadFloat64()
+ orig.Sum_ = ov
+ }
+
+ case "bucketCounts", "bucket_counts":
+ for iter.ReadArray() {
+ orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
+ }
+
+ case "explicitBounds", "explicit_bounds":
+ for iter.ReadArray() {
+ orig.ExplicitBounds = append(orig.ExplicitBounds, iter.ReadFloat64())
+ }
+
+ case "exemplars":
+ for iter.ReadArray() {
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
+ }
+
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ case "min":
+ {
+ var ov *HistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
+ }
+ ov.Min = iter.ReadFloat64()
+ orig.Min_ = ov
+ }
+
+ case "max":
+ {
+ var ov *HistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
+ }
+ ov.Max = iter.ReadFloat64()
+ orig.Max_ = ov
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *HistogramDataPoint) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.StartTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.Count != 0 {
+ n += 9
+ }
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
+ _ = orig
+ n += 9
+ }
+ l = len(orig.BucketCounts)
+ if l > 0 {
+ l *= 8
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.ExplicitBounds)
+ if l > 0 {
+ l *= 8
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.Exemplars {
+ l = orig.Exemplars[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Flags != 0 {
+ n += 1 + proto.Sov(uint64(orig.Flags))
+ }
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
+ _ = orig
+ n += 9
+ }
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
+ _ = orig
+ n += 9
+ }
+ return n
+}
+
+func (orig *HistogramDataPoint) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+ }
+ if orig.StartTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
+ pos--
+ buf[pos] = 0x11
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ if orig.Count != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
+ pos--
+ buf[pos] = 0x21
+ }
+ if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
+ pos--
+ buf[pos] = 0x29
+ }
+ l = len(orig.BucketCounts)
+ if l > 0 {
+ for i := l - 1; i >= 0; i-- {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.BucketCounts[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(l*8))
+ pos--
+ buf[pos] = 0x32
+ }
+ l = len(orig.ExplicitBounds)
+ if l > 0 {
+ for i := l - 1; i >= 0; i-- {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ExplicitBounds[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(l*8))
+ pos--
+ buf[pos] = 0x3a
+ }
+ for i := len(orig.Exemplars) - 1; i >= 0; i-- {
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x42
+ }
+ if orig.Flags != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
+ pos--
+ buf[pos] = 0x50
+ }
+ if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
+ pos--
+ buf[pos] = 0x59
+ }
+ if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
+ pos--
+ buf[pos] = 0x61
+ }
+ return len(buf) - pos
+}
+
+func (orig *HistogramDataPoint) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartTimeUnixNano = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Count = uint64(num)
+
+ case 5:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *HistogramDataPoint_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Sum{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
+ }
+ ov.Sum = math.Float64frombits(num)
+ orig.Sum_ = ov
+ case 6:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ size := length / 8
+ orig.BucketCounts = make([]uint64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts[i] = uint64(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.BucketCounts = append(orig.BucketCounts, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
+ }
+ case 7:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ size := length / 8
+ orig.ExplicitBounds = make([]float64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.ExplicitBounds[i] = math.Float64frombits(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field ExplicitBounds", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.ExplicitBounds = append(orig.ExplicitBounds, math.Float64frombits(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
+ }
+
+ case 8:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 10:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+
+ case 11:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *HistogramDataPoint_Min
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Min{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
+ }
+ ov.Min = math.Float64frombits(num)
+ orig.Min_ = ov
+
+ case 12:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *HistogramDataPoint_Max
+ if !UseProtoPooling.IsEnabled() {
+ ov = &HistogramDataPoint_Max{}
+ } else {
+ ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
+ }
+ ov.Max = math.Float64frombits(num)
+ orig.Max_ = ov
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestHistogramDataPoint() *HistogramDataPoint {
+ orig := NewHistogramDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum_ = &HistogramDataPoint_Sum{Sum: float64(3.1415926)}
+ orig.BucketCounts = []uint64{uint64(0), uint64(13)}
+ orig.ExplicitBounds = []float64{float64(0), float64(3.1415926)}
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Flags = uint32(13)
+ orig.Min_ = &HistogramDataPoint_Min{Min: float64(3.1415926)}
+ orig.Max_ = &HistogramDataPoint_Max{Max: float64(3.1415926)}
+ return orig
+}
+
+func GenTestHistogramDataPointPtrSlice() []*HistogramDataPoint {
+ orig := make([]*HistogramDataPoint, 5)
+ orig[0] = NewHistogramDataPoint()
+ orig[1] = GenTestHistogramDataPoint()
+ orig[2] = NewHistogramDataPoint()
+ orig[3] = GenTestHistogramDataPoint()
+ orig[4] = NewHistogramDataPoint()
+ return orig
+}
+
+func GenTestHistogramDataPointSlice() []HistogramDataPoint {
+ orig := make([]HistogramDataPoint, 5)
+ orig[1] = *GenTestHistogramDataPoint()
+ orig[3] = *GenTestHistogramDataPoint()
+ return orig
+}
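
Two encodings in the file above are worth a note: Sum, Min, and Max travel as doubles on wire type I64 (a tag byte followed by 8 little-endian bytes), while BucketCounts and ExplicitBounds are packed, a single LEN-prefixed run of fixed 8-byte values, which is why SizeProto charges len*8 for their payloads. The runnable sketch below mirrors the values used by GenTestHistogramDataPoint.

package main

import (
    "encoding/binary"
    "fmt"
    "math"
)

func main() {
    // Fixed64 double: tag 0x29 = (5 << 3) | 1, then 8 little-endian bytes,
    // matching what MarshalProto emits for the Sum oneof.
    sum := make([]byte, 9)
    sum[0] = 0x29
    binary.LittleEndian.PutUint64(sum[1:], math.Float64bits(3.1415926))
    fmt.Println(math.Float64frombits(binary.LittleEndian.Uint64(sum[1:])))

    // Packed repeated fixed64: tag 0x32 = (6 << 3) | 2, a varint byte length
    // (16 fits in one byte here), then the values back to back. This is the
    // len*8 payload that SizeProto accounts for.
    counts := []uint64{0, 13}
    packed := []byte{0x32, byte(len(counts) * 8)}
    for _, c := range counts {
        packed = binary.LittleEndian.AppendUint64(packed, c)
    }
    fmt.Printf("%% x\n", packed)
}
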
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go
new file mode 100644
index 00000000000..1e64085e946
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_instrumentationscope.go
@@ -0,0 +1,343 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// InstrumentationScope is a message representing the instrumentation scope information.
+type InstrumentationScope struct {
+ Name string
+ Version string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+}
+
+var (
+ protoPoolInstrumentationScope = sync.Pool{
+ New: func() any {
+ return &InstrumentationScope{}
+ },
+ }
+)
+
+func NewInstrumentationScope() *InstrumentationScope {
+ if !UseProtoPooling.IsEnabled() {
+ return &InstrumentationScope{}
+ }
+ return protoPoolInstrumentationScope.Get().(*InstrumentationScope)
+}
+
+func DeleteInstrumentationScope(orig *InstrumentationScope, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolInstrumentationScope.Put(orig)
+ }
+}
+
+func CopyInstrumentationScope(dest, src *InstrumentationScope) *InstrumentationScope {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewInstrumentationScope()
+ }
+ dest.Name = src.Name
+
+ dest.Version = src.Version
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ return dest
+}
+
+func CopyInstrumentationScopeSlice(dest, src []InstrumentationScope) []InstrumentationScope {
+ var newDest []InstrumentationScope
+ if cap(dest) < len(src) {
+ newDest = make([]InstrumentationScope, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteInstrumentationScope(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyInstrumentationScope(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyInstrumentationScopePtrSlice(dest, src []*InstrumentationScope) []*InstrumentationScope {
+ var newDest []*InstrumentationScope
+ if cap(dest) < len(src) {
+ newDest = make([]*InstrumentationScope, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewInstrumentationScope()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteInstrumentationScope(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewInstrumentationScope()
+ }
+ }
+ for i := range src {
+ CopyInstrumentationScope(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *InstrumentationScope) Reset() {
+ *orig = InstrumentationScope{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *InstrumentationScope) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Version != "" {
+ dest.WriteObjectField("version")
+ dest.WriteString(orig.Version)
+ }
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *InstrumentationScope) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "version":
+ orig.Version = iter.ReadString()
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *InstrumentationScope) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Version)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ return n
+}
+
+func (orig *InstrumentationScope) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Version)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Version)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x20
+ }
+ return len(buf) - pos
+}
+
+func (orig *InstrumentationScope) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Version = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestInstrumentationScope() *InstrumentationScope {
+ orig := NewInstrumentationScope()
+ orig.Name = "test_name"
+ orig.Version = "test_version"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ return orig
+}
+
+func GenTestInstrumentationScopePtrSlice() []*InstrumentationScope {
+ orig := make([]*InstrumentationScope, 5)
+ orig[0] = NewInstrumentationScope()
+ orig[1] = GenTestInstrumentationScope()
+ orig[2] = NewInstrumentationScope()
+ orig[3] = GenTestInstrumentationScope()
+ orig[4] = NewInstrumentationScope()
+ return orig
+}
+
+func GenTestInstrumentationScopeSlice() []InstrumentationScope {
+ orig := make([]InstrumentationScope, 5)
+ orig[1] = *GenTestInstrumentationScope()
+ orig[3] = *GenTestInstrumentationScope()
+ return orig
+}
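
All New*/Delete* pairs in these files share one lifecycle: allocate from a sync.Pool when the UseProtoPooling gate is enabled, Reset before handing an object back so no data leaks through the pool, and Put only objects that are individually owned (nullable == true). The compressed sketch below reduces the feature gate to a plain bool; the scope type and all names are illustrative only.

package main

import (
    "fmt"
    "sync"
)

type scope struct{ name string }

var (
    usePooling = true // stand-in for UseProtoPooling.IsEnabled()
    scopePool  = sync.Pool{New: func() any { return &scope{} }}
)

func newScope() *scope {
    if !usePooling {
        return &scope{}
    }
    return scopePool.Get().(*scope)
}

func deleteScope(s *scope) {
    *s = scope{} // reset first, so the next Get always sees a zero value
    if usePooling {
        scopePool.Put(s)
    }
}

func main() {
    s := newScope()
    s.name = "demo"
    deleteScope(s)
    fmt.Println(newScope().name == "") // always true: recycled objects are reset
}
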
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go
new file mode 100644
index 00000000000..2cd6029a63a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_ipaddr.go
@@ -0,0 +1,265 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type IPAddr struct {
+ IP []byte
+ Zone string
+}
+
+var (
+ protoPoolIPAddr = sync.Pool{
+ New: func() any {
+ return &IPAddr{}
+ },
+ }
+)
+
+func NewIPAddr() *IPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &IPAddr{}
+ }
+ return protoPoolIPAddr.Get().(*IPAddr)
+}
+
+func DeleteIPAddr(orig *IPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolIPAddr.Put(orig)
+ }
+}
+
+func CopyIPAddr(dest, src *IPAddr) *IPAddr {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewIPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyIPAddrSlice(dest, src []IPAddr) []IPAddr {
+ var newDest []IPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]IPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteIPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyIPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyIPAddrPtrSlice(dest, src []*IPAddr) []*IPAddr {
+ var newDest []*IPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*IPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewIPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteIPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewIPAddr()
+ }
+ }
+ for i := range src {
+ CopyIPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *IPAddr) Reset() {
+ *orig = IPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *IPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *IPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *IPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *IPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *IPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestIPAddr() *IPAddr {
+ orig := NewIPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestIPAddrPtrSlice() []*IPAddr {
+ orig := make([]*IPAddr, 5)
+ orig[0] = NewIPAddr()
+ orig[1] = GenTestIPAddr()
+ orig[2] = NewIPAddr()
+ orig[3] = GenTestIPAddr()
+ orig[4] = NewIPAddr()
+ return orig
+}
+
+func GenTestIPAddrSlice() []IPAddr {
+ orig := make([]IPAddr, 5)
+ orig[1] = *GenTestIPAddr()
+ orig[3] = *GenTestIPAddr()
+ return orig
+}
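
The Copy*PtrSlice helpers repeated across these files implement one reuse contract: keep dest's backing array and existing pointers when capacity allows, nil out (and, in the vendored code, pool-release) the surplus when src is shorter, and allocate fresh elements only for growth. A trimmed sketch of that contract for a toy box type:

package main

import "fmt"

type box struct{ v int }

// copyBoxPtrSlice mirrors the Copy*PtrSlice contract: reuse dest's pointers
// and backing array where possible, grow or shrink to exactly len(src).
func copyBoxPtrSlice(dest, src []*box) []*box {
    var newDest []*box
    if cap(dest) < len(src) {
        newDest = make([]*box, len(src))
        copy(newDest, dest) // reuse the old pointers
        for i := len(dest); i < len(src); i++ {
            newDest[i] = &box{}
        }
    } else {
        newDest = dest[:len(src)]
        // Release the surplus so the GC can reclaim it; the vendored code
        // also returns these elements to their pool via Delete*.
        for i := len(src); i < len(dest); i++ {
            dest[i] = nil
        }
        for i := len(dest); i < len(src); i++ {
            newDest[i] = &box{}
        }
    }
    for i := range src {
        newDest[i].v = src[i].v
    }
    return newDest
}

func main() {
    dest := []*box{{1}, {2}, {3}}
    src := []*box{{7}}
    out := copyBoxPtrSlice(dest, src)
    fmt.Println(len(out), out[0].v, dest[0] == out[0]) // 1 7 true
}
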
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go
new file mode 100644
index 00000000000..3208776fb65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalue.go
@@ -0,0 +1,262 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type KeyValue struct {
+ Key string
+ Value AnyValue
+}
+
+var (
+ protoPoolKeyValue = sync.Pool{
+ New: func() any {
+ return &KeyValue{}
+ },
+ }
+)
+
+func NewKeyValue() *KeyValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValue{}
+ }
+ return protoPoolKeyValue.Get().(*KeyValue)
+}
+
+func DeleteKeyValue(orig *KeyValue, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteAnyValue(&orig.Value, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValue.Put(orig)
+ }
+}
+
+func CopyKeyValue(dest, src *KeyValue) *KeyValue {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValue()
+ }
+ dest.Key = src.Key
+
+ CopyAnyValue(&dest.Value, &src.Value)
+
+ return dest
+}
+
+func CopyKeyValueSlice(dest, src []KeyValue) []KeyValue {
+ var newDest []KeyValue
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValue, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValue(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValue(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValuePtrSlice(dest, src []*KeyValue) []*KeyValue {
+ var newDest []*KeyValue
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValue, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValue()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValue(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValue()
+ }
+ }
+ for i := range src {
+ CopyKeyValue(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *KeyValue) Reset() {
+ *orig = KeyValue{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValue) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Key != "" {
+ dest.WriteObjectField("key")
+ dest.WriteString(orig.Key)
+ }
+ dest.WriteObjectField("value")
+ orig.Value.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *KeyValue) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "key":
+ orig.Key = iter.ReadString()
+ case "value":
+
+ orig.Value.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValue) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Key)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Value.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *KeyValue) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Key)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Key)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Value.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *KeyValue) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Key = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Value.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValue() *KeyValue {
+ orig := NewKeyValue()
+ orig.Key = "test_key"
+ orig.Value = *GenTestAnyValue()
+ return orig
+}
+
+func GenTestKeyValuePtrSlice() []*KeyValue {
+ orig := make([]*KeyValue, 5)
+ orig[0] = NewKeyValue()
+ orig[1] = GenTestKeyValue()
+ orig[2] = NewKeyValue()
+ orig[3] = GenTestKeyValue()
+ orig[4] = NewKeyValue()
+ return orig
+}
+
+func GenTestKeyValueSlice() []KeyValue {
+ orig := make([]KeyValue, 5)
+ orig[1] = *GenTestKeyValue()
+ orig[3] = *GenTestKeyValue()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go
new file mode 100644
index 00000000000..14c2e763d97
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvalueandunit.go
@@ -0,0 +1,291 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// KeyValueAndUnit represents a custom 'dictionary native'
+// style of encoding attributes which is more convenient
+// for profiles than opentelemetry.proto.common.v1.KeyValue.
+type KeyValueAndUnit struct {
+ KeyStrindex int32
+ Value AnyValue
+ UnitStrindex int32
+}
+
+var (
+ protoPoolKeyValueAndUnit = sync.Pool{
+ New: func() any {
+ return &KeyValueAndUnit{}
+ },
+ }
+)
+
+func NewKeyValueAndUnit() *KeyValueAndUnit {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValueAndUnit{}
+ }
+ return protoPoolKeyValueAndUnit.Get().(*KeyValueAndUnit)
+}
+
+func DeleteKeyValueAndUnit(orig *KeyValueAndUnit, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteAnyValue(&orig.Value, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValueAndUnit.Put(orig)
+ }
+}
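+
+// Pooling sketch (illustrative): New* consults the UseProtoPooling gate, so
+// callers pair it with Delete* and nullable=true to recycle the value; when
+// pooling is disabled the same call degrades to a plain Reset.
+//
+//    kvu := NewKeyValueAndUnit()
+//    // ... populate and use kvu ...
+//    DeleteKeyValueAndUnit(kvu, true) // resets and, if pooling, returns it to the pool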
+
+func CopyKeyValueAndUnit(dest, src *KeyValueAndUnit) *KeyValueAndUnit {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValueAndUnit()
+ }
+ dest.KeyStrindex = src.KeyStrindex
+
+ CopyAnyValue(&dest.Value, &src.Value)
+
+ dest.UnitStrindex = src.UnitStrindex
+
+ return dest
+}
+
+func CopyKeyValueAndUnitSlice(dest, src []KeyValueAndUnit) []KeyValueAndUnit {
+ var newDest []KeyValueAndUnit
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValueAndUnit, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueAndUnit(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValueAndUnit(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValueAndUnitPtrSlice(dest, src []*KeyValueAndUnit) []*KeyValueAndUnit {
+ var newDest []*KeyValueAndUnit
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValueAndUnit, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueAndUnit()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueAndUnit(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueAndUnit()
+ }
+ }
+ for i := range src {
+ CopyKeyValueAndUnit(newDest[i], src[i])
+ }
+ return newDest
+}
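+
+// Reuse sketch (illustrative): Copy*PtrSlice keeps dest's backing array when
+// cap(dest) >= len(src), allocating or deleting elements only where the
+// lengths differ, so a caller can recycle one slice across repeated copies:
+//
+//    dst := make([]*KeyValueAndUnit, 0, 8)
+//    dst = CopyKeyValueAndUnitPtrSlice(dst, src) // no reallocation while len(src) <= 8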
+
+func (orig *KeyValueAndUnit) Reset() {
+ *orig = KeyValueAndUnit{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValueAndUnit) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.KeyStrindex != int32(0) {
+ dest.WriteObjectField("keyStrindex")
+ dest.WriteInt32(orig.KeyStrindex)
+ }
+ dest.WriteObjectField("value")
+ orig.Value.MarshalJSON(dest)
+ if orig.UnitStrindex != int32(0) {
+ dest.WriteObjectField("unitStrindex")
+ dest.WriteInt32(orig.UnitStrindex)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *KeyValueAndUnit) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "keyStrindex", "key_strindex":
+ orig.KeyStrindex = iter.ReadInt32()
+ case "value":
+
+ orig.Value.UnmarshalJSON(iter)
+ case "unitStrindex", "unit_strindex":
+ orig.UnitStrindex = iter.ReadInt32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValueAndUnit) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.KeyStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.KeyStrindex))
+ }
+ l = orig.Value.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.UnitStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.UnitStrindex))
+ }
+ return n
+}
+
+func (orig *KeyValueAndUnit) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.KeyStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = orig.Value.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ if orig.UnitStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *KeyValueAndUnit) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.KeyStrindex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Value.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.UnitStrindex = int32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValueAndUnit() *KeyValueAndUnit {
+ orig := NewKeyValueAndUnit()
+ orig.KeyStrindex = int32(13)
+ orig.Value = *GenTestAnyValue()
+ orig.UnitStrindex = int32(13)
+ return orig
+}
+
+func GenTestKeyValueAndUnitPtrSlice() []*KeyValueAndUnit {
+ orig := make([]*KeyValueAndUnit, 5)
+ orig[0] = NewKeyValueAndUnit()
+ orig[1] = GenTestKeyValueAndUnit()
+ orig[2] = NewKeyValueAndUnit()
+ orig[3] = GenTestKeyValueAndUnit()
+ orig[4] = NewKeyValueAndUnit()
+ return orig
+}
+
+func GenTestKeyValueAndUnitSlice() []KeyValueAndUnit {
+ orig := make([]KeyValueAndUnit, 5)
+ orig[1] = *GenTestKeyValueAndUnit()
+ orig[3] = *GenTestKeyValueAndUnit()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go
new file mode 100644
index 00000000000..1cb1fef73c8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_keyvaluelist.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message since oneof in AnyValue does not allow repeated fields.
+type KeyValueList struct {
+ Values []KeyValue
+}
+
+var (
+ protoPoolKeyValueList = sync.Pool{
+ New: func() any {
+ return &KeyValueList{}
+ },
+ }
+)
+
+func NewKeyValueList() *KeyValueList {
+ if !UseProtoPooling.IsEnabled() {
+ return &KeyValueList{}
+ }
+ return protoPoolKeyValueList.Get().(*KeyValueList)
+}
+
+func DeleteKeyValueList(orig *KeyValueList, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Values {
+ DeleteKeyValue(&orig.Values[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolKeyValueList.Put(orig)
+ }
+}
+
+func CopyKeyValueList(dest, src *KeyValueList) *KeyValueList {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewKeyValueList()
+ }
+ dest.Values = CopyKeyValueSlice(dest.Values, src.Values)
+
+ return dest
+}
+
+func CopyKeyValueListSlice(dest, src []KeyValueList) []KeyValueList {
+ var newDest []KeyValueList
+ if cap(dest) < len(src) {
+ newDest = make([]KeyValueList, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueList(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyKeyValueList(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyKeyValueListPtrSlice(dest, src []*KeyValueList) []*KeyValueList {
+ var newDest []*KeyValueList
+ if cap(dest) < len(src) {
+ newDest = make([]*KeyValueList, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueList()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteKeyValueList(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewKeyValueList()
+ }
+ }
+ for i := range src {
+ CopyKeyValueList(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *KeyValueList) Reset() {
+ *orig = KeyValueList{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *KeyValueList) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ orig.Values[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ orig.Values[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *KeyValueList) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, KeyValue{})
+ orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *KeyValueList) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Values {
+ l = orig.Values[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *KeyValueList) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Values) - 1; i >= 0; i-- {
+ l = orig.Values[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *KeyValueList) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Values = append(orig.Values, KeyValue{})
+ err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestKeyValueList() *KeyValueList {
+ orig := NewKeyValueList()
+ orig.Values = []KeyValue{{}, *GenTestKeyValue()}
+ return orig
+}
+
+func GenTestKeyValueListPtrSlice() []*KeyValueList {
+ orig := make([]*KeyValueList, 5)
+ orig[0] = NewKeyValueList()
+ orig[1] = GenTestKeyValueList()
+ orig[2] = NewKeyValueList()
+ orig[3] = GenTestKeyValueList()
+ orig[4] = NewKeyValueList()
+ return orig
+}
+
+func GenTestKeyValueListSlice() []KeyValueList {
+ orig := make([]KeyValueList, 5)
+ orig[1] = *GenTestKeyValueList()
+ orig[3] = *GenTestKeyValueList()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go
new file mode 100644
index 00000000000..f9cde7b965a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_line.go
@@ -0,0 +1,284 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Line details a specific line in source code, linked to a function.
+type Line struct {
+ FunctionIndex int32
+ Line int64
+ Column int64
+}
+
+var (
+ protoPoolLine = sync.Pool{
+ New: func() any {
+ return &Line{}
+ },
+ }
+)
+
+func NewLine() *Line {
+ if !UseProtoPooling.IsEnabled() {
+ return &Line{}
+ }
+ return protoPoolLine.Get().(*Line)
+}
+
+func DeleteLine(orig *Line, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolLine.Put(orig)
+ }
+}
+
+func CopyLine(dest, src *Line) *Line {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLine()
+ }
+ dest.FunctionIndex = src.FunctionIndex
+
+ dest.Line = src.Line
+
+ dest.Column = src.Column
+
+ return dest
+}
+
+func CopyLineSlice(dest, src []Line) []Line {
+ var newDest []Line
+ if cap(dest) < len(src) {
+ newDest = make([]Line, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLine(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLine(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLinePtrSlice(dest, src []*Line) []*Line {
+ var newDest []*Line
+ if cap(dest) < len(src) {
+ newDest = make([]*Line, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLine()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLine(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLine()
+ }
+ }
+ for i := range src {
+ CopyLine(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Line) Reset() {
+ *orig = Line{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Line) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.FunctionIndex != int32(0) {
+ dest.WriteObjectField("functionIndex")
+ dest.WriteInt32(orig.FunctionIndex)
+ }
+ if orig.Line != int64(0) {
+ dest.WriteObjectField("line")
+ dest.WriteInt64(orig.Line)
+ }
+ if orig.Column != int64(0) {
+ dest.WriteObjectField("column")
+ dest.WriteInt64(orig.Column)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *Line) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "functionIndex", "function_index":
+ orig.FunctionIndex = iter.ReadInt32()
+ case "line":
+ orig.Line = iter.ReadInt64()
+ case "column":
+ orig.Column = iter.ReadInt64()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Line) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.FunctionIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.FunctionIndex))
+ }
+ if orig.Line != 0 {
+ n += 1 + proto.Sov(uint64(orig.Line))
+ }
+ if orig.Column != 0 {
+ n += 1 + proto.Sov(uint64(orig.Column))
+ }
+ return n
+}
+
+func (orig *Line) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.FunctionIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.FunctionIndex))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.Line != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Line))
+ pos--
+ buf[pos] = 0x10
+ }
+ if orig.Column != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Column))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
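+
+// Worked example of the hard-coded tag bytes above: a protobuf tag encodes
+// (field_number << 3) | wire_type, so Line's varint fields use
+// 1<<3|0 = 0x08 (FunctionIndex), 2<<3|0 = 0x10 (Line), 3<<3|0 = 0x18 (Column).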
+
+func (orig *Line) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FunctionIndex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Line = int64(num)
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Column = int64(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLine() *Line {
+ orig := NewLine()
+ orig.FunctionIndex = int32(13)
+ orig.Line = int64(13)
+ orig.Column = int64(13)
+ return orig
+}
+
+func GenTestLinePtrSlice() []*Line {
+ orig := make([]*Line, 5)
+ orig[0] = NewLine()
+ orig[1] = GenTestLine()
+ orig[2] = NewLine()
+ orig[3] = GenTestLine()
+ orig[4] = NewLine()
+ return orig
+}
+
+func GenTestLineSlice() []Line {
+ orig := make([]Line, 5)
+ orig[1] = *GenTestLine()
+ orig[3] = *GenTestLine()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go
new file mode 100644
index 00000000000..26962ad2712
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_link.go
@@ -0,0 +1,267 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Link represents a pointer from a profile Sample to a trace Span.
+type Link struct {
+ TraceId TraceID
+ SpanId SpanID
+}
+
+var (
+ protoPoolLink = sync.Pool{
+ New: func() any {
+ return &Link{}
+ },
+ }
+)
+
+func NewLink() *Link {
+ if !UseProtoPooling.IsEnabled() {
+ return &Link{}
+ }
+ return protoPoolLink.Get().(*Link)
+}
+
+func DeleteLink(orig *Link, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolLink.Put(orig)
+ }
+}
+
+func CopyLink(dest, src *Link) *Link {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLink()
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ return dest
+}
+
+func CopyLinkSlice(dest, src []Link) []Link {
+ var newDest []Link
+ if cap(dest) < len(src) {
+ newDest = make([]Link, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLink(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLink(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLinkPtrSlice(dest, src []*Link) []*Link {
+ var newDest []*Link
+ if cap(dest) < len(src) {
+ newDest = make([]*Link, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLink()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLink(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLink()
+ }
+ }
+ for i := range src {
+ CopyLink(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Link) Reset() {
+ *orig = Link{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Link) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *Link) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Link) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *Link) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *Link) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLink() *Link {
+ orig := NewLink()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ return orig
+}
+
+func GenTestLinkPtrSlice() []*Link {
+ orig := make([]*Link, 5)
+ orig[0] = NewLink()
+ orig[1] = GenTestLink()
+ orig[2] = NewLink()
+ orig[3] = GenTestLink()
+ orig[4] = NewLink()
+ return orig
+}
+
+func GenTestLinkSlice() []Link {
+ orig := make([]Link, 5)
+ orig[1] = *GenTestLink()
+ orig[3] = *GenTestLink()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go
new file mode 100644
index 00000000000..7b16e381ff5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_location.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Location describes function and line table debug information.
+type Location struct {
+ MappingIndex int32
+ Address uint64
+ Line []*Line
+ AttributeIndices []int32
+}
+
+var (
+ protoPoolLocation = sync.Pool{
+ New: func() any {
+ return &Location{}
+ },
+ }
+)
+
+func NewLocation() *Location {
+ if !UseProtoPooling.IsEnabled() {
+ return &Location{}
+ }
+ return protoPoolLocation.Get().(*Location)
+}
+
+func DeleteLocation(orig *Location, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Line {
+ DeleteLine(orig.Line[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolLocation.Put(orig)
+ }
+}
+
+func CopyLocation(dest, src *Location) *Location {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLocation()
+ }
+ dest.MappingIndex = src.MappingIndex
+
+ dest.Address = src.Address
+
+ dest.Line = CopyLinePtrSlice(dest.Line, src.Line)
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
+}
+
+func CopyLocationSlice(dest, src []Location) []Location {
+ var newDest []Location
+ if cap(dest) < len(src) {
+ newDest = make([]Location, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLocation(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLocation(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLocationPtrSlice(dest, src []*Location) []*Location {
+ var newDest []*Location
+ if cap(dest) < len(src) {
+ newDest = make([]*Location, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLocation()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLocation(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLocation()
+ }
+ }
+ for i := range src {
+ CopyLocation(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Location) Reset() {
+ *orig = Location{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Location) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.MappingIndex != int32(0) {
+ dest.WriteObjectField("mappingIndex")
+ dest.WriteInt32(orig.MappingIndex)
+ }
+ if orig.Address != uint64(0) {
+ dest.WriteObjectField("address")
+ dest.WriteUint64(orig.Address)
+ }
+ if len(orig.Line) > 0 {
+ dest.WriteObjectField("line")
+ dest.WriteArrayStart()
+ orig.Line[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Line); i++ {
+ dest.WriteMore()
+ orig.Line[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *Location) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "mappingIndex", "mapping_index":
+ orig.MappingIndex = iter.ReadInt32()
+ case "address":
+ orig.Address = iter.ReadUint64()
+ case "line":
+ for iter.ReadArray() {
+ orig.Line = append(orig.Line, NewLine())
+ orig.Line[len(orig.Line)-1].UnmarshalJSON(iter)
+ }
+
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Location) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.MappingIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.MappingIndex))
+ }
+ if orig.Address != 0 {
+ n += 1 + proto.Sov(uint64(orig.Address))
+ }
+ for i := range orig.Line {
+ l = orig.Line[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Location) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.MappingIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.Address != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Address))
+ pos--
+ buf[pos] = 0x10
+ }
+ for i := len(orig.Line) - 1; i >= 0; i-- {
+ l = orig.Line[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x22
+ }
+ return len(buf) - pos
+}
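+
+// Packed-encoding sketch for AttributeIndices (field 4, tag 4<<3|2 = 0x22):
+// the varints are written back to front, then prefixed with their total byte
+// length and the tag. For example, the GenTestLocation value []int32{0, 13}
+// marshals to the four bytes 0x22 0x02 0x00 0x0d.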
+
+func (orig *Location) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.MappingIndex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Address = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Line = append(orig.Line, NewLine())
+ err = orig.Line[len(orig.Line)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ case 4:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLocation() *Location {
+ orig := NewLocation()
+ orig.MappingIndex = int32(13)
+ orig.Address = uint64(13)
+ orig.Line = []*Line{{}, GenTestLine()}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestLocationPtrSlice() []*Location {
+ orig := make([]*Location, 5)
+ orig[0] = NewLocation()
+ orig[1] = GenTestLocation()
+ orig[2] = NewLocation()
+ orig[3] = GenTestLocation()
+ orig[4] = NewLocation()
+ return orig
+}
+
+func GenTestLocationSlice() []Location {
+ orig := make([]Location, 5)
+ orig[1] = *GenTestLocation()
+ orig[3] = *GenTestLocation()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go
new file mode 100644
index 00000000000..8ef100c1347
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logrecord.go
@@ -0,0 +1,575 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
+type LogRecord struct {
+ TimeUnixNano uint64
+ ObservedTimeUnixNano uint64
+ SeverityNumber SeverityNumber
+ SeverityText string
+ Body AnyValue
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Flags uint32
+ TraceId TraceID
+ SpanId SpanID
+ EventName string
+}
+
+var (
+ protoPoolLogRecord = sync.Pool{
+ New: func() any {
+ return &LogRecord{}
+ },
+ }
+)
+
+func NewLogRecord() *LogRecord {
+ if !UseProtoPooling.IsEnabled() {
+ return &LogRecord{}
+ }
+ return protoPoolLogRecord.Get().(*LogRecord)
+}
+
+func DeleteLogRecord(orig *LogRecord, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteAnyValue(&orig.Body, false)
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolLogRecord.Put(orig)
+ }
+}
+
+func CopyLogRecord(dest, src *LogRecord) *LogRecord {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogRecord()
+ }
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano
+
+ dest.SeverityNumber = src.SeverityNumber
+
+ dest.SeverityText = src.SeverityText
+
+ CopyAnyValue(&dest.Body, &src.Body)
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.Flags = src.Flags
+
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ dest.EventName = src.EventName
+
+ return dest
+}
+
+func CopyLogRecordSlice(dest, src []LogRecord) []LogRecord {
+ var newDest []LogRecord
+ if cap(dest) < len(src) {
+ newDest = make([]LogRecord, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogRecord(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogRecord(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLogRecordPtrSlice(dest, src []*LogRecord) []*LogRecord {
+ var newDest []*LogRecord
+ if cap(dest) < len(src) {
+ newDest = make([]*LogRecord, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogRecord()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogRecord(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogRecord()
+ }
+ }
+ for i := range src {
+ CopyLogRecord(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogRecord) Reset() {
+ *orig = LogRecord{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogRecord) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.ObservedTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("observedTimeUnixNano")
+ dest.WriteUint64(orig.ObservedTimeUnixNano)
+ }
+
+ if int32(orig.SeverityNumber) != 0 {
+ dest.WriteObjectField("severityNumber")
+ dest.WriteInt32(int32(orig.SeverityNumber))
+ }
+ if orig.SeverityText != "" {
+ dest.WriteObjectField("severityText")
+ dest.WriteString(orig.SeverityText)
+ }
+ dest.WriteObjectField("body")
+ orig.Body.MarshalJSON(dest)
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ if orig.EventName != "" {
+ dest.WriteObjectField("eventName")
+ dest.WriteString(orig.EventName)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *LogRecord) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "observedTimeUnixNano", "observed_time_unix_nano":
+ orig.ObservedTimeUnixNano = iter.ReadUint64()
+ case "severityNumber", "severity_number":
+ orig.SeverityNumber = SeverityNumber(iter.ReadEnumValue(SeverityNumber_value))
+ case "severityText", "severity_text":
+ orig.SeverityText = iter.ReadString()
+ case "body":
+
+ orig.Body.UnmarshalJSON(iter)
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ case "eventName", "event_name":
+ orig.EventName = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
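+
+// Decoding sketch (illustrative; OTLP JSON renders uint64 values as strings):
+// the switch accepts both the canonical camelCase and the proto snake_case
+// field names, so these two documents decode identically:
+//
+//    {"timeUnixNano":"13","severityText":"INFO"}
+//    {"time_unix_nano":"13","severity_text":"INFO"}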
+
+func (orig *LogRecord) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.ObservedTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.SeverityNumber != 0 {
+ n += 1 + proto.Sov(uint64(orig.SeverityNumber))
+ }
+ l = len(orig.SeverityText)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Body.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ if orig.Flags != 0 {
+ n += 5
+ }
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = len(orig.EventName)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *LogRecord) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x9
+ }
+ if orig.ObservedTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ObservedTimeUnixNano))
+ pos--
+ buf[pos] = 0x59
+ }
+ if orig.SeverityNumber != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.SeverityNumber))
+ pos--
+ buf[pos] = 0x10
+ }
+ l = len(orig.SeverityText)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SeverityText)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ l = orig.Body.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x38
+ }
+ if orig.Flags != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
+ pos--
+ buf[pos] = 0x45
+ }
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x52
+
+ l = len(orig.EventName)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.EventName)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x62
+ }
+ return len(buf) - pos
+}
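+
+// Note on the fixed-width fields above: TimeUnixNano and ObservedTimeUnixNano
+// use wire type I64, so their tags are 1<<3|1 = 0x09 and 11<<3|1 = 0x59, and
+// Flags uses I32, giving 8<<3|5 = 0x45. Each fixed field costs a constant
+// 9 (or 5) bytes, matching the constants added in SizeProto.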
+
+func (orig *LogRecord) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 11:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.ObservedTimeUnixNano = uint64(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.SeverityNumber = SeverityNumber(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SeverityText = string(buf[startPos:pos])
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Body.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 7:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 8:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 10:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 12:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.EventName = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLogRecord() *LogRecord {
+ orig := NewLogRecord()
+ orig.TimeUnixNano = uint64(13)
+ orig.ObservedTimeUnixNano = uint64(13)
+ orig.SeverityNumber = SeverityNumber(13)
+ orig.SeverityText = "test_severitytext"
+ orig.Body = *GenTestAnyValue()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Flags = uint32(13)
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.EventName = "test_eventname"
+ return orig
+}
+
+func GenTestLogRecordPtrSlice() []*LogRecord {
+ orig := make([]*LogRecord, 5)
+ orig[0] = NewLogRecord()
+ orig[1] = GenTestLogRecord()
+ orig[2] = NewLogRecord()
+ orig[3] = GenTestLogRecord()
+ orig[4] = NewLogRecord()
+ return orig
+}
+
+func GenTestLogRecordSlice() []LogRecord {
+ orig := make([]LogRecord, 5)
+ orig[1] = *GenTestLogRecord()
+ orig[3] = *GenTestLogRecord()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go
new file mode 100644
index 00000000000..8b7cf668f93
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// LogsData represents the logs data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP logs data but do not
+// implement the OTLP protocol.
+type LogsData struct {
+ ResourceLogs []*ResourceLogs
+}
+
+var (
+ protoPoolLogsData = sync.Pool{
+ New: func() any {
+ return &LogsData{}
+ },
+ }
+)
+
+func NewLogsData() *LogsData {
+ if !UseProtoPooling.IsEnabled() {
+ return &LogsData{}
+ }
+ return protoPoolLogsData.Get().(*LogsData)
+}
+
+func DeleteLogsData(orig *LogsData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceLogs {
+ DeleteResourceLogs(orig.ResourceLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolLogsData.Put(orig)
+ }
+}
+
+func CopyLogsData(dest, src *LogsData) *LogsData {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogsData()
+ }
+ dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
+
+ return dest
+}
+
+func CopyLogsDataSlice(dest, src []LogsData) []LogsData {
+ var newDest []LogsData
+ if cap(dest) < len(src) {
+ newDest = make([]LogsData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogsData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLogsDataPtrSlice(dest, src []*LogsData) []*LogsData {
+ var newDest []*LogsData
+ if cap(dest) < len(src) {
+ newDest = make([]*LogsData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsData()
+ }
+ }
+ for i := range src {
+ CopyLogsData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogsData) Reset() {
+ *orig = LogsData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogsData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceLogs) > 0 {
+ dest.WriteObjectField("resourceLogs")
+ dest.WriteArrayStart()
+ orig.ResourceLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceLogs); i++ {
+ dest.WriteMore()
+ orig.ResourceLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *LogsData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceLogs", "resource_logs":
+ for iter.ReadArray() {
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *LogsData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceLogs {
+ l = orig.ResourceLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *LogsData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
+ l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *LogsData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If parsing a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
+ err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLogsData() *LogsData {
+ orig := NewLogsData()
+ orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
+ return orig
+}
+
+func GenTestLogsDataPtrSlice() []*LogsData {
+ orig := make([]*LogsData, 5)
+ orig[0] = NewLogsData()
+ orig[1] = GenTestLogsData()
+ orig[2] = NewLogsData()
+ orig[3] = GenTestLogsData()
+ orig[4] = NewLogsData()
+ return orig
+}
+
+func GenTestLogsDataSlice() []LogsData {
+ orig := make([]LogsData, 5)
+ orig[1] = *GenTestLogsData()
+ orig[3] = *GenTestLogsData()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go
new file mode 100644
index 00000000000..60bae8a7894
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_logsrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type LogsRequest struct {
+ RequestContext *RequestContext
+ LogsData LogsData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolLogsRequest = sync.Pool{
+ New: func() any {
+ return &LogsRequest{}
+ },
+ }
+)
+
+func NewLogsRequest() *LogsRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &LogsRequest{}
+ }
+ return protoPoolLogsRequest.Get().(*LogsRequest)
+}
+
+func DeleteLogsRequest(orig *LogsRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteLogsData(&orig.LogsData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolLogsRequest.Put(orig)
+ }
+}
+
+func CopyLogsRequest(dest, src *LogsRequest) *LogsRequest {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewLogsRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyLogsData(&dest.LogsData, &src.LogsData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyLogsRequestSlice(dest, src []LogsRequest) []LogsRequest {
+ var newDest []LogsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]LogsRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyLogsRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyLogsRequestPtrSlice(dest, src []*LogsRequest) []*LogsRequest {
+ var newDest []*LogsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*LogsRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteLogsRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewLogsRequest()
+ }
+ }
+ for i := range src {
+ CopyLogsRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *LogsRequest) Reset() {
+ *orig = LogsRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *LogsRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("logsData")
+ orig.LogsData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *LogsRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "logsData", "logs_data":
+
+ orig.LogsData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *LogsRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.LogsData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *LogsRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.LogsData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *LogsRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogsData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.LogsData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestLogsRequest() *LogsRequest {
+ orig := NewLogsRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.LogsData = *GenTestLogsData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestLogsRequestPtrSlice() []*LogsRequest {
+ orig := make([]*LogsRequest, 5)
+ orig[0] = NewLogsRequest()
+ orig[1] = GenTestLogsRequest()
+ orig[2] = NewLogsRequest()
+ orig[3] = GenTestLogsRequest()
+ orig[4] = NewLogsRequest()
+ return orig
+}
+
+func GenTestLogsRequestSlice() []LogsRequest {
+ orig := make([]LogsRequest, 5)
+ orig[1] = *GenTestLogsRequest()
+ orig[3] = *GenTestLogsRequest()
+ return orig
+}
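The hardcoded tag bytes in `MarshalProto` above (`0x12`, `0x1a`, `0xd`) come from the standard protobuf tag encoding `fieldNumber<<3 | wireType`, which fits in a single byte for field numbers 1 through 15. A small sketch to sanity-check the constants; all names here are local to the example.

```go
package main

import "fmt"

// Wire types from the protobuf encoding spec.
const (
	wireVarint = 0
	wireI64    = 1
	wireLen    = 2
	wireI32    = 5
)

// tagByte computes the one-byte tag the generated code hardcodes
// (valid only for field numbers 1..15).
func tagByte(field, wire int) byte { return byte(field<<3 | wire) }

func main() {
	fmt.Printf("%#x\n", tagByte(2, wireLen)) // 0x12: RequestContext, LEN
	fmt.Printf("%#x\n", tagByte(3, wireLen)) // 0x1a: LogsData, LEN
	fmt.Printf("%#x\n", tagByte(1, wireI32)) // 0xd:  FormatVersion, fixed32
}
```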
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go
new file mode 100644
index 00000000000..41b74624725
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_mapping.go
@@ -0,0 +1,380 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID.
+type Mapping struct {
+ MemoryStart uint64
+ MemoryLimit uint64
+ FileOffset uint64
+ FilenameStrindex int32
+ AttributeIndices []int32
+}
+
+var (
+ protoPoolMapping = sync.Pool{
+ New: func() any {
+ return &Mapping{}
+ },
+ }
+)
+
+func NewMapping() *Mapping {
+ if !UseProtoPooling.IsEnabled() {
+ return &Mapping{}
+ }
+ return protoPoolMapping.Get().(*Mapping)
+}
+
+func DeleteMapping(orig *Mapping, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolMapping.Put(orig)
+ }
+}
+
+func CopyMapping(dest, src *Mapping) *Mapping {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMapping()
+ }
+ dest.MemoryStart = src.MemoryStart
+
+ dest.MemoryLimit = src.MemoryLimit
+
+ dest.FileOffset = src.FileOffset
+
+ dest.FilenameStrindex = src.FilenameStrindex
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
+}
+
+func CopyMappingSlice(dest, src []Mapping) []Mapping {
+ var newDest []Mapping
+ if cap(dest) < len(src) {
+ newDest = make([]Mapping, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMapping(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMapping(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMappingPtrSlice(dest, src []*Mapping) []*Mapping {
+ var newDest []*Mapping
+ if cap(dest) < len(src) {
+ newDest = make([]*Mapping, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMapping()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMapping(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMapping()
+ }
+ }
+ for i := range src {
+ CopyMapping(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Mapping) Reset() {
+ *orig = Mapping{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Mapping) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.MemoryStart != uint64(0) {
+ dest.WriteObjectField("memoryStart")
+ dest.WriteUint64(orig.MemoryStart)
+ }
+ if orig.MemoryLimit != uint64(0) {
+ dest.WriteObjectField("memoryLimit")
+ dest.WriteUint64(orig.MemoryLimit)
+ }
+ if orig.FileOffset != uint64(0) {
+ dest.WriteObjectField("fileOffset")
+ dest.WriteUint64(orig.FileOffset)
+ }
+ if orig.FilenameStrindex != int32(0) {
+ dest.WriteObjectField("filenameStrindex")
+ dest.WriteInt32(orig.FilenameStrindex)
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Mapping) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "memoryStart", "memory_start":
+ orig.MemoryStart = iter.ReadUint64()
+ case "memoryLimit", "memory_limit":
+ orig.MemoryLimit = iter.ReadUint64()
+ case "fileOffset", "file_offset":
+ orig.FileOffset = iter.ReadUint64()
+ case "filenameStrindex", "filename_strindex":
+ orig.FilenameStrindex = iter.ReadInt32()
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Mapping) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.MemoryStart != 0 {
+ n += 1 + proto.Sov(uint64(orig.MemoryStart))
+ }
+ if orig.MemoryLimit != 0 {
+ n += 1 + proto.Sov(uint64(orig.MemoryLimit))
+ }
+ if orig.FileOffset != 0 {
+ n += 1 + proto.Sov(uint64(orig.FileOffset))
+ }
+ if orig.FilenameStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Mapping) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.MemoryStart != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryStart))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.MemoryLimit != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryLimit))
+ pos--
+ buf[pos] = 0x10
+ }
+ if orig.FileOffset != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.FileOffset))
+ pos--
+ buf[pos] = 0x18
+ }
+ if orig.FilenameStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
+ pos--
+ buf[pos] = 0x20
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x2a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Mapping) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.MemoryStart = uint64(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.MemoryLimit = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FileOffset = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FilenameStrindex = int32(num)
+ case 5:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMapping() *Mapping {
+ orig := NewMapping()
+ orig.MemoryStart = uint64(13)
+ orig.MemoryLimit = uint64(13)
+ orig.FileOffset = uint64(13)
+ orig.FilenameStrindex = int32(13)
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestMappingPtrSlice() []*Mapping {
+ orig := make([]*Mapping, 5)
+ orig[0] = NewMapping()
+ orig[1] = GenTestMapping()
+ orig[2] = NewMapping()
+ orig[3] = GenTestMapping()
+ orig[4] = NewMapping()
+ return orig
+}
+
+func GenTestMappingSlice() []Mapping {
+ orig := make([]Mapping, 5)
+ orig[1] = *GenTestMapping()
+ orig[3] = *GenTestMapping()
+ return orig
+}
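`AttributeIndices` is a packed repeated field: on the wire it is a single LEN record (tag `0x2a`, field 5, wire type 2) whose payload is the element varints back to back, and the `case 5` decoder above also accepts the legacy unpacked form in which each element carries its own varint tag. Below is a minimal sketch of decoding one packed payload; `readUvarint` is a local helper standing in for the vendored `proto.ConsumeVarint`, and it omits the overflow checks a production decoder needs.

```go
package main

import "fmt"

// readUvarint decodes one base-128 varint from buf starting at pos and
// returns the value together with the next read position.
func readUvarint(buf []byte, pos int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if pos >= len(buf) {
			return 0, 0, fmt.Errorf("truncated varint")
		}
		b := buf[pos]
		pos++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, pos, nil
		}
	}
}

func main() {
	// Packed payload for AttributeIndices{0, 13}: the varints 0x00 and
	// 0x0d back to back (on the wire, the tag 0x2a and the length prefix
	// 0x02 precede this payload).
	payload := []byte{0x00, 0x0d}
	var indices []int32
	for pos := 0; pos < len(payload); {
		v, next, err := readUvarint(payload, pos)
		if err != nil {
			panic(err)
		}
		indices = append(indices, int32(v))
		pos = next
	}
	fmt.Println(indices) // [0 13]
}
```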
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go
new file mode 100644
index 00000000000..b7f85b27a03
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metric.go
@@ -0,0 +1,804 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *Metric) GetData() any {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type Metric_Gauge struct {
+ Gauge *Gauge
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if v, ok := m.GetData().(*Metric_Gauge); ok {
+ return v.Gauge
+ }
+ return nil
+}
+
+type Metric_Sum struct {
+ Sum *Sum
+}
+
+func (m *Metric) GetSum() *Sum {
+ if v, ok := m.GetData().(*Metric_Sum); ok {
+ return v.Sum
+ }
+ return nil
+}
+
+type Metric_Histogram struct {
+ Histogram *Histogram
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if v, ok := m.GetData().(*Metric_Histogram); ok {
+ return v.Histogram
+ }
+ return nil
+}
+
+type Metric_ExponentialHistogram struct {
+ ExponentialHistogram *ExponentialHistogram
+}
+
+func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
+ if v, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
+ return v.ExponentialHistogram
+ }
+ return nil
+}
+
+type Metric_Summary struct {
+ Summary *Summary
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if v, ok := m.GetData().(*Metric_Summary); ok {
+ return v.Summary
+ }
+ return nil
+}
+
+// Metric represents one metric as a collection of datapoints.
+// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto
+type Metric struct {
+ Name string
+ Description string
+ Unit string
+ Data any
+ Metadata []KeyValue
+}
+
+var (
+ protoPoolMetric = sync.Pool{
+ New: func() any {
+ return &Metric{}
+ },
+ }
+
+ ProtoPoolMetric_Gauge = sync.Pool{
+ New: func() any {
+ return &Metric_Gauge{}
+ },
+ }
+
+ ProtoPoolMetric_Sum = sync.Pool{
+ New: func() any {
+ return &Metric_Sum{}
+ },
+ }
+
+ ProtoPoolMetric_Histogram = sync.Pool{
+ New: func() any {
+ return &Metric_Histogram{}
+ },
+ }
+
+ ProtoPoolMetric_ExponentialHistogram = sync.Pool{
+ New: func() any {
+ return &Metric_ExponentialHistogram{}
+ },
+ }
+
+ ProtoPoolMetric_Summary = sync.Pool{
+ New: func() any {
+ return &Metric_Summary{}
+ },
+ }
+)
+
+func NewMetric() *Metric {
+ if !UseProtoPooling.IsEnabled() {
+ return &Metric{}
+ }
+ return protoPoolMetric.Get().(*Metric)
+}
+
+func DeleteMetric(orig *Metric, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ switch ov := orig.Data.(type) {
+ case *Metric_Gauge:
+ DeleteGauge(ov.Gauge, true)
+ ov.Gauge = nil
+ ProtoPoolMetric_Gauge.Put(ov)
+ case *Metric_Sum:
+ DeleteSum(ov.Sum, true)
+ ov.Sum = nil
+ ProtoPoolMetric_Sum.Put(ov)
+ case *Metric_Histogram:
+ DeleteHistogram(ov.Histogram, true)
+ ov.Histogram = nil
+ ProtoPoolMetric_Histogram.Put(ov)
+ case *Metric_ExponentialHistogram:
+ DeleteExponentialHistogram(ov.ExponentialHistogram, true)
+ ov.ExponentialHistogram = nil
+ ProtoPoolMetric_ExponentialHistogram.Put(ov)
+ case *Metric_Summary:
+ DeleteSummary(ov.Summary, true)
+ ov.Summary = nil
+ ProtoPoolMetric_Summary.Put(ov)
+
+ }
+ for i := range orig.Metadata {
+ DeleteKeyValue(&orig.Metadata[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetric.Put(orig)
+ }
+}
+
+func CopyMetric(dest, src *Metric) *Metric {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetric()
+ }
+ dest.Name = src.Name
+
+ dest.Description = src.Description
+
+ dest.Unit = src.Unit
+
+ switch t := src.Data.(type) {
+ case *Metric_Gauge:
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ CopyGauge(ov.Gauge, t.Gauge)
+ dest.Data = ov
+
+ case *Metric_Sum:
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ CopySum(ov.Sum, t.Sum)
+ dest.Data = ov
+
+ case *Metric_Histogram:
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ CopyHistogram(ov.Histogram, t.Histogram)
+ dest.Data = ov
+
+ case *Metric_ExponentialHistogram:
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ CopyExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram)
+ dest.Data = ov
+
+ case *Metric_Summary:
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ CopySummary(ov.Summary, t.Summary)
+ dest.Data = ov
+
+ default:
+ dest.Data = nil
+ }
+ dest.Metadata = CopyKeyValueSlice(dest.Metadata, src.Metadata)
+
+ return dest
+}
+
+func CopyMetricSlice(dest, src []Metric) []Metric {
+ var newDest []Metric
+ if cap(dest) < len(src) {
+ newDest = make([]Metric, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetric(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetric(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricPtrSlice(dest, src []*Metric) []*Metric {
+ var newDest []*Metric
+ if cap(dest) < len(src) {
+ newDest = make([]*Metric, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetric()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetric(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetric()
+ }
+ }
+ for i := range src {
+ CopyMetric(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Metric) Reset() {
+ *orig = Metric{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Metric) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Description != "" {
+ dest.WriteObjectField("description")
+ dest.WriteString(orig.Description)
+ }
+ if orig.Unit != "" {
+ dest.WriteObjectField("unit")
+ dest.WriteString(orig.Unit)
+ }
+ switch orig := orig.Data.(type) {
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ dest.WriteObjectField("gauge")
+ orig.Gauge.MarshalJSON(dest)
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ dest.WriteObjectField("sum")
+ orig.Sum.MarshalJSON(dest)
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ dest.WriteObjectField("histogram")
+ orig.Histogram.MarshalJSON(dest)
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ dest.WriteObjectField("exponentialHistogram")
+ orig.ExponentialHistogram.MarshalJSON(dest)
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ dest.WriteObjectField("summary")
+ orig.Summary.MarshalJSON(dest)
+ }
+ }
+ if len(orig.Metadata) > 0 {
+ dest.WriteObjectField("metadata")
+ dest.WriteArrayStart()
+ orig.Metadata[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Metadata); i++ {
+ dest.WriteMore()
+ orig.Metadata[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Metric) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "description":
+ orig.Description = iter.ReadString()
+ case "unit":
+ orig.Unit = iter.ReadString()
+
+ case "gauge":
+ {
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ ov.Gauge.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "sum":
+ {
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ ov.Sum.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "histogram":
+ {
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ ov.Histogram.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "exponentialHistogram", "exponential_histogram":
+ {
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ ov.ExponentialHistogram.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "summary":
+ {
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ ov.Summary.UnmarshalJSON(iter)
+ orig.Data = ov
+ }
+
+ case "metadata":
+ for iter.ReadArray() {
+ orig.Metadata = append(orig.Metadata, KeyValue{})
+ orig.Metadata[len(orig.Metadata)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Metric) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Description)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Unit)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ switch orig := orig.Data.(type) {
+ case nil:
+ _ = orig
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ l = orig.Gauge.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ l = orig.Sum.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ l = orig.Histogram.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ l = orig.ExponentialHistogram.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ l = orig.Summary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ }
+ for i := range orig.Metadata {
+ l = orig.Metadata[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Metric) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Description)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Description)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.Unit)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Unit)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ switch orig := orig.Data.(type) {
+ case *Metric_Gauge:
+ if orig.Gauge != nil {
+ l = orig.Gauge.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *Metric_Sum:
+ if orig.Sum != nil {
+ l = orig.Sum.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ case *Metric_Histogram:
+ if orig.Histogram != nil {
+ l = orig.Histogram.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+ }
+ case *Metric_ExponentialHistogram:
+ if orig.ExponentialHistogram != nil {
+ l = orig.ExponentialHistogram.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x52
+ }
+ case *Metric_Summary:
+ if orig.Summary != nil {
+ l = orig.Summary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x5a
+ }
+ }
+ for i := len(orig.Metadata) - 1; i >= 0; i-- {
+ l = orig.Metadata[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x62
+ }
+ return len(buf) - pos
+}
+
+func (orig *Metric) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Description = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Unit = string(buf[startPos:pos])
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Gauge
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Gauge{}
+ } else {
+ ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
+ }
+ ov.Gauge = NewGauge()
+ err = ov.Gauge.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Sum
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Sum{}
+ } else {
+ ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
+ }
+ ov.Sum = NewSum()
+ err = ov.Sum.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Histogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Histogram{}
+ } else {
+ ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
+ }
+ ov.Histogram = NewHistogram()
+ err = ov.Histogram.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 10:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_ExponentialHistogram
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_ExponentialHistogram{}
+ } else {
+ ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = NewExponentialHistogram()
+ err = ov.ExponentialHistogram.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 11:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *Metric_Summary
+ if !UseProtoPooling.IsEnabled() {
+ ov = &Metric_Summary{}
+ } else {
+ ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
+ }
+ ov.Summary = NewSummary()
+ err = ov.Summary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.Data = ov
+
+ case 12:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Metadata = append(orig.Metadata, KeyValue{})
+ err = orig.Metadata[len(orig.Metadata)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetric() *Metric {
+ orig := NewMetric()
+ orig.Name = "test_name"
+ orig.Description = "test_description"
+ orig.Unit = "test_unit"
+ orig.Data = &Metric_Gauge{Gauge: GenTestGauge()}
+ orig.Metadata = []KeyValue{{}, *GenTestKeyValue()}
+ return orig
+}
+
+func GenTestMetricPtrSlice() []*Metric {
+ orig := make([]*Metric, 5)
+ orig[0] = NewMetric()
+ orig[1] = GenTestMetric()
+ orig[2] = NewMetric()
+ orig[3] = GenTestMetric()
+ orig[4] = NewMetric()
+ return orig
+}
+
+func GenTestMetricSlice() []Metric {
+ orig := make([]Metric, 5)
+ orig[1] = *GenTestMetric()
+ orig[3] = *GenTestMetric()
+ return orig
+}
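`Metric.Data` models the protobuf `oneof data` the way this generator always does: the active variant is wrapped in a tiny single-field struct (`Metric_Gauge`, `Metric_Sum`, and so on) stored in an `any`, and every reader type-switches on it. A toy mirror of that shape, with illustrative local names:

```go
package main

import "fmt"

// Local stand-ins for the generated variant payloads.
type gauge struct{ value float64 }
type sum struct{ total float64 }

// Wrapper structs, analogous to Metric_Gauge and Metric_Sum.
type metricGauge struct{ Gauge *gauge }
type metricSum struct{ Sum *sum }

type metric struct {
	Name string
	Data any // exactly one of *metricGauge / *metricSum, or nil
}

// describe type-switches on the oneof wrapper, like the generated
// MarshalJSON and SizeProto methods do.
func describe(m *metric) string {
	switch d := m.Data.(type) {
	case *metricGauge:
		return fmt.Sprintf("%s: gauge %v", m.Name, d.Gauge.value)
	case *metricSum:
		return fmt.Sprintf("%s: sum %v", m.Name, d.Sum.total)
	default:
		return m.Name + ": no data"
	}
}

func main() {
	fmt.Println(describe(&metric{Name: "cpu", Data: &metricGauge{&gauge{0.42}}}))
	fmt.Println(describe(&metric{Name: "reqs", Data: &metricSum{&sum{128}}}))
}
```

The wrapper indirection is what lets `DeleteMetric` and `CopyMetric` recycle the variant structs through their own `sync.Pool`s instead of allocating a fresh wrapper each time the active case changes.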
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go
new file mode 100644
index 00000000000..ad560d67b4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// MetricsData represents the metrics data that can be stored in persistent storage,
+// or can be embedded by other protocols that transfer OTLP metrics data but do not
+// implement the OTLP protocol.
+type MetricsData struct {
+ ResourceMetrics []*ResourceMetrics
+}
+
+var (
+ protoPoolMetricsData = sync.Pool{
+ New: func() any {
+ return &MetricsData{}
+ },
+ }
+)
+
+func NewMetricsData() *MetricsData {
+ if !UseProtoPooling.IsEnabled() {
+ return &MetricsData{}
+ }
+ return protoPoolMetricsData.Get().(*MetricsData)
+}
+
+func DeleteMetricsData(orig *MetricsData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceMetrics {
+ DeleteResourceMetrics(orig.ResourceMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetricsData.Put(orig)
+ }
+}
+
+func CopyMetricsData(dest, src *MetricsData) *MetricsData {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetricsData()
+ }
+ dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
+
+ return dest
+}
+
+func CopyMetricsDataSlice(dest, src []MetricsData) []MetricsData {
+ var newDest []MetricsData
+ if cap(dest) < len(src) {
+ newDest = make([]MetricsData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetricsData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricsDataPtrSlice(dest, src []*MetricsData) []*MetricsData {
+ var newDest []*MetricsData
+ if cap(dest) < len(src) {
+ newDest = make([]*MetricsData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsData()
+ }
+ }
+ for i := range src {
+ CopyMetricsData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *MetricsData) Reset() {
+ *orig = MetricsData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *MetricsData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceMetrics) > 0 {
+ dest.WriteObjectField("resourceMetrics")
+ dest.WriteArrayStart()
+ orig.ResourceMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceMetrics); i++ {
+ dest.WriteMore()
+ orig.ResourceMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *MetricsData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceMetrics", "resource_metrics":
+ for iter.ReadArray() {
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *MetricsData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceMetrics {
+ l = orig.ResourceMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *MetricsData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
+ l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *MetricsData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
+ err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetricsData() *MetricsData {
+ orig := NewMetricsData()
+ orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
+ return orig
+}
+
+func GenTestMetricsDataPtrSlice() []*MetricsData {
+ orig := make([]*MetricsData, 5)
+ orig[0] = NewMetricsData()
+ orig[1] = GenTestMetricsData()
+ orig[2] = NewMetricsData()
+ orig[3] = GenTestMetricsData()
+ orig[4] = NewMetricsData()
+ return orig
+}
+
+func GenTestMetricsDataSlice() []MetricsData {
+ orig := make([]MetricsData, 5)
+ orig[1] = *GenTestMetricsData()
+ orig[3] = *GenTestMetricsData()
+ return orig
+}
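All of the `New*`/`Delete*` pairs in these files share one pattern: allocation goes through a `sync.Pool` only when the `UseProtoPooling` feature gate is enabled, and deletion always resets the object before (conditionally) returning it to the pool, so stale data never leaks into the next user. A simplified sketch of that gate, using a local `atomic.Bool` in place of the real feature-gate type and ignoring the `nullable` distinction the generated code makes:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// poolingEnabled stands in for the UseProtoPooling feature gate.
var poolingEnabled atomic.Bool

type metricsData struct{ resourceMetrics []string }

var pool = sync.Pool{New: func() any { return &metricsData{} }}

// newMetricsData mirrors the generated New* pattern: plain allocation
// when the gate is off, pool reuse when it is on.
func newMetricsData() *metricsData {
	if !poolingEnabled.Load() {
		return &metricsData{}
	}
	return pool.Get().(*metricsData)
}

// deleteMetricsData mirrors Delete*: always reset, only Put when pooling.
func deleteMetricsData(md *metricsData) {
	*md = metricsData{} // reset so pooled objects never carry old state
	if poolingEnabled.Load() {
		pool.Put(md)
	}
}

func main() {
	poolingEnabled.Store(true)
	md := newMetricsData()
	md.resourceMetrics = append(md.resourceMetrics, "rm-1")
	deleteMetricsData(md)
	fmt.Println(newMetricsData().resourceMetrics == nil) // true: reset before reuse
}
```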
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go
new file mode 100644
index 00000000000..e4cb0b38f17
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_metricsrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type MetricsRequest struct {
+ RequestContext *RequestContext
+ MetricsData MetricsData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolMetricsRequest = sync.Pool{
+ New: func() any {
+ return &MetricsRequest{}
+ },
+ }
+)
+
+func NewMetricsRequest() *MetricsRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &MetricsRequest{}
+ }
+ return protoPoolMetricsRequest.Get().(*MetricsRequest)
+}
+
+func DeleteMetricsRequest(orig *MetricsRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteMetricsData(&orig.MetricsData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolMetricsRequest.Put(orig)
+ }
+}
+
+func CopyMetricsRequest(dest, src *MetricsRequest) *MetricsRequest {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewMetricsRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyMetricsData(&dest.MetricsData, &src.MetricsData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyMetricsRequestSlice(dest, src []MetricsRequest) []MetricsRequest {
+ var newDest []MetricsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]MetricsRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyMetricsRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyMetricsRequestPtrSlice(dest, src []*MetricsRequest) []*MetricsRequest {
+ var newDest []*MetricsRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*MetricsRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteMetricsRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewMetricsRequest()
+ }
+ }
+ for i := range src {
+ CopyMetricsRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *MetricsRequest) Reset() {
+ *orig = MetricsRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *MetricsRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("metricsData")
+ orig.MetricsData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *MetricsRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "metricsData", "metrics_data":
+
+ orig.MetricsData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *MetricsRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.MetricsData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *MetricsRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.MetricsData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *MetricsRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field MetricsData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.MetricsData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestMetricsRequest() *MetricsRequest {
+ orig := NewMetricsRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.MetricsData = *GenTestMetricsData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestMetricsRequestPtrSlice() []*MetricsRequest {
+ orig := make([]*MetricsRequest, 5)
+ orig[0] = NewMetricsRequest()
+ orig[1] = GenTestMetricsRequest()
+ orig[2] = NewMetricsRequest()
+ orig[3] = GenTestMetricsRequest()
+ orig[4] = NewMetricsRequest()
+ return orig
+}
+
+func GenTestMetricsRequestSlice() []MetricsRequest {
+ orig := make([]MetricsRequest, 5)
+ orig[1] = *GenTestMetricsRequest()
+ orig[3] = *GenTestMetricsRequest()
+ return orig
+}
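The `SizeProto` arithmetic above is easier to audit once `proto.Sov` is pinned down: it should return the varint-encoded size of its argument, which is how a length-delimited field of payload length `l` costs `1 + Sov(l) + l` bytes (tag, length prefix, payload) and a fixed32 field such as `FormatVersion` costs a flat 5. A sketch under that assumption; `sov` here is a local reimplementation, not the vendored function.

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov returns the encoded size of x as a base-128 varint: ceil(bits/7),
// with zero still costing one byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(16383), sov(16384)) // 1 1 2 2 3

	// LEN field of payload length l, as in `n += 1 + proto.Sov(uint64(l)) + l`.
	l := 300
	fmt.Println(1 + sov(uint64(l)) + l) // 303

	// Fixed32 field (one tag byte plus four payload bytes), matching `n += 5`.
	fmt.Println(1 + 4) // 5
}
```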
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go
new file mode 100644
index 00000000000..4b86a7f3a1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_numberdatapoint.go
@@ -0,0 +1,559 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *NumberDataPoint) GetValue() any {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type NumberDataPoint_AsDouble struct {
+ AsDouble float64
+}
+
+func (m *NumberDataPoint) GetAsDouble() float64 {
+ if v, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
+ return v.AsDouble
+ }
+ return float64(0)
+}
+
+type NumberDataPoint_AsInt struct {
+ AsInt int64
+}
+
+func (m *NumberDataPoint) GetAsInt() int64 {
+ if v, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
+ return v.AsInt
+ }
+ return int64(0)
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric.
+type NumberDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Value any
+ Exemplars []Exemplar
+ Flags uint32
+}
+
+var (
+ protoPoolNumberDataPoint = sync.Pool{
+ New: func() any {
+ return &NumberDataPoint{}
+ },
+ }
+
+ ProtoPoolNumberDataPoint_AsDouble = sync.Pool{
+ New: func() any {
+ return &NumberDataPoint_AsDouble{}
+ },
+ }
+
+ ProtoPoolNumberDataPoint_AsInt = sync.Pool{
+ New: func() any {
+ return &NumberDataPoint_AsInt{}
+ },
+ }
+)
+
+func NewNumberDataPoint() *NumberDataPoint {
+ if !UseProtoPooling.IsEnabled() {
+ return &NumberDataPoint{}
+ }
+ return protoPoolNumberDataPoint.Get().(*NumberDataPoint)
+}
+
+func DeleteNumberDataPoint(orig *NumberDataPoint, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ switch ov := orig.Value.(type) {
+ case *NumberDataPoint_AsDouble:
+ if UseProtoPooling.IsEnabled() {
+ ov.AsDouble = float64(0)
+ ProtoPoolNumberDataPoint_AsDouble.Put(ov)
+ }
+ case *NumberDataPoint_AsInt:
+ if UseProtoPooling.IsEnabled() {
+ ov.AsInt = int64(0)
+ ProtoPoolNumberDataPoint_AsInt.Put(ov)
+ }
+
+ }
+ for i := range orig.Exemplars {
+ DeleteExemplar(&orig.Exemplars[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolNumberDataPoint.Put(orig)
+ }
+}
+
+func CopyNumberDataPoint(dest, src *NumberDataPoint) *NumberDataPoint {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewNumberDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.StartTimeUnixNano = src.StartTimeUnixNano
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ switch t := src.Value.(type) {
+ case *NumberDataPoint_AsDouble:
+ var ov *NumberDataPoint_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsDouble{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
+ }
+ ov.AsDouble = t.AsDouble
+ dest.Value = ov
+ case *NumberDataPoint_AsInt:
+ var ov *NumberDataPoint_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsInt{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
+ }
+ ov.AsInt = t.AsInt
+ dest.Value = ov
+ default:
+ dest.Value = nil
+ }
+ dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
+
+ dest.Flags = src.Flags
+
+ return dest
+}
+
+func CopyNumberDataPointSlice(dest, src []NumberDataPoint) []NumberDataPoint {
+ var newDest []NumberDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]NumberDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteNumberDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyNumberDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyNumberDataPointPtrSlice(dest, src []*NumberDataPoint) []*NumberDataPoint {
+ var newDest []*NumberDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*NumberDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewNumberDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteNumberDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewNumberDataPoint()
+ }
+ }
+ for i := range src {
+ CopyNumberDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *NumberDataPoint) Reset() {
+ *orig = NumberDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *NumberDataPoint) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.StartTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("startTimeUnixNano")
+ dest.WriteUint64(orig.StartTimeUnixNano)
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ switch orig := orig.Value.(type) {
+ case *NumberDataPoint_AsDouble:
+ dest.WriteObjectField("asDouble")
+ dest.WriteFloat64(orig.AsDouble)
+ case *NumberDataPoint_AsInt:
+ dest.WriteObjectField("asInt")
+ dest.WriteInt64(orig.AsInt)
+ }
+ if len(orig.Exemplars) > 0 {
+ dest.WriteObjectField("exemplars")
+ dest.WriteArrayStart()
+ orig.Exemplars[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Exemplars); i++ {
+ dest.WriteMore()
+ orig.Exemplars[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *NumberDataPoint) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "startTimeUnixNano", "start_time_unix_nano":
+ orig.StartTimeUnixNano = iter.ReadUint64()
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+
+ case "asDouble", "as_double":
+ {
+ var ov *NumberDataPoint_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsDouble{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
+ }
+ ov.AsDouble = iter.ReadFloat64()
+ orig.Value = ov
+ }
+
+ case "asInt", "as_int":
+ {
+ var ov *NumberDataPoint_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsInt{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
+ }
+ ov.AsInt = iter.ReadInt64()
+ orig.Value = ov
+ }
+
+ case "exemplars":
+ for iter.ReadArray() {
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
+ }
+
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *NumberDataPoint) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.StartTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ switch orig := orig.Value.(type) {
+ case nil:
+ _ = orig
+ case *NumberDataPoint_AsDouble:
+ n += 9
+ case *NumberDataPoint_AsInt:
+ n += 9
+ }
+ for i := range orig.Exemplars {
+ l = orig.Exemplars[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Flags != 0 {
+ n += 1 + proto.Sov(uint64(orig.Flags))
+ }
+ return n
+}
+
+func (orig *NumberDataPoint) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ if orig.StartTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
+ pos--
+ buf[pos] = 0x11
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ switch orig := orig.Value.(type) {
+ case *NumberDataPoint_AsDouble:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
+ pos--
+ buf[pos] = 0x21
+
+ case *NumberDataPoint_AsInt:
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
+ pos--
+ buf[pos] = 0x31
+
+ }
+ for i := len(orig.Exemplars) - 1; i >= 0; i-- {
+ l = orig.Exemplars[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ if orig.Flags != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
+ pos--
+ buf[pos] = 0x40
+ }
+ return len(buf) - pos
+}
+
+func (orig *NumberDataPoint) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartTimeUnixNano = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *NumberDataPoint_AsDouble
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsDouble{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
+ }
+ ov.AsDouble = math.Float64frombits(num)
+ orig.Value = ov
+
+ case 6:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ var ov *NumberDataPoint_AsInt
+ if !UseProtoPooling.IsEnabled() {
+ ov = &NumberDataPoint_AsInt{}
+ } else {
+ ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
+ }
+ ov.AsInt = int64(num)
+ orig.Value = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Exemplars = append(orig.Exemplars, Exemplar{})
+ err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 8:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestNumberDataPoint() *NumberDataPoint {
+ orig := NewNumberDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Value = &NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)}
+ orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestNumberDataPointPtrSlice() []*NumberDataPoint {
+ orig := make([]*NumberDataPoint, 5)
+ orig[0] = NewNumberDataPoint()
+ orig[1] = GenTestNumberDataPoint()
+ orig[2] = NewNumberDataPoint()
+ orig[3] = GenTestNumberDataPoint()
+ orig[4] = NewNumberDataPoint()
+ return orig
+}
+
+func GenTestNumberDataPointSlice() []NumberDataPoint {
+ orig := make([]NumberDataPoint, 5)
+ orig[1] = *GenTestNumberDataPoint()
+ orig[3] = *GenTestNumberDataPoint()
+ return orig
+}
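
The hard-coded key bytes above (0x3a, 0x11, 0x19, 0x21, 0x2a, 0x31, 0x40) all come from the standard protobuf field-key encoding, key = (field_number << 3) | wire_type, which fits in a single byte for field numbers below 16. A minimal standalone sketch of that arithmetic, reproducing every constant NumberDataPoint's MarshalProto uses (the tagByte helper is illustrative, not part of pdata):

package main

import "fmt"

// Wire types from the protobuf encoding specification.
const (
    wireVarint = 0 // varint scalars, e.g. flags
    wireI64    = 1 // fixed 8-byte values: fixed64, double
    wireLen    = 2 // length-delimited: messages, strings, packed repeateds
)

// tagByte computes the one-byte field key used for field numbers below 16.
func tagByte(fieldNum, wireType int) byte {
    return byte(fieldNum<<3 | wireType)
}

func main() {
    fmt.Printf("attributes   (7, LEN):    %#x\n", tagByte(7, wireLen))    // 0x3a
    fmt.Printf("startTime    (2, I64):    %#x\n", tagByte(2, wireI64))    // 0x11
    fmt.Printf("timeUnixNano (3, I64):    %#x\n", tagByte(3, wireI64))    // 0x19
    fmt.Printf("asDouble     (4, I64):    %#x\n", tagByte(4, wireI64))    // 0x21
    fmt.Printf("exemplars    (5, LEN):    %#x\n", tagByte(5, wireLen))    // 0x2a
    fmt.Printf("asInt        (6, I64):    %#x\n", tagByte(6, wireI64))    // 0x31
    fmt.Printf("flags        (8, VARINT): %#x\n", tagByte(8, wireVarint)) // 0x40
}

The same arithmetic is what lets UnmarshalProto dispatch on the decoded field number and then insist on one specific wire type per field.
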
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go
new file mode 100644
index 00000000000..eb1e0b9f922
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profile.go
@@ -0,0 +1,675 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Profile is an implementation of the pprofextended data model.
+type Profile struct {
+ SampleType ValueType
+ Sample []*Sample
+ TimeUnixNano uint64
+ DurationNano uint64
+ PeriodType ValueType
+ Period int64
+ CommentStrindices []int32
+ ProfileId ProfileID
+ DroppedAttributesCount uint32
+ OriginalPayloadFormat string
+ OriginalPayload []byte
+ AttributeIndices []int32
+}
+
+var (
+ protoPoolProfile = sync.Pool{
+ New: func() any {
+ return &Profile{}
+ },
+ }
+)
+
+func NewProfile() *Profile {
+ if !UseProtoPooling.IsEnabled() {
+ return &Profile{}
+ }
+ return protoPoolProfile.Get().(*Profile)
+}
+
+func DeleteProfile(orig *Profile, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteValueType(&orig.SampleType, false)
+ for i := range orig.Sample {
+ DeleteSample(orig.Sample[i], true)
+ }
+ DeleteValueType(&orig.PeriodType, false)
+ DeleteProfileID(&orig.ProfileId, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfile.Put(orig)
+ }
+}
+
+func CopyProfile(dest, src *Profile) *Profile {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfile()
+ }
+ CopyValueType(&dest.SampleType, &src.SampleType)
+
+ dest.Sample = CopySamplePtrSlice(dest.Sample, src.Sample)
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.DurationNano = src.DurationNano
+
+ CopyValueType(&dest.PeriodType, &src.PeriodType)
+
+ dest.Period = src.Period
+
+ dest.CommentStrindices = append(dest.CommentStrindices[:0], src.CommentStrindices...)
+ CopyProfileID(&dest.ProfileId, &src.ProfileId)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.OriginalPayloadFormat = src.OriginalPayloadFormat
+
+ dest.OriginalPayload = src.OriginalPayload
+
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+
+ return dest
+}
+
+func CopyProfileSlice(dest, src []Profile) []Profile {
+ var newDest []Profile
+ if cap(dest) < len(src) {
+ newDest = make([]Profile, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfile(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfile(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilePtrSlice(dest, src []*Profile) []*Profile {
+ var newDest []*Profile
+ if cap(dest) < len(src) {
+ newDest = make([]*Profile, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfile()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfile(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfile()
+ }
+ }
+ for i := range src {
+ CopyProfile(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Profile) Reset() {
+ *orig = Profile{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Profile) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("sampleType")
+ orig.SampleType.MarshalJSON(dest)
+ if len(orig.Sample) > 0 {
+ dest.WriteObjectField("sample")
+ dest.WriteArrayStart()
+ orig.Sample[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Sample); i++ {
+ dest.WriteMore()
+ orig.Sample[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.DurationNano != uint64(0) {
+ dest.WriteObjectField("durationNano")
+ dest.WriteUint64(orig.DurationNano)
+ }
+ dest.WriteObjectField("periodType")
+ orig.PeriodType.MarshalJSON(dest)
+ if orig.Period != int64(0) {
+ dest.WriteObjectField("period")
+ dest.WriteInt64(orig.Period)
+ }
+ if len(orig.CommentStrindices) > 0 {
+ dest.WriteObjectField("commentStrindices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.CommentStrindices[0])
+ for i := 1; i < len(orig.CommentStrindices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.CommentStrindices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if !orig.ProfileId.IsEmpty() {
+ dest.WriteObjectField("profileId")
+ orig.ProfileId.MarshalJSON(dest)
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if orig.OriginalPayloadFormat != "" {
+ dest.WriteObjectField("originalPayloadFormat")
+ dest.WriteString(orig.OriginalPayloadFormat)
+ }
+
+ if len(orig.OriginalPayload) > 0 {
+ dest.WriteObjectField("originalPayload")
+ dest.WriteBytes(orig.OriginalPayload)
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *Profile) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "sampleType", "sample_type":
+
+ orig.SampleType.UnmarshalJSON(iter)
+ case "sample":
+ for iter.ReadArray() {
+ orig.Sample = append(orig.Sample, NewSample())
+ orig.Sample[len(orig.Sample)-1].UnmarshalJSON(iter)
+ }
+
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "durationNano", "duration_nano":
+ orig.DurationNano = iter.ReadUint64()
+ case "periodType", "period_type":
+
+ orig.PeriodType.UnmarshalJSON(iter)
+ case "period":
+ orig.Period = iter.ReadInt64()
+ case "commentStrindices", "comment_strindices":
+ for iter.ReadArray() {
+ orig.CommentStrindices = append(orig.CommentStrindices, iter.ReadInt32())
+ }
+
+ case "profileId", "profile_id":
+
+ orig.ProfileId.UnmarshalJSON(iter)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "originalPayloadFormat", "original_payload_format":
+ orig.OriginalPayloadFormat = iter.ReadString()
+ case "originalPayload", "original_payload":
+ orig.OriginalPayload = iter.ReadBytes()
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Profile) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.SampleType.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Sample {
+ l = orig.Sample[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.DurationNano != 0 {
+ n += 1 + proto.Sov(uint64(orig.DurationNano))
+ }
+ l = orig.PeriodType.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.Period != 0 {
+ n += 1 + proto.Sov(uint64(orig.Period))
+ }
+ if len(orig.CommentStrindices) > 0 {
+ l = 0
+ for _, e := range orig.CommentStrindices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.ProfileId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ l = len(orig.OriginalPayloadFormat)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.OriginalPayload)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Profile) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.SampleType.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Sample) - 1; i >= 0; i-- {
+ l = orig.Sample[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ if orig.DurationNano != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNano))
+ pos--
+ buf[pos] = 0x20
+ }
+ l = orig.PeriodType.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+
+ if orig.Period != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Period))
+ pos--
+ buf[pos] = 0x30
+ }
+ l = len(orig.CommentStrindices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.CommentStrindices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x3a
+ }
+ l = orig.ProfileId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x42
+
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x48
+ }
+ l = len(orig.OriginalPayloadFormat)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.OriginalPayloadFormat)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x52
+ }
+ l = len(orig.OriginalPayload)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.OriginalPayload)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x5a
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x62
+ }
+ return len(buf) - pos
+}
+
+func (orig *Profile) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SampleType.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Sample = append(orig.Sample, NewSample())
+ err = orig.Sample[len(orig.Sample)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DurationNano = uint64(num)
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.PeriodType.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 6:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Period = int64(num)
+ case 7:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.CommentStrindices = append(orig.CommentStrindices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field CommentStrindices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.CommentStrindices = append(orig.CommentStrindices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
+ }
+
+ case 8:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.ProfileId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 9:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 10:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.OriginalPayloadFormat = string(buf[startPos:pos])
+
+ case 11:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.OriginalPayload = make([]byte, length)
+ copy(orig.OriginalPayload, buf[startPos:pos])
+ }
+ case 12:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfile() *Profile {
+ orig := NewProfile()
+ orig.SampleType = *GenTestValueType()
+ orig.Sample = []*Sample{{}, GenTestSample()}
+ orig.TimeUnixNano = uint64(13)
+ orig.DurationNano = uint64(13)
+ orig.PeriodType = *GenTestValueType()
+ orig.Period = int64(13)
+ orig.CommentStrindices = []int32{int32(0), int32(13)}
+ orig.ProfileId = *GenTestProfileID()
+ orig.DroppedAttributesCount = uint32(13)
+ orig.OriginalPayloadFormat = "test_originalpayloadformat"
+ orig.OriginalPayload = []byte{1, 2, 3}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestProfilePtrSlice() []*Profile {
+ orig := make([]*Profile, 5)
+ orig[0] = NewProfile()
+ orig[1] = GenTestProfile()
+ orig[2] = NewProfile()
+ orig[3] = GenTestProfile()
+ orig[4] = NewProfile()
+ return orig
+}
+
+func GenTestProfileSlice() []Profile {
+ orig := make([]Profile, 5)
+ orig[1] = *GenTestProfile()
+ orig[3] = *GenTestProfile()
+ return orig
+}
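
Note that Profile's repeated int32 fields (CommentStrindices, AttributeIndices) accept both encodings proto3 permits for packed-eligible fields: one length-delimited run of varints, or repeated individual varint fields, which is why cases 7 and 12 above switch on the wire type. A self-contained sketch of the packed path, with a hand-rolled reader standing in for the assumed behavior of proto.ConsumeVarint:

package main

import (
    "errors"
    "fmt"
)

// consumeVarint reads one base-128 varint starting at pos and returns the
// decoded value and the position just past it. It models the assumed
// contract of proto.ConsumeVarint; it is not the pdata implementation.
func consumeVarint(buf []byte, pos int) (uint64, int, error) {
    var v uint64
    for shift := uint(0); shift < 64; shift += 7 {
        if pos >= len(buf) {
            return 0, pos, errors.New("truncated varint")
        }
        b := buf[pos]
        pos++
        v |= uint64(b&0x7f) << shift
        if b < 0x80 { // high bit clear: this was the last byte
            return v, pos, nil
        }
    }
    return 0, pos, errors.New("varint exceeds 64 bits")
}

func main() {
    // Packed payload for the int32 values 0 and 13 (one varint byte each),
    // i.e. the body of a CommentStrindices field like GenTestProfile's.
    packed := []byte{0x00, 0x0d}
    var out []int32
    for pos := 0; pos < len(packed); {
        num, next, err := consumeVarint(packed, pos)
        if err != nil {
            panic(err)
        }
        out = append(out, int32(num))
        pos = next
    }
    fmt.Println(out) // [0 13]
}

Accepting both forms keeps the decoder compatible with writers that do not use packed encoding.
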
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go
new file mode 100644
index 00000000000..f3d1c1a16d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdata.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ProfilesData represents the profiles data that can be stored in persistent storage,
+// or can be embedded by other protocols that transfer OTLP profiles data but do not
+// implement the OTLP protocol.
+type ProfilesData struct {
+ ResourceProfiles []*ResourceProfiles
+ Dictionary ProfilesDictionary
+}
+
+var (
+ protoPoolProfilesData = sync.Pool{
+ New: func() any {
+ return &ProfilesData{}
+ },
+ }
+)
+
+func NewProfilesData() *ProfilesData {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesData{}
+ }
+ return protoPoolProfilesData.Get().(*ProfilesData)
+}
+
+func DeleteProfilesData(orig *ProfilesData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceProfiles {
+ DeleteResourceProfiles(orig.ResourceProfiles[i], true)
+ }
+ DeleteProfilesDictionary(&orig.Dictionary, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesData.Put(orig)
+ }
+}
+
+func CopyProfilesData(dest, src *ProfilesData) *ProfilesData {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesData()
+ }
+ dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
+
+ CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
+
+ return dest
+}
+
+func CopyProfilesDataSlice(dest, src []ProfilesData) []ProfilesData {
+ var newDest []ProfilesData
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesDataPtrSlice(dest, src []*ProfilesData) []*ProfilesData {
+ var newDest []*ProfilesData
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesData()
+ }
+ }
+ for i := range src {
+ CopyProfilesData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesData) Reset() {
+ *orig = ProfilesData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceProfiles) > 0 {
+ dest.WriteObjectField("resourceProfiles")
+ dest.WriteArrayStart()
+ orig.ResourceProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceProfiles); i++ {
+ dest.WriteMore()
+ orig.ResourceProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectField("dictionary")
+ orig.Dictionary.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ProfilesData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceProfiles", "resource_profiles":
+ for iter.ReadArray() {
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "dictionary":
+
+ orig.Dictionary.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceProfiles {
+ l = orig.ResourceProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.Dictionary.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *ProfilesData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
+ l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = orig.Dictionary.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ return len(buf) - pos
+}
+
+func (orig *ProfilesData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
+ err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesData() *ProfilesData {
+ orig := NewProfilesData()
+ orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
+ orig.Dictionary = *GenTestProfilesDictionary()
+ return orig
+}
+
+func GenTestProfilesDataPtrSlice() []*ProfilesData {
+ orig := make([]*ProfilesData, 5)
+ orig[0] = NewProfilesData()
+ orig[1] = GenTestProfilesData()
+ orig[2] = NewProfilesData()
+ orig[3] = GenTestProfilesData()
+ orig[4] = NewProfilesData()
+ return orig
+}
+
+func GenTestProfilesDataSlice() []ProfilesData {
+ orig := make([]ProfilesData, 5)
+ orig[1] = *GenTestProfilesData()
+ orig[3] = *GenTestProfilesData()
+ return orig
+}
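
As throughout these generated files, MarshalProto fills its buffer backwards: a buffer sized by SizeProto is written from the end toward index 0, each length-delimited field emitting its payload first, then the length varint, then the key byte, so the function can return len(buf) - pos as the byte count. A minimal sketch of the pattern, assuming proto.EncodeVarint writes a varint ending just before the given position (the backward helper below is illustrative):

package main

import "fmt"

// encodeVarintBackward writes v so its final byte lands at buf[pos-1] and
// returns the new, smaller position. This mirrors the assumed contract of
// proto.EncodeVarint in the generated marshalers.
func encodeVarintBackward(buf []byte, pos int, v uint64) int {
    var tmp [10]byte // a uint64 varint is at most 10 bytes
    n := 0
    for {
        b := byte(v & 0x7f)
        v >>= 7
        if v != 0 {
            b |= 0x80 // continuation bit: more bytes follow
        }
        tmp[n] = b
        n++
        if v == 0 {
            break
        }
    }
    pos -= n
    copy(buf[pos:], tmp[:n])
    return pos
}

func main() {
    // Emit one length-delimited field (field 1, payload "hi") the way the
    // generated code does: payload, then length, then the tag byte.
    buf := make([]byte, 16)
    pos := len(buf)

    pos -= len("hi")
    copy(buf[pos:], "hi")
    pos = encodeVarintBackward(buf, pos, uint64(len("hi")))
    pos--
    buf[pos] = 0x0a // (1 << 3) | 2: field 1, wire type LEN

    fmt.Printf("% x\n", buf[pos:]) // 0a 02 68 69
}

Writing back-to-front means every nested message's length is already known when its prefix is written, avoiding a second per-submessage sizing pass.
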
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go
new file mode 100644
index 00000000000..af6168e1174
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesdictionary.go
@@ -0,0 +1,537 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
+type ProfilesDictionary struct {
+ MappingTable []*Mapping
+ LocationTable []*Location
+ FunctionTable []*Function
+ LinkTable []*Link
+ StringTable []string
+ AttributeTable []*KeyValueAndUnit
+ StackTable []*Stack
+}
+
+var (
+ protoPoolProfilesDictionary = sync.Pool{
+ New: func() any {
+ return &ProfilesDictionary{}
+ },
+ }
+)
+
+func NewProfilesDictionary() *ProfilesDictionary {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesDictionary{}
+ }
+ return protoPoolProfilesDictionary.Get().(*ProfilesDictionary)
+}
+
+func DeleteProfilesDictionary(orig *ProfilesDictionary, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.MappingTable {
+ DeleteMapping(orig.MappingTable[i], true)
+ }
+ for i := range orig.LocationTable {
+ DeleteLocation(orig.LocationTable[i], true)
+ }
+ for i := range orig.FunctionTable {
+ DeleteFunction(orig.FunctionTable[i], true)
+ }
+ for i := range orig.LinkTable {
+ DeleteLink(orig.LinkTable[i], true)
+ }
+ for i := range orig.AttributeTable {
+ DeleteKeyValueAndUnit(orig.AttributeTable[i], true)
+ }
+ for i := range orig.StackTable {
+ DeleteStack(orig.StackTable[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesDictionary.Put(orig)
+ }
+}
+
+func CopyProfilesDictionary(dest, src *ProfilesDictionary) *ProfilesDictionary {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesDictionary()
+ }
+ dest.MappingTable = CopyMappingPtrSlice(dest.MappingTable, src.MappingTable)
+
+ dest.LocationTable = CopyLocationPtrSlice(dest.LocationTable, src.LocationTable)
+
+ dest.FunctionTable = CopyFunctionPtrSlice(dest.FunctionTable, src.FunctionTable)
+
+ dest.LinkTable = CopyLinkPtrSlice(dest.LinkTable, src.LinkTable)
+
+ dest.StringTable = append(dest.StringTable[:0], src.StringTable...)
+ dest.AttributeTable = CopyKeyValueAndUnitPtrSlice(dest.AttributeTable, src.AttributeTable)
+
+ dest.StackTable = CopyStackPtrSlice(dest.StackTable, src.StackTable)
+
+ return dest
+}
+
+func CopyProfilesDictionarySlice(dest, src []ProfilesDictionary) []ProfilesDictionary {
+ var newDest []ProfilesDictionary
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesDictionary, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesDictionary(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesDictionary(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesDictionaryPtrSlice(dest, src []*ProfilesDictionary) []*ProfilesDictionary {
+ var newDest []*ProfilesDictionary
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesDictionary, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesDictionary()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesDictionary(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesDictionary()
+ }
+ }
+ for i := range src {
+ CopyProfilesDictionary(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesDictionary) Reset() {
+ *orig = ProfilesDictionary{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesDictionary) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.MappingTable) > 0 {
+ dest.WriteObjectField("mappingTable")
+ dest.WriteArrayStart()
+ orig.MappingTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.MappingTable); i++ {
+ dest.WriteMore()
+ orig.MappingTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.LocationTable) > 0 {
+ dest.WriteObjectField("locationTable")
+ dest.WriteArrayStart()
+ orig.LocationTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LocationTable); i++ {
+ dest.WriteMore()
+ orig.LocationTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.FunctionTable) > 0 {
+ dest.WriteObjectField("functionTable")
+ dest.WriteArrayStart()
+ orig.FunctionTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.FunctionTable); i++ {
+ dest.WriteMore()
+ orig.FunctionTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.LinkTable) > 0 {
+ dest.WriteObjectField("linkTable")
+ dest.WriteArrayStart()
+ orig.LinkTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LinkTable); i++ {
+ dest.WriteMore()
+ orig.LinkTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.StringTable) > 0 {
+ dest.WriteObjectField("stringTable")
+ dest.WriteArrayStart()
+ dest.WriteString(orig.StringTable[0])
+ for i := 1; i < len(orig.StringTable); i++ {
+ dest.WriteMore()
+ dest.WriteString(orig.StringTable[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeTable) > 0 {
+ dest.WriteObjectField("attributeTable")
+ dest.WriteArrayStart()
+ orig.AttributeTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.AttributeTable); i++ {
+ dest.WriteMore()
+ orig.AttributeTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.StackTable) > 0 {
+ dest.WriteObjectField("stackTable")
+ dest.WriteArrayStart()
+ orig.StackTable[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.StackTable); i++ {
+ dest.WriteMore()
+ orig.StackTable[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ProfilesDictionary) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "mappingTable", "mapping_table":
+ for iter.ReadArray() {
+ orig.MappingTable = append(orig.MappingTable, NewMapping())
+ orig.MappingTable[len(orig.MappingTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "locationTable", "location_table":
+ for iter.ReadArray() {
+ orig.LocationTable = append(orig.LocationTable, NewLocation())
+ orig.LocationTable[len(orig.LocationTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "functionTable", "function_table":
+ for iter.ReadArray() {
+ orig.FunctionTable = append(orig.FunctionTable, NewFunction())
+ orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "linkTable", "link_table":
+ for iter.ReadArray() {
+ orig.LinkTable = append(orig.LinkTable, NewLink())
+ orig.LinkTable[len(orig.LinkTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "stringTable", "string_table":
+ for iter.ReadArray() {
+ orig.StringTable = append(orig.StringTable, iter.ReadString())
+ }
+
+ case "attributeTable", "attribute_table":
+ for iter.ReadArray() {
+ orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
+ orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalJSON(iter)
+ }
+
+ case "stackTable", "stack_table":
+ for iter.ReadArray() {
+ orig.StackTable = append(orig.StackTable, NewStack())
+ orig.StackTable[len(orig.StackTable)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesDictionary) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.MappingTable {
+ l = orig.MappingTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.LocationTable {
+ l = orig.LocationTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.FunctionTable {
+ l = orig.FunctionTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.LinkTable {
+ l = orig.LinkTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for _, s := range orig.StringTable {
+ l = len(s)
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.AttributeTable {
+ l = orig.AttributeTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.StackTable {
+ l = orig.StackTable[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ProfilesDictionary) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.MappingTable) - 1; i >= 0; i-- {
+ l = orig.MappingTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ for i := len(orig.LocationTable) - 1; i >= 0; i-- {
+ l = orig.LocationTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.FunctionTable) - 1; i >= 0; i-- {
+ l = orig.FunctionTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.LinkTable) - 1; i >= 0; i-- {
+ l = orig.LinkTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ for i := len(orig.StringTable) - 1; i >= 0; i-- {
+ l = len(orig.StringTable[i])
+ pos -= l
+ copy(buf[pos:], orig.StringTable[i])
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ for i := len(orig.AttributeTable) - 1; i >= 0; i-- {
+ l = orig.AttributeTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ for i := len(orig.StackTable) - 1; i >= 0; i-- {
+ l = orig.StackTable[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ProfilesDictionary) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.MappingTable = append(orig.MappingTable, NewMapping())
+ err = orig.MappingTable[len(orig.MappingTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LocationTable = append(orig.LocationTable, NewLocation())
+ err = orig.LocationTable[len(orig.LocationTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.FunctionTable = append(orig.FunctionTable, NewFunction())
+ err = orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LinkTable = append(orig.LinkTable, NewLink())
+ err = orig.LinkTable[len(orig.LinkTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.StringTable = append(orig.StringTable, string(buf[startPos:pos]))
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
+ err = orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.StackTable = append(orig.StackTable, NewStack())
+ err = orig.StackTable[len(orig.StackTable)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesDictionary() *ProfilesDictionary {
+ orig := NewProfilesDictionary()
+ orig.MappingTable = []*Mapping{{}, GenTestMapping()}
+ orig.LocationTable = []*Location{{}, GenTestLocation()}
+ orig.FunctionTable = []*Function{{}, GenTestFunction()}
+ orig.LinkTable = []*Link{{}, GenTestLink()}
+ orig.StringTable = []string{"", "test_stringtable"}
+ orig.AttributeTable = []*KeyValueAndUnit{{}, GenTestKeyValueAndUnit()}
+ orig.StackTable = []*Stack{{}, GenTestStack()}
+ return orig
+}
+
+func GenTestProfilesDictionaryPtrSlice() []*ProfilesDictionary {
+ orig := make([]*ProfilesDictionary, 5)
+ orig[0] = NewProfilesDictionary()
+ orig[1] = GenTestProfilesDictionary()
+ orig[2] = NewProfilesDictionary()
+ orig[3] = GenTestProfilesDictionary()
+ orig[4] = NewProfilesDictionary()
+ return orig
+}
+
+func GenTestProfilesDictionarySlice() []ProfilesDictionary {
+ orig := make([]ProfilesDictionary, 5)
+ orig[1] = *GenTestProfilesDictionary()
+ orig[3] = *GenTestProfilesDictionary()
+ return orig
+}
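
ProfilesDictionary is the deduplication point for the profiles signal: other messages carry int32 indices into these tables rather than inline values, and index 0 conventionally refers to an empty entry (hence the leading "" in GenTestProfilesDictionary's StringTable). A toy model of that indirection (the types and field names below are invented for illustration, not the pdata API):

package main

import "fmt"

// dictionary models the shared lookup tables; records reference entries by
// int32 index instead of embedding strings.
type dictionary struct {
    stringTable []string
}

// function models a dictionary entry that names itself via a string index.
type function struct {
    nameStrindex int32 // index into dictionary.stringTable
}

func (d *dictionary) functionName(f function) string {
    return d.stringTable[f.nameStrindex]
}

func main() {
    // Index 0 is the empty string, so an unnamed entry costs nothing extra.
    d := &dictionary{stringTable: []string{"", "main.work", "runtime.mallocgc"}}
    for _, f := range []function{{nameStrindex: 1}, {nameStrindex: 2}, {nameStrindex: 0}} {
        fmt.Printf("%q\n", d.functionName(f))
    }
}
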
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go
new file mode 100644
index 00000000000..287b9ee4d5b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_profilesrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type ProfilesRequest struct {
+ RequestContext *RequestContext
+ ProfilesData ProfilesData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolProfilesRequest = sync.Pool{
+ New: func() any {
+ return &ProfilesRequest{}
+ },
+ }
+)
+
+func NewProfilesRequest() *ProfilesRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &ProfilesRequest{}
+ }
+ return protoPoolProfilesRequest.Get().(*ProfilesRequest)
+}
+
+func DeleteProfilesRequest(orig *ProfilesRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteProfilesData(&orig.ProfilesData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolProfilesRequest.Put(orig)
+ }
+}
+
+func CopyProfilesRequest(dest, src *ProfilesRequest) *ProfilesRequest {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewProfilesRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyProfilesData(&dest.ProfilesData, &src.ProfilesData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyProfilesRequestSlice(dest, src []ProfilesRequest) []ProfilesRequest {
+ var newDest []ProfilesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]ProfilesRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyProfilesRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyProfilesRequestPtrSlice(dest, src []*ProfilesRequest) []*ProfilesRequest {
+ var newDest []*ProfilesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*ProfilesRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Clean up the rest of the elements so the GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteProfilesRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewProfilesRequest()
+ }
+ }
+ for i := range src {
+ CopyProfilesRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ProfilesRequest) Reset() {
+ *orig = ProfilesRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ProfilesRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("profilesData")
+ orig.ProfilesData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ProfilesRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "profilesData", "profiles_data":
+
+ orig.ProfilesData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ProfilesRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.ProfilesData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *ProfilesRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.ProfilesData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *ProfilesRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProfilesData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.ProfilesData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestProfilesRequest() *ProfilesRequest {
+ orig := NewProfilesRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.ProfilesData = *GenTestProfilesData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestProfilesRequestPtrSlice() []*ProfilesRequest {
+ orig := make([]*ProfilesRequest, 5)
+ orig[0] = NewProfilesRequest()
+ orig[1] = GenTestProfilesRequest()
+ orig[2] = NewProfilesRequest()
+ orig[3] = GenTestProfilesRequest()
+ orig[4] = NewProfilesRequest()
+ return orig
+}
+
+func GenTestProfilesRequestSlice() []ProfilesRequest {
+ orig := make([]ProfilesRequest, 5)
+ orig[1] = *GenTestProfilesRequest()
+ orig[3] = *GenTestProfilesRequest()
+ return orig
+}
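
ProfilesRequest's FormatVersion is the lone fixed32 field in this batch: its key byte is (1 << 3) | 5 = 0x0d and its value always takes four little-endian bytes, which is exactly the n += 5 charged in SizeProto. A tiny sketch of that encoding step in isolation:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Encode FormatVersion = 13 as a fixed32 field, back-to-front as the
    // generated MarshalProto does: four value bytes, then the key byte.
    buf := make([]byte, 5)
    pos := len(buf)
    pos -= 4
    binary.LittleEndian.PutUint32(buf[pos:], 13)
    pos--
    buf[pos] = 0x0d // (1 << 3) | 5: field 1, wire type I32
    fmt.Printf("% x\n", buf[pos:]) // 0d 0d 00 00 00
}
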
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go
new file mode 100644
index 00000000000..4921ea97f09
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_requestcontext.go
@@ -0,0 +1,654 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+func (m *RequestContext) GetClientAddress() any {
+ if m != nil {
+ return m.ClientAddress
+ }
+ return nil
+}
+
+type RequestContext_IP struct {
+ IP *IPAddr
+}
+
+func (m *RequestContext) GetIP() *IPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_IP); ok {
+ return v.IP
+ }
+ return nil
+}
+
+type RequestContext_TCP struct {
+ TCP *TCPAddr
+}
+
+func (m *RequestContext) GetTCP() *TCPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_TCP); ok {
+ return v.TCP
+ }
+ return nil
+}
+
+type RequestContext_UDP struct {
+ UDP *UDPAddr
+}
+
+func (m *RequestContext) GetUDP() *UDPAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_UDP); ok {
+ return v.UDP
+ }
+ return nil
+}
+
+type RequestContext_Unix struct {
+ Unix *UnixAddr
+}
+
+func (m *RequestContext) GetUnix() *UnixAddr {
+ if v, ok := m.GetClientAddress().(*RequestContext_Unix); ok {
+ return v.Unix
+ }
+ return nil
+}
+
+type RequestContext struct {
+ SpanContext *SpanContext
+ ClientMetadata []KeyValue
+ ClientAddress any
+}
+
+var (
+ protoPoolRequestContext = sync.Pool{
+ New: func() any {
+ return &RequestContext{}
+ },
+ }
+
+ ProtoPoolRequestContext_IP = sync.Pool{
+ New: func() any {
+ return &RequestContext_IP{}
+ },
+ }
+
+ ProtoPoolRequestContext_TCP = sync.Pool{
+ New: func() any {
+ return &RequestContext_TCP{}
+ },
+ }
+
+ ProtoPoolRequestContext_UDP = sync.Pool{
+ New: func() any {
+ return &RequestContext_UDP{}
+ },
+ }
+
+ ProtoPoolRequestContext_Unix = sync.Pool{
+ New: func() any {
+ return &RequestContext_Unix{}
+ },
+ }
+)
+
+func NewRequestContext() *RequestContext {
+ if !UseProtoPooling.IsEnabled() {
+ return &RequestContext{}
+ }
+ return protoPoolRequestContext.Get().(*RequestContext)
+}
+
+func DeleteRequestContext(orig *RequestContext, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteSpanContext(orig.SpanContext, true)
+ for i := range orig.ClientMetadata {
+ DeleteKeyValue(&orig.ClientMetadata[i], false)
+ }
+ switch ov := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ DeleteIPAddr(ov.IP, true)
+ ov.IP = nil
+ ProtoPoolRequestContext_IP.Put(ov)
+ case *RequestContext_TCP:
+ DeleteTCPAddr(ov.TCP, true)
+ ov.TCP = nil
+ ProtoPoolRequestContext_TCP.Put(ov)
+ case *RequestContext_UDP:
+ DeleteUDPAddr(ov.UDP, true)
+ ov.UDP = nil
+ ProtoPoolRequestContext_UDP.Put(ov)
+ case *RequestContext_Unix:
+ DeleteUnixAddr(ov.Unix, true)
+ ov.Unix = nil
+ ProtoPoolRequestContext_Unix.Put(ov)
+
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolRequestContext.Put(orig)
+ }
+}
+
+func CopyRequestContext(dest, src *RequestContext) *RequestContext {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewRequestContext()
+ }
+ dest.SpanContext = CopySpanContext(dest.SpanContext, src.SpanContext)
+
+ dest.ClientMetadata = CopyKeyValueSlice(dest.ClientMetadata, src.ClientMetadata)
+
+ switch t := src.ClientAddress.(type) {
+ case *RequestContext_IP:
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ CopyIPAddr(ov.IP, t.IP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_TCP:
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ CopyTCPAddr(ov.TCP, t.TCP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_UDP:
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ CopyUDPAddr(ov.UDP, t.UDP)
+ dest.ClientAddress = ov
+
+ case *RequestContext_Unix:
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ CopyUnixAddr(ov.Unix, t.Unix)
+ dest.ClientAddress = ov
+
+ default:
+ dest.ClientAddress = nil
+ }
+
+ return dest
+}
+
+func CopyRequestContextSlice(dest, src []RequestContext) []RequestContext {
+ var newDest []RequestContext
+ if cap(dest) < len(src) {
+ newDest = make([]RequestContext, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteRequestContext(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyRequestContext(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyRequestContextPtrSlice(dest, src []*RequestContext) []*RequestContext {
+ var newDest []*RequestContext
+ if cap(dest) < len(src) {
+ newDest = make([]*RequestContext, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewRequestContext()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteRequestContext(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewRequestContext()
+ }
+ }
+ for i := range src {
+ CopyRequestContext(newDest[i], src[i])
+ }
+ return newDest
+}
+
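+// Both slice-copy helpers above reuse dest's backing array whenever its
+// capacity suffices: surplus elements are deleted so pooled memory is
+// released, and missing elements are allocated through New*. Callers are
+// therefore expected to reassign the result (hypothetical variable names):
+//
+//	dst = CopyRequestContextPtrSlice(dst, src)
+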
+func (orig *RequestContext) Reset() {
+ *orig = RequestContext{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *RequestContext) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.SpanContext != nil {
+ dest.WriteObjectField("spanContext")
+ orig.SpanContext.MarshalJSON(dest)
+ }
+ if len(orig.ClientMetadata) > 0 {
+ dest.WriteObjectField("clientMetadata")
+ dest.WriteArrayStart()
+ orig.ClientMetadata[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ClientMetadata); i++ {
+ dest.WriteMore()
+ orig.ClientMetadata[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ dest.WriteObjectField("iP")
+ orig.IP.MarshalJSON(dest)
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ dest.WriteObjectField("tCP")
+ orig.TCP.MarshalJSON(dest)
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ dest.WriteObjectField("uDP")
+ orig.UDP.MarshalJSON(dest)
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ dest.WriteObjectField("unix")
+ orig.Unix.MarshalJSON(dest)
+ }
+ }
+ dest.WriteObjectEnd()
+}
+
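+// Note that the oneof JSON keys "iP", "tCP" and "uDP" are mechanically
+// lowerCamel-cased from the Go field names; UnmarshalJSON below matches the
+// same spellings, so JSON round-trips through this package stay consistent.
+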
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *RequestContext) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "spanContext", "span_context":
+ orig.SpanContext = NewSpanContext()
+ orig.SpanContext.UnmarshalJSON(iter)
+ case "clientMetadata", "client_metadata":
+ for iter.ReadArray() {
+ orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
+ orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalJSON(iter)
+ }
+
+ case "iP":
+ {
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ ov.IP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "tCP":
+ {
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ ov.TCP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "uDP":
+ {
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ ov.UDP.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ case "unix":
+ {
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ ov.Unix.UnmarshalJSON(iter)
+ orig.ClientAddress = ov
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *RequestContext) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.SpanContext != nil {
+ l = orig.SpanContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.ClientMetadata {
+ l = orig.ClientMetadata[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case nil:
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ l = orig.IP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ l = orig.TCP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ l = orig.UDP.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ l = orig.Unix.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ }
+ return n
+}
+
+func (orig *RequestContext) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.SpanContext != nil {
+ l = orig.SpanContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ for i := len(orig.ClientMetadata) - 1; i >= 0; i-- {
+ l = orig.ClientMetadata[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ switch orig := orig.ClientAddress.(type) {
+ case *RequestContext_IP:
+ if orig.IP != nil {
+ l = orig.IP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ case *RequestContext_TCP:
+ if orig.TCP != nil {
+ l = orig.TCP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ case *RequestContext_UDP:
+ if orig.UDP != nil {
+ l = orig.UDP.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ case *RequestContext_Unix:
+ if orig.Unix != nil {
+ l = orig.Unix.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ }
+ return len(buf) - pos
+}
+
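+// MarshalProto fills buf backwards, from len(buf) toward the front, and
+// returns the number of bytes written. A usage sketch assuming a buffer
+// sized exactly by SizeProto (the returned count then equals len(buf)):
+//
+//	buf := make([]byte, rc.SizeProto())
+//	n := rc.MarshalProto(buf)
+//	wire := buf[len(buf)-n:]
+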
+func (orig *RequestContext) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.SpanContext = NewSpanContext()
+ err = orig.SpanContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
+ err = orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_IP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_IP{}
+ } else {
+ ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
+ }
+ ov.IP = NewIPAddr()
+ err = ov.IP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_TCP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_TCP{}
+ } else {
+ ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
+ }
+ ov.TCP = NewTCPAddr()
+ err = ov.TCP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field UDP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_UDP
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_UDP{}
+ } else {
+ ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
+ }
+ ov.UDP = NewUDPAddr()
+ err = ov.UDP.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Unix", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var ov *RequestContext_Unix
+ if !UseProtoPooling.IsEnabled() {
+ ov = &RequestContext_Unix{}
+ } else {
+ ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
+ }
+ ov.Unix = NewUnixAddr()
+ err = ov.Unix.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ orig.ClientAddress = ov
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
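+// UnmarshalProto walks the buffer tag by tag, slicing out length-delimited
+// fields for recursive decoding and skipping unknown fields. For example,
+// the two bytes {0x0a, 0x00} decode as field 1 (SpanContext) with wire type
+// LEN and length 0, i.e. an empty SpanContext.
+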
+func GenTestRequestContext() *RequestContext {
+ orig := NewRequestContext()
+ orig.SpanContext = GenTestSpanContext()
+ orig.ClientMetadata = []KeyValue{{}, *GenTestKeyValue()}
+ orig.ClientAddress = &RequestContext_IP{IP: GenTestIPAddr()}
+ return orig
+}
+
+func GenTestRequestContextPtrSlice() []*RequestContext {
+ orig := make([]*RequestContext, 5)
+ orig[0] = NewRequestContext()
+ orig[1] = GenTestRequestContext()
+ orig[2] = NewRequestContext()
+ orig[3] = GenTestRequestContext()
+ orig[4] = NewRequestContext()
+ return orig
+}
+
+func GenTestRequestContextSlice() []RequestContext {
+ orig := make([]RequestContext, 5)
+ orig[1] = *GenTestRequestContext()
+ orig[3] = *GenTestRequestContext()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go
new file mode 100644
index 00000000000..7bef0ffcb96
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resource.go
@@ -0,0 +1,325 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Resource is a message representing the resource information.
+type Resource struct {
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ EntityRefs []*EntityRef
+}
+
+var (
+ protoPoolResource = sync.Pool{
+ New: func() any {
+ return &Resource{}
+ },
+ }
+)
+
+func NewResource() *Resource {
+ if !UseProtoPooling.IsEnabled() {
+ return &Resource{}
+ }
+ return protoPoolResource.Get().(*Resource)
+}
+
+func DeleteResource(orig *Resource, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ for i := range orig.EntityRefs {
+ DeleteEntityRef(orig.EntityRefs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResource.Put(orig)
+ }
+}
+
+func CopyResource(dest, src *Resource) *Resource {
+	// If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResource()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.EntityRefs = CopyEntityRefPtrSlice(dest.EntityRefs, src.EntityRefs)
+
+ return dest
+}
+
+func CopyResourceSlice(dest, src []Resource) []Resource {
+ var newDest []Resource
+ if cap(dest) < len(src) {
+ newDest = make([]Resource, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResource(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResource(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourcePtrSlice(dest, src []*Resource) []*Resource {
+ var newDest []*Resource
+ if cap(dest) < len(src) {
+ newDest = make([]*Resource, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResource()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResource(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResource()
+ }
+ }
+ for i := range src {
+ CopyResource(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Resource) Reset() {
+ *orig = Resource{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Resource) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if len(orig.EntityRefs) > 0 {
+ dest.WriteObjectField("entityRefs")
+ dest.WriteArrayStart()
+ orig.EntityRefs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.EntityRefs); i++ {
+ dest.WriteMore()
+ orig.EntityRefs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *Resource) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "entityRefs", "entity_refs":
+ for iter.ReadArray() {
+ orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
+ orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Resource) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ for i := range orig.EntityRefs {
+ l = orig.EntityRefs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
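+// In SizeProto, each 1 + proto.Sov(uint64(l)) + l term accounts for a
+// one-byte field tag, the varint-encoded length, and the payload itself; a
+// varint field such as DroppedAttributesCount costs one tag byte plus the
+// varint size of its value.
+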
+func (orig *Resource) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x10
+ }
+ for i := len(orig.EntityRefs) - 1; i >= 0; i-- {
+ l = orig.EntityRefs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Resource) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
+ err = orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResource() *Resource {
+ orig := NewResource()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.EntityRefs = []*EntityRef{{}, GenTestEntityRef()}
+ return orig
+}
+
+func GenTestResourcePtrSlice() []*Resource {
+ orig := make([]*Resource, 5)
+ orig[0] = NewResource()
+ orig[1] = GenTestResource()
+ orig[2] = NewResource()
+ orig[3] = GenTestResource()
+ orig[4] = NewResource()
+ return orig
+}
+
+func GenTestResourceSlice() []Resource {
+ orig := make([]Resource, 5)
+ orig[1] = *GenTestResource()
+ orig[3] = *GenTestResource()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go
new file mode 100644
index 00000000000..57c32e3c17b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcelogs.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceLogs is a collection of logs from a Resource.
+type ResourceLogs struct {
+ Resource Resource
+ ScopeLogs []*ScopeLogs
+ SchemaUrl string
+ DeprecatedScopeLogs []*ScopeLogs
+}
+
+var (
+ protoPoolResourceLogs = sync.Pool{
+ New: func() any {
+ return &ResourceLogs{}
+ },
+ }
+)
+
+func NewResourceLogs() *ResourceLogs {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceLogs{}
+ }
+ return protoPoolResourceLogs.Get().(*ResourceLogs)
+}
+
+func DeleteResourceLogs(orig *ResourceLogs, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeLogs {
+ DeleteScopeLogs(orig.ScopeLogs[i], true)
+ }
+ for i := range orig.DeprecatedScopeLogs {
+ DeleteScopeLogs(orig.DeprecatedScopeLogs[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceLogs.Put(orig)
+ }
+}
+
+func CopyResourceLogs(dest, src *ResourceLogs) *ResourceLogs {
+	// If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceLogs()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeLogs = CopyScopeLogsPtrSlice(dest.ScopeLogs, src.ScopeLogs)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeLogs = CopyScopeLogsPtrSlice(dest.DeprecatedScopeLogs, src.DeprecatedScopeLogs)
+
+ return dest
+}
+
+func CopyResourceLogsSlice(dest, src []ResourceLogs) []ResourceLogs {
+ var newDest []ResourceLogs
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceLogs, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceLogs(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceLogs(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceLogsPtrSlice(dest, src []*ResourceLogs) []*ResourceLogs {
+ var newDest []*ResourceLogs
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceLogs, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceLogs()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceLogs(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceLogs()
+ }
+ }
+ for i := range src {
+ CopyResourceLogs(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceLogs) Reset() {
+ *orig = ResourceLogs{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceLogs) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeLogs) > 0 {
+ dest.WriteObjectField("scopeLogs")
+ dest.WriteArrayStart()
+ orig.ScopeLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeLogs); i++ {
+ dest.WriteMore()
+ orig.ScopeLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeLogs) > 0 {
+ dest.WriteObjectField("deprecatedScopeLogs")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeLogs[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeLogs); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeLogs[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ResourceLogs) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeLogs", "scope_logs":
+ for iter.ReadArray() {
+ orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
+ orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeLogs", "deprecated_scope_logs":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
+ orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceLogs) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeLogs {
+ l = orig.ScopeLogs[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeLogs {
+ l = orig.DeprecatedScopeLogs[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceLogs) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeLogs) - 1; i >= 0; i-- {
+ l = orig.ScopeLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeLogs) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeLogs[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
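+// The DeprecatedScopeLogs tag is two bytes because field 1000 with wire
+// type LEN encodes as tag (1000<<3)|2 = 0x1F42, whose varint form is
+// 0xC2 0x3E; the backwards-filling encoder writes 0x3E first so the bytes
+// land in wire order, and SizeProto's 2-byte term matches. The same layout
+// recurs in ResourceMetrics and ResourceSpans below.
+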
+func (orig *ResourceLogs) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
+ err = orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
+ err = orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceLogs() *ResourceLogs {
+ orig := NewResourceLogs()
+ orig.Resource = *GenTestResource()
+ orig.ScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
+ return orig
+}
+
+func GenTestResourceLogsPtrSlice() []*ResourceLogs {
+ orig := make([]*ResourceLogs, 5)
+ orig[0] = NewResourceLogs()
+ orig[1] = GenTestResourceLogs()
+ orig[2] = NewResourceLogs()
+ orig[3] = GenTestResourceLogs()
+ orig[4] = NewResourceLogs()
+ return orig
+}
+
+func GenTestResourceLogsSlice() []ResourceLogs {
+ orig := make([]ResourceLogs, 5)
+ orig[1] = *GenTestResourceLogs()
+ orig[3] = *GenTestResourceLogs()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go
new file mode 100644
index 00000000000..a260fe62e4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcemetrics.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceMetrics is a collection of metrics from a Resource.
+type ResourceMetrics struct {
+ Resource Resource
+ ScopeMetrics []*ScopeMetrics
+ SchemaUrl string
+ DeprecatedScopeMetrics []*ScopeMetrics
+}
+
+var (
+ protoPoolResourceMetrics = sync.Pool{
+ New: func() any {
+ return &ResourceMetrics{}
+ },
+ }
+)
+
+func NewResourceMetrics() *ResourceMetrics {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceMetrics{}
+ }
+ return protoPoolResourceMetrics.Get().(*ResourceMetrics)
+}
+
+func DeleteResourceMetrics(orig *ResourceMetrics, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeMetrics {
+ DeleteScopeMetrics(orig.ScopeMetrics[i], true)
+ }
+ for i := range orig.DeprecatedScopeMetrics {
+ DeleteScopeMetrics(orig.DeprecatedScopeMetrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceMetrics.Put(orig)
+ }
+}
+
+func CopyResourceMetrics(dest, src *ResourceMetrics) *ResourceMetrics {
+	// If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceMetrics()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeMetrics = CopyScopeMetricsPtrSlice(dest.ScopeMetrics, src.ScopeMetrics)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeMetrics = CopyScopeMetricsPtrSlice(dest.DeprecatedScopeMetrics, src.DeprecatedScopeMetrics)
+
+ return dest
+}
+
+func CopyResourceMetricsSlice(dest, src []ResourceMetrics) []ResourceMetrics {
+ var newDest []ResourceMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceMetrics, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceMetrics(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceMetrics(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceMetricsPtrSlice(dest, src []*ResourceMetrics) []*ResourceMetrics {
+ var newDest []*ResourceMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceMetrics, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceMetrics()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceMetrics(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceMetrics()
+ }
+ }
+ for i := range src {
+ CopyResourceMetrics(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceMetrics) Reset() {
+ *orig = ResourceMetrics{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceMetrics) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeMetrics) > 0 {
+ dest.WriteObjectField("scopeMetrics")
+ dest.WriteArrayStart()
+ orig.ScopeMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeMetrics); i++ {
+ dest.WriteMore()
+ orig.ScopeMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeMetrics) > 0 {
+ dest.WriteObjectField("deprecatedScopeMetrics")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeMetrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeMetrics); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeMetrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ResourceMetrics) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeMetrics", "scope_metrics":
+ for iter.ReadArray() {
+ orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
+ orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeMetrics", "deprecated_scope_metrics":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
+ orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceMetrics) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeMetrics {
+ l = orig.ScopeMetrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeMetrics {
+ l = orig.DeprecatedScopeMetrics[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceMetrics) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- {
+ l = orig.ScopeMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeMetrics) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeMetrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceMetrics) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
+ err = orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
+ err = orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceMetrics() *ResourceMetrics {
+ orig := NewResourceMetrics()
+ orig.Resource = *GenTestResource()
+ orig.ScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
+ return orig
+}
+
+func GenTestResourceMetricsPtrSlice() []*ResourceMetrics {
+ orig := make([]*ResourceMetrics, 5)
+ orig[0] = NewResourceMetrics()
+ orig[1] = GenTestResourceMetrics()
+ orig[2] = NewResourceMetrics()
+ orig[3] = GenTestResourceMetrics()
+ orig[4] = NewResourceMetrics()
+ return orig
+}
+
+func GenTestResourceMetricsSlice() []ResourceMetrics {
+ orig := make([]ResourceMetrics, 5)
+ orig[1] = *GenTestResourceMetrics()
+ orig[3] = *GenTestResourceMetrics()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go
new file mode 100644
index 00000000000..69522c24d84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourceprofiles.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceProfiles is a collection of profiles from a Resource.
+type ResourceProfiles struct {
+ Resource Resource
+ ScopeProfiles []*ScopeProfiles
+ SchemaUrl string
+}
+
+var (
+ protoPoolResourceProfiles = sync.Pool{
+ New: func() any {
+ return &ResourceProfiles{}
+ },
+ }
+)
+
+func NewResourceProfiles() *ResourceProfiles {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceProfiles{}
+ }
+ return protoPoolResourceProfiles.Get().(*ResourceProfiles)
+}
+
+func DeleteResourceProfiles(orig *ResourceProfiles, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeProfiles {
+ DeleteScopeProfiles(orig.ScopeProfiles[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceProfiles.Put(orig)
+ }
+}
+
+func CopyResourceProfiles(dest, src *ResourceProfiles) *ResourceProfiles {
+	// If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceProfiles()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeProfiles = CopyScopeProfilesPtrSlice(dest.ScopeProfiles, src.ScopeProfiles)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyResourceProfilesSlice(dest, src []ResourceProfiles) []ResourceProfiles {
+ var newDest []ResourceProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceProfiles, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceProfiles(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceProfiles(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceProfilesPtrSlice(dest, src []*ResourceProfiles) []*ResourceProfiles {
+ var newDest []*ResourceProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceProfiles, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceProfiles()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceProfiles(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceProfiles()
+ }
+ }
+ for i := range src {
+ CopyResourceProfiles(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceProfiles) Reset() {
+ *orig = ResourceProfiles{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceProfiles) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeProfiles) > 0 {
+ dest.WriteObjectField("scopeProfiles")
+ dest.WriteArrayStart()
+ orig.ScopeProfiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeProfiles); i++ {
+ dest.WriteMore()
+ orig.ScopeProfiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ResourceProfiles) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeProfiles", "scope_profiles":
+ for iter.ReadArray() {
+ orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
+ orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceProfiles) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeProfiles {
+ l = orig.ScopeProfiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceProfiles) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- {
+ l = orig.ScopeProfiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceProfiles) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
+ err = orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceProfiles() *ResourceProfiles {
+ orig := NewResourceProfiles()
+ orig.Resource = *GenTestResource()
+ orig.ScopeProfiles = []*ScopeProfiles{{}, GenTestScopeProfiles()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestResourceProfilesPtrSlice() []*ResourceProfiles {
+ orig := make([]*ResourceProfiles, 5)
+ orig[0] = NewResourceProfiles()
+ orig[1] = GenTestResourceProfiles()
+ orig[2] = NewResourceProfiles()
+ orig[3] = GenTestResourceProfiles()
+ orig[4] = NewResourceProfiles()
+ return orig
+}
+
+func GenTestResourceProfilesSlice() []ResourceProfiles {
+ orig := make([]ResourceProfiles, 5)
+ orig[1] = *GenTestResourceProfiles()
+ orig[3] = *GenTestResourceProfiles()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go
new file mode 100644
index 00000000000..bd70dedb80e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_resourcespans.go
@@ -0,0 +1,365 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ResourceSpans is a collection of spans from a Resource.
+type ResourceSpans struct {
+ Resource Resource
+ ScopeSpans []*ScopeSpans
+ SchemaUrl string
+ DeprecatedScopeSpans []*ScopeSpans
+}
+
+var (
+ protoPoolResourceSpans = sync.Pool{
+ New: func() any {
+ return &ResourceSpans{}
+ },
+ }
+)
+
+func NewResourceSpans() *ResourceSpans {
+ if !UseProtoPooling.IsEnabled() {
+ return &ResourceSpans{}
+ }
+ return protoPoolResourceSpans.Get().(*ResourceSpans)
+}
+
+func DeleteResourceSpans(orig *ResourceSpans, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteResource(&orig.Resource, false)
+ for i := range orig.ScopeSpans {
+ DeleteScopeSpans(orig.ScopeSpans[i], true)
+ }
+ for i := range orig.DeprecatedScopeSpans {
+ DeleteScopeSpans(orig.DeprecatedScopeSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolResourceSpans.Put(orig)
+ }
+}
+
+func CopyResourceSpans(dest, src *ResourceSpans) *ResourceSpans {
+	// If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewResourceSpans()
+ }
+ CopyResource(&dest.Resource, &src.Resource)
+
+ dest.ScopeSpans = CopyScopeSpansPtrSlice(dest.ScopeSpans, src.ScopeSpans)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ dest.DeprecatedScopeSpans = CopyScopeSpansPtrSlice(dest.DeprecatedScopeSpans, src.DeprecatedScopeSpans)
+
+ return dest
+}
+
+func CopyResourceSpansSlice(dest, src []ResourceSpans) []ResourceSpans {
+ var newDest []ResourceSpans
+ if cap(dest) < len(src) {
+ newDest = make([]ResourceSpans, len(src))
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceSpans(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyResourceSpans(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyResourceSpansPtrSlice(dest, src []*ResourceSpans) []*ResourceSpans {
+ var newDest []*ResourceSpans
+ if cap(dest) < len(src) {
+ newDest = make([]*ResourceSpans, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+		// Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceSpans()
+ }
+ } else {
+ newDest = dest[:len(src)]
+		// Clean up the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteResourceSpans(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewResourceSpans()
+ }
+ }
+ for i := range src {
+ CopyResourceSpans(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ResourceSpans) Reset() {
+ *orig = ResourceSpans{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ResourceSpans) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("resource")
+ orig.Resource.MarshalJSON(dest)
+ if len(orig.ScopeSpans) > 0 {
+ dest.WriteObjectField("scopeSpans")
+ dest.WriteArrayStart()
+ orig.ScopeSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ScopeSpans); i++ {
+ dest.WriteMore()
+ orig.ScopeSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ if len(orig.DeprecatedScopeSpans) > 0 {
+ dest.WriteObjectField("deprecatedScopeSpans")
+ dest.WriteArrayStart()
+ orig.DeprecatedScopeSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DeprecatedScopeSpans); i++ {
+ dest.WriteMore()
+ orig.DeprecatedScopeSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
+func (orig *ResourceSpans) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resource":
+ orig.Resource.UnmarshalJSON(iter)
+ case "scopeSpans", "scope_spans":
+ for iter.ReadArray() {
+ orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
+ orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ case "deprecatedScopeSpans", "deprecated_scope_spans":
+ for iter.ReadArray() {
+ orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
+ orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ResourceSpans) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Resource.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.ScopeSpans {
+ l = orig.ScopeSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.DeprecatedScopeSpans {
+ l = orig.DeprecatedScopeSpans[i].SizeProto()
+ n += 2 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ResourceSpans) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Resource.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.ScopeSpans) - 1; i >= 0; i-- {
+ l = orig.ScopeSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.DeprecatedScopeSpans) - 1; i >= 0; i-- {
+ l = orig.DeprecatedScopeSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3e
+ pos--
+ buf[pos] = 0xc2
+ }
+ return len(buf) - pos
+}
+
+func (orig *ResourceSpans) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+		// If parsing inside a group, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Resource.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
+ err = orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+
+ case 1000:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
+ err = orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestResourceSpans() *ResourceSpans {
+ orig := NewResourceSpans()
+ orig.Resource = *GenTestResource()
+ orig.ScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
+ orig.SchemaUrl = "test_schemaurl"
+ orig.DeprecatedScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
+ return orig
+}
+
+func GenTestResourceSpansPtrSlice() []*ResourceSpans {
+ orig := make([]*ResourceSpans, 5)
+ orig[0] = NewResourceSpans()
+ orig[1] = GenTestResourceSpans()
+ orig[2] = NewResourceSpans()
+ orig[3] = GenTestResourceSpans()
+ orig[4] = NewResourceSpans()
+ return orig
+}
+
+func GenTestResourceSpansSlice() []ResourceSpans {
+ orig := make([]ResourceSpans, 5)
+ orig[1] = *GenTestResourceSpans()
+ orig[3] = *GenTestResourceSpans()
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go
new file mode 100644
index 00000000000..df89e9b63e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sample.go
@@ -0,0 +1,451 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Sample represents each record value encountered within a profiled program.
+type Sample struct {
+ StackIndex int32
+ Values []int64
+ AttributeIndices []int32
+ LinkIndex int32
+ TimestampsUnixNano []uint64
+}
+
+var (
+ protoPoolSample = sync.Pool{
+ New: func() any {
+ return &Sample{}
+ },
+ }
+)
+
+func NewSample() *Sample {
+ if !UseProtoPooling.IsEnabled() {
+ return &Sample{}
+ }
+ return protoPoolSample.Get().(*Sample)
+}
+
+func DeleteSample(orig *Sample, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSample.Put(orig)
+ }
+}
+
+func CopySample(dest, src *Sample) *Sample {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSample()
+ }
+ dest.StackIndex = src.StackIndex
+
+ dest.Values = append(dest.Values[:0], src.Values...)
+ dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
+ dest.LinkIndex = src.LinkIndex
+
+ dest.TimestampsUnixNano = append(dest.TimestampsUnixNano[:0], src.TimestampsUnixNano...)
+
+ return dest
+}
+
+func CopySampleSlice(dest, src []Sample) []Sample {
+ var newDest []Sample
+ if cap(dest) < len(src) {
+ newDest = make([]Sample, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSample(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySample(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySamplePtrSlice(dest, src []*Sample) []*Sample {
+ var newDest []*Sample
+ if cap(dest) < len(src) {
+ newDest = make([]*Sample, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSample()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSample(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSample()
+ }
+ }
+ for i := range src {
+ CopySample(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Sample) Reset() {
+ *orig = Sample{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Sample) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.StackIndex != int32(0) {
+ dest.WriteObjectField("stackIndex")
+ dest.WriteInt32(orig.StackIndex)
+ }
+ if len(orig.Values) > 0 {
+ dest.WriteObjectField("values")
+ dest.WriteArrayStart()
+ dest.WriteInt64(orig.Values[0])
+ for i := 1; i < len(orig.Values); i++ {
+ dest.WriteMore()
+ dest.WriteInt64(orig.Values[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if len(orig.AttributeIndices) > 0 {
+ dest.WriteObjectField("attributeIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.AttributeIndices[0])
+ for i := 1; i < len(orig.AttributeIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.AttributeIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.LinkIndex != int32(0) {
+ dest.WriteObjectField("linkIndex")
+ dest.WriteInt32(orig.LinkIndex)
+ }
+ if len(orig.TimestampsUnixNano) > 0 {
+ dest.WriteObjectField("timestampsUnixNano")
+ dest.WriteArrayStart()
+ dest.WriteUint64(orig.TimestampsUnixNano[0])
+ for i := 1; i < len(orig.TimestampsUnixNano); i++ {
+ dest.WriteMore()
+ dest.WriteUint64(orig.TimestampsUnixNano[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Sample) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "stackIndex", "stack_index":
+ orig.StackIndex = iter.ReadInt32()
+ case "values":
+ for iter.ReadArray() {
+ orig.Values = append(orig.Values, iter.ReadInt64())
+ }
+
+ case "attributeIndices", "attribute_indices":
+ for iter.ReadArray() {
+ orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
+ }
+
+ case "linkIndex", "link_index":
+ orig.LinkIndex = iter.ReadInt32()
+ case "timestampsUnixNano", "timestamps_unix_nano":
+ for iter.ReadArray() {
+ orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, iter.ReadUint64())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Sample) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.StackIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.StackIndex))
+ }
+ if len(orig.Values) > 0 {
+ l = 0
+ for _, e := range orig.Values {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if len(orig.AttributeIndices) > 0 {
+ l = 0
+ for _, e := range orig.AttributeIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.LinkIndex != 0 {
+ n += 1 + proto.Sov(uint64(orig.LinkIndex))
+ }
+ l = len(orig.TimestampsUnixNano)
+ if l > 0 {
+ l *= 8
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Sample) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.StackIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.StackIndex))
+ pos--
+ buf[pos] = 0x8
+ }
+ l = len(orig.Values)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Values[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.AttributeIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0x1a
+ }
+ if orig.LinkIndex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex))
+ pos--
+ buf[pos] = 0x20
+ }
+ l = len(orig.TimestampsUnixNano)
+ if l > 0 {
+ for i := l - 1; i >= 0; i-- {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimestampsUnixNano[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(l*8))
+ pos--
+ buf[pos] = 0x2a
+ }
+ return len(buf) - pos
+}
+
+func (orig *Sample) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StackIndex = int32(num)
+ case 2:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.Values = append(orig.Values, int64(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field Values", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.Values = append(orig.Values, int64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+ }
+ case 3:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
+ }
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.LinkIndex = int32(num)
+ case 5:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ size := length / 8
+ orig.TimestampsUnixNano = make([]uint64, size)
+ var num uint64
+ for i := 0; i < size; i++ {
+ num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.TimestampsUnixNano[i] = uint64(num)
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field TimestampsUnixNano", pos-startPos)
+ }
+ case proto.WireTypeI64:
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, uint64(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSample() *Sample {
+ orig := NewSample()
+ orig.StackIndex = int32(13)
+ orig.Values = []int64{int64(0), int64(13)}
+ orig.AttributeIndices = []int32{int32(0), int32(13)}
+ orig.LinkIndex = int32(13)
+ orig.TimestampsUnixNano = []uint64{uint64(0), uint64(13)}
+ return orig
+}
+
+func GenTestSamplePtrSlice() []*Sample {
+ orig := make([]*Sample, 5)
+ orig[0] = NewSample()
+ orig[1] = GenTestSample()
+ orig[2] = NewSample()
+ orig[3] = GenTestSample()
+ orig[4] = NewSample()
+ return orig
+}
+
+func GenTestSampleSlice() []Sample {
+ orig := make([]Sample, 5)
+ orig[1] = *GenTestSample()
+ orig[3] = *GenTestSample()
+ return orig
+}
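
Sample.Values and Sample.AttributeIndices above accept both the packed (LEN-delimited) and unpacked (one varint per tag) encodings, as proto3 parsers must for scalar repeated fields. Here is a short sketch of the packed branch, with readUvarint as a hypothetical stand-in for proto.ConsumeVarint.

package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes one protobuf varint starting at pos and returns
// the value together with the position just past it.
func readUvarint(buf []byte, pos int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if pos >= len(buf) {
			return 0, 0, errors.New("truncated varint")
		}
		b := buf[pos]
		pos++
		v |= uint64(b&0x7f) << shift
		if b&0x80 == 0 {
			return v, pos, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// The body of a packed Values field holding [0, 13]: varints back
	// to back, with no per-element tags.
	payload := []byte{0x00, 0x0d}
	var values []int64
	for pos := 0; pos < len(payload); {
		num, next, err := readUvarint(payload, pos)
		if err != nil {
			panic(err)
		}
		values = append(values, int64(num))
		pos = next
	}
	fmt.Println(values) // [0 13]
}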
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go
new file mode 100644
index 00000000000..8f303264c73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopelogs.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeLogs is a collection of logs from a LibraryInstrumentation.
+type ScopeLogs struct {
+ Scope InstrumentationScope
+ LogRecords []*LogRecord
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeLogs = sync.Pool{
+ New: func() any {
+ return &ScopeLogs{}
+ },
+ }
+)
+
+func NewScopeLogs() *ScopeLogs {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeLogs{}
+ }
+ return protoPoolScopeLogs.Get().(*ScopeLogs)
+}
+
+func DeleteScopeLogs(orig *ScopeLogs, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.LogRecords {
+ DeleteLogRecord(orig.LogRecords[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeLogs.Put(orig)
+ }
+}
+
+func CopyScopeLogs(dest, src *ScopeLogs) *ScopeLogs {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeLogs()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.LogRecords = CopyLogRecordPtrSlice(dest.LogRecords, src.LogRecords)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeLogsSlice(dest, src []ScopeLogs) []ScopeLogs {
+ var newDest []ScopeLogs
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeLogs, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeLogs(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeLogs(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeLogsPtrSlice(dest, src []*ScopeLogs) []*ScopeLogs {
+ var newDest []*ScopeLogs
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeLogs, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeLogs()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeLogs(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeLogs()
+ }
+ }
+ for i := range src {
+ CopyScopeLogs(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeLogs) Reset() {
+ *orig = ScopeLogs{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeLogs) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.LogRecords) > 0 {
+ dest.WriteObjectField("logRecords")
+ dest.WriteArrayStart()
+ orig.LogRecords[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.LogRecords); i++ {
+ dest.WriteMore()
+ orig.LogRecords[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeLogs) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "logRecords", "log_records":
+ for iter.ReadArray() {
+ orig.LogRecords = append(orig.LogRecords, NewLogRecord())
+ orig.LogRecords[len(orig.LogRecords)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeLogs) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.LogRecords {
+ l = orig.LogRecords[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeLogs) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.LogRecords) - 1; i >= 0; i-- {
+ l = orig.LogRecords[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeLogs) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.LogRecords = append(orig.LogRecords, NewLogRecord())
+ err = orig.LogRecords[len(orig.LogRecords)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeLogs() *ScopeLogs {
+ orig := NewScopeLogs()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.LogRecords = []*LogRecord{{}, GenTestLogRecord()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeLogsPtrSlice() []*ScopeLogs {
+ orig := make([]*ScopeLogs, 5)
+ orig[0] = NewScopeLogs()
+ orig[1] = GenTestScopeLogs()
+ orig[2] = NewScopeLogs()
+ orig[3] = GenTestScopeLogs()
+ orig[4] = NewScopeLogs()
+ return orig
+}
+
+func GenTestScopeLogsSlice() []ScopeLogs {
+ orig := make([]ScopeLogs, 5)
+ orig[1] = *GenTestScopeLogs()
+ orig[3] = *GenTestScopeLogs()
+ return orig
+}
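
Every NewXxx/DeleteXxx pair in these files follows the same pooling shape: allocation goes through a sync.Pool only when the UseProtoPooling gate is enabled, and Delete always resets the value but returns it to the pool only when the caller owns the pointer (nullable == true), since by-value embedded fields such as Scope live inside their parent. A reduced sketch of that contract follows, with usePooling as a plain variable standing in for the feature gate.

package main

import (
	"fmt"
	"sync"
)

type Scope struct{ Name string }

var usePooling = true // stand-in for UseProtoPooling.IsEnabled()

var scopePool = sync.Pool{New: func() any { return &Scope{} }}

func newScope() *Scope {
	if !usePooling {
		return &Scope{}
	}
	return scopePool.Get().(*Scope)
}

func deleteScope(s *Scope, nullable bool) {
	if s == nil {
		return
	}
	*s = Scope{} // Reset, so pooled objects never leak old data
	if usePooling && nullable {
		scopePool.Put(s)
	}
}

func main() {
	s := newScope()
	s.Name = "example"
	deleteScope(s, true)
	fmt.Printf("%q\n", newScope().Name) // "" — a reused object comes back reset
}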
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go
new file mode 100644
index 00000000000..fc521a4017c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopemetrics.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeMetrics is a collection of metrics from a LibraryInstrumentation.
+type ScopeMetrics struct {
+ Scope InstrumentationScope
+ Metrics []*Metric
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeMetrics = sync.Pool{
+ New: func() any {
+ return &ScopeMetrics{}
+ },
+ }
+)
+
+func NewScopeMetrics() *ScopeMetrics {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeMetrics{}
+ }
+ return protoPoolScopeMetrics.Get().(*ScopeMetrics)
+}
+
+func DeleteScopeMetrics(orig *ScopeMetrics, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Metrics {
+ DeleteMetric(orig.Metrics[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeMetrics.Put(orig)
+ }
+}
+
+func CopyScopeMetrics(dest, src *ScopeMetrics) *ScopeMetrics {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeMetrics()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Metrics = CopyMetricPtrSlice(dest.Metrics, src.Metrics)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeMetricsSlice(dest, src []ScopeMetrics) []ScopeMetrics {
+ var newDest []ScopeMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeMetrics, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeMetrics(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeMetrics(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeMetricsPtrSlice(dest, src []*ScopeMetrics) []*ScopeMetrics {
+ var newDest []*ScopeMetrics
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeMetrics, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeMetrics()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeMetrics(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeMetrics()
+ }
+ }
+ for i := range src {
+ CopyScopeMetrics(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeMetrics) Reset() {
+ *orig = ScopeMetrics{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeMetrics) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Metrics) > 0 {
+ dest.WriteObjectField("metrics")
+ dest.WriteArrayStart()
+ orig.Metrics[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Metrics); i++ {
+ dest.WriteMore()
+ orig.Metrics[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeMetrics) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "metrics":
+ for iter.ReadArray() {
+ orig.Metrics = append(orig.Metrics, NewMetric())
+ orig.Metrics[len(orig.Metrics)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeMetrics) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Metrics {
+ l = orig.Metrics[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeMetrics) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Metrics) - 1; i >= 0; i-- {
+ l = orig.Metrics[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeMetrics) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Metrics = append(orig.Metrics, NewMetric())
+ err = orig.Metrics[len(orig.Metrics)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeMetrics() *ScopeMetrics {
+ orig := NewScopeMetrics()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Metrics = []*Metric{{}, GenTestMetric()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeMetricsPtrSlice() []*ScopeMetrics {
+ orig := make([]*ScopeMetrics, 5)
+ orig[0] = NewScopeMetrics()
+ orig[1] = GenTestScopeMetrics()
+ orig[2] = NewScopeMetrics()
+ orig[3] = GenTestScopeMetrics()
+ orig[4] = NewScopeMetrics()
+ return orig
+}
+
+func GenTestScopeMetricsSlice() []ScopeMetrics {
+ orig := make([]ScopeMetrics, 5)
+ orig[1] = *GenTestScopeMetrics()
+ orig[3] = *GenTestScopeMetrics()
+ return orig
+}
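
The CopyXxxPtrSlice helpers repeated above reuse the destination's backing array and existing pointers whenever capacity allows, and nil out only the tail that src no longer covers so the GC can reclaim it. A simplified sketch of that reuse strategy, using a hypothetical Item type and no pooling:

package main

import "fmt"

type Item struct{ V int }

func copyItemPtrSlice(dest, src []*Item) []*Item {
	var out []*Item
	if cap(dest) < len(src) {
		out = make([]*Item, len(src))
		copy(out, dest) // keep old pointers alive for reuse
		for i := len(dest); i < len(src); i++ {
			out[i] = &Item{}
		}
	} else {
		out = dest[:len(src)]
		for i := len(src); i < len(dest); i++ {
			dest[i] = nil // drop the unused tail
		}
		for i := len(dest); i < len(src); i++ {
			out[i] = &Item{}
		}
	}
	for i := range src {
		*out[i] = *src[i] // copy values into the (re)used pointers
	}
	return out
}

func main() {
	dest := make([]*Item, 0, 8)
	dest = copyItemPtrSlice(dest, []*Item{{1}, {2}, {3}})
	fmt.Println(len(dest), cap(dest), dest[2].V) // 3 8 3 — capacity is preserved
}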
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go
new file mode 100644
index 00000000000..c17284b1bbd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopeprofiles.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeProfiles is a collection of profiles from a LibraryInstrumentation.
+type ScopeProfiles struct {
+ Scope InstrumentationScope
+ Profiles []*Profile
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeProfiles = sync.Pool{
+ New: func() any {
+ return &ScopeProfiles{}
+ },
+ }
+)
+
+func NewScopeProfiles() *ScopeProfiles {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeProfiles{}
+ }
+ return protoPoolScopeProfiles.Get().(*ScopeProfiles)
+}
+
+func DeleteScopeProfiles(orig *ScopeProfiles, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Profiles {
+ DeleteProfile(orig.Profiles[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeProfiles.Put(orig)
+ }
+}
+
+func CopyScopeProfiles(dest, src *ScopeProfiles) *ScopeProfiles {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeProfiles()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Profiles = CopyProfilePtrSlice(dest.Profiles, src.Profiles)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeProfilesSlice(dest, src []ScopeProfiles) []ScopeProfiles {
+ var newDest []ScopeProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeProfiles, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeProfiles(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeProfiles(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeProfilesPtrSlice(dest, src []*ScopeProfiles) []*ScopeProfiles {
+ var newDest []*ScopeProfiles
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeProfiles, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeProfiles()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeProfiles(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeProfiles()
+ }
+ }
+ for i := range src {
+ CopyScopeProfiles(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeProfiles) Reset() {
+ *orig = ScopeProfiles{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeProfiles) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Profiles) > 0 {
+ dest.WriteObjectField("profiles")
+ dest.WriteArrayStart()
+ orig.Profiles[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Profiles); i++ {
+ dest.WriteMore()
+ orig.Profiles[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeProfiles) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "profiles":
+ for iter.ReadArray() {
+ orig.Profiles = append(orig.Profiles, NewProfile())
+ orig.Profiles[len(orig.Profiles)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeProfiles) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Profiles {
+ l = orig.Profiles[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeProfiles) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Profiles) - 1; i >= 0; i-- {
+ l = orig.Profiles[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeProfiles) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Profiles = append(orig.Profiles, NewProfile())
+ err = orig.Profiles[len(orig.Profiles)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeProfiles() *ScopeProfiles {
+ orig := NewScopeProfiles()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Profiles = []*Profile{{}, GenTestProfile()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeProfilesPtrSlice() []*ScopeProfiles {
+ orig := make([]*ScopeProfiles, 5)
+ orig[0] = NewScopeProfiles()
+ orig[1] = GenTestScopeProfiles()
+ orig[2] = NewScopeProfiles()
+ orig[3] = GenTestScopeProfiles()
+ orig[4] = NewScopeProfiles()
+ return orig
+}
+
+func GenTestScopeProfilesSlice() []ScopeProfiles {
+ orig := make([]ScopeProfiles, 5)
+ orig[1] = *GenTestScopeProfiles()
+ orig[3] = *GenTestScopeProfiles()
+ return orig
+}
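
The hard-coded bytes 0xa, 0x12, and 0x1a recurring in every marshaler here are precomputed protobuf keys: (fieldNumber << 3) | wireType, which fits in a single byte for field numbers 1 through 15. A two-line sketch of the arithmetic:

package main

import "fmt"

const wireLen = 2 // the LEN wire type, as in proto.WireTypeLen

func tag(field, wire int) int { return field<<3 | wire }

func main() {
	fmt.Printf("scope:     %#x\n", tag(1, wireLen)) // 0xa
	fmt.Printf("spans:     %#x\n", tag(2, wireLen)) // 0x12
	fmt.Printf("schemaUrl: %#x\n", tag(3, wireLen)) // 0x1a
}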
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go
new file mode 100644
index 00000000000..a02a1c71557
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_scopespans.go
@@ -0,0 +1,313 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ScopeSpans is a collection of spans from a LibraryInstrumentation.
+type ScopeSpans struct {
+ Scope InstrumentationScope
+ Spans []*Span
+ SchemaUrl string
+}
+
+var (
+ protoPoolScopeSpans = sync.Pool{
+ New: func() any {
+ return &ScopeSpans{}
+ },
+ }
+)
+
+func NewScopeSpans() *ScopeSpans {
+ if !UseProtoPooling.IsEnabled() {
+ return &ScopeSpans{}
+ }
+ return protoPoolScopeSpans.Get().(*ScopeSpans)
+}
+
+func DeleteScopeSpans(orig *ScopeSpans, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteInstrumentationScope(&orig.Scope, false)
+ for i := range orig.Spans {
+ DeleteSpan(orig.Spans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolScopeSpans.Put(orig)
+ }
+}
+
+func CopyScopeSpans(dest, src *ScopeSpans) *ScopeSpans {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewScopeSpans()
+ }
+ CopyInstrumentationScope(&dest.Scope, &src.Scope)
+
+ dest.Spans = CopySpanPtrSlice(dest.Spans, src.Spans)
+
+ dest.SchemaUrl = src.SchemaUrl
+
+ return dest
+}
+
+func CopyScopeSpansSlice(dest, src []ScopeSpans) []ScopeSpans {
+ var newDest []ScopeSpans
+ if cap(dest) < len(src) {
+ newDest = make([]ScopeSpans, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeSpans(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyScopeSpans(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyScopeSpansPtrSlice(dest, src []*ScopeSpans) []*ScopeSpans {
+ var newDest []*ScopeSpans
+ if cap(dest) < len(src) {
+ newDest = make([]*ScopeSpans, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeSpans()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteScopeSpans(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewScopeSpans()
+ }
+ }
+ for i := range src {
+ CopyScopeSpans(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ScopeSpans) Reset() {
+ *orig = ScopeSpans{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ScopeSpans) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ dest.WriteObjectField("scope")
+ orig.Scope.MarshalJSON(dest)
+ if len(orig.Spans) > 0 {
+ dest.WriteObjectField("spans")
+ dest.WriteArrayStart()
+ orig.Spans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Spans); i++ {
+ dest.WriteMore()
+ orig.Spans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.SchemaUrl != "" {
+ dest.WriteObjectField("schemaUrl")
+ dest.WriteString(orig.SchemaUrl)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ScopeSpans) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "scope":
+
+ orig.Scope.UnmarshalJSON(iter)
+ case "spans":
+ for iter.ReadArray() {
+ orig.Spans = append(orig.Spans, NewSpan())
+ orig.Spans[len(orig.Spans)-1].UnmarshalJSON(iter)
+ }
+
+ case "schemaUrl", "schema_url":
+ orig.SchemaUrl = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ScopeSpans) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.Scope.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ for i := range orig.Spans {
+ l = orig.Spans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *ScopeSpans) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.Scope.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ for i := len(orig.Spans) - 1; i >= 0; i-- {
+ l = orig.Spans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = len(orig.SchemaUrl)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.SchemaUrl)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *ScopeSpans) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Scope.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Spans = append(orig.Spans, NewSpan())
+ err = orig.Spans[len(orig.Spans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.SchemaUrl = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestScopeSpans() *ScopeSpans {
+ orig := NewScopeSpans()
+ orig.Scope = *GenTestInstrumentationScope()
+ orig.Spans = []*Span{{}, GenTestSpan()}
+ orig.SchemaUrl = "test_schemaurl"
+ return orig
+}
+
+func GenTestScopeSpansPtrSlice() []*ScopeSpans {
+ orig := make([]*ScopeSpans, 5)
+ orig[0] = NewScopeSpans()
+ orig[1] = GenTestScopeSpans()
+ orig[2] = NewScopeSpans()
+ orig[3] = GenTestScopeSpans()
+ orig[4] = NewScopeSpans()
+ return orig
+}
+
+func GenTestScopeSpansSlice() []ScopeSpans {
+ orig := make([]ScopeSpans, 5)
+ orig[1] = *GenTestScopeSpans()
+ orig[3] = *GenTestScopeSpans()
+ return orig
+}
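
Field numbers 16 and above no longer fit a one-byte key, so their tags are emitted as multi-byte varints: in the Span marshaler below, Flags (field 16, fixed32) writes 0x85 then 0x01 going backward, and the ResourceSpans marshaler earlier writes 0xc2 then 0x3e for DeprecatedScopeSpans (field 1000, LEN). A small sketch that reproduces both constants:

package main

import "fmt"

// varintTag returns the wire-format key bytes for a field, in the
// order they appear in the encoded buffer.
func varintTag(field, wire uint64) []byte {
	v := field<<3 | wire
	var out []byte
	for v >= 0x80 {
		out = append(out, byte(v)|0x80)
		v >>= 7
	}
	return append(out, byte(v))
}

func main() {
	fmt.Printf("%% x\n", varintTag(16, 5))   // 85 01 — Span.Flags
	fmt.Printf("%% x\n", varintTag(1000, 2)) // c2 3e — DeprecatedScopeSpans
}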
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go
new file mode 100644
index 00000000000..7d0ef5a6042
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_span.go
@@ -0,0 +1,773 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Span represents a single operation within a trace.
+// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
+type Span struct {
+ TraceId TraceID
+ SpanId SpanID
+ TraceState string
+ ParentSpanId SpanID
+ Flags uint32
+ Name string
+ Kind SpanKind
+ StartTimeUnixNano uint64
+ EndTimeUnixNano uint64
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Events []*SpanEvent
+ DroppedEventsCount uint32
+ Links []*SpanLink
+ DroppedLinksCount uint32
+ Status Status
+}
+
+var (
+ protoPoolSpan = sync.Pool{
+ New: func() any {
+ return &Span{}
+ },
+ }
+)
+
+func NewSpan() *Span {
+ if !UseProtoPooling.IsEnabled() {
+ return &Span{}
+ }
+ return protoPoolSpan.Get().(*Span)
+}
+
+func DeleteSpan(orig *Span, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+ DeleteSpanID(&orig.ParentSpanId, false)
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ for i := range orig.Events {
+ DeleteSpanEvent(orig.Events[i], true)
+ }
+ for i := range orig.Links {
+ DeleteSpanLink(orig.Links[i], true)
+ }
+ DeleteStatus(&orig.Status, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolSpan.Put(orig)
+ }
+}
+
+func CopySpan(dest, src *Span) *Span {
+ // If copying to the same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpan()
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ dest.TraceState = src.TraceState
+
+ CopySpanID(&dest.ParentSpanId, &src.ParentSpanId)
+
+ dest.Flags = src.Flags
+
+ dest.Name = src.Name
+
+ dest.Kind = src.Kind
+
+ dest.StartTimeUnixNano = src.StartTimeUnixNano
+
+ dest.EndTimeUnixNano = src.EndTimeUnixNano
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.Events = CopySpanEventPtrSlice(dest.Events, src.Events)
+
+ dest.DroppedEventsCount = src.DroppedEventsCount
+
+ dest.Links = CopySpanLinkPtrSlice(dest.Links, src.Links)
+
+ dest.DroppedLinksCount = src.DroppedLinksCount
+
+ CopyStatus(&dest.Status, &src.Status)
+
+ return dest
+}
+
+func CopySpanSlice(dest, src []Span) []Span {
+ var newDest []Span
+ if cap(dest) < len(src) {
+ newDest = make([]Span, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpan(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpan(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanPtrSlice(dest, src []*Span) []*Span {
+ var newDest []*Span
+ if cap(dest) < len(src) {
+ newDest = make([]*Span, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpan()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpan(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpan()
+ }
+ }
+ for i := range src {
+ CopySpan(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Span) Reset() {
+ *orig = Span{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Span) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ if orig.TraceState != "" {
+ dest.WriteObjectField("traceState")
+ dest.WriteString(orig.TraceState)
+ }
+ if !orig.ParentSpanId.IsEmpty() {
+ dest.WriteObjectField("parentSpanId")
+ orig.ParentSpanId.MarshalJSON(dest)
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+
+ if int32(orig.Kind) != 0 {
+ dest.WriteObjectField("kind")
+ dest.WriteInt32(int32(orig.Kind))
+ }
+ if orig.StartTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("startTimeUnixNano")
+ dest.WriteUint64(orig.StartTimeUnixNano)
+ }
+ if orig.EndTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("endTimeUnixNano")
+ dest.WriteUint64(orig.EndTimeUnixNano)
+ }
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if len(orig.Events) > 0 {
+ dest.WriteObjectField("events")
+ dest.WriteArrayStart()
+ orig.Events[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Events); i++ {
+ dest.WriteMore()
+ orig.Events[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedEventsCount != uint32(0) {
+ dest.WriteObjectField("droppedEventsCount")
+ dest.WriteUint32(orig.DroppedEventsCount)
+ }
+ if len(orig.Links) > 0 {
+ dest.WriteObjectField("links")
+ dest.WriteArrayStart()
+ orig.Links[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Links); i++ {
+ dest.WriteMore()
+ orig.Links[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedLinksCount != uint32(0) {
+ dest.WriteObjectField("droppedLinksCount")
+ dest.WriteUint32(orig.DroppedLinksCount)
+ }
+ dest.WriteObjectField("status")
+ orig.Status.MarshalJSON(dest)
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Span) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ case "traceState", "trace_state":
+ orig.TraceState = iter.ReadString()
+ case "parentSpanId", "parent_span_id":
+
+ orig.ParentSpanId.UnmarshalJSON(iter)
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ case "name":
+ orig.Name = iter.ReadString()
+ case "kind":
+ orig.Kind = SpanKind(iter.ReadEnumValue(SpanKind_value))
+ case "startTimeUnixNano", "start_time_unix_nano":
+ orig.StartTimeUnixNano = iter.ReadUint64()
+ case "endTimeUnixNano", "end_time_unix_nano":
+ orig.EndTimeUnixNano = iter.ReadUint64()
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "events":
+ for iter.ReadArray() {
+ orig.Events = append(orig.Events, NewSpanEvent())
+ orig.Events[len(orig.Events)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedEventsCount", "dropped_events_count":
+ orig.DroppedEventsCount = iter.ReadUint32()
+ case "links":
+ for iter.ReadArray() {
+ orig.Links = append(orig.Links, NewSpanLink())
+ orig.Links[len(orig.Links)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedLinksCount", "dropped_links_count":
+ orig.DroppedLinksCount = iter.ReadUint32()
+ case "status":
+
+ orig.Status.UnmarshalJSON(iter)
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Span) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = len(orig.TraceState)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.ParentSpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.Flags != 0 {
+ n += 6
+ }
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Kind != 0 {
+ n += 1 + proto.Sov(uint64(orig.Kind))
+ }
+ if orig.StartTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.EndTimeUnixNano != 0 {
+ n += 9
+ }
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ for i := range orig.Events {
+ l = orig.Events[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedEventsCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedEventsCount))
+ }
+ for i := range orig.Links {
+ l = orig.Links[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedLinksCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedLinksCount))
+ }
+ l = orig.Status.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ return n
+}
+
+func (orig *Span) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ l = len(orig.TraceState)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.TraceState)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ l = orig.ParentSpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+
+ if orig.Flags != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
+ pos--
+ buf[pos] = 0x1
+ pos--
+ buf[pos] = 0x85
+ }
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x2a
+ }
+ if orig.Kind != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Kind))
+ pos--
+ buf[pos] = 0x30
+ }
+ if orig.StartTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
+ pos--
+ buf[pos] = 0x39
+ }
+ if orig.EndTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.EndTimeUnixNano))
+ pos--
+ buf[pos] = 0x41
+ }
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x4a
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x50
+ }
+ for i := len(orig.Events) - 1; i >= 0; i-- {
+ l = orig.Events[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x5a
+ }
+ if orig.DroppedEventsCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedEventsCount))
+ pos--
+ buf[pos] = 0x60
+ }
+ for i := len(orig.Links) - 1; i >= 0; i-- {
+ l = orig.Links[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x6a
+ }
+ if orig.DroppedLinksCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedLinksCount))
+ pos--
+ buf[pos] = 0x70
+ }
+ l = orig.Status.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x7a
+
+ return len(buf) - pos
+}
+
+func (orig *Span) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next tag to get the field number and wire type.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.TraceState = string(buf[startPos:pos])
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.ParentSpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 16:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+
+ case 5:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 6:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Kind = SpanKind(num)
+
+ case 7:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartTimeUnixNano = uint64(num)
+
+ case 8:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.EndTimeUnixNano = uint64(num)
+
+ case 9:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 10:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 11:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Events = append(orig.Events, NewSpanEvent())
+ err = orig.Events[len(orig.Events)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 12:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedEventsCount = uint32(num)
+
+ case 13:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Links = append(orig.Links, NewSpanLink())
+ err = orig.Links[len(orig.Links)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 14:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedLinksCount = uint32(num)
+
+ case 15:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.Status.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSpan() *Span {
+ orig := NewSpan()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.TraceState = "test_tracestate"
+ orig.ParentSpanId = *GenTestSpanID()
+ orig.Flags = uint32(13)
+ orig.Name = "test_name"
+ orig.Kind = SpanKind(13)
+ orig.StartTimeUnixNano = uint64(13)
+ orig.EndTimeUnixNano = uint64(13)
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Events = []*SpanEvent{{}, GenTestSpanEvent()}
+ orig.DroppedEventsCount = uint32(13)
+ orig.Links = []*SpanLink{{}, GenTestSpanLink()}
+ orig.DroppedLinksCount = uint32(13)
+ orig.Status = *GenTestStatus()
+ return orig
+}
+
+func GenTestSpanPtrSlice() []*Span {
+ orig := make([]*Span, 5)
+ orig[0] = NewSpan()
+ orig[1] = GenTestSpan()
+ orig[2] = NewSpan()
+ orig[3] = GenTestSpan()
+ orig[4] = NewSpan()
+ return orig
+}
+
+func GenTestSpanSlice() []Span {
+ orig := make([]Span, 5)
+ orig[1] = *GenTestSpan()
+ orig[3] = *GenTestSpan()
+ return orig
+}
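The generated MarshalProto methods write each message back-to-front: the field payload goes in first (nearest the end of the buffer), then its length as a varint, then a single tag byte computed as (fieldNumber << 3) | wireType — the 0x7a above is field 15 (Status) with wire type 2 (LEN). Below is a minimal standalone sketch of that technique using only the standard library; encodeVarintBackward is my stand-in for the internal proto.EncodeVarint, not the actual pdata helper.

```go
package main

import "fmt"

// encodeVarintBackward writes v as a protobuf varint ending just before pos
// and returns the new, smaller position (stand-in for proto.EncodeVarint).
func encodeVarintBackward(buf []byte, pos int, v uint64) int {
	n := 1 // number of varint bytes needed
	for x := v; x >= 0x80; x >>= 7 {
		n++
	}
	pos -= n
	for i := 0; i < n; i++ {
		b := byte(v & 0x7f)
		v >>= 7
		if i != n-1 {
			b |= 0x80 // continuation bit on all but the last byte
		}
		buf[pos+i] = b
	}
	return pos
}

func main() {
	// Tag byte for field 15, wire type 2 (LEN): the 0x7a written before Status.
	fmt.Printf("tag: 0x%x\n", (15<<3)|2) // tag: 0x7a

	// Frame a 3-byte payload back-to-front: payload, length varint, tag byte.
	buf := make([]byte, 16)
	pos := len(buf)
	pos -= 3
	copy(buf[pos:], "abc")
	pos = encodeVarintBackward(buf, pos, 3)
	pos--
	buf[pos] = 0x7a
	fmt.Printf("bytes: % x\n", buf[pos:]) // bytes: 7a 03 61 62 63
}
```

Marshaling in reverse means each nested message's length is already known (it was just written) by the time its length prefix and tag are emitted, so no per-field size bookkeeping is needed during the write itself.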
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go
new file mode 100644
index 00000000000..7d78787004d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spancontext.go
@@ -0,0 +1,367 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type SpanContext struct {
+ TraceID TraceID
+ SpanID SpanID
+ TraceFlags uint32
+ TraceState string
+ Remote bool
+}
+
+var (
+ protoPoolSpanContext = sync.Pool{
+ New: func() any {
+ return &SpanContext{}
+ },
+ }
+)
+
+func NewSpanContext() *SpanContext {
+ if !UseProtoPooling.IsEnabled() {
+ return &SpanContext{}
+ }
+ return protoPoolSpanContext.Get().(*SpanContext)
+}
+
+func DeleteSpanContext(orig *SpanContext, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceID, false)
+ DeleteSpanID(&orig.SpanID, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolSpanContext.Put(orig)
+ }
+}
+
+func CopySpanContext(dest, src *SpanContext) *SpanContext {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanContext()
+ }
+ CopyTraceID(&dest.TraceID, &src.TraceID)
+
+ CopySpanID(&dest.SpanID, &src.SpanID)
+
+ dest.TraceFlags = src.TraceFlags
+
+ dest.TraceState = src.TraceState
+
+ dest.Remote = src.Remote
+
+ return dest
+}
+
+func CopySpanContextSlice(dest, src []SpanContext) []SpanContext {
+ var newDest []SpanContext
+ if cap(dest) < len(src) {
+ newDest = make([]SpanContext, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanContext(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanContext(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanContextPtrSlice(dest, src []*SpanContext) []*SpanContext {
+ var newDest []*SpanContext
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanContext, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanContext()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanContext(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanContext()
+ }
+ }
+ for i := range src {
+ CopySpanContext(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SpanContext) Reset() {
+ *orig = SpanContext{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanContext) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceID.IsEmpty() {
+ dest.WriteObjectField("traceID")
+ orig.TraceID.MarshalJSON(dest)
+ }
+ if !orig.SpanID.IsEmpty() {
+ dest.WriteObjectField("spanID")
+ orig.SpanID.MarshalJSON(dest)
+ }
+ if orig.TraceFlags != uint32(0) {
+ dest.WriteObjectField("traceFlags")
+ dest.WriteUint32(orig.TraceFlags)
+ }
+ if orig.TraceState != "" {
+ dest.WriteObjectField("traceState")
+ dest.WriteString(orig.TraceState)
+ }
+ if orig.Remote {
+ dest.WriteObjectField("remote")
+ dest.WriteBool(orig.Remote)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanContext) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceID", "trace_id":
+
+ orig.TraceID.UnmarshalJSON(iter)
+ case "spanID", "span_id":
+
+ orig.SpanID.UnmarshalJSON(iter)
+ case "traceFlags", "trace_flags":
+ orig.TraceFlags = iter.ReadUint32()
+ case "traceState", "trace_state":
+ orig.TraceState = iter.ReadString()
+ case "remote":
+ orig.Remote = iter.ReadBool()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SpanContext) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceID.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanID.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.TraceFlags != 0 {
+ n += 5
+ }
+ l = len(orig.TraceState)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Remote {
+ n += 2
+ }
+ return n
+}
+
+func (orig *SpanContext) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceID.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanID.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ if orig.TraceFlags != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.TraceFlags))
+ pos--
+ buf[pos] = 0x1d
+ }
+ l = len(orig.TraceState)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.TraceState)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ if orig.Remote {
+ pos--
+ buf[pos] = 1
+ pos--
+ buf[pos] = 0x28
+ }
+ return len(buf) - pos
+}
+
+func (orig *SpanContext) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceID.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanID.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceFlags", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TraceFlags = uint32(num)
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.TraceState = string(buf[startPos:pos])
+
+ case 5:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Remote = num != 0
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSpanContext() *SpanContext {
+ orig := NewSpanContext()
+ orig.TraceID = *GenTestTraceID()
+ orig.SpanID = *GenTestSpanID()
+ orig.TraceFlags = uint32(13)
+ orig.TraceState = "test_tracestate"
+ orig.Remote = true
+ return orig
+}
+
+func GenTestSpanContextPtrSlice() []*SpanContext {
+ orig := make([]*SpanContext, 5)
+ orig[0] = NewSpanContext()
+ orig[1] = GenTestSpanContext()
+ orig[2] = NewSpanContext()
+ orig[3] = GenTestSpanContext()
+ orig[4] = NewSpanContext()
+ return orig
+}
+
+func GenTestSpanContextSlice() []SpanContext {
+ orig := make([]SpanContext, 5)
+ orig[1] = *GenTestSpanContext()
+ orig[3] = *GenTestSpanContext()
+ return orig
+}
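NewSpanContext and DeleteSpanContext above pair a sync.Pool with a runtime feature gate: when pooling is disabled the constructor falls back to a plain allocation, and Delete only returns an object to the pool when nullable is true, i.e. when the object owns its own allocation rather than being an element of a value slice. A minimal standalone sketch of the same pattern, with an ordinary bool standing in for the UseProtoPooling gate:

```go
package main

import (
	"fmt"
	"sync"
)

// poolingEnabled stands in for the pdata UseProtoPooling feature gate.
var poolingEnabled = true

type node struct{ value int }

var nodePool = sync.Pool{New: func() any { return &node{} }}

func newNode() *node {
	if !poolingEnabled {
		return &node{}
	}
	return nodePool.Get().(*node)
}

// deleteNode resets n; nullable reports whether n owns its own allocation
// (a pointer element) and may therefore be returned to the pool. Elements
// embedded by value in a slice must never be Put individually.
func deleteNode(n *node, nullable bool) {
	if n == nil {
		return
	}
	*n = node{} // zero it so pooled reuse never leaks old state
	if poolingEnabled && nullable {
		nodePool.Put(n)
	}
}

func main() {
	n := newNode()
	n.value = 42
	deleteNode(n, true)
	fmt.Println(newNode().value) // 0, whether pooled or freshly allocated
}
```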
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go
new file mode 100644
index 00000000000..14c0fe1627c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanevent.go
@@ -0,0 +1,342 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of a user-supplied
+// text description and key-value pairs. See OTLP for the event definition.
+type SpanEvent struct {
+ TimeUnixNano uint64
+ Name string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+}
+
+var (
+ protoPoolSpanEvent = sync.Pool{
+ New: func() any {
+ return &SpanEvent{}
+ },
+ }
+)
+
+func NewSpanEvent() *SpanEvent {
+ if !UseProtoPooling.IsEnabled() {
+ return &SpanEvent{}
+ }
+ return protoPoolSpanEvent.Get().(*SpanEvent)
+}
+
+func DeleteSpanEvent(orig *SpanEvent, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSpanEvent.Put(orig)
+ }
+}
+
+func CopySpanEvent(dest, src *SpanEvent) *SpanEvent {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanEvent()
+ }
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.Name = src.Name
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ return dest
+}
+
+func CopySpanEventSlice(dest, src []SpanEvent) []SpanEvent {
+ var newDest []SpanEvent
+ if cap(dest) < len(src) {
+ newDest = make([]SpanEvent, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanEvent(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanEvent(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanEventPtrSlice(dest, src []*SpanEvent) []*SpanEvent {
+ var newDest []*SpanEvent
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanEvent, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanEvent()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanEvent(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanEvent()
+ }
+ }
+ for i := range src {
+ CopySpanEvent(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SpanEvent) Reset() {
+ *orig = SpanEvent{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanEvent) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanEvent) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "name":
+ orig.Name = iter.ReadString()
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SpanEvent) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ return n
+}
+
+func (orig *SpanEvent) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x9
+ }
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x20
+ }
+ return len(buf) - pos
+}
+
+func (orig *SpanEvent) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 4:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSpanEvent() *SpanEvent {
+ orig := NewSpanEvent()
+ orig.TimeUnixNano = uint64(13)
+ orig.Name = "test_name"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ return orig
+}
+
+func GenTestSpanEventPtrSlice() []*SpanEvent {
+ orig := make([]*SpanEvent, 5)
+ orig[0] = NewSpanEvent()
+ orig[1] = GenTestSpanEvent()
+ orig[2] = NewSpanEvent()
+ orig[3] = GenTestSpanEvent()
+ orig[4] = NewSpanEvent()
+ return orig
+}
+
+func GenTestSpanEventSlice() []SpanEvent {
+ orig := make([]SpanEvent, 5)
+ orig[1] = *GenTestSpanEvent()
+ orig[3] = *GenTestSpanEvent()
+ return orig
+}
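Each SizeProto above mirrors its marshaler byte for byte: a length-delimited field of l payload bytes costs 1 (tag) + Sov(l) (length varint) + l, a fixed64 field such as TimeUnixNano costs a flat 9, and a varint field costs 1 + Sov(value). A standalone re-derivation of Sov, the varint-width function those counts depend on (the internal implementation may differ):

```go
package main

import "fmt"

// sov returns the width in bytes of the protobuf varint encoding of v:
// one byte per started group of 7 bits.
func sov(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	// SpanEvent.Name = "test_name" (field 2, LEN): tag + length varint + payload.
	l := len("test_name")
	fmt.Println(1+sov(uint64(l))+l, "bytes") // 11 bytes

	// SpanEvent.DroppedAttributesCount = 13 (field 4, VARINT): tag + value.
	fmt.Println(1+sov(13), "bytes") // 2 bytes

	// TimeUnixNano (field 1, fixed64) would be a flat 1 + 8 = 9 bytes.
}
```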
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go
new file mode 100644
index 00000000000..07c42640bcf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_spanlink.go
@@ -0,0 +1,415 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SpanLink is a pointer from the current span to another span in the same trace or in a
+// different trace.
+// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
+type SpanLink struct {
+ TraceId TraceID
+ SpanId SpanID
+ TraceState string
+ Attributes []KeyValue
+ DroppedAttributesCount uint32
+ Flags uint32
+}
+
+var (
+ protoPoolSpanLink = sync.Pool{
+ New: func() any {
+ return &SpanLink{}
+ },
+ }
+)
+
+func NewSpanLink() *SpanLink {
+ if !UseProtoPooling.IsEnabled() {
+ return &SpanLink{}
+ }
+ return protoPoolSpanLink.Get().(*SpanLink)
+}
+
+func DeleteSpanLink(orig *SpanLink, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteTraceID(&orig.TraceId, false)
+ DeleteSpanID(&orig.SpanId, false)
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSpanLink.Put(orig)
+ }
+}
+
+func CopySpanLink(dest, src *SpanLink) *SpanLink {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSpanLink()
+ }
+ CopyTraceID(&dest.TraceId, &src.TraceId)
+
+ CopySpanID(&dest.SpanId, &src.SpanId)
+
+ dest.TraceState = src.TraceState
+
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.DroppedAttributesCount = src.DroppedAttributesCount
+
+ dest.Flags = src.Flags
+
+ return dest
+}
+
+func CopySpanLinkSlice(dest, src []SpanLink) []SpanLink {
+ var newDest []SpanLink
+ if cap(dest) < len(src) {
+ newDest = make([]SpanLink, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanLink(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySpanLink(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySpanLinkPtrSlice(dest, src []*SpanLink) []*SpanLink {
+ var newDest []*SpanLink
+ if cap(dest) < len(src) {
+ newDest = make([]*SpanLink, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanLink()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSpanLink(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSpanLink()
+ }
+ }
+ for i := range src {
+ CopySpanLink(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SpanLink) Reset() {
+ *orig = SpanLink{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SpanLink) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if !orig.TraceId.IsEmpty() {
+ dest.WriteObjectField("traceId")
+ orig.TraceId.MarshalJSON(dest)
+ }
+ if !orig.SpanId.IsEmpty() {
+ dest.WriteObjectField("spanId")
+ orig.SpanId.MarshalJSON(dest)
+ }
+ if orig.TraceState != "" {
+ dest.WriteObjectField("traceState")
+ dest.WriteString(orig.TraceState)
+ }
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.DroppedAttributesCount != uint32(0) {
+ dest.WriteObjectField("droppedAttributesCount")
+ dest.WriteUint32(orig.DroppedAttributesCount)
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SpanLink) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "traceId", "trace_id":
+
+ orig.TraceId.UnmarshalJSON(iter)
+ case "spanId", "span_id":
+
+ orig.SpanId.UnmarshalJSON(iter)
+ case "traceState", "trace_state":
+ orig.TraceState = iter.ReadString()
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "droppedAttributesCount", "dropped_attributes_count":
+ orig.DroppedAttributesCount = iter.ReadUint32()
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SpanLink) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = orig.TraceId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = orig.SpanId.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ l = len(orig.TraceState)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.DroppedAttributesCount != 0 {
+ n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
+ }
+ if orig.Flags != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *SpanLink) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = orig.TraceId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+
+ l = orig.SpanId.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+
+ l = len(orig.TraceState)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.TraceState)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x22
+ }
+ if orig.DroppedAttributesCount != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
+ pos--
+ buf[pos] = 0x28
+ }
+ if orig.Flags != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
+ pos--
+ buf[pos] = 0x35
+ }
+ return len(buf) - pos
+}
+
+func (orig *SpanLink) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.TraceState = string(buf[startPos:pos])
+
+ case 4:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 5:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.DroppedAttributesCount = uint32(num)
+
+ case 6:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSpanLink() *SpanLink {
+ orig := NewSpanLink()
+ orig.TraceId = *GenTestTraceID()
+ orig.SpanId = *GenTestSpanID()
+ orig.TraceState = "test_tracestate"
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.DroppedAttributesCount = uint32(13)
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestSpanLinkPtrSlice() []*SpanLink {
+ orig := make([]*SpanLink, 5)
+ orig[0] = NewSpanLink()
+ orig[1] = GenTestSpanLink()
+ orig[2] = NewSpanLink()
+ orig[3] = GenTestSpanLink()
+ orig[4] = NewSpanLink()
+ return orig
+}
+
+func GenTestSpanLinkSlice() []SpanLink {
+ orig := make([]SpanLink, 5)
+ orig[1] = *GenTestSpanLink()
+ orig[3] = *GenTestSpanLink()
+ return orig
+}
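The Copy*PtrSlice helpers reuse as much of the destination as possible: if capacity suffices they re-slice in place and release only the surplus elements; otherwise they allocate a new backing array but carry the old pointers over so their allocations are reused. A generic standalone sketch of that strategy — the generics and the pool-free new(T) are my simplifications; the generated code monomorphizes per type and routes allocation through the pools shown above:

```go
package main

import "fmt"

// copyPtrSlice copies src into dest, reusing dest's backing array and
// existing pointers where possible, as the generated Copy*PtrSlice helpers do.
func copyPtrSlice[T any](dest, src []*T, clone func(dst, s *T)) []*T {
	var out []*T
	if cap(dest) < len(src) {
		out = make([]*T, len(src))
		copy(out, dest) // keep old pointers to reuse their allocations
		for i := len(dest); i < len(src); i++ {
			out[i] = new(T)
		}
	} else {
		out = dest[:len(src)]
		for i := len(src); i < len(dest); i++ {
			dest[i] = nil // drop surplus so GC can reclaim it
		}
		for i := len(dest); i < len(src); i++ {
			out[i] = new(T) // grow within capacity: len(dest) < len(src) <= cap
		}
	}
	for i := range src {
		clone(out[i], src[i])
	}
	return out
}

func main() {
	type kv struct{ k string }
	src := []*kv{{"a"}, {"b"}}
	dest := make([]*kv, 0, 4)
	dest = copyPtrSlice(dest, src, func(d, s *kv) { *d = *s })
	fmt.Println(dest[0].k, dest[1].k) // a b
}
```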
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go
new file mode 100644
index 00000000000..a97599bfc65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_stack.go
@@ -0,0 +1,261 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Stack represents a stack trace as a list of locations.
+type Stack struct {
+ LocationIndices []int32
+}
+
+var (
+ protoPoolStack = sync.Pool{
+ New: func() any {
+ return &Stack{}
+ },
+ }
+)
+
+func NewStack() *Stack {
+ if !UseProtoPooling.IsEnabled() {
+ return &Stack{}
+ }
+ return protoPoolStack.Get().(*Stack)
+}
+
+func DeleteStack(orig *Stack, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolStack.Put(orig)
+ }
+}
+
+func CopyStack(dest, src *Stack) *Stack {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewStack()
+ }
+ dest.LocationIndices = append(dest.LocationIndices[:0], src.LocationIndices...)
+
+ return dest
+}
+
+func CopyStackSlice(dest, src []Stack) []Stack {
+ var newDest []Stack
+ if cap(dest) < len(src) {
+ newDest = make([]Stack, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStack(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyStack(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyStackPtrSlice(dest, src []*Stack) []*Stack {
+ var newDest []*Stack
+ if cap(dest) < len(src) {
+ newDest = make([]*Stack, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStack()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStack(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStack()
+ }
+ }
+ for i := range src {
+ CopyStack(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Stack) Reset() {
+ *orig = Stack{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Stack) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.LocationIndices) > 0 {
+ dest.WriteObjectField("locationIndices")
+ dest.WriteArrayStart()
+ dest.WriteInt32(orig.LocationIndices[0])
+ for i := 1; i < len(orig.LocationIndices); i++ {
+ dest.WriteMore()
+ dest.WriteInt32(orig.LocationIndices[i])
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Stack) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "locationIndices", "location_indices":
+ for iter.ReadArray() {
+ orig.LocationIndices = append(orig.LocationIndices, iter.ReadInt32())
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Stack) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if len(orig.LocationIndices) > 0 {
+ l = 0
+ for _, e := range orig.LocationIndices {
+ l += proto.Sov(uint64(e))
+ }
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Stack) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.LocationIndices)
+ if l > 0 {
+ endPos := pos
+ for i := l - 1; i >= 0; i-- {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationIndices[i]))
+ }
+ pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Stack) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+ case 1:
+ switch wireType {
+ case proto.WireTypeLen:
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ var num uint64
+ for startPos < pos {
+ num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
+ if err != nil {
+ return err
+ }
+ orig.LocationIndices = append(orig.LocationIndices, int32(num))
+ }
+ if startPos != pos {
+ return fmt.Errorf("proto: invalid field len = %d for field LocationIndices", pos-startPos)
+ }
+ case proto.WireTypeVarint:
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+ orig.LocationIndices = append(orig.LocationIndices, int32(num))
+ default:
+ return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestStack() *Stack {
+ orig := NewStack()
+ orig.LocationIndices = []int32{int32(0), int32(13)}
+ return orig
+}
+
+func GenTestStackPtrSlice() []*Stack {
+ orig := make([]*Stack, 5)
+ orig[0] = NewStack()
+ orig[1] = GenTestStack()
+ orig[2] = NewStack()
+ orig[3] = GenTestStack()
+ orig[4] = NewStack()
+ return orig
+}
+
+func GenTestStackSlice() []Stack {
+ orig := make([]Stack, 5)
+ orig[1] = *GenTestStack()
+ orig[3] = *GenTestStack()
+ return orig
+}
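Stack.UnmarshalProto accepts LocationIndices in both encodings proto3 permits for repeated varints: packed (one LEN field holding a run of varints, which is what its own marshaler emits) and unpacked (one VARINT field per element). A standalone sketch of the packed branch, using encoding/binary in place of the internal proto helpers:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodePacked reads a length-delimited run of varints, the packed encoding
// Stack.UnmarshalProto handles under proto.WireTypeLen.
func decodePacked(buf []byte) ([]int32, error) {
	length, n := binary.Uvarint(buf) // leading length prefix
	if n <= 0 {
		return nil, fmt.Errorf("bad length prefix")
	}
	pos, end := n, n+int(length)
	if end > len(buf) {
		return nil, fmt.Errorf("truncated payload")
	}
	var out []int32
	for pos < end {
		v, m := binary.Uvarint(buf[pos:end])
		if m <= 0 {
			return nil, fmt.Errorf("bad varint at %d", pos)
		}
		out = append(out, int32(v))
		pos += m
	}
	return out, nil
}

func main() {
	// Payload for [0, 13]: length 2, then the two single-byte varints.
	vals, err := decodePacked([]byte{0x02, 0x00, 0x0d})
	fmt.Println(vals, err) // [0 13] <nil>
}
```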
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go
new file mode 100644
index 00000000000..c2f2d37754f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_status.go
@@ -0,0 +1,260 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Status is an optional final status for this span. Semantically, when Status is not
+// set, the span ended without errors and Status.Ok (code = 0) should be assumed.
+type Status struct {
+ Message string
+ Code StatusCode
+}
+
+var (
+ protoPoolStatus = sync.Pool{
+ New: func() any {
+ return &Status{}
+ },
+ }
+)
+
+func NewStatus() *Status {
+ if !UseProtoPooling.IsEnabled() {
+ return &Status{}
+ }
+ return protoPoolStatus.Get().(*Status)
+}
+
+func DeleteStatus(orig *Status, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolStatus.Put(orig)
+ }
+}
+
+func CopyStatus(dest, src *Status) *Status {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewStatus()
+ }
+ dest.Message = src.Message
+
+ dest.Code = src.Code
+
+ return dest
+}
+
+func CopyStatusSlice(dest, src []Status) []Status {
+ var newDest []Status
+ if cap(dest) < len(src) {
+ newDest = make([]Status, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStatus(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyStatus(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyStatusPtrSlice(dest, src []*Status) []*Status {
+ var newDest []*Status
+ if cap(dest) < len(src) {
+ newDest = make([]*Status, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStatus()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteStatus(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewStatus()
+ }
+ }
+ for i := range src {
+ CopyStatus(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Status) Reset() {
+ *orig = Status{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Status) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Message != "" {
+ dest.WriteObjectField("message")
+ dest.WriteString(orig.Message)
+ }
+
+ if int32(orig.Code) != 0 {
+ dest.WriteObjectField("code")
+ dest.WriteInt32(int32(orig.Code))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Status) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "message":
+ orig.Message = iter.ReadString()
+ case "code":
+ orig.Code = StatusCode(iter.ReadEnumValue(StatusCode_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Status) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Message)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Code != 0 {
+ n += 1 + proto.Sov(uint64(orig.Code))
+ }
+ return n
+}
+
+func (orig *Status) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Message)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Message)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ if orig.Code != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Code))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *Status) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Message = string(buf[startPos:pos])
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Code = StatusCode(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestStatus() *Status {
+ orig := NewStatus()
+ orig.Message = "test_message"
+ orig.Code = StatusCode(13)
+ return orig
+}
+
+func GenTestStatusPtrSlice() []*Status {
+ orig := make([]*Status, 5)
+ orig[0] = NewStatus()
+ orig[1] = GenTestStatus()
+ orig[2] = NewStatus()
+ orig[3] = GenTestStatus()
+ orig[4] = NewStatus()
+ return orig
+}
+
+func GenTestStatusSlice() []Status {
+ orig := make([]Status, 5)
+ orig[1] = *GenTestStatus()
+ orig[3] = *GenTestStatus()
+ return orig
+}
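Every UnmarshalProto loop is driven by a tag read that splits the leading varint into a field number (high bits) and a wire type (low three bits). For Status, the marshaler's tag bytes 0x12 and 0x18 decode to field 2/LEN and field 3/VARINT; field 1 is reserved in the OTLP Status message (its deprecated code field was removed), which is presumably why the switch above has no case 1. A standalone sketch of that tag decomposition, with consumeTag as my stand-in for the internal proto.ConsumeTag:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// consumeTag splits a protobuf tag varint into its field number and wire
// type, the decomposition the generated UnmarshalProto loops rely on.
func consumeTag(buf []byte, pos int) (fieldNum int32, wireType int, next int, err error) {
	v, n := binary.Uvarint(buf[pos:])
	if n <= 0 {
		return 0, 0, 0, fmt.Errorf("bad tag at %d", pos)
	}
	return int32(v >> 3), int(v & 0x7), pos + n, nil
}

func main() {
	buf := []byte{0x12, 0x18} // the two tag bytes Status.MarshalProto emits
	for pos := 0; pos < len(buf); {
		f, w, next, err := consumeTag(buf, pos)
		if err != nil {
			panic(err)
		}
		fmt.Printf("field=%d wireType=%d\n", f, w)
		pos = next
	}
	// Output: field=2 wireType=2, then field=3 wireType=0.
}
```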
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go
new file mode 100644
index 00000000000..67be5d00f04
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_sum.go
@@ -0,0 +1,311 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
+type Sum struct {
+ DataPoints []*NumberDataPoint
+ AggregationTemporality AggregationTemporality
+ IsMonotonic bool
+}
+
+var (
+ protoPoolSum = sync.Pool{
+ New: func() any {
+ return &Sum{}
+ },
+ }
+)
+
+func NewSum() *Sum {
+ if !UseProtoPooling.IsEnabled() {
+ return &Sum{}
+ }
+ return protoPoolSum.Get().(*Sum)
+}
+
+func DeleteSum(orig *Sum, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteNumberDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSum.Put(orig)
+ }
+}
+
+func CopySum(dest, src *Sum) *Sum {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSum()
+ }
+ dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ dest.IsMonotonic = src.IsMonotonic
+
+ return dest
+}
+
+func CopySumSlice(dest, src []Sum) []Sum {
+ var newDest []Sum
+ if cap(dest) < len(src) {
+ newDest = make([]Sum, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSum(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySum(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySumPtrSlice(dest, src []*Sum) []*Sum {
+ var newDest []*Sum
+ if cap(dest) < len(src) {
+ newDest = make([]*Sum, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSum()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSum(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSum()
+ }
+ }
+ for i := range src {
+ CopySum(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Sum) Reset() {
+ *orig = Sum{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Sum) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ if orig.IsMonotonic {
+ dest.WriteObjectField("isMonotonic")
+ dest.WriteBool(orig.IsMonotonic)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Sum) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ case "isMonotonic", "is_monotonic":
+ orig.IsMonotonic = iter.ReadBool()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Sum) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ if orig.IsMonotonic {
+ n += 2
+ }
+ return n
+}
+
+func (orig *Sum) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x10
+ }
+ if orig.IsMonotonic {
+ pos--
+ buf[pos] = 1
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *Sum) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.IsMonotonic = num != 0
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSum() *Sum {
+ orig := NewSum()
+ orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
+ orig.AggregationTemporality = AggregationTemporality(13)
+ orig.IsMonotonic = true
+ return orig
+}
+
+func GenTestSumPtrSlice() []*Sum {
+ orig := make([]*Sum, 5)
+ orig[0] = NewSum()
+ orig[1] = GenTestSum()
+ orig[2] = NewSum()
+ orig[3] = GenTestSum()
+ orig[4] = NewSum()
+ return orig
+}
+
+func GenTestSumSlice() []Sum {
+ orig := make([]Sum, 5)
+ orig[1] = *GenTestSum()
+ orig[3] = *GenTestSum()
+ return orig
+}
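A set bool field such as IsMonotonic always costs exactly two bytes on the wire — one tag byte plus a single 0x01 value byte — which is why SizeProto adds a flat 2 for it. A tiny standalone check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// IsMonotonic = true, field 3, wire type VARINT:
	// tag (3<<3)|0 = 0x18, then the value byte 0x01.
	encoded := []byte{(3 << 3) | 0, 0x01}
	fmt.Printf("% x (%d bytes)\n", encoded, len(encoded)) // 18 01 (2 bytes)
}
```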
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go
new file mode 100644
index 00000000000..8d40bd16603
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summary.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// Summary represents a metric type whose data points summarize all reported double measurements over a time interval.
+type Summary struct {
+ DataPoints []*SummaryDataPoint
+}
+
+var (
+ protoPoolSummary = sync.Pool{
+ New: func() any {
+ return &Summary{}
+ },
+ }
+)
+
+func NewSummary() *Summary {
+ if !UseProtoPooling.IsEnabled() {
+ return &Summary{}
+ }
+ return protoPoolSummary.Get().(*Summary)
+}
+
+func DeleteSummary(orig *Summary, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.DataPoints {
+ DeleteSummaryDataPoint(orig.DataPoints[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSummary.Put(orig)
+ }
+}
+
+func CopySummary(dest, src *Summary) *Summary {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSummary()
+ }
+ dest.DataPoints = CopySummaryDataPointPtrSlice(dest.DataPoints, src.DataPoints)
+
+ return dest
+}
+
+func CopySummarySlice(dest, src []Summary) []Summary {
+ var newDest []Summary
+ if cap(dest) < len(src) {
+ newDest = make([]Summary, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummary(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummary(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySummaryPtrSlice(dest, src []*Summary) []*Summary {
+ var newDest []*Summary
+ if cap(dest) < len(src) {
+ newDest = make([]*Summary, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummary()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummary(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummary()
+ }
+ }
+ for i := range src {
+ CopySummary(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *Summary) Reset() {
+ *orig = Summary{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *Summary) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.DataPoints) > 0 {
+ dest.WriteObjectField("dataPoints")
+ dest.WriteArrayStart()
+ orig.DataPoints[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.DataPoints); i++ {
+ dest.WriteMore()
+ orig.DataPoints[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *Summary) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "dataPoints", "data_points":
+ for iter.ReadArray() {
+ orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
+ orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *Summary) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.DataPoints {
+ l = orig.DataPoints[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *Summary) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.DataPoints) - 1; i >= 0; i-- {
+ l = orig.DataPoints[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *Summary) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // If in a group parsing, move to the next tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
+ err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSummary() *Summary {
+ orig := NewSummary()
+ orig.DataPoints = []*SummaryDataPoint{{}, GenTestSummaryDataPoint()}
+ return orig
+}
+
+func GenTestSummaryPtrSlice() []*Summary {
+ orig := make([]*Summary, 5)
+ orig[0] = NewSummary()
+ orig[1] = GenTestSummary()
+ orig[2] = NewSummary()
+ orig[3] = GenTestSummary()
+ orig[4] = NewSummary()
+ return orig
+}
+
+func GenTestSummarySlice() []Summary {
+ orig := make([]Summary, 5)
+ orig[1] = *GenTestSummary()
+ orig[3] = *GenTestSummary()
+ return orig
+}
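Nested messages like Summary's data points arrive length-delimited: the loop consumes a varint length, and pos - length recovers the start of the payload so the element can be unmarshaled from the sub-slice buf[startPos:pos]. A standalone sketch of that framing, with consumeLen as my stand-in for the internal proto.ConsumeLen (returning the length and the position just past the payload, as the generated call sites imply):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// consumeLen reads a varint length prefix at pos and returns the payload
// length plus the position just past the payload.
func consumeLen(buf []byte, pos int) (int, int, error) {
	v, n := binary.Uvarint(buf[pos:])
	if n <= 0 {
		return 0, 0, fmt.Errorf("bad length at %d", pos)
	}
	end := pos + n + int(v)
	if end > len(buf) {
		return 0, 0, fmt.Errorf("truncated message")
	}
	return int(v), end, nil
}

func main() {
	// A 2-byte payload framed the way the generated code frames sub-messages.
	buf := []byte{0x02, 'h', 'i'}
	length, pos, err := consumeLen(buf, 0)
	if err != nil {
		panic(err)
	}
	startPos := pos - length // same recovery as the generated unmarshalers
	fmt.Printf("%q\n", buf[startPos:pos]) // "hi"
}
```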
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go
new file mode 100644
index 00000000000..bf30987a669
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapoint.go
@@ -0,0 +1,451 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.
+type SummaryDataPoint struct {
+ Attributes []KeyValue
+ StartTimeUnixNano uint64
+ TimeUnixNano uint64
+ Count uint64
+ Sum float64
+ QuantileValues []*SummaryDataPointValueAtQuantile
+ Flags uint32
+}
+
+var (
+ protoPoolSummaryDataPoint = sync.Pool{
+ New: func() any {
+ return &SummaryDataPoint{}
+ },
+ }
+)
+
+func NewSummaryDataPoint() *SummaryDataPoint {
+ if !UseProtoPooling.IsEnabled() {
+ return &SummaryDataPoint{}
+ }
+ return protoPoolSummaryDataPoint.Get().(*SummaryDataPoint)
+}
+
+func DeleteSummaryDataPoint(orig *SummaryDataPoint, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.Attributes {
+ DeleteKeyValue(&orig.Attributes[i], false)
+ }
+ for i := range orig.QuantileValues {
+ DeleteSummaryDataPointValueAtQuantile(orig.QuantileValues[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSummaryDataPoint.Put(orig)
+ }
+}
+
+func CopySummaryDataPoint(dest, src *SummaryDataPoint) *SummaryDataPoint {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSummaryDataPoint()
+ }
+ dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
+
+ dest.StartTimeUnixNano = src.StartTimeUnixNano
+
+ dest.TimeUnixNano = src.TimeUnixNano
+
+ dest.Count = src.Count
+
+ dest.Sum = src.Sum
+
+ dest.QuantileValues = CopySummaryDataPointValueAtQuantilePtrSlice(dest.QuantileValues, src.QuantileValues)
+
+ dest.Flags = src.Flags
+
+ return dest
+}
+
+func CopySummaryDataPointSlice(dest, src []SummaryDataPoint) []SummaryDataPoint {
+ var newDest []SummaryDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]SummaryDataPoint, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPoint(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummaryDataPoint(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySummaryDataPointPtrSlice(dest, src []*SummaryDataPoint) []*SummaryDataPoint {
+ var newDest []*SummaryDataPoint
+ if cap(dest) < len(src) {
+ newDest = make([]*SummaryDataPoint, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPoint()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPoint(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPoint()
+ }
+ }
+ for i := range src {
+ CopySummaryDataPoint(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SummaryDataPoint) Reset() {
+ *orig = SummaryDataPoint{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SummaryDataPoint) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.Attributes) > 0 {
+ dest.WriteObjectField("attributes")
+ dest.WriteArrayStart()
+ orig.Attributes[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.Attributes); i++ {
+ dest.WriteMore()
+ orig.Attributes[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.StartTimeUnixNano != uint64(0) {
+ dest.WriteObjectField("startTimeUnixNano")
+ dest.WriteUint64(orig.StartTimeUnixNano)
+ }
+ if orig.TimeUnixNano != uint64(0) {
+ dest.WriteObjectField("timeUnixNano")
+ dest.WriteUint64(orig.TimeUnixNano)
+ }
+ if orig.Count != uint64(0) {
+ dest.WriteObjectField("count")
+ dest.WriteUint64(orig.Count)
+ }
+ if orig.Sum != float64(0) {
+ dest.WriteObjectField("sum")
+ dest.WriteFloat64(orig.Sum)
+ }
+ if len(orig.QuantileValues) > 0 {
+ dest.WriteObjectField("quantileValues")
+ dest.WriteArrayStart()
+ orig.QuantileValues[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.QuantileValues); i++ {
+ dest.WriteMore()
+ orig.QuantileValues[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ if orig.Flags != uint32(0) {
+ dest.WriteObjectField("flags")
+ dest.WriteUint32(orig.Flags)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SummaryDataPoint) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "attributes":
+ for iter.ReadArray() {
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
+ }
+
+ case "startTimeUnixNano", "start_time_unix_nano":
+ orig.StartTimeUnixNano = iter.ReadUint64()
+ case "timeUnixNano", "time_unix_nano":
+ orig.TimeUnixNano = iter.ReadUint64()
+ case "count":
+ orig.Count = iter.ReadUint64()
+ case "sum":
+ orig.Sum = iter.ReadFloat64()
+ case "quantileValues", "quantile_values":
+ for iter.ReadArray() {
+ orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
+ orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalJSON(iter)
+ }
+
+ case "flags":
+ orig.Flags = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SummaryDataPoint) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.Attributes {
+ l = orig.Attributes[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.StartTimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.TimeUnixNano != 0 {
+ n += 9
+ }
+ if orig.Count != 0 {
+ n += 9
+ }
+ if orig.Sum != 0 {
+ n += 9
+ }
+ for i := range orig.QuantileValues {
+ l = orig.QuantileValues[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Flags != 0 {
+ n += 1 + proto.Sov(uint64(orig.Flags))
+ }
+ return n
+}
+
+func (orig *SummaryDataPoint) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.Attributes) - 1; i >= 0; i-- {
+ l = orig.Attributes[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x3a
+ }
+ if orig.StartTimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
+ pos--
+ buf[pos] = 0x11
+ }
+ if orig.TimeUnixNano != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
+ pos--
+ buf[pos] = 0x19
+ }
+ if orig.Count != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
+ pos--
+ buf[pos] = 0x21
+ }
+ if orig.Sum != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
+ pos--
+ buf[pos] = 0x29
+ }
+ for i := len(orig.QuantileValues) - 1; i >= 0; i-- {
+ l = orig.QuantileValues[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x32
+ }
+ if orig.Flags != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
+ pos--
+ buf[pos] = 0x40
+ }
+ return len(buf) - pos
+}
+
+func (orig *SummaryDataPoint) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 7:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Attributes = append(orig.Attributes, KeyValue{})
+ err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.StartTimeUnixNano = uint64(num)
+
+ case 3:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TimeUnixNano = uint64(num)
+
+ case 4:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Count = uint64(num)
+
+ case 5:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Sum = math.Float64frombits(num)
+
+ case 6:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
+ err = orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 8:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Flags = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSummaryDataPoint() *SummaryDataPoint {
+ orig := NewSummaryDataPoint()
+ orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
+ orig.StartTimeUnixNano = uint64(13)
+ orig.TimeUnixNano = uint64(13)
+ orig.Count = uint64(13)
+ orig.Sum = float64(3.1415926)
+ orig.QuantileValues = []*SummaryDataPointValueAtQuantile{{}, GenTestSummaryDataPointValueAtQuantile()}
+ orig.Flags = uint32(13)
+ return orig
+}
+
+func GenTestSummaryDataPointPtrSlice() []*SummaryDataPoint {
+ orig := make([]*SummaryDataPoint, 5)
+ orig[0] = NewSummaryDataPoint()
+ orig[1] = GenTestSummaryDataPoint()
+ orig[2] = NewSummaryDataPoint()
+ orig[3] = GenTestSummaryDataPoint()
+ orig[4] = NewSummaryDataPoint()
+ return orig
+}
+
+func GenTestSummaryDataPointSlice() []SummaryDataPoint {
+ orig := make([]SummaryDataPoint, 5)
+ orig[1] = *GenTestSummaryDataPoint()
+ orig[3] = *GenTestSummaryDataPoint()
+ return orig
+}
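
`MarshalProto` in these files writes fields right-to-left into a buffer pre-sized by `SizeProto`: payload first, then its varint length, then the tag byte. A self-contained sketch of one LEN-typed field encoded that way; `encodeVarint` is a local stand-in for the internal `proto.EncodeVarint`, written under the assumption that it places the varint immediately before the given position.

```go
package main

import "fmt"

// encodeVarint writes the varint encoding of v so that it ends just before
// buf[pos], returning the new (start) position; a local stand-in for the
// internal proto.EncodeVarint used by the generated marshalers.
func encodeVarint(buf []byte, pos int, v uint64) int {
	var tmp [10]byte
	n := 0
	for v >= 0x80 {
		tmp[n] = byte(v) | 0x80
		v >>= 7
		n++
	}
	tmp[n] = byte(v)
	n++
	pos -= n
	copy(buf[pos:], tmp[:n])
	return pos
}

func main() {
	payload := []byte("hello")
	buf := make([]byte, 16) // pre-sized, as a SizeProto-style pass would do
	pos := len(buf)

	// One LEN-typed field (field 1), written right-to-left: payload first,
	// then its length, then the tag byte, exactly as MarshalProto does.
	pos -= len(payload)
	copy(buf[pos:], payload)
	pos = encodeVarint(buf, pos, uint64(len(payload)))
	pos--
	buf[pos] = 0xa // (1 << 3) | 2

	fmt.Printf("% x\n", buf[pos:]) // 0a 05 68 65 6c 6c 6f
}
```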
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go
new file mode 100644
index 00000000000..ae92399aa41
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_summarydatapointvalueatquantile.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
+type SummaryDataPointValueAtQuantile struct {
+ Quantile float64
+ Value float64
+}
+
+var (
+ protoPoolSummaryDataPointValueAtQuantile = sync.Pool{
+ New: func() any {
+ return &SummaryDataPointValueAtQuantile{}
+ },
+ }
+)
+
+func NewSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
+ if !UseProtoPooling.IsEnabled() {
+ return &SummaryDataPointValueAtQuantile{}
+ }
+ return protoPoolSummaryDataPointValueAtQuantile.Get().(*SummaryDataPointValueAtQuantile)
+}
+
+func DeleteSummaryDataPointValueAtQuantile(orig *SummaryDataPointValueAtQuantile, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolSummaryDataPointValueAtQuantile.Put(orig)
+ }
+}
+
+func CopySummaryDataPointValueAtQuantile(dest, src *SummaryDataPointValueAtQuantile) *SummaryDataPointValueAtQuantile {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewSummaryDataPointValueAtQuantile()
+ }
+ dest.Quantile = src.Quantile
+
+ dest.Value = src.Value
+
+ return dest
+}
+
+func CopySummaryDataPointValueAtQuantileSlice(dest, src []SummaryDataPointValueAtQuantile) []SummaryDataPointValueAtQuantile {
+ var newDest []SummaryDataPointValueAtQuantile
+ if cap(dest) < len(src) {
+ newDest = make([]SummaryDataPointValueAtQuantile, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPointValueAtQuantile(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopySummaryDataPointValueAtQuantile(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopySummaryDataPointValueAtQuantilePtrSlice(dest, src []*SummaryDataPointValueAtQuantile) []*SummaryDataPointValueAtQuantile {
+ var newDest []*SummaryDataPointValueAtQuantile
+ if cap(dest) < len(src) {
+ newDest = make([]*SummaryDataPointValueAtQuantile, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPointValueAtQuantile()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteSummaryDataPointValueAtQuantile(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewSummaryDataPointValueAtQuantile()
+ }
+ }
+ for i := range src {
+ CopySummaryDataPointValueAtQuantile(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *SummaryDataPointValueAtQuantile) Reset() {
+ *orig = SummaryDataPointValueAtQuantile{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *SummaryDataPointValueAtQuantile) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Quantile != float64(0) {
+ dest.WriteObjectField("quantile")
+ dest.WriteFloat64(orig.Quantile)
+ }
+ if orig.Value != float64(0) {
+ dest.WriteObjectField("value")
+ dest.WriteFloat64(orig.Value)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *SummaryDataPointValueAtQuantile) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "quantile":
+ orig.Quantile = iter.ReadFloat64()
+ case "value":
+ orig.Value = iter.ReadFloat64()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *SummaryDataPointValueAtQuantile) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.Quantile != 0 {
+ n += 9
+ }
+ if orig.Value != 0 {
+ n += 9
+ }
+ return n
+}
+
+func (orig *SummaryDataPointValueAtQuantile) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.Quantile != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile))
+ pos--
+ buf[pos] = 0x9
+ }
+ if orig.Value != 0 {
+ pos -= 8
+ binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value))
+ pos--
+ buf[pos] = 0x11
+ }
+ return len(buf) - pos
+}
+
+func (orig *SummaryDataPointValueAtQuantile) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Quantile = math.Float64frombits(num)
+
+ case 2:
+ if wireType != proto.WireTypeI64 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeI64(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Value = math.Float64frombits(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
+ orig := NewSummaryDataPointValueAtQuantile()
+ orig.Quantile = float64(3.1415926)
+ orig.Value = float64(3.1415926)
+ return orig
+}
+
+func GenTestSummaryDataPointValueAtQuantilePtrSlice() []*SummaryDataPointValueAtQuantile {
+ orig := make([]*SummaryDataPointValueAtQuantile, 5)
+ orig[0] = NewSummaryDataPointValueAtQuantile()
+ orig[1] = GenTestSummaryDataPointValueAtQuantile()
+ orig[2] = NewSummaryDataPointValueAtQuantile()
+ orig[3] = GenTestSummaryDataPointValueAtQuantile()
+ orig[4] = NewSummaryDataPointValueAtQuantile()
+ return orig
+}
+
+func GenTestSummaryDataPointValueAtQuantileSlice() []SummaryDataPointValueAtQuantile {
+ orig := make([]SummaryDataPointValueAtQuantile, 5)
+ orig[1] = *GenTestSummaryDataPointValueAtQuantile()
+ orig[3] = *GenTestSummaryDataPointValueAtQuantile()
+ return orig
+}
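
The quantile and value fields are doubles on the wire (wire type I64), so each non-zero field costs exactly 9 bytes: one tag byte plus eight little-endian bytes of the IEEE-754 representation, matching the `n += 9` seen in `SizeProto`. A small round-trip sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// A non-zero double costs exactly 9 bytes: one tag byte plus a fixed64
	// little-endian IEEE-754 payload, which is the n += 9 in SizeProto.
	buf := make([]byte, 9)
	pos := len(buf)

	quantile := 0.99
	pos -= 8
	binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(quantile))
	pos--
	buf[pos] = 0x9 // field 1, wire type 1 (I64)

	// Decoding reverses the two steps, as UnmarshalProto does for Quantile.
	got := math.Float64frombits(binary.LittleEndian.Uint64(buf[1:]))
	fmt.Println(got) // 0.99
}
```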
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go
new file mode 100644
index 00000000000..d2590bdbb1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tcpaddr.go
@@ -0,0 +1,295 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TCPAddr struct {
+ IP []byte
+ Port int64
+ Zone string
+}
+
+var (
+ protoPoolTCPAddr = sync.Pool{
+ New: func() any {
+ return &TCPAddr{}
+ },
+ }
+)
+
+func NewTCPAddr() *TCPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &TCPAddr{}
+ }
+ return protoPoolTCPAddr.Get().(*TCPAddr)
+}
+
+func DeleteTCPAddr(orig *TCPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolTCPAddr.Put(orig)
+ }
+}
+
+func CopyTCPAddr(dest, src *TCPAddr) *TCPAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTCPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Port = src.Port
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyTCPAddrSlice(dest, src []TCPAddr) []TCPAddr {
+ var newDest []TCPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]TCPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTCPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTCPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTCPAddrPtrSlice(dest, src []*TCPAddr) []*TCPAddr {
+ var newDest []*TCPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*TCPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTCPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTCPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTCPAddr()
+ }
+ }
+ for i := range src {
+ CopyTCPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TCPAddr) Reset() {
+ *orig = TCPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TCPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Port != int64(0) {
+ dest.WriteObjectField("port")
+ dest.WriteInt64(orig.Port)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TCPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "port":
+ orig.Port = iter.ReadInt64()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TCPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Port != 0 {
+ n += 1 + proto.Sov(uint64(orig.Port))
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *TCPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.Port != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
+ pos--
+ buf[pos] = 0x10
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *TCPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Port = int64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTCPAddr() *TCPAddr {
+ orig := NewTCPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Port = int64(13)
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestTCPAddrPtrSlice() []*TCPAddr {
+ orig := make([]*TCPAddr, 5)
+ orig[0] = NewTCPAddr()
+ orig[1] = GenTestTCPAddr()
+ orig[2] = NewTCPAddr()
+ orig[3] = GenTestTCPAddr()
+ orig[4] = NewTCPAddr()
+ return orig
+}
+
+func GenTestTCPAddrSlice() []TCPAddr {
+ orig := make([]TCPAddr, 5)
+ orig[1] = *GenTestTCPAddr()
+ orig[3] = *GenTestTCPAddr()
+ return orig
+}
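
The `Copy*Slice` helpers reuse `dest`'s backing array whenever its capacity covers `len(src)` and explicitly clear any surplus elements so the GC can reclaim what they referenced. A generic sketch of the same allocation strategy; element-level `Delete*` cleanup is simplified here to zeroing, and the vendored code generates one concrete helper per type instead of using generics.

```go
package main

import "fmt"

// copySlice mirrors the generated Copy*Slice helpers: reuse dest's backing
// array when capacity suffices and clear any surplus elements so the GC can
// reclaim what they reference.
func copySlice[T any](dest, src []T) []T {
	var newDest []T
	if cap(dest) < len(src) {
		newDest = make([]T, len(src))
	} else {
		newDest = dest[:len(src)]
		var zero T
		// This happens when len(src) < len(dest) <= cap(dest).
		for i := len(src); i < len(dest); i++ {
			dest[i] = zero
		}
	}
	copy(newDest, src)
	return newDest
}

func main() {
	dest := make([]string, 3, 4)
	src := []string{"a", "b"}
	out := copySlice(dest, src)
	fmt.Println(out, len(out), cap(out)) // [a b] 2 4: backing array reused
}
```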
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go
new file mode 100644
index 00000000000..f63549f1cbc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesdata.go
@@ -0,0 +1,247 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do not
+// implement the OTLP protocol.
+type TracesData struct {
+ ResourceSpans []*ResourceSpans
+}
+
+var (
+ protoPoolTracesData = sync.Pool{
+ New: func() any {
+ return &TracesData{}
+ },
+ }
+)
+
+func NewTracesData() *TracesData {
+ if !UseProtoPooling.IsEnabled() {
+ return &TracesData{}
+ }
+ return protoPoolTracesData.Get().(*TracesData)
+}
+
+func DeleteTracesData(orig *TracesData, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ for i := range orig.ResourceSpans {
+ DeleteResourceSpans(orig.ResourceSpans[i], true)
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolTracesData.Put(orig)
+ }
+}
+
+func CopyTracesData(dest, src *TracesData) *TracesData {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTracesData()
+ }
+ dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
+
+ return dest
+}
+
+func CopyTracesDataSlice(dest, src []TracesData) []TracesData {
+ var newDest []TracesData
+ if cap(dest) < len(src) {
+ newDest = make([]TracesData, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesData(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTracesData(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTracesDataPtrSlice(dest, src []*TracesData) []*TracesData {
+ var newDest []*TracesData
+ if cap(dest) < len(src) {
+ newDest = make([]*TracesData, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesData()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesData(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesData()
+ }
+ }
+ for i := range src {
+ CopyTracesData(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TracesData) Reset() {
+ *orig = TracesData{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesData) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if len(orig.ResourceSpans) > 0 {
+ dest.WriteObjectField("resourceSpans")
+ dest.WriteArrayStart()
+ orig.ResourceSpans[0].MarshalJSON(dest)
+ for i := 1; i < len(orig.ResourceSpans); i++ {
+ dest.WriteMore()
+ orig.ResourceSpans[i].MarshalJSON(dest)
+ }
+ dest.WriteArrayEnd()
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TracesData) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "resourceSpans", "resource_spans":
+ for iter.ReadArray() {
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
+ }
+
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TracesData) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ for i := range orig.ResourceSpans {
+ l = orig.ResourceSpans[i].SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *TracesData) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
+ l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ return len(buf) - pos
+}
+
+func (orig *TracesData) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
+ err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTracesData() *TracesData {
+ orig := NewTracesData()
+ orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
+ return orig
+}
+
+func GenTestTracesDataPtrSlice() []*TracesData {
+ orig := make([]*TracesData, 5)
+ orig[0] = NewTracesData()
+ orig[1] = GenTestTracesData()
+ orig[2] = NewTracesData()
+ orig[3] = GenTestTracesData()
+ orig[4] = NewTracesData()
+ return orig
+}
+
+func GenTestTracesDataSlice() []TracesData {
+ orig := make([]TracesData, 5)
+ orig[1] = *GenTestTracesData()
+ orig[3] = *GenTestTracesData()
+ return orig
+}
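
`UnmarshalProto` handles nested messages by reading a length, re-slicing the input as `buf[startPos:pos]`, and recursing; `proto.ConsumeLen` returns `pos` already advanced past the payload. A sketch of that decode step, with `consumeVarint` as a local stand-in for the internal helpers:

```go
package main

import (
	"errors"
	"fmt"
)

// consumeVarint reads a varint starting at buf[pos] and returns the value
// plus the position just past it; a stand-in for the internal proto helpers.
func consumeVarint(buf []byte, pos int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if pos >= len(buf) {
			return 0, 0, errors.New("truncated varint")
		}
		b := buf[pos]
		pos++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, pos, nil
		}
	}
}

func main() {
	// Wire bytes for one LEN-typed field: tag 0x0a (field 1, wire type 2),
	// a one-byte length, then the payload.
	buf := []byte{0x0a, 0x03, 'f', 'o', 'o'}
	pos := 0

	tag, pos, _ := consumeVarint(buf, pos)
	fieldNum, wireType := tag>>3, tag&0x7

	// ConsumeLen in the generated code returns pos already advanced past
	// the payload, so the sub-message is re-sliced as buf[startPos:pos].
	l, pos, _ := consumeVarint(buf, pos)
	pos += int(l)
	startPos := pos - int(l)

	fmt.Println(fieldNum, wireType, string(buf[startPos:pos])) // 1 2 foo
}
```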
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go
new file mode 100644
index 00000000000..d3e3fd5633d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_tracesrequest.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type TracesRequest struct {
+ RequestContext *RequestContext
+ TracesData TracesData
+ FormatVersion uint32
+}
+
+var (
+ protoPoolTracesRequest = sync.Pool{
+ New: func() any {
+ return &TracesRequest{}
+ },
+ }
+)
+
+func NewTracesRequest() *TracesRequest {
+ if !UseProtoPooling.IsEnabled() {
+ return &TracesRequest{}
+ }
+ return protoPoolTracesRequest.Get().(*TracesRequest)
+}
+
+func DeleteTracesRequest(orig *TracesRequest, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ DeleteRequestContext(orig.RequestContext, true)
+ DeleteTracesData(&orig.TracesData, false)
+
+ orig.Reset()
+ if nullable {
+ protoPoolTracesRequest.Put(orig)
+ }
+}
+
+func CopyTracesRequest(dest, src *TracesRequest) *TracesRequest {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewTracesRequest()
+ }
+ dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
+
+ CopyTracesData(&dest.TracesData, &src.TracesData)
+
+ dest.FormatVersion = src.FormatVersion
+
+ return dest
+}
+
+func CopyTracesRequestSlice(dest, src []TracesRequest) []TracesRequest {
+ var newDest []TracesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]TracesRequest, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesRequest(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyTracesRequest(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyTracesRequestPtrSlice(dest, src []*TracesRequest) []*TracesRequest {
+ var newDest []*TracesRequest
+ if cap(dest) < len(src) {
+ newDest = make([]*TracesRequest, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesRequest()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteTracesRequest(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewTracesRequest()
+ }
+ }
+ for i := range src {
+ CopyTracesRequest(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *TracesRequest) Reset() {
+ *orig = TracesRequest{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *TracesRequest) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.RequestContext != nil {
+ dest.WriteObjectField("requestContext")
+ orig.RequestContext.MarshalJSON(dest)
+ }
+ dest.WriteObjectField("tracesData")
+ orig.TracesData.MarshalJSON(dest)
+ if orig.FormatVersion != uint32(0) {
+ dest.WriteObjectField("formatVersion")
+ dest.WriteUint32(orig.FormatVersion)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *TracesRequest) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "requestContext", "request_context":
+ orig.RequestContext = NewRequestContext()
+ orig.RequestContext.UnmarshalJSON(iter)
+ case "tracesData", "traces_data":
+
+ orig.TracesData.UnmarshalJSON(iter)
+ case "formatVersion", "format_version":
+ orig.FormatVersion = iter.ReadUint32()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *TracesRequest) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = orig.TracesData.SizeProto()
+ n += 1 + proto.Sov(uint64(l)) + l
+ if orig.FormatVersion != 0 {
+ n += 5
+ }
+ return n
+}
+
+func (orig *TracesRequest) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.RequestContext != nil {
+ l = orig.RequestContext.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ l = orig.TracesData.MarshalProto(buf[:pos])
+ pos -= l
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+
+ if orig.FormatVersion != 0 {
+ pos -= 4
+ binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
+ pos--
+ buf[pos] = 0xd
+ }
+ return len(buf) - pos
+}
+
+func (orig *TracesRequest) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ orig.RequestContext = NewRequestContext()
+ err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field TracesData", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+
+ err = orig.TracesData.UnmarshalProto(buf[startPos:pos])
+ if err != nil {
+ return err
+ }
+
+ case 1:
+ if wireType != proto.WireTypeI32 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
+ }
+ var num uint32
+ num, pos, err = proto.ConsumeI32(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.FormatVersion = uint32(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestTracesRequest() *TracesRequest {
+ orig := NewTracesRequest()
+ orig.RequestContext = GenTestRequestContext()
+ orig.TracesData = *GenTestTracesData()
+ orig.FormatVersion = uint32(13)
+ return orig
+}
+
+func GenTestTracesRequestPtrSlice() []*TracesRequest {
+ orig := make([]*TracesRequest, 5)
+ orig[0] = NewTracesRequest()
+ orig[1] = GenTestTracesRequest()
+ orig[2] = NewTracesRequest()
+ orig[3] = GenTestTracesRequest()
+ orig[4] = NewTracesRequest()
+ return orig
+}
+
+func GenTestTracesRequestSlice() []TracesRequest {
+ orig := make([]TracesRequest, 5)
+ orig[1] = *GenTestTracesRequest()
+ orig[3] = *GenTestTracesRequest()
+ return orig
+}
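
Unlike the varint fields elsewhere, `FormatVersion` is a fixed32 (wire type 5, tag byte 0xd for field 1), so a non-zero value always occupies 5 bytes, matching the `n += 5` in `SizeProto` above. A small encoding sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A fixed32 field costs 5 bytes: one tag byte plus four little-endian
	// payload bytes, regardless of the value's magnitude.
	buf := make([]byte, 5)
	pos := len(buf)

	version := uint32(13)
	pos -= 4
	binary.LittleEndian.PutUint32(buf[pos:], version)
	pos--
	buf[pos] = 0xd // field 1, wire type 5 (I32)

	fmt.Printf("% x\n", buf) // 0d 0d 00 00 00
}
```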
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
new file mode 100644
index 00000000000..f197afd194a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_udpaddr.go
@@ -0,0 +1,295 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UDPAddr struct {
+ IP []byte
+ Port int64
+ Zone string
+}
+
+var (
+ protoPoolUDPAddr = sync.Pool{
+ New: func() any {
+ return &UDPAddr{}
+ },
+ }
+)
+
+func NewUDPAddr() *UDPAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &UDPAddr{}
+ }
+ return protoPoolUDPAddr.Get().(*UDPAddr)
+}
+
+func DeleteUDPAddr(orig *UDPAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolUDPAddr.Put(orig)
+ }
+}
+
+func CopyUDPAddr(dest, src *UDPAddr) *UDPAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewUDPAddr()
+ }
+ dest.IP = src.IP
+
+ dest.Port = src.Port
+
+ dest.Zone = src.Zone
+
+ return dest
+}
+
+func CopyUDPAddrSlice(dest, src []UDPAddr) []UDPAddr {
+ var newDest []UDPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]UDPAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUDPAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyUDPAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyUDPAddrPtrSlice(dest, src []*UDPAddr) []*UDPAddr {
+ var newDest []*UDPAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*UDPAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUDPAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUDPAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUDPAddr()
+ }
+ }
+ for i := range src {
+ CopyUDPAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *UDPAddr) Reset() {
+ *orig = UDPAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UDPAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+
+ if len(orig.IP) > 0 {
+ dest.WriteObjectField("iP")
+ dest.WriteBytes(orig.IP)
+ }
+ if orig.Port != int64(0) {
+ dest.WriteObjectField("port")
+ dest.WriteInt64(orig.Port)
+ }
+ if orig.Zone != "" {
+ dest.WriteObjectField("zone")
+ dest.WriteString(orig.Zone)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *UDPAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "iP":
+ orig.IP = iter.ReadBytes()
+ case "port":
+ orig.Port = iter.ReadInt64()
+ case "zone":
+ orig.Zone = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *UDPAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ if orig.Port != 0 {
+ n += 1 + proto.Sov(uint64(orig.Port))
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *UDPAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.IP)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.IP)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ if orig.Port != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
+ pos--
+ buf[pos] = 0x10
+ }
+ l = len(orig.Zone)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Zone)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x1a
+ }
+ return len(buf) - pos
+}
+
+func (orig *UDPAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ if length != 0 {
+ orig.IP = make([]byte, length)
+ copy(orig.IP, buf[startPos:pos])
+ }
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.Port = int64(num)
+
+ case 3:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Zone = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestUDPAddr() *UDPAddr {
+ orig := NewUDPAddr()
+ orig.IP = []byte{1, 2, 3}
+ orig.Port = int64(13)
+ orig.Zone = "test_zone"
+ return orig
+}
+
+func GenTestUDPAddrPtrSlice() []*UDPAddr {
+ orig := make([]*UDPAddr, 5)
+ orig[0] = NewUDPAddr()
+ orig[1] = GenTestUDPAddr()
+ orig[2] = NewUDPAddr()
+ orig[3] = GenTestUDPAddr()
+ orig[4] = NewUDPAddr()
+ return orig
+}
+
+func GenTestUDPAddrSlice() []UDPAddr {
+ orig := make([]UDPAddr, 5)
+ orig[1] = *GenTestUDPAddr()
+ orig[3] = *GenTestUDPAddr()
+ return orig
+}
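
Note the asymmetry in how `IP` is handled in the two address types: `UnmarshalProto` copies the bytes out of the transient wire buffer, while `CopyTCPAddr`/`CopyUDPAddr` assign `dest.IP = src.IP`, leaving both structs aliasing one backing array. A sketch of the practical difference; `Addr` is a hypothetical stand-in for those types:

```go
package main

import "fmt"

// Addr is a hypothetical stand-in for TCPAddr/UDPAddr.
type Addr struct{ IP []byte }

func main() {
	wire := []byte{10, 0, 0, 1}

	// UnmarshalProto-style: copy out of the transient wire buffer so the
	// message stays valid after the buffer is reused.
	var a Addr
	a.IP = make([]byte, len(wire))
	copy(a.IP, wire)
	wire[0] = 99
	fmt.Println(a.IP) // [10 0 0 1]: unaffected by buffer reuse

	// Copy*-style shallow assignment: both structs now alias one array,
	// so the copy is cheap but not independent.
	b := Addr{IP: a.IP}
	b.IP[0] = 192
	fmt.Println(a.IP) // [192 0 0 1]: shared backing array
}
```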
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go
new file mode 100644
index 00000000000..b7d25686c02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_unixaddr.go
@@ -0,0 +1,261 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+type UnixAddr struct {
+ Name string
+ Net string
+}
+
+var (
+ protoPoolUnixAddr = sync.Pool{
+ New: func() any {
+ return &UnixAddr{}
+ },
+ }
+)
+
+func NewUnixAddr() *UnixAddr {
+ if !UseProtoPooling.IsEnabled() {
+ return &UnixAddr{}
+ }
+ return protoPoolUnixAddr.Get().(*UnixAddr)
+}
+
+func DeleteUnixAddr(orig *UnixAddr, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolUnixAddr.Put(orig)
+ }
+}
+
+func CopyUnixAddr(dest, src *UnixAddr) *UnixAddr {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewUnixAddr()
+ }
+ dest.Name = src.Name
+
+ dest.Net = src.Net
+
+ return dest
+}
+
+func CopyUnixAddrSlice(dest, src []UnixAddr) []UnixAddr {
+ var newDest []UnixAddr
+ if cap(dest) < len(src) {
+ newDest = make([]UnixAddr, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUnixAddr(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyUnixAddr(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyUnixAddrPtrSlice(dest, src []*UnixAddr) []*UnixAddr {
+ var newDest []*UnixAddr
+ if cap(dest) < len(src) {
+ newDest = make([]*UnixAddr, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUnixAddr()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteUnixAddr(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewUnixAddr()
+ }
+ }
+ for i := range src {
+ CopyUnixAddr(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *UnixAddr) Reset() {
+ *orig = UnixAddr{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *UnixAddr) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.Name != "" {
+ dest.WriteObjectField("name")
+ dest.WriteString(orig.Name)
+ }
+ if orig.Net != "" {
+ dest.WriteObjectField("net")
+ dest.WriteString(orig.Net)
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *UnixAddr) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "name":
+ orig.Name = iter.ReadString()
+ case "net":
+ orig.Net = iter.ReadString()
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *UnixAddr) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ l = len(orig.Net)
+ if l > 0 {
+ n += 1 + proto.Sov(uint64(l)) + l
+ }
+ return n
+}
+
+func (orig *UnixAddr) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ l = len(orig.Name)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Name)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0xa
+ }
+ l = len(orig.Net)
+ if l > 0 {
+ pos -= l
+ copy(buf[pos:], orig.Net)
+ pos = proto.EncodeVarint(buf, pos, uint64(l))
+ pos--
+ buf[pos] = 0x12
+ }
+ return len(buf) - pos
+}
+
+func (orig *UnixAddr) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Consume the next field tag (field number and wire type).
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Name = string(buf[startPos:pos])
+
+ case 2:
+ if wireType != proto.WireTypeLen {
+ return fmt.Errorf("proto: wrong wireType = %d for field Net", wireType)
+ }
+ var length int
+ length, pos, err = proto.ConsumeLen(buf, pos)
+ if err != nil {
+ return err
+ }
+ startPos := pos - length
+ orig.Net = string(buf[startPos:pos])
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestUnixAddr() *UnixAddr {
+ orig := NewUnixAddr()
+ orig.Name = "test_name"
+ orig.Net = "test_net"
+ return orig
+}
+
+func GenTestUnixAddrPtrSlice() []*UnixAddr {
+ orig := make([]*UnixAddr, 5)
+ orig[0] = NewUnixAddr()
+ orig[1] = GenTestUnixAddr()
+ orig[2] = NewUnixAddr()
+ orig[3] = GenTestUnixAddr()
+ orig[4] = NewUnixAddr()
+ return orig
+}
+
+func GenTestUnixAddrSlice() []UnixAddr {
+ orig := make([]UnixAddr, 5)
+ orig[1] = *GenTestUnixAddr()
+ orig[3] = *GenTestUnixAddr()
+ return orig
+}
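
For string fields like `Name` and `Net`, the `n += 1 + proto.Sov(uint64(l)) + l` accounting is one tag byte, a varint length, and the raw bytes. A sketch with a local `sov` mirroring `proto.Sov`:

```go
package main

import "fmt"

// sov mirrors proto.Sov: the byte length of v's varint encoding.
func sov(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	// One tag byte + varint length + the raw bytes.
	name := "test_name"
	l := len(name)
	fmt.Println(1 + sov(uint64(l)) + l) // 11 for this 9-byte string

	// Strings under 128 bytes need a single length byte; longer ones need
	// more, which is why the varint term cannot be folded into a constant.
	fmt.Println(sov(127), sov(128)) // 1 2
}
```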
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go
new file mode 100644
index 00000000000..a070ab3e70f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_proto_valuetype.go
@@ -0,0 +1,285 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/proto"
+)
+
+// ValueType describes the type and units of a value, with an optional aggregation temporality.
+type ValueType struct {
+ TypeStrindex int32
+ UnitStrindex int32
+ AggregationTemporality AggregationTemporality
+}
+
+var (
+ protoPoolValueType = sync.Pool{
+ New: func() any {
+ return &ValueType{}
+ },
+ }
+)
+
+func NewValueType() *ValueType {
+ if !UseProtoPooling.IsEnabled() {
+ return &ValueType{}
+ }
+ return protoPoolValueType.Get().(*ValueType)
+}
+
+func DeleteValueType(orig *ValueType, nullable bool) {
+ if orig == nil {
+ return
+ }
+
+ if !UseProtoPooling.IsEnabled() {
+ orig.Reset()
+ return
+ }
+
+ orig.Reset()
+ if nullable {
+ protoPoolValueType.Put(orig)
+ }
+}
+
+func CopyValueType(dest, src *ValueType) *ValueType {
+ // If copying to same object, just return.
+ if src == dest {
+ return dest
+ }
+
+ if src == nil {
+ return nil
+ }
+
+ if dest == nil {
+ dest = NewValueType()
+ }
+ dest.TypeStrindex = src.TypeStrindex
+
+ dest.UnitStrindex = src.UnitStrindex
+
+ dest.AggregationTemporality = src.AggregationTemporality
+
+ return dest
+}
+
+func CopyValueTypeSlice(dest, src []ValueType) []ValueType {
+ var newDest []ValueType
+ if cap(dest) < len(src) {
+ newDest = make([]ValueType, len(src))
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteValueType(&dest[i], false)
+ }
+ }
+ for i := range src {
+ CopyValueType(&newDest[i], &src[i])
+ }
+ return newDest
+}
+
+func CopyValueTypePtrSlice(dest, src []*ValueType) []*ValueType {
+ var newDest []*ValueType
+ if cap(dest) < len(src) {
+ newDest = make([]*ValueType, len(src))
+ // Copy old pointers to re-use.
+ copy(newDest, dest)
+ // Add new pointers for missing elements from len(dest) to len(src).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewValueType()
+ }
+ } else {
+ newDest = dest[:len(src)]
+ // Cleanup the rest of the elements so GC can free the memory.
+ // This can happen when len(src) < len(dest) < cap(dest).
+ for i := len(src); i < len(dest); i++ {
+ DeleteValueType(dest[i], true)
+ dest[i] = nil
+ }
+ // Add new pointers for missing elements.
+ // This can happen when len(dest) < len(src) < cap(dest).
+ for i := len(dest); i < len(src); i++ {
+ newDest[i] = NewValueType()
+ }
+ }
+ for i := range src {
+ CopyValueType(newDest[i], src[i])
+ }
+ return newDest
+}
+
+func (orig *ValueType) Reset() {
+ *orig = ValueType{}
+}
+
+// MarshalJSON marshals all properties from the current struct to the destination stream.
+func (orig *ValueType) MarshalJSON(dest *json.Stream) {
+ dest.WriteObjectStart()
+ if orig.TypeStrindex != int32(0) {
+ dest.WriteObjectField("typeStrindex")
+ dest.WriteInt32(orig.TypeStrindex)
+ }
+ if orig.UnitStrindex != int32(0) {
+ dest.WriteObjectField("unitStrindex")
+ dest.WriteInt32(orig.UnitStrindex)
+ }
+
+ if int32(orig.AggregationTemporality) != 0 {
+ dest.WriteObjectField("aggregationTemporality")
+ dest.WriteInt32(int32(orig.AggregationTemporality))
+ }
+ dest.WriteObjectEnd()
+}
+
+// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
+func (orig *ValueType) UnmarshalJSON(iter *json.Iterator) {
+ for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+ switch f {
+ case "typeStrindex", "type_strindex":
+ orig.TypeStrindex = iter.ReadInt32()
+ case "unitStrindex", "unit_strindex":
+ orig.UnitStrindex = iter.ReadInt32()
+ case "aggregationTemporality", "aggregation_temporality":
+ orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
+ default:
+ iter.Skip()
+ }
+ }
+}
+
+func (orig *ValueType) SizeProto() int {
+ var n int
+ var l int
+ _ = l
+ if orig.TypeStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.TypeStrindex))
+ }
+ if orig.UnitStrindex != 0 {
+ n += 1 + proto.Sov(uint64(orig.UnitStrindex))
+ }
+ if orig.AggregationTemporality != 0 {
+ n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
+ }
+ return n
+}
+
+func (orig *ValueType) MarshalProto(buf []byte) int {
+ pos := len(buf)
+ var l int
+ _ = l
+ if orig.TypeStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.TypeStrindex))
+ pos--
+ buf[pos] = 0x8
+ }
+ if orig.UnitStrindex != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
+ pos--
+ buf[pos] = 0x10
+ }
+ if orig.AggregationTemporality != 0 {
+ pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
+ pos--
+ buf[pos] = 0x18
+ }
+ return len(buf) - pos
+}
+
+func (orig *ValueType) UnmarshalProto(buf []byte) error {
+ var err error
+ var fieldNum int32
+ var wireType proto.WireType
+
+ l := len(buf)
+ pos := 0
+ for pos < l {
+ // Read the next field tag.
+ fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
+ if err != nil {
+ return err
+ }
+ switch fieldNum {
+
+ case 1:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.TypeStrindex = int32(num)
+
+ case 2:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.UnitStrindex = int32(num)
+
+ case 3:
+ if wireType != proto.WireTypeVarint {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+ }
+ var num uint64
+ num, pos, err = proto.ConsumeVarint(buf, pos)
+ if err != nil {
+ return err
+ }
+
+ orig.AggregationTemporality = AggregationTemporality(num)
+ default:
+ pos, err = proto.ConsumeUnknown(buf, pos, wireType)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func GenTestValueType() *ValueType {
+ orig := NewValueType()
+ orig.TypeStrindex = int32(13)
+ orig.UnitStrindex = int32(13)
+ orig.AggregationTemporality = AggregationTemporality(13)
+ return orig
+}
+
+func GenTestValueTypePtrSlice() []*ValueType {
+ orig := make([]*ValueType, 5)
+ orig[0] = NewValueType()
+ orig[1] = GenTestValueType()
+ orig[2] = NewValueType()
+ orig[3] = GenTestValueType()
+ orig[4] = NewValueType()
+ return orig
+}
+
+func GenTestValueTypeSlice() []ValueType {
+ orig := make([]ValueType, 5)
+ orig[1] = *GenTestValueType()
+ orig[3] = *GenTestValueType()
+ return orig
+}
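
One non-obvious property of the generated proto marshaling in this file: MarshalProto fills the buffer backwards from its end and returns the number of bytes written, so the encoded payload occupies buf[len(buf)-n:]. A round-trip sketch under that assumption:

```go
// Sketch only: SizeProto reports the exact encoded size, so an exactly
// sized buffer is filled completely and n == len(buf).
func roundTripValueType(src *ValueType) (*ValueType, error) {
	buf := make([]byte, src.SizeProto())
	n := src.MarshalProto(buf)
	dst := NewValueType()
	if err := dst.UnmarshalProto(buf[len(buf)-n:]); err != nil {
		return nil, err
	}
	return dst, nil
}
```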
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go
new file mode 100644
index 00000000000..2fd1f11324d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_anyvalueslice.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type SliceWrapper struct {
+ orig *[]AnyValue
+ state *State
+}
+
+func GetSliceOrig(ms SliceWrapper) *[]AnyValue {
+ return ms.orig
+}
+
+func GetSliceState(ms SliceWrapper) *State {
+ return ms.state
+}
+
+func NewSliceWrapper(orig *[]AnyValue, state *State) SliceWrapper {
+ return SliceWrapper{orig: orig, state: state}
+}
+
+func GenTestSliceWrapper() SliceWrapper {
+ orig := GenTestAnyValueSlice()
+ return NewSliceWrapper(&orig, NewState())
+}
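
The generated wrapper files in this diff all repeat the pattern above: a handle pairing a pointer to the underlying generated struct (or slice) with a shared State that gates mutability. A sketch of how the accessors compose, assuming the identifiers in this file:

```go
// Sketch only: the wrapper carries no data of its own; reads and writes
// go through orig, while State distinguishes read-only from mutable views.
func wrapperPattern() {
	orig := GenTestAnyValueSlice()
	w := NewSliceWrapper(&orig, NewState())
	_ = GetSliceOrig(w)  // *[]AnyValue for the generated layer
	_ = GetSliceState(w) // *State shared across views of the same payload
}
```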
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
index 0d6214c20ff..ecdbd89cea8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type ByteSlice struct {
+type ByteSliceWrapper struct {
orig *[]byte
state *State
}
-func GetOrigByteSlice(ms ByteSlice) *[]byte {
+func GetByteSliceOrig(ms ByteSliceWrapper) *[]byte {
return ms.orig
}
-func GetByteSliceState(ms ByteSlice) *State {
+func GetByteSliceState(ms ByteSliceWrapper) *State {
return ms.state
}
-func NewByteSlice(orig *[]byte, state *State) ByteSlice {
- return ByteSlice{orig: orig, state: state}
+func NewByteSliceWrapper(orig *[]byte, state *State) ByteSliceWrapper {
+ return ByteSliceWrapper{orig: orig, state: state}
}
-func CopyOrigByteSlice(dst, src []byte) []byte {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestByteSliceWrapper() ByteSliceWrapper {
+ orig := []byte{1, 2, 3}
+ return NewByteSliceWrapper(&orig, NewState())
}
-func FillTestByteSlice(tv ByteSlice) {
-}
-
-func GenerateTestByteSlice() ByteSlice {
- state := StateMutable
- var orig []byte = nil
-
- return ByteSlice{&orig, &state}
+func GenTestByteSlice() []byte {
+ return []byte{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go
new file mode 100644
index 00000000000..b4e4ea12ee6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityref.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type EntityRefWrapper struct {
+ orig *EntityRef
+ state *State
+}
+
+func GetEntityRefOrig(ms EntityRefWrapper) *EntityRef {
+ return ms.orig
+}
+
+func GetEntityRefState(ms EntityRefWrapper) *State {
+ return ms.state
+}
+
+func NewEntityRefWrapper(orig *EntityRef, state *State) EntityRefWrapper {
+ return EntityRefWrapper{orig: orig, state: state}
+}
+
+func GenTestEntityRefWrapper() EntityRefWrapper {
+ return NewEntityRefWrapper(GenTestEntityRef(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go
new file mode 100644
index 00000000000..8e71ec1de0a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_entityrefslice.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type EntityRefSliceWrapper struct {
+ orig *[]*EntityRef
+ state *State
+}
+
+func GetEntityRefSliceOrig(ms EntityRefSliceWrapper) *[]*EntityRef {
+ return ms.orig
+}
+
+func GetEntityRefSliceState(ms EntityRefSliceWrapper) *State {
+ return ms.state
+}
+
+func NewEntityRefSliceWrapper(orig *[]*EntityRef, state *State) EntityRefSliceWrapper {
+ return EntityRefSliceWrapper{orig: orig, state: state}
+}
+
+func GenTestEntityRefSliceWrapper() EntityRefSliceWrapper {
+ orig := GenTestEntityRefPtrSlice()
+ return NewEntityRefSliceWrapper(&orig, NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go
new file mode 100644
index 00000000000..a529cd105bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportlogsservicerequest.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type LogsWrapper struct {
+ orig *ExportLogsServiceRequest
+ state *State
+}
+
+func GetLogsOrig(ms LogsWrapper) *ExportLogsServiceRequest {
+ return ms.orig
+}
+
+func GetLogsState(ms LogsWrapper) *State {
+ return ms.state
+}
+
+func NewLogsWrapper(orig *ExportLogsServiceRequest, state *State) LogsWrapper {
+ return LogsWrapper{orig: orig, state: state}
+}
+
+func GenTestLogsWrapper() LogsWrapper {
+ return NewLogsWrapper(GenTestExportLogsServiceRequest(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go
new file mode 100644
index 00000000000..0d378e4d74d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportmetricsservicerequest.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type MetricsWrapper struct {
+ orig *ExportMetricsServiceRequest
+ state *State
+}
+
+func GetMetricsOrig(ms MetricsWrapper) *ExportMetricsServiceRequest {
+ return ms.orig
+}
+
+func GetMetricsState(ms MetricsWrapper) *State {
+ return ms.state
+}
+
+func NewMetricsWrapper(orig *ExportMetricsServiceRequest, state *State) MetricsWrapper {
+ return MetricsWrapper{orig: orig, state: state}
+}
+
+func GenTestMetricsWrapper() MetricsWrapper {
+ return NewMetricsWrapper(GenTestExportMetricsServiceRequest(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go
new file mode 100644
index 00000000000..8533626bebe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exportprofilesservicerequest.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type ProfilesWrapper struct {
+ orig *ExportProfilesServiceRequest
+ state *State
+}
+
+func GetProfilesOrig(ms ProfilesWrapper) *ExportProfilesServiceRequest {
+ return ms.orig
+}
+
+func GetProfilesState(ms ProfilesWrapper) *State {
+ return ms.state
+}
+
+func NewProfilesWrapper(orig *ExportProfilesServiceRequest, state *State) ProfilesWrapper {
+ return ProfilesWrapper{orig: orig, state: state}
+}
+
+func GenTestProfilesWrapper() ProfilesWrapper {
+ return NewProfilesWrapper(GenTestExportProfilesServiceRequest(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
new file mode 100644
index 00000000000..75d82979d88
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_exporttraceservicerequest.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type TracesWrapper struct {
+ orig *ExportTraceServiceRequest
+ state *State
+}
+
+func GetTracesOrig(ms TracesWrapper) *ExportTraceServiceRequest {
+ return ms.orig
+}
+
+func GetTracesState(ms TracesWrapper) *State {
+ return ms.state
+}
+
+func NewTracesWrapper(orig *ExportTraceServiceRequest, state *State) TracesWrapper {
+ return TracesWrapper{orig: orig, state: state}
+}
+
+func GenTestTracesWrapper() TracesWrapper {
+ return NewTracesWrapper(GenTestExportTraceServiceRequest(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
index 2ca007b5f01..8d0d6ea6cf8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type Float64Slice struct {
+type Float64SliceWrapper struct {
orig *[]float64
state *State
}
-func GetOrigFloat64Slice(ms Float64Slice) *[]float64 {
+func GetFloat64SliceOrig(ms Float64SliceWrapper) *[]float64 {
return ms.orig
}
-func GetFloat64SliceState(ms Float64Slice) *State {
+func GetFloat64SliceState(ms Float64SliceWrapper) *State {
return ms.state
}
-func NewFloat64Slice(orig *[]float64, state *State) Float64Slice {
- return Float64Slice{orig: orig, state: state}
+func NewFloat64SliceWrapper(orig *[]float64, state *State) Float64SliceWrapper {
+ return Float64SliceWrapper{orig: orig, state: state}
}
-func CopyOrigFloat64Slice(dst, src []float64) []float64 {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestFloat64SliceWrapper() Float64SliceWrapper {
+ orig := []float64{1.1, 2.2, 3.3}
+ return NewFloat64SliceWrapper(&orig, NewState())
}
-func FillTestFloat64Slice(tv Float64Slice) {
-}
-
-func GenerateTestFloat64Slice() Float64Slice {
- state := StateMutable
- var orig []float64 = nil
-
- return Float64Slice{&orig, &state}
+func GenTestFloat64Slice() []float64 {
+ return []float64{1.1, 2.2, 3.3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
index 5483272a960..5fbe72e12ec 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go
@@ -1,50 +1,28 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type InstrumentationScope struct {
- orig *otlpcommon.InstrumentationScope
+type InstrumentationScopeWrapper struct {
+ orig *InstrumentationScope
state *State
}
-func GetOrigInstrumentationScope(ms InstrumentationScope) *otlpcommon.InstrumentationScope {
+func GetInstrumentationScopeOrig(ms InstrumentationScopeWrapper) *InstrumentationScope {
return ms.orig
}
-func GetInstrumentationScopeState(ms InstrumentationScope) *State {
+func GetInstrumentationScopeState(ms InstrumentationScopeWrapper) *State {
return ms.state
}
-func NewInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *State) InstrumentationScope {
- return InstrumentationScope{orig: orig, state: state}
-}
-
-func CopyOrigInstrumentationScope(dest, src *otlpcommon.InstrumentationScope) {
- dest.Name = src.Name
- dest.Version = src.Version
- dest.Attributes = CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
-}
-
-func GenerateTestInstrumentationScope() InstrumentationScope {
- orig := otlpcommon.InstrumentationScope{}
- state := StateMutable
- tv := NewInstrumentationScope(&orig, &state)
- FillTestInstrumentationScope(tv)
- return tv
+func NewInstrumentationScopeWrapper(orig *InstrumentationScope, state *State) InstrumentationScopeWrapper {
+ return InstrumentationScopeWrapper{orig: orig, state: state}
}
-func FillTestInstrumentationScope(tv InstrumentationScope) {
- tv.orig.Name = "test_name"
- tv.orig.Version = "test_version"
- FillTestMap(NewMap(&tv.orig.Attributes, tv.state))
- tv.orig.DroppedAttributesCount = uint32(17)
+func GenTestInstrumentationScopeWrapper() InstrumentationScopeWrapper {
+ return NewInstrumentationScopeWrapper(GenTestInstrumentationScope(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
index c4fe994f9ab..ed66b200738 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type Int32Slice struct {
+type Int32SliceWrapper struct {
orig *[]int32
state *State
}
-func GetOrigInt32Slice(ms Int32Slice) *[]int32 {
+func GetInt32SliceOrig(ms Int32SliceWrapper) *[]int32 {
return ms.orig
}
-func GetInt32SliceState(ms Int32Slice) *State {
+func GetInt32SliceState(ms Int32SliceWrapper) *State {
return ms.state
}
-func NewInt32Slice(orig *[]int32, state *State) Int32Slice {
- return Int32Slice{orig: orig, state: state}
+func NewInt32SliceWrapper(orig *[]int32, state *State) Int32SliceWrapper {
+ return Int32SliceWrapper{orig: orig, state: state}
}
-func CopyOrigInt32Slice(dst, src []int32) []int32 {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestInt32SliceWrapper() Int32SliceWrapper {
+ orig := []int32{1, 2, 3}
+ return NewInt32SliceWrapper(&orig, NewState())
}
-func FillTestInt32Slice(tv Int32Slice) {
-}
-
-func GenerateTestInt32Slice() Int32Slice {
- state := StateMutable
- var orig []int32 = nil
-
- return Int32Slice{&orig, &state}
+func GenTestInt32Slice() []int32 {
+ return []int32{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
index 19e49e71ffa..9174f8632a7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int64slice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type Int64Slice struct {
+type Int64SliceWrapper struct {
orig *[]int64
state *State
}
-func GetOrigInt64Slice(ms Int64Slice) *[]int64 {
+func GetInt64SliceOrig(ms Int64SliceWrapper) *[]int64 {
return ms.orig
}
-func GetInt64SliceState(ms Int64Slice) *State {
+func GetInt64SliceState(ms Int64SliceWrapper) *State {
return ms.state
}
-func NewInt64Slice(orig *[]int64, state *State) Int64Slice {
- return Int64Slice{orig: orig, state: state}
+func NewInt64SliceWrapper(orig *[]int64, state *State) Int64SliceWrapper {
+ return Int64SliceWrapper{orig: orig, state: state}
}
-func CopyOrigInt64Slice(dst, src []int64) []int64 {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestInt64SliceWrapper() Int64SliceWrapper {
+ orig := []int64{1, 2, 3}
+ return NewInt64SliceWrapper(&orig, NewState())
}
-func FillTestInt64Slice(tv Int64Slice) {
-}
-
-func GenerateTestInt64Slice() Int64Slice {
- state := StateMutable
- var orig []int64 = nil
-
- return Int64Slice{&orig, &state}
+func GenTestInt64Slice() []int64 {
+ return []int64{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go
deleted file mode 100644
index 5f3fe569ba5..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package internal
-
-type IntSlice struct {
- orig *[]int
- state *State
-}
-
-func GetOrigIntSlice(ms IntSlice) *[]int {
- return ms.orig
-}
-
-func GetIntSliceState(ms IntSlice) *State {
- return ms.state
-}
-
-func NewIntSlice(orig *[]int, state *State) IntSlice {
- return IntSlice{orig: orig, state: state}
-}
-
-func FillTestIntSlice(tv IntSlice) {
-}
-
-func GenerateTestIntSlice() IntSlice {
- state := StateMutable
- var orig []int = nil
-
- return IntSlice{&orig, &state}
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go
new file mode 100644
index 00000000000..7bece6d028b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_profilesdata.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package internal
+
+type ProfilesDataWrapper struct {
+ orig *ProfilesData
+ state *State
+}
+
+func GetProfilesDataOrig(ms ProfilesDataWrapper) *ProfilesData {
+ return ms.orig
+}
+
+func GetProfilesDataState(ms ProfilesDataWrapper) *State {
+ return ms.state
+}
+
+func NewProfilesDataWrapper(orig *ProfilesData, state *State) ProfilesDataWrapper {
+ return ProfilesDataWrapper{orig: orig, state: state}
+}
+
+func GenTestProfilesDataWrapper() ProfilesDataWrapper {
+ return NewProfilesDataWrapper(GenTestProfilesData(), NewState())
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
index b84b705c9a1..1d6cabfebfd 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go
@@ -1,46 +1,28 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-import (
- otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-type Resource struct {
- orig *otlpresource.Resource
+type ResourceWrapper struct {
+ orig *Resource
state *State
}
-func GetOrigResource(ms Resource) *otlpresource.Resource {
+func GetResourceOrig(ms ResourceWrapper) *Resource {
return ms.orig
}
-func GetResourceState(ms Resource) *State {
+func GetResourceState(ms ResourceWrapper) *State {
return ms.state
}
-func NewResource(orig *otlpresource.Resource, state *State) Resource {
- return Resource{orig: orig, state: state}
-}
-
-func CopyOrigResource(dest, src *otlpresource.Resource) {
- dest.Attributes = CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
-}
-
-func GenerateTestResource() Resource {
- orig := otlpresource.Resource{}
- state := StateMutable
- tv := NewResource(&orig, &state)
- FillTestResource(tv)
- return tv
+func NewResourceWrapper(orig *Resource, state *State) ResourceWrapper {
+ return ResourceWrapper{orig: orig, state: state}
}
-func FillTestResource(tv Resource) {
- FillTestMap(NewMap(&tv.orig.Attributes, tv.state))
- tv.orig.DroppedAttributesCount = uint32(17)
+func GenTestResourceWrapper() ResourceWrapper {
+ return NewResourceWrapper(GenTestResource(), NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
index a05b216f7ca..d922f65d80c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_stringslice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type StringSlice struct {
+type StringSliceWrapper struct {
orig *[]string
state *State
}
-func GetOrigStringSlice(ms StringSlice) *[]string {
+func GetStringSliceOrig(ms StringSliceWrapper) *[]string {
return ms.orig
}
-func GetStringSliceState(ms StringSlice) *State {
+func GetStringSliceState(ms StringSliceWrapper) *State {
return ms.state
}
-func NewStringSlice(orig *[]string, state *State) StringSlice {
- return StringSlice{orig: orig, state: state}
+func NewStringSliceWrapper(orig *[]string, state *State) StringSliceWrapper {
+ return StringSliceWrapper{orig: orig, state: state}
}
-func CopyOrigStringSlice(dst, src []string) []string {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestStringSliceWrapper() StringSliceWrapper {
+ orig := []string{"a", "b", "c"}
+ return NewStringSliceWrapper(&orig, NewState())
}
-func FillTestStringSlice(tv StringSlice) {
-}
-
-func GenerateTestStringSlice() StringSlice {
- state := StateMutable
- var orig []string = nil
-
- return StringSlice{&orig, &state}
+func GenTestStringSlice() []string {
+ return []string{"a", "b", "c"}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
index dbbe25fe961..c20fd9d0e15 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go
@@ -1,39 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
-type UInt64Slice struct {
+type UInt64SliceWrapper struct {
orig *[]uint64
state *State
}
-func GetOrigUInt64Slice(ms UInt64Slice) *[]uint64 {
+func GetUInt64SliceOrig(ms UInt64SliceWrapper) *[]uint64 {
return ms.orig
}
-func GetUInt64SliceState(ms UInt64Slice) *State {
+func GetUInt64SliceState(ms UInt64SliceWrapper) *State {
return ms.state
}
-func NewUInt64Slice(orig *[]uint64, state *State) UInt64Slice {
- return UInt64Slice{orig: orig, state: state}
+func NewUInt64SliceWrapper(orig *[]uint64, state *State) UInt64SliceWrapper {
+ return UInt64SliceWrapper{orig: orig, state: state}
}
-func CopyOrigUInt64Slice(dst, src []uint64) []uint64 {
- dst = dst[:0]
- return append(dst, src...)
+func GenTestUInt64SliceWrapper() UInt64SliceWrapper {
+ orig := []uint64{1, 2, 3}
+ return NewUInt64SliceWrapper(&orig, NewState())
}
-func FillTestUInt64Slice(tv UInt64Slice) {
-}
-
-func GenerateTestUInt64Slice() UInt64Slice {
- state := StateMutable
- var orig []uint64 = nil
-
- return UInt64Slice{&orig, &state}
+func GenTestUint64Slice() []uint64 {
+ return []uint64{1, 2, 3}
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go
deleted file mode 100644
index 89d957a6534..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- "encoding/base64"
- "fmt"
-
- jsoniter "github.com/json-iterator/go"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-// ReadAttribute Unmarshal JSON data and return otlpcommon.KeyValue
-func ReadAttribute(iter *jsoniter.Iterator) otlpcommon.KeyValue {
- kv := otlpcommon.KeyValue{}
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "key":
- kv.Key = iter.ReadString()
- case "value":
- ReadValue(iter, &kv.Value)
- default:
- iter.Skip()
- }
- return true
- })
- return kv
-}
-
-// ReadValue Unmarshal JSON data and return otlpcommon.AnyValue
-func ReadValue(iter *jsoniter.Iterator, val *otlpcommon.AnyValue) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "stringValue", "string_value":
- val.Value = &otlpcommon.AnyValue_StringValue{
- StringValue: iter.ReadString(),
- }
-
- case "boolValue", "bool_value":
- val.Value = &otlpcommon.AnyValue_BoolValue{
- BoolValue: iter.ReadBool(),
- }
- case "intValue", "int_value":
- val.Value = &otlpcommon.AnyValue_IntValue{
- IntValue: ReadInt64(iter),
- }
- case "doubleValue", "double_value":
- val.Value = &otlpcommon.AnyValue_DoubleValue{
- DoubleValue: ReadFloat64(iter),
- }
- case "bytesValue", "bytes_value":
- v, err := base64.StdEncoding.DecodeString(iter.ReadString())
- if err != nil {
- iter.ReportError("bytesValue", fmt.Sprintf("base64 decode:%v", err))
- break
- }
- val.Value = &otlpcommon.AnyValue_BytesValue{
- BytesValue: v,
- }
- case "arrayValue", "array_value":
- val.Value = &otlpcommon.AnyValue_ArrayValue{
- ArrayValue: readArray(iter),
- }
- case "kvlistValue", "kvlist_value":
- val.Value = &otlpcommon.AnyValue_KvlistValue{
- KvlistValue: readKvlistValue(iter),
- }
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func readArray(iter *jsoniter.Iterator) *otlpcommon.ArrayValue {
- v := &otlpcommon.ArrayValue{}
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "values":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- v.Values = append(v.Values, otlpcommon.AnyValue{})
- ReadValue(iter, &v.Values[len(v.Values)-1])
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
- return v
-}
-
-func readKvlistValue(iter *jsoniter.Iterator) *otlpcommon.KeyValueList {
- v := &otlpcommon.KeyValueList{}
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "values":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- v.Values = append(v.Values, ReadAttribute(iter))
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
- return v
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go
deleted file mode 100644
index 02dd2b7768c..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- jsoniter "github.com/json-iterator/go"
-)
-
-// ReadEnumValue returns the enum integer value representation. Accepts both enum names and enum integer values.
-// See https://developers.google.com/protocol-buffers/docs/proto3#json.
-func ReadEnumValue(iter *jsoniter.Iterator, valueMap map[string]int32) int32 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadInt32()
- case jsoniter.StringValue:
- val, ok := valueMap[iter.ReadString()]
- // Same behavior with official protobuf JSON decoder,
- // see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81
- if !ok {
- iter.ReportError("ReadEnumValue", "unknown string value")
- return 0
- }
- return val
- default:
- iter.ReportError("ReadEnumValue", "unsupported value type")
- return 0
- }
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/iterator.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/iterator.go
new file mode 100644
index 00000000000..f0367e11dae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/iterator.go
@@ -0,0 +1,217 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package json // import "go.opentelemetry.io/collector/pdata/internal/json"
+import (
+ "encoding/base64"
+ "strconv"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+func BorrowIterator(data []byte) *Iterator {
+ return &Iterator{
+ delegate: jsoniter.ConfigFastest.BorrowIterator(data),
+ }
+}
+
+func ReturnIterator(s *Iterator) {
+ jsoniter.ConfigFastest.ReturnIterator(s.delegate)
+}
+
+type Iterator struct {
+ delegate *jsoniter.Iterator
+}
+
+// ReadInt32 unmarshals JSON data into an int32. Accepts both numbers and decimal strings.
+// See https://developers.google.com/protocol-buffers/docs/proto3#json.
+func (iter *Iterator) ReadInt32() int32 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadInt32()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseInt(iter.ReadString(), 10, 32)
+ if err != nil {
+ iter.ReportError("ReadInt32", err.Error())
+ return 0
+ }
+ return int32(val)
+ default:
+ iter.ReportError("ReadInt32", "unsupported value type")
+ return 0
+ }
+}
+
+// ReadUint32 unmarshals JSON data into a uint32. Accepts both numbers and decimal strings.
+// See https://developers.google.com/protocol-buffers/docs/proto3#json.
+func (iter *Iterator) ReadUint32() uint32 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadUint32()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseUint(iter.ReadString(), 10, 32)
+ if err != nil {
+ iter.ReportError("ReadUint32", err.Error())
+ return 0
+ }
+ return uint32(val)
+ default:
+ iter.ReportError("ReadUint32", "unsupported value type")
+ return 0
+ }
+}
+
+// ReadInt64 unmarshals JSON data into an int64. Accepts both numbers and decimal strings.
+// See https://developers.google.com/protocol-buffers/docs/proto3#json.
+func (iter *Iterator) ReadInt64() int64 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadInt64()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseInt(iter.ReadString(), 10, 64)
+ if err != nil {
+ iter.ReportError("ReadInt64", err.Error())
+ return 0
+ }
+ return val
+ default:
+ iter.ReportError("ReadInt64", "unsupported value type")
+ return 0
+ }
+}
+
+// ReadUint64 unmarshals JSON data into a uint64. Accepts both numbers and decimal strings.
+// See https://developers.google.com/protocol-buffers/docs/proto3#json.
+func (iter *Iterator) ReadUint64() uint64 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadUint64()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseUint(iter.ReadString(), 10, 64)
+ if err != nil {
+ iter.ReportError("ReadUint64", err.Error())
+ return 0
+ }
+ return val
+ default:
+ iter.ReportError("ReadUint64", "unsupported value type")
+ return 0
+ }
+}
+
+func (iter *Iterator) ReadFloat32() float32 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadFloat32()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseFloat(iter.ReadString(), 32)
+ if err != nil {
+ iter.ReportError("ReadUint64", err.Error())
+ return 0
+ }
+ return float32(val)
+ default:
+ iter.ReportError("ReadUint64", "unsupported value type")
+ return 0
+ }
+}
+
+func (iter *Iterator) ReadFloat64() float64 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.delegate.ReadFloat64()
+ case jsoniter.StringValue:
+ val, err := strconv.ParseFloat(iter.ReadString(), 64)
+ if err != nil {
+ iter.ReportError("ReadUint64", err.Error())
+ return 0
+ }
+ return val
+ default:
+ iter.ReportError("ReadUint64", "unsupported value type")
+ return 0
+ }
+}
+
+// ReadBool reads a JSON boolean value.
+func (iter *Iterator) ReadBool() bool {
+ return iter.delegate.ReadBool()
+}
+
+// ReadString reads a string from the iterator.
+func (iter *Iterator) ReadString() string {
+ return iter.delegate.ReadString()
+}
+
+// ReadBytes reads base64-encoded bytes from the iterator.
+func (iter *Iterator) ReadBytes() []byte {
+ buf := iter.ReadStringAsSlice()
+ if len(buf) == 0 {
+ return nil
+ }
+ orig := make([]byte, base64.StdEncoding.DecodedLen(len(buf)))
+ n, err := base64.StdEncoding.Decode(orig, buf)
+ if err != nil {
+ iter.ReportError("base64.Decode", err.Error())
+ }
+ return orig[:n]
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte must not be retained, as it changes after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() []byte {
+ return iter.delegate.ReadStringAsSlice()
+}
+
+// ReportError records an error on the iterator instance at the current position.
+func (iter *Iterator) ReportError(operation, msg string) {
+ iter.delegate.ReportError(operation, msg)
+}
+
+// Error returns the recorded error, if any; otherwise it returns nil.
+func (iter *Iterator) Error() error {
+ return iter.delegate.Error
+}
+
+// Skip skips a JSON value and positions the iterator at the next value.
+func (iter *Iterator) Skip() {
+ iter.delegate.Skip()
+}
+
+// ReadArray reads an array element and reports whether the array has more elements to read.
+func (iter *Iterator) ReadArray() bool {
+ return iter.delegate.ReadArray()
+}
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns an empty string; otherwise it returns the field name.
+func (iter *Iterator) ReadObject() string {
+ return iter.delegate.ReadObject()
+}
+
+// ReadEnumValue returns the enum integer value representation. Accepts both enum names and enum integer values.
+// See https://developers.google.com/protocol-buffers/docs/proto3#json.
+func (iter *Iterator) ReadEnumValue(valueMap map[string]int32) int32 {
+ switch iter.delegate.WhatIsNext() {
+ case jsoniter.NumberValue:
+ return iter.ReadInt32()
+ case jsoniter.StringValue:
+ val, ok := valueMap[iter.ReadString()]
+ // Same behavior as the official protobuf JSON decoder,
+ // see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81
+ if !ok {
+ iter.ReportError("ReadEnumValue", "unknown string value")
+ return 0
+ }
+ return val
+ default:
+ iter.ReportError("ReadEnumValue", "unsupported value type")
+ return 0
+ }
+}
+
+// ResetBytes reuses the iterator instance with another byte slice as input.
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+ iter.delegate.ResetBytes(input)
+ return iter
+}
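
All numeric readers above follow the protobuf JSON convention of accepting either a JSON number or its decimal-string form. A short usage sketch inside this package:

```go
// Sketch only: readCount([]byte(`42`)) and readCount([]byte(`"42"`))
// both yield 42.
func readCount(data []byte) (int64, error) {
	iter := BorrowIterator(data)
	defer ReturnIterator(iter)
	v := iter.ReadInt64()
	return v, iter.Error()
}
```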
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go
deleted file mode 100644
index b77d934b691..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- "io"
-
- "github.com/gogo/protobuf/jsonpb"
- "github.com/gogo/protobuf/proto"
-)
-
-var marshaler = &jsonpb.Marshaler{
- // https://github.com/open-telemetry/opentelemetry-specification/pull/2758
- EnumsAsInts: true,
- // https://github.com/open-telemetry/opentelemetry-specification/pull/2829
- OrigName: false,
-}
-
-func Marshal(out io.Writer, pb proto.Message) error {
- return marshaler.Marshal(out, pb)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go
deleted file mode 100644
index 23830b97133..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- "strconv"
-
- jsoniter "github.com/json-iterator/go"
-)
-
-// ReadInt32 unmarshalls JSON data into an int32. Accepts both numbers and strings decimal.
-// See https://developers.google.com/protocol-buffers/docs/proto3#json.
-func ReadInt32(iter *jsoniter.Iterator) int32 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadInt32()
- case jsoniter.StringValue:
- val, err := strconv.ParseInt(iter.ReadString(), 10, 32)
- if err != nil {
- iter.ReportError("ReadInt32", err.Error())
- return 0
- }
- return int32(val)
- default:
- iter.ReportError("ReadInt32", "unsupported value type")
- return 0
- }
-}
-
-// ReadUint32 unmarshalls JSON data into an uint32. Accepts both numbers and strings decimal.
-// See https://developers.google.com/protocol-buffers/docs/proto3#json.
-func ReadUint32(iter *jsoniter.Iterator) uint32 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadUint32()
- case jsoniter.StringValue:
- val, err := strconv.ParseUint(iter.ReadString(), 10, 32)
- if err != nil {
- iter.ReportError("ReadUint32", err.Error())
- return 0
- }
- return uint32(val)
- default:
- iter.ReportError("ReadUint32", "unsupported value type")
- return 0
- }
-}
-
-// ReadInt64 unmarshalls JSON data into an int64. Accepts both numbers and strings decimal.
-// See https://developers.google.com/protocol-buffers/docs/proto3#json.
-func ReadInt64(iter *jsoniter.Iterator) int64 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadInt64()
- case jsoniter.StringValue:
- val, err := strconv.ParseInt(iter.ReadString(), 10, 64)
- if err != nil {
- iter.ReportError("ReadInt64", err.Error())
- return 0
- }
- return val
- default:
- iter.ReportError("ReadInt64", "unsupported value type")
- return 0
- }
-}
-
-// ReadUint64 unmarshalls JSON data into an uint64. Accepts both numbers and strings decimal.
-// See https://developers.google.com/protocol-buffers/docs/proto3#json.
-func ReadUint64(iter *jsoniter.Iterator) uint64 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadUint64()
- case jsoniter.StringValue:
- val, err := strconv.ParseUint(iter.ReadString(), 10, 64)
- if err != nil {
- iter.ReportError("ReadUint64", err.Error())
- return 0
- }
- return val
- default:
- iter.ReportError("ReadUint64", "unsupported value type")
- return 0
- }
-}
-
-func ReadFloat64(iter *jsoniter.Iterator) float64 {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- return iter.ReadFloat64()
- case jsoniter.StringValue:
- val, err := strconv.ParseFloat(iter.ReadString(), 64)
- if err != nil {
- iter.ReportError("ReadUint64", err.Error())
- return 0
- }
- return val
- default:
- iter.ReportError("ReadUint64", "unsupported value type")
- return 0
- }
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go
deleted file mode 100644
index 302033bc441..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- jsoniter "github.com/json-iterator/go"
-
- otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
-)
-
-func ReadResource(iter *jsoniter.Iterator, resource *otlpresource.Resource) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- resource.Attributes = append(resource.Attributes, ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- resource.DroppedAttributesCount = ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go
deleted file mode 100644
index 40ad41b15bf..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package json // import "go.opentelemetry.io/collector/pdata/internal/json"
-
-import (
- jsoniter "github.com/json-iterator/go"
-
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-func ReadScope(iter *jsoniter.Iterator, scope *otlpcommon.InstrumentationScope) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "name":
- scope.Name = iter.ReadString()
- case "version":
- scope.Version = iter.ReadString()
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- scope.Attributes = append(scope.Attributes, ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- scope.DroppedAttributesCount = ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/stream.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/stream.go
new file mode 100644
index 00000000000..0f1b7538908
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/stream.go
@@ -0,0 +1,104 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package json // import "go.opentelemetry.io/collector/pdata/internal/json"
+
+import (
+ "encoding/base64"
+ "errors"
+ "io"
+ "math"
+ "strconv"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+func BorrowStream(writer io.Writer) *Stream {
+ return &Stream{
+ Stream: jsoniter.ConfigFastest.BorrowStream(writer),
+ wmTracker: make([]bool, 32),
+ }
+}
+
+func ReturnStream(s *Stream) {
+ jsoniter.ConfigFastest.ReturnStream(s.Stream)
+}
+
+// Stream removes the need to call the `Stream.WriteMore` method explicitly while marshaling objects:
+// it tracks whether a field was already written inside the current object and, if so, automatically
+// appends a "," before writing the next field.
+type Stream struct {
+ *jsoniter.Stream
+ // wmTracker acts like a stack: a new value is pushed when an object starts and the
+ // top is popped when it ends. Each value tracks whether any field has already been
+ // written for that object; if so, a "," is automatically added before the next field.
+ wmTracker []bool
+}
+
+func (ots *Stream) WriteObjectStart() {
+ ots.Stream.WriteObjectStart()
+ ots.wmTracker = append(ots.wmTracker, false)
+}
+
+func (ots *Stream) WriteObjectField(field string) {
+ if ots.wmTracker[len(ots.wmTracker)-1] {
+ ots.WriteMore()
+ }
+
+ ots.Stream.WriteObjectField(field)
+ ots.wmTracker[len(ots.wmTracker)-1] = true
+}
+
+func (ots *Stream) WriteObjectEnd() {
+ ots.Stream.WriteObjectEnd()
+ ots.wmTracker = ots.wmTracker[:len(ots.wmTracker)-1]
+}
+
+// WriteInt64 writes the value as a decimal string, per the protobuf JSON encoding rules for int64, fixed64, and uint64.
+func (ots *Stream) WriteInt64(val int64) {
+ ots.WriteString(strconv.FormatInt(val, 10))
+}
+
+// WriteUint64 writes the value as a decimal string, per the protobuf JSON encoding rules for int64, fixed64, and uint64.
+func (ots *Stream) WriteUint64(val uint64) {
+ ots.WriteString(strconv.FormatUint(val, 10))
+}
+
+// WriteBytes writes the value as a base64-encoded string, per the protobuf JSON encoding rules for bytes.
+func (ots *Stream) WriteBytes(val []byte) {
+ if len(val) == 0 {
+ ots.WriteString("")
+ return
+ }
+
+ ots.WriteString(base64.StdEncoding.EncodeToString(val))
+}
+
+// WriteFloat64 writes the value as a JSON number, or as one of the special string
+// values "NaN", "Infinity", and "-Infinity" for non-finite values, per the
+// protobuf JSON encoding rules; decoders accept either numbers or strings.
+// See https://protobuf.dev/programming-guides/json/.
+func (ots *Stream) WriteFloat64(val float64) {
+ if math.IsNaN(val) {
+ ots.WriteString("NaN")
+ return
+ }
+ if math.IsInf(val, 1) {
+ ots.WriteString("Infinity")
+ return
+ }
+ if math.IsInf(val, -1) {
+ ots.WriteString("-Infinity")
+ return
+ }
+
+ ots.Stream.WriteFloat64(val)
+}
+
+func (ots *Stream) ReportError(err error) {
+ ots.Stream.Error = errors.Join(ots.Stream.Error, err)
+}
+
+func (ots *Stream) Error() error {
+ return ots.Stream.Error
+}
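
The practical effect of the wmTracker bookkeeping is that generated marshalers (such as ValueType.MarshalJSON earlier in this diff) can emit fields behind simple presence checks without tracking commas themselves. A sketch inside this package, assuming an io import:

```go
// Sketch only: no explicit WriteMore calls; the Stream inserts the ","
// before the second field because one was already written in this object.
func writeStub(w io.Writer, typeIdx, unitIdx int32) error {
	s := BorrowStream(w)
	defer ReturnStream(s)
	s.WriteObjectStart()
	if typeIdx != 0 {
		s.WriteObjectField("typeStrindex")
		s.WriteInt32(typeIdx)
	}
	if unitIdx != 0 {
		s.WriteObjectField("unitStrindex")
		s.WriteInt32(unitIdx)
	}
	s.WriteObjectEnd()
	s.Flush()
	return s.Error()
}
```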
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go
new file mode 100644
index 00000000000..4f3bfdb6ccc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/encoding.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+)
+
+var (
+ defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 512 << 10, // 512KB
+ 1 << 20, // 1MB
+ 4 << 20, // 4MB
+ 16 << 20, // 16MB
+ }
+ otelBufferPool = mem.NewTieredBufferPool(defaultBufferPoolSizes...)
+)
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with mem.NewTieredBufferPool that uses a set of default sizes optimized for
+// expected telemetry workflows.
+func DefaultBufferPool() mem.BufferPool {
+ return otelBufferPool
+}
+
+// Name is the name registered for the proto compressor.
+const Name = "proto"
+
+func init() {
+ encoding.RegisterCodecV2(&codecV2{delegate: encoding.GetCodecV2(Name)})
+}
+
+// codecV2 is a custom proto codec that uses a different tier schema for the TieredBufferPool
+// and calls into the custom marshal/unmarshal logic that works with memory pooling.
+// For payloads that are not OTLP it falls back to the default grpc/proto encoding.
+type codecV2 struct {
+ delegate encoding.CodecV2
+}
+
+type otelEncoder interface {
+ SizeProto() int
+ MarshalProto([]byte) int
+ UnmarshalProto([]byte) error
+}
+
+func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) {
+ if m, ok := v.(otelEncoder); ok {
+ size := m.SizeProto()
+ buf := otelBufferPool.Get(size)
+ n := m.MarshalProto((*buf)[:size])
+ *buf = (*buf)[:n]
+ return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
+ }
+
+ return c.delegate.Marshal(v)
+}
+
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ if m, ok := v.(otelEncoder); ok {
+ // TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
+ buf := data.MaterializeToBuffer(otelBufferPool)
+ defer buf.Free()
+ return m.UnmarshalProto(buf.ReadOnlyData())
+ }
+
+ return c.delegate.Unmarshal(data, v)
+}
+
+func (c *codecV2) Name() string {
+ return Name
+}
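
The fast path applies to any value implementing the otelEncoder trio: the pooled buffer is sized exactly, filled by MarshalProto, and handed to gRPC. A hedged sketch of that lifecycle in isolation, assuming mem.BufferPool's Get/Put contract:

```go
// Sketch only: this copies the result out and returns the buffer to the
// pool, whereas the real codec passes ownership to gRPC via mem.NewBuffer.
func marshalPooled(m otelEncoder) []byte {
	size := m.SizeProto()
	buf := otelBufferPool.Get(size)
	n := m.MarshalProto((*buf)[:size])
	out := make([]byte, n)
	copy(out, (*buf)[:n])
	otelBufferPool.Put(buf)
	return out
}
```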
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go
new file mode 100644
index 00000000000..ef26e365485
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/logs_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// LogsServiceClient is the client API for LogsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LogsServiceClient interface {
+ Export(context.Context, *internal.ExportLogsServiceRequest, ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error)
+}
+
+type logsServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient {
+ return &logsServiceClient{cc}
+}
+
+func (c *logsServiceClient) Export(ctx context.Context, in *internal.ExportLogsServiceRequest, opts ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error) {
+ out := new(internal.ExportLogsServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// LogsServiceServer is the server API for LogsService service.
+type LogsServiceServer interface {
+ Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error)
+}
+
+// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedLogsServiceServer struct{}
+
+func (*UnimplementedLogsServiceServer) Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
+ s.RegisterService(&logsServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func logsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+ in := new(internal.ExportLogsServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LogsServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
+ }
+ handler := func(ctx context.Context, req any) (any, error) {
+ return srv.(LogsServiceServer).Export(ctx, req.(*internal.ExportLogsServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var logsServiceServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
+ HandlerType: (*LogsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: logsServiceExportHandler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
+}
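
The metrics and profiles service files below repeat this file's shape verbatim, so one registration sketch covers all of them. myLogsServer is hypothetical and the package is internal, so this is illustrative only:

```go
// Hypothetical server type; embedding the Unimplemented struct keeps it
// forward compatible with future service methods.
type myLogsServer struct {
	UnimplementedLogsServiceServer
}

func (s *myLogsServer) Export(ctx context.Context, req *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) {
	return &internal.ExportLogsServiceResponse{}, nil
}

func serve(lis net.Listener) error {
	g := grpc.NewServer()
	RegisterLogsServiceServer(g, &myLogsServer{})
	return g.Serve(lis)
}
```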
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go
new file mode 100644
index 00000000000..daf4f5cc8a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/metrics_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+ Export(context.Context, *internal.ExportMetricsServiceRequest, ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error)
+}
+
+type metricsServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
+ return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, in *internal.ExportMetricsServiceRequest, opts ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error) {
+ out := new(internal.ExportMetricsServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+type MetricsServiceServer interface {
+ Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error)
+}
+
+// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricsServiceServer struct{}
+
+func (*UnimplementedMetricsServiceServer) Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
+ s.RegisterService(&metricsServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func metricsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+ in := new(internal.ExportMetricsServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricsServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
+ }
+ handler := func(ctx context.Context, req any) (any, error) {
+ return srv.(MetricsServiceServer).Export(ctx, req.(*internal.ExportMetricsServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var metricsServiceServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
+ HandlerType: (*MetricsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: metricsServiceExportHandler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
+}
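Taken together, the four generated service files share one shape: a client that calls Invoke on the full method name, a server interface with a single Export, and a ServiceDesc that routes through the handler. The sketch below shows how that glue is wired up; note that `otelgrpc` and `internal` are internal packages of the pdata module, so code like this can only live inside that module, and `metricsServer` plus the listen address are illustrative assumptions, not part of the diff.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/internal"
	"go.opentelemetry.io/collector/pdata/internal/otelgrpc"
)

// metricsServer is a hypothetical implementation; embedding the
// Unimplemented struct keeps it forward compatible with new methods.
type metricsServer struct {
	otelgrpc.UnimplementedMetricsServiceServer
}

func (metricsServer) Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
	return &internal.ExportMetricsServiceResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	otelgrpc.RegisterMetricsServiceServer(srv, metricsServer{})
	log.Fatal(srv.Serve(lis))
}
```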
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go
new file mode 100644
index 00000000000..db846146ff8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/profiles_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ProfilesServiceClient is the client API for ProfilesService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ProfilesServiceClient interface {
+ Export(context.Context, *internal.ExportProfilesServiceRequest, ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error)
+}
+
+type profilesServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
+ return &profilesServiceClient{cc}
+}
+
+func (c *profilesServiceClient) Export(ctx context.Context, in *internal.ExportProfilesServiceRequest, opts ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error) {
+ out := new(internal.ExportProfilesServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ProfilesServiceServer is the server API for ProfilesService service.
+type ProfilesServiceServer interface {
+ Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error)
+}
+
+// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedProfilesServiceServer struct{}
+
+func (*UnimplementedProfilesServiceServer) Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
+ s.RegisterService(&profilesServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func profilesServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+ in := new(internal.ExportProfilesServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ProfilesServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
+ }
+ handler := func(ctx context.Context, req any) (any, error) {
+ return srv.(ProfilesServiceServer).Export(ctx, req.(*internal.ExportProfilesServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var profilesServiceServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
+ HandlerType: (*ProfilesServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: profilesServiceExportHandler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go
new file mode 100644
index 00000000000..943f61792bd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otelgrpc/trace_service.go
@@ -0,0 +1,88 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ Export(context.Context, *internal.ExportTraceServiceRequest, ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *internal.ExportTraceServiceRequest, opts ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error) {
+ out := new(internal.ExportTraceServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+ Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error)
+}
+
+// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct{}
+
+func (*UnimplementedTraceServiceServer) Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+ s.RegisterService(&traceServiceServiceDesc, srv)
+}
+
+// Context cannot be the first parameter of the function because of the gRPC handler definition.
+//
+//nolint:revive
+func traceServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
+ in := new(internal.ExportTraceServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+ }
+ handler := func(ctx context.Context, req any) (any, error) {
+ return srv.(TraceServiceServer).Export(ctx, req.(*internal.ExportTraceServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var traceServiceServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: traceServiceExportHandler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
index c0328a5b41d..aad78de4e22 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateLogs implements any translation needed due to deprecation in OTLP logs protocol.
// Any plog.Unmarshaler implementation from OTLP (proto/json), and the gRPC Server implementation, MUST call this.
-func MigrateLogs(rls []*otlplogs.ResourceLogs) {
+func MigrateLogs(rls []*internal.ResourceLogs) {
for _, rl := range rls {
if len(rl.ScopeLogs) == 0 {
rl.ScopeLogs = rl.DeprecatedScopeLogs
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
index 9a7da14868c..fb1776c4ffd 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateMetrics implements any translation needed due to deprecation in OTLP metrics protocol.
// Any pmetric.Unmarshaler implementation from OTLP (proto/json), and the gRPC Server implementation, MUST call this.
-func MigrateMetrics(rms []*otlpmetrics.ResourceMetrics) {
+func MigrateMetrics(rms []*internal.ResourceMetrics) {
for _, rm := range rms {
if len(rm.ScopeMetrics) == 0 {
rm.ScopeMetrics = rm.DeprecatedScopeMetrics
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
index 59c23cc672b..5144e7c4d2f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go
@@ -4,9 +4,9 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol.
// Any pprofile.Unmarshaler implementation from OTLP (proto/json), and the gRPC Server implementation, MUST call this.
-func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {}
+func MigrateProfiles(_ []*internal.ResourceProfiles) {}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
index 627881fc3dc..84f5deadef0 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go
@@ -4,12 +4,12 @@
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// MigrateTraces implements any translation needed due to deprecation in OTLP traces protocol.
// Any ptrace.Unmarshaler implementation from OTLP (proto/json), and the gRPC Server implementation, MUST call this.
-func MigrateTraces(rss []*otlptrace.ResourceSpans) {
+func MigrateTraces(rss []*internal.ResourceSpans) {
for _, rs := range rss {
if len(rs.ScopeSpans) == 0 {
rs.ScopeSpans = rs.DeprecatedScopeSpans
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
new file mode 100644
index 00000000000..5a038fe0854
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/profileid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const profileIDSize = 16
+
+var errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
+
+// ProfileID is a custom data type that is used for all profile_id fields in OTLP
+// Protobuf messages.
+type ProfileID [profileIDSize]byte
+
+func DeleteProfileID(*ProfileID, bool) {}
+
+func CopyProfileID(dest, src *ProfileID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id contains no non-zero bytes.
+func (pid ProfileID) IsEmpty() bool {
+ return pid == [profileIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (pid ProfileID) SizeProto() int {
+ if pid.IsEmpty() {
+ return 0
+ }
+
+ return profileIDSize
+}
+
+// MarshalProto converts profile ID into a binary representation. Called by Protobuf serialization.
+func (pid ProfileID) MarshalProto(buf []byte) int {
+ if pid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-profileIDSize:], pid[:])
+}
+
+// UnmarshalProto inflates this profile ID from binary representation. Called by Protobuf serialization.
+func (pid *ProfileID) UnmarshalProto(buf []byte) error {
+ if len(buf) == 0 {
+ *pid = [profileIDSize]byte{}
+ return nil
+ }
+
+ if len(buf) != profileIDSize {
+ return errUnmarshalProfileID
+ }
+
+ copy(pid[:], buf)
+ return nil
+}
+
+// MarshalJSON converts ProfileID into a hex string.
+//
+//nolint:govet
+func (pid ProfileID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(pid[:]))
+}
+
+// UnmarshalJSON decodes ProfileID from hex string.
+//
+//nolint:govet
+func (pid *ProfileID) UnmarshalJSON(iter *json.Iterator) {
+ *pid = [profileIDSize]byte{}
+ unmarshalJSON(pid[:], iter)
+}
+
+func GenTestProfileID() *ProfileID {
+ pid := ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+ return &pid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go
new file mode 100644
index 00000000000..1655533a2ce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/marshal.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
+
+// EncodeVarint encodes the varint at the end of the buffer.
+func EncodeVarint(buf []byte, offset int, v uint64) int {
+ offset -= Sov(v)
+ base := offset
+ for v >= 1<<7 {
+ //nolint:gosec
+ buf[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ buf[offset] = uint8(v)
+ return base
+}
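EncodeVarint is written for back-to-front marshaling: gogo-style serializers fill the buffer from the end, so each varint is placed so that its last byte lands just before `offset`. A self-contained sketch of the same logic (`sov` and `encodeVarint` mirror, but do not import, the internal `proto` package):

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors proto.Sov: the number of bytes a varint encoding of x occupies.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors proto.EncodeVarint: it writes v so the encoding ends
// at offset, returning the position of the first encoded byte.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[start:]) // ac 02 - continuation bit set on the first byte
}
```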
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go
new file mode 100644
index 00000000000..6f48443bf9d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/size.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
+
+import (
+ "math/bits"
+)
+
+func Sov(x uint64) (n int) {
+ return (bits.Len64(x|1) + 6) / 7
+}
+
+func Soz(x uint64) (n int) {
+ //nolint:gosec
+ return Sov((x << 1) ^ uint64((int64(x) >> 63)))
+}
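Soz sizes sint-encoded fields: the zigzag transform `(x << 1) ^ (x >> 63)` interleaves negatives with positives so values of small magnitude stay short on the wire. A quick self-contained illustration of the transform itself:

```go
package main

import "fmt"

// zigZag mirrors the transform inside proto.Soz, mapping signed to unsigned
// so that -1 becomes 1, 1 becomes 2, -2 becomes 3, and so on.
func zigZag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63)) // arithmetic shift propagates the sign bit
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150} {
		fmt.Printf("%d -> %d\n", v, zigZag(v)) // 0, 1, 2, 3, 300
	}
}
```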
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go
new file mode 100644
index 00000000000..6ecf5ca3a3a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/proto/unmarshal.go
@@ -0,0 +1,148 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// WireType represents the proto wire type.
+type WireType int8
+
+const (
+ WireTypeVarint WireType = 0
+ WireTypeI64 WireType = 1
+ WireTypeLen WireType = 2
+ WireTypeStartGroup WireType = 3
+ WireTypeEndGroup WireType = 4
+ WireTypeI32 WireType = 5
+)
+
+var (
+ ErrInvalidLength = errors.New("proto: negative length found during unmarshaling")
+ ErrIntOverflow = errors.New("proto: integer overflow")
+ ErrUnexpectedEndOfGroup = errors.New("proto: unexpected end of group")
+)
+
+// ConsumeUnknown parses buf starting at pos as a wireType field, reporting the new position.
+func ConsumeUnknown(buf []byte, pos int, wireType WireType) (int, error) {
+ var err error
+ l := len(buf)
+ depth := 0
+ for pos < l {
+ switch wireType {
+ case WireTypeVarint:
+ _, pos, err = ConsumeVarint(buf, pos)
+ return pos, err
+ case WireTypeI64:
+ _, pos, err = ConsumeI64(buf, pos)
+ return pos, err
+ case WireTypeLen:
+ _, pos, err = ConsumeLen(buf, pos)
+ return pos, err
+ case WireTypeStartGroup:
+ depth++
+ case WireTypeEndGroup:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroup
+ }
+ depth--
+ case WireTypeI32:
+ _, pos, err = ConsumeI32(buf, pos)
+ return pos, err
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+
+ // Only group parsing reaches this point; return if the group is done, otherwise parse more tags.
+ if depth == 0 {
+ return pos, nil
+ }
+
+ // If in a group parsing, move to the next tag.
+ _, wireType, pos, err = ConsumeTag(buf, pos)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+// ConsumeI64 parses buf starting at pos as a WireTypeI64 field, reporting the value and the new position.
+func ConsumeI64(buf []byte, pos int) (uint64, int, error) {
+ pos += 8
+ if pos < 0 || pos > len(buf) {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ return binary.LittleEndian.Uint64(buf[pos-8:]), pos, nil
+}
+
+// ConsumeLen parses buf starting at pos as a WireTypeLen field, reporting the len and the new position.
+func ConsumeLen(buf []byte, pos int) (int, int, error) {
+ var num uint64
+ var err error
+ num, pos, err = ConsumeVarint(buf, pos)
+ if err != nil {
+ return 0, 0, err
+ }
+ //nolint:gosec
+ length := int(num)
+ if length < 0 {
+ return 0, 0, ErrInvalidLength
+ }
+ pos += length
+ if pos < 0 || pos > len(buf) {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ return length, pos, nil
+}
+
+// ConsumeI32 parses buf starting at pos as a WireTypeI32 field, reporting the value and the new position.
+func ConsumeI32(buf []byte, pos int) (uint32, int, error) {
+ pos += 4
+ if pos < 0 || pos > len(buf) {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ return binary.LittleEndian.Uint32(buf[pos-4:]), pos, nil
+}
+
+// ConsumeTag parses buf starting at pos as a varint-encoded tag, reporting the new position.
+func ConsumeTag(buf []byte, pos int) (int32, WireType, int, error) {
+ tag, pos, err := ConsumeVarint(buf, pos)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ //nolint:gosec
+ fieldNum := int32(tag >> 3)
+ //nolint:gosec
+ wireType := int8(tag & 0x7)
+ if fieldNum <= 0 {
+ return 0, 0, 0, fmt.Errorf("proto: Link: illegal field=%d (tag=%d, pos=%d)", fieldNum, tag, pos)
+ }
+ return fieldNum, WireType(wireType), pos, nil
+}
+
+// ConsumeVarint parses buf starting at pos as a varint-encoded uint64, reporting the new position.
+func ConsumeVarint(buf []byte, pos int) (uint64, int, error) {
+ l := len(buf)
+ var num uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, 0, ErrIntOverflow
+ }
+ if pos >= l {
+ return 0, 0, io.ErrUnexpectedEOF
+ }
+ b := buf[pos]
+ pos++
+ num |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ return num, pos, nil
+}
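On the decode side, every field starts with a varint tag: the low three bits carry the wire type and the remaining bits the field number, which is exactly what ConsumeTag splits apart. A self-contained sketch (re-implementing, not importing, the internal helper):

```go
package main

import (
	"fmt"
	"io"
)

// consumeVarint mirrors proto.ConsumeVarint: 7 payload bits per byte,
// high bit set on every byte except the last.
func consumeVarint(buf []byte, pos int) (uint64, int, error) {
	var num uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("integer overflow")
		}
		if pos >= len(buf) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := buf[pos]
		pos++
		num |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return num, pos, nil
}

func main() {
	// 0x08: tag for field 1 with wire type 0 (varint); 0xac 0x02: the value 300.
	buf := []byte{0x08, 0xac, 0x02}
	tag, pos, _ := consumeVarint(buf, 0)
	fmt.Println("field:", tag>>3, "wireType:", tag&0x7) // field: 1 wireType: 0
	val, _, _ := consumeVarint(buf, pos)
	fmt.Println("value:", val) // value: 300
}
```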
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
new file mode 100644
index 00000000000..9ec0f465fb0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/spanid.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const spanIDSize = 8
+
+var errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
+
+// SpanID is a custom data type that is used for all span_id fields in OTLP
+// Protobuf messages.
+type SpanID [spanIDSize]byte
+
+func DeleteSpanID(*SpanID, bool) {}
+
+func CopySpanID(dest, src *SpanID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id contains no non-zero bytes.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (sid SpanID) SizeProto() int {
+ if sid.IsEmpty() {
+ return 0
+ }
+ return spanIDSize
+}
+
+// MarshalProto converts span ID into a binary representation. Called by Protobuf serialization.
+func (sid SpanID) MarshalProto(buf []byte) int {
+ if sid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-spanIDSize:], sid[:])
+}
+
+// UnmarshalProto inflates this span ID from binary representation. Called by Protobuf serialization.
+func (sid *SpanID) UnmarshalProto(data []byte) error {
+ if len(data) == 0 {
+ *sid = [spanIDSize]byte{}
+ return nil
+ }
+
+ if len(data) != spanIDSize {
+ return errUnmarshalSpanID
+ }
+
+ copy(sid[:], data)
+ return nil
+}
+
+// MarshalJSON converts SpanID into a hex string.
+//
+//nolint:govet
+func (sid SpanID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(sid[:]))
+}
+
+// UnmarshalJSON decodes SpanID from hex string.
+//
+//nolint:govet
+func (sid *SpanID) UnmarshalJSON(iter *json.Iterator) {
+ *sid = [spanIDSize]byte{}
+ unmarshalJSON(sid[:], iter)
+}
+
+func GenTestSpanID() *SpanID {
+ sid := SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
+ return &sid
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/state.go b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
index f10de5eadf1..46f10722dc7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go
@@ -2,21 +2,89 @@
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
+import (
+ "sync/atomic"
+
+ "go.opentelemetry.io/collector/featuregate"
+)
+
+var _ = featuregate.GlobalRegistry().MustRegister(
+ "pdata.useCustomProtoEncoding",
+ featuregate.StageStable,
+ featuregate.WithRegisterDescription("When enabled, enable custom proto encoding. This is required step to enable featuregate pdata.useProtoPooling."),
+ featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
+ featuregate.WithRegisterFromVersion("v0.133.0"),
+ featuregate.WithRegisterToVersion("v0.137.0"),
+)
+
+var UseProtoPooling = featuregate.GlobalRegistry().MustRegister(
+ "pdata.useProtoPooling",
+ featuregate.StageAlpha,
+ featuregate.WithRegisterDescription("When enabled, enable using local memory pools for underlying data that the pdata messages are pushed to."),
+ featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
+ featuregate.WithRegisterFromVersion("v0.133.0"),
+)
// State defines an ownership state of pmetric.Metrics, plog.Logs or ptrace.Traces.
-type State int32
+type State struct {
+ refs atomic.Int32
+ state uint32
+}
const (
- // StateMutable indicates that the data is exclusive to the current consumer.
- StateMutable State = iota
-
- // StateReadOnly indicates that the data is shared with other consumers.
- StateReadOnly
+ defaultState uint32 = 0
+ stateReadOnlyBit = uint32(1 << 0)
+ statePipelineOwnedBit = uint32(1 << 1)
)
+func NewState() *State {
+ st := &State{
+ state: defaultState,
+ }
+ st.refs.Store(1)
+ return st
+}
+
+func (st *State) MarkReadOnly() {
+ st.state |= stateReadOnlyBit
+}
+
+func (st *State) IsReadOnly() bool {
+ return st.state&stateReadOnlyBit != 0
+}
+
// AssertMutable panics if the state is read-only.
-func (state *State) AssertMutable() {
- if *state != StateMutable {
+func (st *State) AssertMutable() {
+ if st.state&stateReadOnlyBit != 0 {
panic("invalid access to shared data")
}
}
+
+// MarkPipelineOwned marks the data as owned by the pipeline. It returns true if the data was
+// not previously owned by the pipeline, otherwise false.
+func (st *State) MarkPipelineOwned() bool {
+ if st.state&statePipelineOwnedBit != 0 {
+ return false
+ }
+ st.state |= statePipelineOwnedBit
+ return true
+}
+
+// Ref adds one to the count of active references.
+func (st *State) Ref() {
+ st.refs.Add(1)
+}
+
+// Unref returns true if the reference count reaches 0, meaning there are no more active
+// references; otherwise it returns false.
+func (st *State) Unref() bool {
+ refs := st.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return false
+ case refs == 0:
+ return true
+ default:
+ panic("Cannot unref freed data")
+ }
+}
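The upshot of the State rewrite: instead of a plain mutable/read-only enum, every top-level pdata message now carries a read-only bit, a pipeline-ownership bit, and an atomic reference count starting at 1 — the bookkeeping needed before proto pooling can reclaim memory safely. A condensed, self-contained sketch of the semantics (the real Unref additionally panics if the count goes negative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const readOnlyBit = uint32(1 << 0)

// state is a stand-in for the internal State above.
type state struct {
	refs  atomic.Int32
	flags uint32
}

func newState() *state {
	st := &state{}
	st.refs.Store(1) // the creator holds the first reference
	return st
}

func (st *state) markReadOnly() { st.flags |= readOnlyBit }

func (st *state) assertMutable() {
	if st.flags&readOnlyBit != 0 {
		panic("invalid access to shared data")
	}
}

func (st *state) ref() { st.refs.Add(1) }

// unref reports whether the last reference was just released.
func (st *state) unref() bool { return st.refs.Add(-1) == 0 }

func main() {
	st := newState()
	st.ref()                // a second consumer now shares the data
	st.markReadOnly()       // shared data must not be mutated anymore
	fmt.Println(st.unref()) // false: one reference still active
	fmt.Println(st.unref()) // true: safe to release the underlying memory
}
```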
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
new file mode 100644
index 00000000000..fe54123de6d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/traceid.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/pdata/internal"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/internal/json"
+)
+
+const traceIDSize = 16
+
+var errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
+
+// TraceID is a custom data type that is used for all trace_id fields in OTLP
+// Protobuf messages.
+type TraceID [traceIDSize]byte
+
+func DeleteTraceID(*TraceID, bool) {}
+
+func CopyTraceID(dest, src *TraceID) {
+ *dest = *src
+}
+
+// IsEmpty returns true if id contains no non-zero bytes.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// SizeProto returns the size of the data to serialize in proto format.
+func (tid TraceID) SizeProto() int {
+ if tid.IsEmpty() {
+ return 0
+ }
+
+ return traceIDSize
+}
+
+// MarshalProto converts trace ID into a binary representation. Called by Protobuf serialization.
+func (tid TraceID) MarshalProto(buf []byte) int {
+ if tid.IsEmpty() {
+ return 0
+ }
+
+ return copy(buf[len(buf)-traceIDSize:], tid[:])
+}
+
+// UnmarshalProto inflates this trace ID from binary representation. Called by Protobuf serialization.
+func (tid *TraceID) UnmarshalProto(buf []byte) error {
+ if len(buf) == 0 {
+ *tid = [traceIDSize]byte{}
+ return nil
+ }
+
+ if len(buf) != traceIDSize {
+ return errUnmarshalTraceID
+ }
+
+ copy(tid[:], buf)
+ return nil
+}
+
+// MarshalJSON converts TraceID into a hex string.
+//
+//nolint:govet
+func (tid TraceID) MarshalJSON(dest *json.Stream) {
+ dest.WriteString(hex.EncodeToString(tid[:]))
+}
+
+// UnmarshalJSON decodes TraceID from hex string.
+//
+//nolint:govet
+func (tid *TraceID) UnmarshalJSON(iter *json.Iterator) {
+ *tid = [traceIDSize]byte{}
+ unmarshalJSON(tid[:], iter)
+}
+
+func GenTestTraceID() *TraceID {
+ tid := TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
+ return &tid
+}
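Publicly these IDs surface as pcommon.TraceID, and the hex form written by MarshalJSON above is the same one String() renders. A short usage sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	tid := pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
	fmt.Println(tid.String())                        // 01020304050607080807060504030201
	fmt.Println(tid.IsEmpty())                       // false
	fmt.Println(pcommon.NewTraceIDEmpty().IsEmpty()) // true
}
```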
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
index 6b6c076cca6..b47b508336c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go
@@ -3,44 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
-type Logs struct {
- orig *otlpcollectorlog.ExportLogsServiceRequest
- state *State
-}
-
-func GetOrigLogs(ms Logs) *otlpcollectorlog.ExportLogsServiceRequest {
- return ms.orig
-}
-
-func GetLogsState(ms Logs) *State {
- return ms.state
-}
-
-func SetLogsState(ms Logs, state State) {
- *ms.state = state
-}
-
-func NewLogs(orig *otlpcollectorlog.ExportLogsServiceRequest, state *State) Logs {
- return Logs{orig: orig, state: state}
-}
-
// LogsToProto internal helper to convert Logs to protobuf representation.
-func LogsToProto(l Logs) otlplogs.LogsData {
- return otlplogs.LogsData{
+func LogsToProto(l LogsWrapper) LogsData {
+ return LogsData{
ResourceLogs: l.orig.ResourceLogs,
}
}
// LogsFromProto internal helper to convert protobuf representation to Logs.
// This function sets exclusive state assuming that it's called only once per Logs.
-func LogsFromProto(orig otlplogs.LogsData) Logs {
- state := StateMutable
- return NewLogs(&otlpcollectorlog.ExportLogsServiceRequest{
+func LogsFromProto(orig LogsData) LogsWrapper {
+ return NewLogsWrapper(&ExportLogsServiceRequest{
ResourceLogs: orig.ResourceLogs,
- }, &state)
+ }, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
index 131ed01fcdc..c5c6ca7f6da 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go
@@ -3,48 +3,24 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Map struct {
- orig *[]otlpcommon.KeyValue
+type MapWrapper struct {
+ orig *[]KeyValue
state *State
}
-func GetOrigMap(ms Map) *[]otlpcommon.KeyValue {
+func GetMapOrig(ms MapWrapper) *[]KeyValue {
return ms.orig
}
-func GetMapState(ms Map) *State {
+func GetMapState(ms MapWrapper) *State {
return ms.state
}
-func NewMap(orig *[]otlpcommon.KeyValue, state *State) Map {
- return Map{orig: orig, state: state}
-}
-
-func CopyOrigMap(dest, src []otlpcommon.KeyValue) []otlpcommon.KeyValue {
- if cap(dest) < len(src) {
- dest = make([]otlpcommon.KeyValue, len(src))
- }
- dest = dest[:len(src)]
- for i := 0; i < len(src); i++ {
- dest[i].Key = src[i].Key
- CopyOrigValue(&dest[i].Value, &src[i].Value)
- }
- return dest
-}
-
-func GenerateTestMap() Map {
- var orig []otlpcommon.KeyValue
- state := StateMutable
- ms := NewMap(&orig, &state)
- FillTestMap(ms)
- return ms
+func NewMapWrapper(orig *[]KeyValue, state *State) MapWrapper {
+ return MapWrapper{orig: orig, state: state}
}
-func FillTestMap(dest Map) {
- *dest.orig = nil
- *dest.orig = append(*dest.orig, otlpcommon.KeyValue{Key: "k", Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "v"}}})
+func GenTestMapWrapper() MapWrapper {
+ orig := GenTestKeyValueSlice()
+ return NewMapWrapper(&orig, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
index 85be497ea5c..3cb790395fc 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go
@@ -3,44 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
-type Metrics struct {
- orig *otlpcollectormetrics.ExportMetricsServiceRequest
- state *State
-}
-
-func GetOrigMetrics(ms Metrics) *otlpcollectormetrics.ExportMetricsServiceRequest {
- return ms.orig
-}
-
-func GetMetricsState(ms Metrics) *State {
- return ms.state
-}
-
-func SetMetricsState(ms Metrics, state State) {
- *ms.state = state
-}
-
-func NewMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *State) Metrics {
- return Metrics{orig: orig, state: state}
-}
-
// MetricsToProto internal helper to convert Metrics to protobuf representation.
-func MetricsToProto(l Metrics) otlpmetrics.MetricsData {
- return otlpmetrics.MetricsData{
+func MetricsToProto(l MetricsWrapper) MetricsData {
+ return MetricsData{
ResourceMetrics: l.orig.ResourceMetrics,
}
}
// MetricsFromProto internal helper to convert protobuf representation to Metrics.
// This function sets exclusive state assuming that it's called only once per Metrics.
-func MetricsFromProto(orig otlpmetrics.MetricsData) Metrics {
- state := StateMutable
- return NewMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{
+func MetricsFromProto(orig MetricsData) MetricsWrapper {
+ return NewMetricsWrapper(&ExportMetricsServiceRequest{
ResourceMetrics: orig.ResourceMetrics,
- }, &state)
+ }, NewState())
}
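These wrapper helpers back the public proto codecs; a round trip through pmetric's marshaler exercises them end to end (the metric name below is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("example.requests") // hypothetical metric name

	var marshaler pmetric.ProtoMarshaler
	buf, err := marshaler.MarshalMetrics(md)
	if err != nil {
		panic(err)
	}

	var unmarshaler pmetric.ProtoUnmarshaler
	back, err := unmarshaler.UnmarshalMetrics(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Name())
}
```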
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
index 5b5f0cc06ef..1a56d922975 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go
@@ -3,35 +3,9 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
- otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
-)
-
-type Profiles struct {
- orig *otlpcollectorprofile.ExportProfilesServiceRequest
- state *State
-}
-
-func GetOrigProfiles(ms Profiles) *otlpcollectorprofile.ExportProfilesServiceRequest {
- return ms.orig
-}
-
-func GetProfilesState(ms Profiles) *State {
- return ms.state
-}
-
-func SetProfilesState(ms Profiles, state State) {
- *ms.state = state
-}
-
-func NewProfiles(orig *otlpcollectorprofile.ExportProfilesServiceRequest, state *State) Profiles {
- return Profiles{orig: orig, state: state}
-}
-
// ProfilesToProto internal helper to convert Profiles to protobuf representation.
-func ProfilesToProto(l Profiles) otlpprofile.ProfilesData {
- return otlpprofile.ProfilesData{
+func ProfilesToProto(l ProfilesWrapper) ProfilesData {
+ return ProfilesData{
ResourceProfiles: l.orig.ResourceProfiles,
Dictionary: l.orig.Dictionary,
}
@@ -39,10 +13,9 @@ func ProfilesToProto(l Profiles) otlpprofile.ProfilesData {
// ProfilesFromProto internal helper to convert protobuf representation to Profiles.
// This function sets exclusive state assuming that it's called only once per Profiles.
-func ProfilesFromProto(orig otlpprofile.ProfilesData) Profiles {
- state := StateMutable
- return NewProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{
+func ProfilesFromProto(orig ProfilesData) ProfilesWrapper {
+ return NewProfilesWrapper(&ExportProfilesServiceRequest{
ResourceProfiles: orig.ResourceProfiles,
Dictionary: orig.Dictionary,
- }, &state)
+ }, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go
deleted file mode 100644
index 5105cc6f3ad..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package internal // import "go.opentelemetry.io/collector/pdata/internal"
-
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Slice struct {
- orig *[]otlpcommon.AnyValue
- state *State
-}
-
-func GetOrigSlice(ms Slice) *[]otlpcommon.AnyValue {
- return ms.orig
-}
-
-func GetSliceState(ms Slice) *State {
- return ms.state
-}
-
-func NewSlice(orig *[]otlpcommon.AnyValue, state *State) Slice {
- return Slice{orig: orig, state: state}
-}
-
-func CopyOrigSlice(dest, src []otlpcommon.AnyValue) []otlpcommon.AnyValue {
- if cap(dest) < len(src) {
- dest = make([]otlpcommon.AnyValue, len(src))
- }
- dest = dest[:len(src)]
- for i := 0; i < len(src); i++ {
- CopyOrigValue(&dest[i], &src[i])
- }
- return dest
-}
-
-func GenerateTestSlice() Slice {
- orig := []otlpcommon.AnyValue{}
- state := StateMutable
- tv := NewSlice(&orig, &state)
- FillTestSlice(tv)
- return tv
-}
-
-func FillTestSlice(tv Slice) {
- *tv.orig = make([]otlpcommon.AnyValue, 7)
- for i := 0; i < 7; i++ {
- state := StateMutable
- FillTestValue(NewValue(&(*tv.orig)[i], &state))
- }
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
index 5a4cdadbde0..758a1cb27e0 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go
@@ -3,44 +3,17 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
-type Traces struct {
- orig *otlpcollectortrace.ExportTraceServiceRequest
- state *State
-}
-
-func GetOrigTraces(ms Traces) *otlpcollectortrace.ExportTraceServiceRequest {
- return ms.orig
-}
-
-func GetTracesState(ms Traces) *State {
- return ms.state
-}
-
-func SetTracesState(ms Traces, state State) {
- *ms.state = state
-}
-
-func NewTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) Traces {
- return Traces{orig: orig, state: state}
-}
-
// TracesToProto internal helper to convert Traces to protobuf representation.
-func TracesToProto(l Traces) otlptrace.TracesData {
- return otlptrace.TracesData{
+func TracesToProto(l TracesWrapper) TracesData {
+ return TracesData{
ResourceSpans: l.orig.ResourceSpans,
}
}
// TracesFromProto internal helper to convert protobuf representation to Traces.
// This function sets exclusive state assuming that it's called only once per Traces.
-func TracesFromProto(orig otlptrace.TracesData) Traces {
- state := StateMutable
- return NewTraces(&otlpcollectortrace.ExportTraceServiceRequest{
+func TracesFromProto(orig TracesData) TracesWrapper {
+ return NewTracesWrapper(&ExportTraceServiceRequest{
ResourceSpans: orig.ResourceSpans,
- }, &state)
+ }, NewState())
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
index d1d5c3cf865..56278b728dd 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go
@@ -3,35 +3,29 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-type TraceState struct {
+type TraceStateWrapper struct {
orig *string
state *State
}
-func GetOrigTraceState(ms TraceState) *string {
+func GetTraceStateOrig(ms TraceStateWrapper) *string {
return ms.orig
}
-func GetTraceStateState(ms TraceState) *State {
+func GetTraceStateState(ms TraceStateWrapper) *State {
return ms.state
}
-func NewTraceState(orig *string, state *State) TraceState {
- return TraceState{orig: orig, state: state}
+func NewTraceStateWrapper(orig *string, state *State) TraceStateWrapper {
+ return TraceStateWrapper{orig: orig, state: state}
}
-func CopyOrigTraceState(dest, src *string) {
- *dest = *src
+func GenTestTraceStateWrapper() TraceStateWrapper {
+ return NewTraceStateWrapper(GenTestTraceState(), NewState())
}
-func GenerateTestTraceState() TraceState {
- var orig string
- state := StateMutable
- ms := NewTraceState(&orig, &state)
- FillTestTraceState(ms)
- return ms
-}
-
-func FillTestTraceState(dest TraceState) {
- *dest.orig = "rojo=00f067aa0ba902b7"
+func GenTestTraceState() *string {
+ orig := new(string)
+ *orig = "rojo=00f067aa0ba902b7"
+ return orig
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
index c0e0497d590..c0220c8f010 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go
@@ -3,73 +3,73 @@
package internal // import "go.opentelemetry.io/collector/pdata/internal"
-import (
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
-)
-
-type Value struct {
- orig *otlpcommon.AnyValue
+type ValueWrapper struct {
+ orig *AnyValue
state *State
}
-func GetOrigValue(ms Value) *otlpcommon.AnyValue {
+func GetValueOrig(ms ValueWrapper) *AnyValue {
return ms.orig
}
-func GetValueState(ms Value) *State {
+func GetValueState(ms ValueWrapper) *State {
return ms.state
}
-func NewValue(orig *otlpcommon.AnyValue, state *State) Value {
- return Value{orig: orig, state: state}
+func NewValueWrapper(orig *AnyValue, state *State) ValueWrapper {
+ return ValueWrapper{orig: orig, state: state}
+}
+
+func GenTestValueWrapper() ValueWrapper {
+ orig := GenTestAnyValue()
+ return NewValueWrapper(orig, NewState())
}
-func CopyOrigValue(dest, src *otlpcommon.AnyValue) {
- switch sv := src.Value.(type) {
- case *otlpcommon.AnyValue_KvlistValue:
- dv, ok := dest.Value.(*otlpcommon.AnyValue_KvlistValue)
- if !ok {
- dv = &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}
- dest.Value = dv
- }
- if sv.KvlistValue == nil {
- dv.KvlistValue = nil
- return
- }
- dv.KvlistValue.Values = CopyOrigMap(dv.KvlistValue.Values, sv.KvlistValue.Values)
- case *otlpcommon.AnyValue_ArrayValue:
- dv, ok := dest.Value.(*otlpcommon.AnyValue_ArrayValue)
- if !ok {
- dv = &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}
- dest.Value = dv
- }
- if sv.ArrayValue == nil {
- dv.ArrayValue = nil
- return
- }
- dv.ArrayValue.Values = CopyOrigSlice(dv.ArrayValue.Values, sv.ArrayValue.Values)
- case *otlpcommon.AnyValue_BytesValue:
- bv, ok := dest.Value.(*otlpcommon.AnyValue_BytesValue)
- if !ok {
- bv = &otlpcommon.AnyValue_BytesValue{}
- dest.Value = bv
- }
- bv.BytesValue = make([]byte, len(sv.BytesValue))
- copy(bv.BytesValue, sv.BytesValue)
- default:
- // Primitive immutable type, no need for deep copy.
- dest.Value = sv
+func NewAnyValueStringValue() *AnyValue_StringValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_StringValue{}
}
+ return ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
-func FillTestValue(dest Value) {
- dest.orig.Value = &otlpcommon.AnyValue_StringValue{StringValue: "v"}
+func NewAnyValueIntValue() *AnyValue_IntValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_IntValue{}
+ }
+ return ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
-func GenerateTestValue() Value {
- var orig otlpcommon.AnyValue
- state := StateMutable
- ms := NewValue(&orig, &state)
- FillTestValue(ms)
- return ms
+func NewAnyValueBoolValue() *AnyValue_BoolValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_BoolValue{}
+ }
+ return ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
+}
+
+func NewAnyValueDoubleValue() *AnyValue_DoubleValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_DoubleValue{}
+ }
+ return ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
+}
+
+func NewAnyValueBytesValue() *AnyValue_BytesValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_BytesValue{}
+ }
+ return ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
+}
+
+func NewAnyValueArrayValue() *AnyValue_ArrayValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_ArrayValue{}
+ }
+ return ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
+}
+
+func NewAnyValueKvlistValue() *AnyValue_KvlistValue {
+ if !UseProtoPooling.IsEnabled() {
+ return &AnyValue_KvlistValue{}
+ }
+ return ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
index 9423fa0f1af..073b55d3416 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewByteSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type ByteSlice internal.ByteSlice
+type ByteSlice internal.ByteSliceWrapper
func (ms ByteSlice) getOrig() *[]byte {
- return internal.GetOrigByteSlice(internal.ByteSlice(ms))
+ return internal.GetByteSliceOrig(internal.ByteSliceWrapper(ms))
}
func (ms ByteSlice) getState() *internal.State {
- return internal.GetByteSliceState(internal.ByteSlice(ms))
+ return internal.GetByteSliceState(internal.ByteSliceWrapper(ms))
}
// NewByteSlice creates a new empty ByteSlice.
func NewByteSlice() ByteSlice {
orig := []byte(nil)
- state := internal.StateMutable
- return ByteSlice(internal.NewByteSlice(&orig, &state))
+ return ByteSlice(internal.NewByteSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []byte slice.
func (ms ByteSlice) AsRaw() []byte {
- return internal.CopyOrigByteSlice(nil, *ms.getOrig())
+ return copyByteSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []byte into the slice ByteSlice.
func (ms ByteSlice) FromRaw(val []byte) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigByteSlice(*ms.getOrig(), val)
+ *ms.getOrig() = copyByteSlice(*ms.getOrig(), val)
}
// Len returns length of the []byte slice value.
@@ -128,13 +127,42 @@ func (ms ByteSlice) MoveAndAppendTo(dest ByteSlice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms ByteSlice) RemoveIf(f func(byte) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero byte
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms ByteSlice) CopyTo(dest ByteSlice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigByteSlice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another ByteSlice
func (ms ByteSlice) Equal(val ByteSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyByteSlice(dst, src []byte) []byte {
+ return append(dst[:0], src...)
+}
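The newly generated RemoveIf filters in place: it keeps element order, zeroes vacated slots, and truncates the slice, so no allocation happens. Assuming the pcommon API at this pdata version, usage looks like:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	bs := pcommon.NewByteSlice()
	bs.FromRaw([]byte{0x00, 0x01, 0x00, 0x02})
	bs.RemoveIf(func(b byte) bool { return b == 0x00 }) // drop zero bytes
	fmt.Println(bs.AsRaw())                             // [1 2]
}
```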
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
index 957460aa43b..dac9ebc4edf 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewFloat64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Float64Slice internal.Float64Slice
+type Float64Slice internal.Float64SliceWrapper
func (ms Float64Slice) getOrig() *[]float64 {
- return internal.GetOrigFloat64Slice(internal.Float64Slice(ms))
+ return internal.GetFloat64SliceOrig(internal.Float64SliceWrapper(ms))
}
func (ms Float64Slice) getState() *internal.State {
- return internal.GetFloat64SliceState(internal.Float64Slice(ms))
+ return internal.GetFloat64SliceState(internal.Float64SliceWrapper(ms))
}
// NewFloat64Slice creates a new empty Float64Slice.
func NewFloat64Slice() Float64Slice {
orig := []float64(nil)
- state := internal.StateMutable
- return Float64Slice(internal.NewFloat64Slice(&orig, &state))
+ return Float64Slice(internal.NewFloat64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []float64 slice.
func (ms Float64Slice) AsRaw() []float64 {
- return internal.CopyOrigFloat64Slice(nil, *ms.getOrig())
+ return copyFloat64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []float64 into the slice Float64Slice.
func (ms Float64Slice) FromRaw(val []float64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigFloat64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyFloat64Slice(*ms.getOrig(), val)
}
// Len returns length of the []float64 slice value.
@@ -128,13 +127,42 @@ func (ms Float64Slice) MoveAndAppendTo(dest Float64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Float64Slice) RemoveIf(f func(float64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero float64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Float64Slice) CopyTo(dest Float64Slice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigFloat64Slice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Float64Slice
func (ms Float64Slice) Equal(val Float64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyFloat64Slice(dst, src []float64) []float64 {
+ return append(dst[:0], src...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
index d6e27851ce5..6eeeab6e3cb 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// InstrumentationScope is a message representing the instrumentation scope information.
@@ -18,10 +17,10 @@ import (
//
// Must use NewInstrumentationScope function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type InstrumentationScope internal.InstrumentationScope
+type InstrumentationScope internal.InstrumentationScopeWrapper
-func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *internal.State) InstrumentationScope {
- return InstrumentationScope(internal.NewInstrumentationScope(orig, state))
+func newInstrumentationScope(orig *internal.InstrumentationScope, state *internal.State) InstrumentationScope {
+ return InstrumentationScope(internal.NewInstrumentationScopeWrapper(orig, state))
}
// NewInstrumentationScope creates a new empty InstrumentationScope.
@@ -29,8 +28,7 @@ func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *inter
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewInstrumentationScope() InstrumentationScope {
- state := internal.StateMutable
- return newInstrumentationScope(&otlpcommon.InstrumentationScope{}, &state)
+ return newInstrumentationScope(internal.NewInstrumentationScope(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -42,16 +40,8 @@ func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) {
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = *ms.getOrig()
- *ms.getOrig() = otlpcommon.InstrumentationScope{}
-}
-
-func (ms InstrumentationScope) getOrig() *otlpcommon.InstrumentationScope {
- return internal.GetOrigInstrumentationScope(internal.InstrumentationScope(ms))
-}
-
-func (ms InstrumentationScope) getState() *internal.State {
- return internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms))
+ internal.DeleteInstrumentationScope(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Name returns the name associated with this InstrumentationScope.
@@ -78,7 +68,7 @@ func (ms InstrumentationScope) SetVersion(v string) {
// Attributes returns the Attributes associated with this InstrumentationScope.
func (ms InstrumentationScope) Attributes() Map {
- return Map(internal.NewMap(&ms.getOrig().Attributes, internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms))))
+ return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope.
@@ -95,5 +85,13 @@ func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) {
dest.getState().AssertMutable()
- internal.CopyOrigInstrumentationScope(dest.getOrig(), ms.getOrig())
+ internal.CopyInstrumentationScope(dest.getOrig(), ms.getOrig())
+}
+
+func (ms InstrumentationScope) getOrig() *internal.InstrumentationScope {
+ return internal.GetInstrumentationScopeOrig(internal.InstrumentationScopeWrapper(ms))
+}
+
+func (ms InstrumentationScope) getState() *internal.State {
+ return internal.GetInstrumentationScopeState(internal.InstrumentationScopeWrapper(ms))
}
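The public surface of InstrumentationScope is unchanged by the wrapper rename; a short usage sketch with illustrative values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	scope := pcommon.NewInstrumentationScope()
	scope.SetName("github.com/example/instrumentation") // hypothetical name
	scope.SetVersion("1.2.3")
	scope.Attributes().PutStr("library.language", "go")

	clone := pcommon.NewInstrumentationScope()
	scope.CopyTo(clone) // deep copy, now via internal.CopyInstrumentationScope
	fmt.Println(clone.Name(), clone.Version())
}
```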
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
index 23c07b9e33a..d2bb746df53 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewInt32Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Int32Slice internal.Int32Slice
+type Int32Slice internal.Int32SliceWrapper
func (ms Int32Slice) getOrig() *[]int32 {
- return internal.GetOrigInt32Slice(internal.Int32Slice(ms))
+ return internal.GetInt32SliceOrig(internal.Int32SliceWrapper(ms))
}
func (ms Int32Slice) getState() *internal.State {
- return internal.GetInt32SliceState(internal.Int32Slice(ms))
+ return internal.GetInt32SliceState(internal.Int32SliceWrapper(ms))
}
// NewInt32Slice creates a new empty Int32Slice.
func NewInt32Slice() Int32Slice {
orig := []int32(nil)
- state := internal.StateMutable
- return Int32Slice(internal.NewInt32Slice(&orig, &state))
+ return Int32Slice(internal.NewInt32SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int32 slice.
func (ms Int32Slice) AsRaw() []int32 {
- return internal.CopyOrigInt32Slice(nil, *ms.getOrig())
+ return copyInt32Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int32 into the slice Int32Slice.
func (ms Int32Slice) FromRaw(val []int32) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigInt32Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyInt32Slice(*ms.getOrig(), val)
}
// Len returns length of the []int32 slice value.
@@ -128,13 +127,42 @@ func (ms Int32Slice) MoveAndAppendTo(dest Int32Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Int32Slice) RemoveIf(f func(int32) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero int32
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int32Slice) CopyTo(dest Int32Slice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigInt32Slice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int32Slice
func (ms Int32Slice) Equal(val Int32Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyInt32Slice(dst, src []int32) []int32 {
+ return append(dst[:0], src...)
+}
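
Note: RemoveIf is new on the primitive slice wrappers in this version. It compacts in place: kept elements shift left, moved-from slots are zeroed, and the slice is re-sliced to the new length. A minimal usage sketch (illustrative, not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        s := pcommon.NewInt32Slice()
        s.FromRaw([]int32{1, 2, 3, 4, 5})

        // Drop even values; the slice is compacted in place.
        s.RemoveIf(func(v int32) bool { return v%2 == 0 })

        fmt.Println(s.AsRaw()) // [1 3 5]
    }
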
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
index 6364eae37be..4c22f2ed825 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Int64Slice internal.Int64Slice
+type Int64Slice internal.Int64SliceWrapper
func (ms Int64Slice) getOrig() *[]int64 {
- return internal.GetOrigInt64Slice(internal.Int64Slice(ms))
+ return internal.GetInt64SliceOrig(internal.Int64SliceWrapper(ms))
}
func (ms Int64Slice) getState() *internal.State {
- return internal.GetInt64SliceState(internal.Int64Slice(ms))
+ return internal.GetInt64SliceState(internal.Int64SliceWrapper(ms))
}
// NewInt64Slice creates a new empty Int64Slice.
func NewInt64Slice() Int64Slice {
orig := []int64(nil)
- state := internal.StateMutable
- return Int64Slice(internal.NewInt64Slice(&orig, &state))
+ return Int64Slice(internal.NewInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int64 slice.
func (ms Int64Slice) AsRaw() []int64 {
- return internal.CopyOrigInt64Slice(nil, *ms.getOrig())
+ return copyInt64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int64 into the slice Int64Slice.
func (ms Int64Slice) FromRaw(val []int64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigInt64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyInt64Slice(*ms.getOrig(), val)
}
// Len returns length of the []int64 slice value.
@@ -128,13 +127,42 @@ func (ms Int64Slice) MoveAndAppendTo(dest Int64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms Int64Slice) RemoveIf(f func(int64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero int64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int64Slice) CopyTo(dest Int64Slice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigInt64Slice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int64Slice
func (ms Int64Slice) Equal(val Int64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyInt64Slice(dst, src []int64) []int64 {
+ return append(dst[:0], src...)
+}
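
Note: the generated copy helpers replace the exported internal.CopyOrig* functions with a local append(dst[:0], src...), which reuses dst's backing array when capacity allows and allocates only on growth; the new pointer-equality check in CopyTo additionally turns a self-copy into a no-op. A standalone sketch of the reuse behavior (plain Go, illustrative only):

    package main

    import "fmt"

    // copySlice mirrors the generated copyInt64Slice helper.
    func copySlice(dst, src []int64) []int64 {
        return append(dst[:0], src...)
    }

    func main() {
        dst := make([]int64, 0, 8)
        out := copySlice(dst, []int64{1, 2, 3})

        // The backing array is reused: capacity stays 8, no new allocation.
        fmt.Println(out, cap(out)) // [1 2 3] 8
    }
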
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go
deleted file mode 100644
index 1a72889d554..00000000000
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
-// To regenerate this file run "make genpdata".
-
-package pcommon
-
-import (
- "go.opentelemetry.io/collector/pdata/internal"
-)
-
-// IntSlice represents a []int slice.
-// The instance of IntSlice can be assigned to multiple objects since it's immutable.
-//
-// Must use NewIntSlice function to create new instances.
-// Important: zero-initialized instance is not valid for use.
-type IntSlice internal.IntSlice
-
-func (ms IntSlice) getOrig() *[]int {
- return internal.GetOrigIntSlice(internal.IntSlice(ms))
-}
-
-func (ms IntSlice) getState() *internal.State {
- return internal.GetIntSliceState(internal.IntSlice(ms))
-}
-
-// NewIntSlice creates a new empty IntSlice.
-func NewIntSlice() IntSlice {
- orig := []int(nil)
- state := internal.StateMutable
- return IntSlice(internal.NewIntSlice(&orig, &state))
-}
-
-// AsRaw returns a copy of the []int slice.
-func (ms IntSlice) AsRaw() []int {
- return copyIntSlice(nil, *ms.getOrig())
-}
-
-// FromRaw copies raw []int into the slice IntSlice.
-func (ms IntSlice) FromRaw(val []int) {
- ms.getState().AssertMutable()
- *ms.getOrig() = copyIntSlice(*ms.getOrig(), val)
-}
-
-// Len returns length of the []int slice value.
-// Equivalent of len(intSlice).
-func (ms IntSlice) Len() int {
- return len(*ms.getOrig())
-}
-
-// At returns an item from particular index.
-// Equivalent of intSlice[i].
-func (ms IntSlice) At(i int) int {
- return (*ms.getOrig())[i]
-}
-
-// SetAt sets int item at particular index.
-// Equivalent of intSlice[i] = val
-func (ms IntSlice) SetAt(i int, val int) {
- ms.getState().AssertMutable()
- (*ms.getOrig())[i] = val
-}
-
-// EnsureCapacity ensures IntSlice has at least the specified capacity.
-// 1. If the newCap <= cap, then is no change in capacity.
-// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
-// buf := make([]int, len(intSlice), newCap)
-// copy(buf, intSlice)
-// intSlice = buf
-func (ms IntSlice) EnsureCapacity(newCap int) {
- ms.getState().AssertMutable()
- oldCap := cap(*ms.getOrig())
- if newCap <= oldCap {
- return
- }
-
- newOrig := make([]int, len(*ms.getOrig()), newCap)
- copy(newOrig, *ms.getOrig())
- *ms.getOrig() = newOrig
-}
-
-// Append appends extra elements to IntSlice.
-// Equivalent of intSlice = append(intSlice, elms...)
-func (ms IntSlice) Append(elms ...int) {
- ms.getState().AssertMutable()
- *ms.getOrig() = append(*ms.getOrig(), elms...)
-}
-
-// MoveTo moves all elements from the current slice overriding the destination and
-// resetting the current instance to its zero value.
-func (ms IntSlice) MoveTo(dest IntSlice) {
- ms.getState().AssertMutable()
- dest.getState().AssertMutable()
- *dest.getOrig() = *ms.getOrig()
- *ms.getOrig() = nil
-}
-
-// CopyTo copies all elements from the current slice overriding the destination.
-func (ms IntSlice) CopyTo(dest IntSlice) {
- dest.getState().AssertMutable()
- *dest.getOrig() = copyIntSlice(*dest.getOrig(), *ms.getOrig())
-}
-
-func copyIntSlice(dst, src []int) []int {
- dst = dst[:0]
- return append(dst, src...)
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
index 4d829e049d8..4f767693b72 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Resource is a message representing the resource information.
@@ -18,10 +17,10 @@ import (
//
// Must use NewResource function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Resource internal.Resource
+type Resource internal.ResourceWrapper
-func newResource(orig *otlpresource.Resource, state *internal.State) Resource {
- return Resource(internal.NewResource(orig, state))
+func newResource(orig *internal.Resource, state *internal.State) Resource {
+ return Resource(internal.NewResourceWrapper(orig, state))
}
// NewResource creates a new empty Resource.
@@ -29,8 +28,7 @@ func newResource(orig *otlpresource.Resource, state *internal.State) Resource {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResource() Resource {
- state := internal.StateMutable
- return newResource(&otlpresource.Resource{}, &state)
+ return newResource(internal.NewResource(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -42,21 +40,13 @@ func (ms Resource) MoveTo(dest Resource) {
if ms.getOrig() == dest.getOrig() {
return
}
- *dest.getOrig() = *ms.getOrig()
- *ms.getOrig() = otlpresource.Resource{}
-}
-
-func (ms Resource) getOrig() *otlpresource.Resource {
- return internal.GetOrigResource(internal.Resource(ms))
-}
-
-func (ms Resource) getState() *internal.State {
- return internal.GetResourceState(internal.Resource(ms))
+ internal.DeleteResource(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Attributes returns the Attributes associated with this Resource.
func (ms Resource) Attributes() Map {
- return Map(internal.NewMap(&ms.getOrig().Attributes, internal.GetResourceState(internal.Resource(ms))))
+ return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Resource.
@@ -73,5 +63,13 @@ func (ms Resource) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Resource) CopyTo(dest Resource) {
dest.getState().AssertMutable()
- internal.CopyOrigResource(dest.getOrig(), ms.getOrig())
+ internal.CopyResource(dest.getOrig(), ms.getOrig())
+}
+
+func (ms Resource) getOrig() *internal.Resource {
+ return internal.GetResourceOrig(internal.ResourceWrapper(ms))
+}
+
+func (ms Resource) getState() *internal.State {
+ return internal.GetResourceState(internal.ResourceWrapper(ms))
}
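
Note: Resource follows the same wrapper pattern; Attributes() now builds the Map from the receiver's own getState() rather than re-deriving it. From the public pcommon API nothing observable changes; a small usage sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        res := pcommon.NewResource()
        res.Attributes().PutStr("service.name", "checkout")

        clone := pcommon.NewResource()
        res.CopyTo(clone)
        fmt.Println(clone.Attributes().AsRaw()) // map[service.name:checkout]
    }
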
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go
new file mode 100644
index 00000000000..0a28bde3af6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_slice.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pcommon
+
+import (
+ "iter"
+
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Slice logically represents a slice of Value.
+//
+// This is a reference type. If passed by value and callee modifies it, the
+// caller will see the modification.
+//
+// Must use NewSlice function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Slice internal.SliceWrapper
+
+func newSlice(orig *[]internal.AnyValue, state *internal.State) Slice {
+ return Slice(internal.NewSliceWrapper(orig, state))
+}
+
+// NewSlice creates a Slice with 0 elements.
+// Can use "EnsureCapacity" to initialize with a given capacity.
+func NewSlice() Slice {
+ orig := []internal.AnyValue(nil)
+ return newSlice(&orig, internal.NewState())
+}
+
+// Len returns the number of elements in the slice.
+//
+// Returns "0" for a new instance created with "NewSlice()".
+func (es Slice) Len() int {
+ return len(*es.getOrig())
+}
+
+// At returns the element at the given index.
+//
+// This function is used mostly for iterating over all the values in the slice:
+//
+// for i := 0; i < es.Len(); i++ {
+// e := es.At(i)
+// ... // Do something with the element
+// }
+func (es Slice) At(i int) Value {
+ return newValue(&(*es.getOrig())[i], es.getState())
+}
+
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es Slice) All() iter.Seq2[int, Value] {
+ return func(yield func(int, Value) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
+// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
+// 1. If newCap <= cap, there is no change in capacity.
+// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
+//
+// Here is how a new Slice can be initialized:
+//
+// es := NewSlice()
+// es.EnsureCapacity(4)
+// for i := 0; i < 4; i++ {
+// e := es.AppendEmpty()
+// // Here should set all the values for e.
+// }
+func (es Slice) EnsureCapacity(newCap int) {
+ es.getState().AssertMutable()
+ oldCap := cap(*es.getOrig())
+ if newCap <= oldCap {
+ return
+ }
+
+ newOrig := make([]internal.AnyValue, len(*es.getOrig()), newCap)
+ copy(newOrig, *es.getOrig())
+ *es.getOrig() = newOrig
+}
+
+// AppendEmpty will append to the end of the slice an empty Value.
+// It returns the newly added Value.
+func (es Slice) AppendEmpty() Value {
+ es.getState().AssertMutable()
+ *es.getOrig() = append(*es.getOrig(), internal.AnyValue{})
+ return es.At(es.Len() - 1)
+}
+
+// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
+// The current slice will be cleared.
+func (es Slice) MoveAndAppendTo(dest Slice) {
+ es.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if es.getOrig() == dest.getOrig() {
+ return
+ }
+ if *dest.getOrig() == nil {
+ // We can simply move the entire vector and avoid any allocations.
+ *dest.getOrig() = *es.getOrig()
+ } else {
+ *dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...)
+ }
+ *es.getOrig() = nil
+}
+
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (es Slice) RemoveIf(f func(Value) bool) {
+ es.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*es.getOrig()); i++ {
+ if f(es.At(i)) {
+ internal.DeleteAnyValue(&(*es.getOrig())[i], false)
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*es.getOrig())[newLen] = (*es.getOrig())[i]
+ (*es.getOrig())[i].Reset()
+ newLen++
+ }
+ *es.getOrig() = (*es.getOrig())[:newLen]
+}
+
+// CopyTo copies all elements from the current slice overriding the destination.
+func (es Slice) CopyTo(dest Slice) {
+ dest.getState().AssertMutable()
+ if es.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = internal.CopyAnyValueSlice(*dest.getOrig(), *es.getOrig())
+}
+
+func (ms Slice) getOrig() *[]internal.AnyValue {
+ return internal.GetSliceOrig(internal.SliceWrapper(ms))
+}
+
+func (ms Slice) getState() *internal.State {
+ return internal.GetSliceState(internal.SliceWrapper(ms))
+}
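
Note: generated_slice.go replaces the hand-written pcommon/slice.go definitions removed further down. All() returns an iter.Seq2, so the slice ranges directly with range-over-func (Go 1.23+). A minimal sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        es := pcommon.NewSlice()
        es.EnsureCapacity(3)
        for i := 0; i < 3; i++ {
            es.AppendEmpty().SetInt(int64(i * 10))
        }

        for i, v := range es.All() { // iter.Seq2[int, Value]
            fmt.Println(i, v.Int())
        }
    }
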
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
index 22c318e7a5b..ff8422805d8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewStringSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type StringSlice internal.StringSlice
+type StringSlice internal.StringSliceWrapper
func (ms StringSlice) getOrig() *[]string {
- return internal.GetOrigStringSlice(internal.StringSlice(ms))
+ return internal.GetStringSliceOrig(internal.StringSliceWrapper(ms))
}
func (ms StringSlice) getState() *internal.State {
- return internal.GetStringSliceState(internal.StringSlice(ms))
+ return internal.GetStringSliceState(internal.StringSliceWrapper(ms))
}
// NewStringSlice creates a new empty StringSlice.
func NewStringSlice() StringSlice {
orig := []string(nil)
- state := internal.StateMutable
- return StringSlice(internal.NewStringSlice(&orig, &state))
+ return StringSlice(internal.NewStringSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []string slice.
func (ms StringSlice) AsRaw() []string {
- return internal.CopyOrigStringSlice(nil, *ms.getOrig())
+ return copyStringSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []string into the slice StringSlice.
func (ms StringSlice) FromRaw(val []string) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigStringSlice(*ms.getOrig(), val)
+ *ms.getOrig() = copyStringSlice(*ms.getOrig(), val)
}
// Len returns length of the []string slice value.
@@ -128,13 +127,42 @@ func (ms StringSlice) MoveAndAppendTo(dest StringSlice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms StringSlice) RemoveIf(f func(string) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero string
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms StringSlice) CopyTo(dest StringSlice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigStringSlice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another StringSlice
func (ms StringSlice) Equal(val StringSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyStringSlice(dst, src []string) []string {
+ return append(dst[:0], src...)
+}
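
Note: for string elements, the `var zero string` reset in RemoveIf is not just tidiness: clearing each moved-from slot means the backing array does not retain a second reference to a surviving string. Usage sketch (illustrative, not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        s := pcommon.NewStringSlice()
        s.Append("a", "", "b", "")
        s.RemoveIf(func(v string) bool { return v == "" })
        fmt.Println(s.AsRaw()) // [a b]
    }
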
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
index df58ff360a9..22539d9875e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
@@ -18,32 +18,31 @@ import (
//
// Must use NewUInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type UInt64Slice internal.UInt64Slice
+type UInt64Slice internal.UInt64SliceWrapper
func (ms UInt64Slice) getOrig() *[]uint64 {
- return internal.GetOrigUInt64Slice(internal.UInt64Slice(ms))
+ return internal.GetUInt64SliceOrig(internal.UInt64SliceWrapper(ms))
}
func (ms UInt64Slice) getState() *internal.State {
- return internal.GetUInt64SliceState(internal.UInt64Slice(ms))
+ return internal.GetUInt64SliceState(internal.UInt64SliceWrapper(ms))
}
// NewUInt64Slice creates a new empty UInt64Slice.
func NewUInt64Slice() UInt64Slice {
orig := []uint64(nil)
- state := internal.StateMutable
- return UInt64Slice(internal.NewUInt64Slice(&orig, &state))
+ return UInt64Slice(internal.NewUInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []uint64 slice.
func (ms UInt64Slice) AsRaw() []uint64 {
- return internal.CopyOrigUInt64Slice(nil, *ms.getOrig())
+ return copyUint64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []uint64 into the slice UInt64Slice.
func (ms UInt64Slice) FromRaw(val []uint64) {
ms.getState().AssertMutable()
- *ms.getOrig() = internal.CopyOrigUInt64Slice(*ms.getOrig(), val)
+ *ms.getOrig() = copyUint64Slice(*ms.getOrig(), val)
}
// Len returns length of the []uint64 slice value.
@@ -128,13 +127,42 @@ func (ms UInt64Slice) MoveAndAppendTo(dest UInt64Slice) {
*ms.getOrig() = nil
}
+// RemoveIf calls f sequentially for each element present in the slice.
+// If f returns true, the element is removed from the slice.
+func (ms UInt64Slice) RemoveIf(f func(uint64) bool) {
+ ms.getState().AssertMutable()
+ newLen := 0
+ for i := 0; i < len(*ms.getOrig()); i++ {
+ if f((*ms.getOrig())[i]) {
+ continue
+ }
+ if newLen == i {
+ // Nothing to move, element is at the right place.
+ newLen++
+ continue
+ }
+ (*ms.getOrig())[newLen] = (*ms.getOrig())[i]
+ var zero uint64
+ (*ms.getOrig())[i] = zero
+ newLen++
+ }
+ *ms.getOrig() = (*ms.getOrig())[:newLen]
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigUInt64Slice(*dest.getOrig(), *ms.getOrig())
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = copyUint64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another UInt64Slice
func (ms UInt64Slice) Equal(val UInt64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
+
+func copyUint64Slice(dst, src []uint64) []uint64 {
+ return append(dst[:0], src...)
+}
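
Note: UInt64Slice gets the identical treatment; it is the element type pmetric exposes for histogram bucket counts, which is where this API most often shows up. Standalone sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        bc := pcommon.NewUInt64Slice()
        bc.FromRaw([]uint64{0, 12, 7, 0})

        var total uint64
        for i := 0; i < bc.Len(); i++ {
            total += bc.At(i)
        }
        fmt.Println(total) // 19
    }
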
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
index ed71f9b410d..b5d94967c28 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
@@ -9,32 +9,30 @@ import (
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Map stores a map of string keys to elements of Value type.
//
// Must use NewMap function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type Map internal.Map
+type Map internal.MapWrapper
// NewMap creates a Map with 0 elements.
func NewMap() Map {
- orig := []otlpcommon.KeyValue(nil)
- state := internal.StateMutable
- return Map(internal.NewMap(&orig, &state))
+ orig := []internal.KeyValue(nil)
+ return Map(internal.NewMapWrapper(&orig, internal.NewState()))
}
-func (m Map) getOrig() *[]otlpcommon.KeyValue {
- return internal.GetOrigMap(internal.Map(m))
+func (m Map) getOrig() *[]internal.KeyValue {
+ return internal.GetMapOrig(internal.MapWrapper(m))
}
func (m Map) getState() *internal.State {
- return internal.GetMapState(internal.Map(m))
+ return internal.GetMapState(internal.MapWrapper(m))
}
-func newMap(orig *[]otlpcommon.KeyValue, state *internal.State) Map {
- return Map(internal.NewMap(orig, state))
+func newMap(orig *[]internal.KeyValue, state *internal.State) Map {
+ return Map(internal.NewMapWrapper(orig, state))
}
// Clear erases any existing entries in this Map instance.
@@ -51,16 +49,18 @@ func (m Map) EnsureCapacity(capacity int) {
if capacity <= cap(oldOrig) {
return
}
- *m.getOrig() = make([]otlpcommon.KeyValue, len(oldOrig), capacity)
+ *m.getOrig() = make([]internal.KeyValue, len(oldOrig), capacity)
copy(*m.getOrig(), oldOrig)
}
-// Get returns the Value associated with the key and true. Returned
+// Get returns the Value associated with the key and true. The returned
// Value is not a copy, it is a reference to the value stored in this map.
// It is allowed to modify the returned value using Value.Set* functions.
// Such modification will be applied to the value stored in this map.
+// Accessing the returned value after modifying the underlying map
+// (removing or adding new values) is undefined behavior.
//
-// If the key does not exist returns a zero-initialized KeyValue and false.
+// If the key does not exist, returns a zero-initialized KeyValue and false.
// Calling any functions on the returned invalid instance may cause a panic.
func (m Map) Get(key string) (Value, bool) {
for i := range *m.getOrig() {
@@ -92,8 +92,8 @@ func (m Map) RemoveIf(f func(string, Value) bool) {
m.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*m.getOrig()); i++ {
- akv := &(*m.getOrig())[i]
- if f(akv.Key, newValue(&akv.Value, m.getState())) {
+ if f((*m.getOrig())[i].Key, newValue(&(*m.getOrig())[i].Value, m.getState())) {
+ (*m.getOrig())[i] = internal.KeyValue{}
continue
}
if newLen == i {
@@ -102,6 +102,7 @@ func (m Map) RemoveIf(f func(string, Value) bool) {
continue
}
(*m.getOrig())[newLen] = (*m.getOrig())[i]
+ (*m.getOrig())[i] = internal.KeyValue{}
newLen++
}
*m.getOrig() = (*m.getOrig())[:newLen]
@@ -115,20 +116,34 @@ func (m Map) PutEmpty(k string) Value {
av.getOrig().Value = nil
return newValue(av.getOrig(), m.getState())
}
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k})
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState())
}
+// GetOrPutEmpty returns the Value associated with the key and true (loaded) if the key exists in the map;
+// otherwise it inserts an empty value into the map under the given key and returns the inserted value
+// and false (not loaded).
+func (m Map) GetOrPutEmpty(k string) (Value, bool) {
+ m.getState().AssertMutable()
+ if av, existing := m.Get(k); existing {
+ return av, true
+ }
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
+ return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()), false
+}
+
// PutStr performs the Insert or Update action: the value is
// inserted into the map if the key does not already exist, and the existing
// entry is updated if the key is already present.
-func (m Map) PutStr(k string, v string) {
+func (m Map) PutStr(k, v string) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetStr(v)
- } else {
- *m.getOrig() = append(*m.getOrig(), newKeyValueString(k, v))
+ return
}
+ ov := internal.NewAnyValueStringValue()
+ ov.StringValue = v
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutInt performs the Insert or Update action. The int Value is
@@ -138,9 +153,11 @@ func (m Map) PutInt(k string, v int64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetInt(v)
- } else {
- *m.getOrig() = append(*m.getOrig(), newKeyValueInt(k, v))
+ return
}
+ ov := internal.NewAnyValueIntValue()
+ ov.IntValue = v
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutDouble performs the Insert or Update action. The double Value is
@@ -150,9 +167,11 @@ func (m Map) PutDouble(k string, v float64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetDouble(v)
- } else {
- *m.getOrig() = append(*m.getOrig(), newKeyValueDouble(k, v))
+ return
}
+ ov := internal.NewAnyValueDoubleValue()
+ ov.DoubleValue = v
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutBool performs the Insert or Update action. The bool Value is
@@ -162,45 +181,46 @@ func (m Map) PutBool(k string, v bool) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetBool(v)
- } else {
- *m.getOrig() = append(*m.getOrig(), newKeyValueBool(k, v))
+ return
}
+ ov := internal.NewAnyValueBoolValue()
+ ov.BoolValue = v
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutEmptyBytes inserts or updates an empty byte slice under the given key and returns it.
func (m Map) PutEmptyBytes(k string) ByteSlice {
m.getState().AssertMutable()
- bv := otlpcommon.AnyValue_BytesValue{}
if av, existing := m.Get(k); existing {
- av.getOrig().Value = &bv
- } else {
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &bv}})
+ return av.SetEmptyBytes()
}
- return ByteSlice(internal.NewByteSlice(&bv.BytesValue, m.getState()))
+ ov := internal.NewAnyValueBytesValue()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return ByteSlice(internal.NewByteSliceWrapper(&ov.BytesValue, m.getState()))
}
// PutEmptyMap inserts or updates an empty map under the given key and returns it.
func (m Map) PutEmptyMap(k string) Map {
m.getState().AssertMutable()
- kvl := otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{Values: []otlpcommon.KeyValue(nil)}}
if av, existing := m.Get(k); existing {
- av.getOrig().Value = &kvl
- } else {
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &kvl}})
+ return av.SetEmptyMap()
}
- return Map(internal.NewMap(&kvl.KvlistValue.Values, m.getState()))
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return Map(internal.NewMapWrapper(&ov.KvlistValue.Values, m.getState()))
}
// PutEmptySlice inserts or updates an empty slice under the given key and returns it.
func (m Map) PutEmptySlice(k string) Slice {
m.getState().AssertMutable()
- vl := otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{Values: []otlpcommon.AnyValue(nil)}}
if av, existing := m.Get(k); existing {
- av.getOrig().Value = &vl
- } else {
- *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &vl}})
+ return av.SetEmptySlice()
}
- return Slice(internal.NewSlice(&vl.ArrayValue.Values, m.getState()))
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
+ *m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
+ return Slice(internal.NewSliceWrapper(&ov.ArrayValue.Values, m.getState()))
}
// Len returns the length of this map.
@@ -221,7 +241,7 @@ func (m Map) Len() int {
func (m Map) Range(f func(k string, v Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
- if !f(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
+ if !f(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
break
}
}
@@ -236,7 +256,7 @@ func (m Map) All() iter.Seq2[string, Value] {
return func(yield func(string, Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
- if !yield(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
+ if !yield(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
return
}
}
@@ -259,7 +279,10 @@ func (m Map) MoveTo(dest Map) {
// CopyTo copies all elements from the current map overriding the destination.
func (m Map) CopyTo(dest Map) {
dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigMap(*dest.getOrig(), *m.getOrig())
+ if m.getOrig() == dest.getOrig() {
+ return
+ }
+ *dest.getOrig() = internal.CopyKeyValueSlice(*dest.getOrig(), *m.getOrig())
}
// AsRaw returns a standard go map representation of this Map.
@@ -281,7 +304,7 @@ func (m Map) FromRaw(rawMap map[string]any) error {
}
var errs error
- origs := make([]otlpcommon.KeyValue, len(rawMap))
+ origs := make([]internal.KeyValue, len(rawMap))
ix := 0
for k, iv := range rawMap {
origs[ix].Key = k
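
Note: GetOrPutEmpty is the one genuinely new Map method here; it folds the common Get-then-PutEmpty double lookup into a single call, with the bool reporting whether the key already existed. Usage sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        m := pcommon.NewMap()
        m.PutStr("env", "prod")

        v, loaded := m.GetOrPutEmpty("env")
        fmt.Println(loaded, v.Str()) // true prod

        v, loaded = m.GetOrPutEmpty("region")
        fmt.Println(loaded, v.Type()) // false Empty
        v.SetStr("eu-west-1")

        fmt.Println(m.AsRaw()) // map[env:prod region:eu-west-1]
    }
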
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
index 5e6e431eb9b..380fc5c1b61 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
@@ -4,149 +4,11 @@
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
- "iter"
-
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
-// Slice logically represents a slice of Value.
-//
-// This is a reference type. If passed by value and callee modifies it, the
-// caller will see the modification.
-//
-// Must use NewSlice function to create new instances.
-// Important: zero-initialized instance is not valid for use.
-type Slice internal.Slice
-
-func newSlice(orig *[]otlpcommon.AnyValue, state *internal.State) Slice {
- return Slice(internal.NewSlice(orig, state))
-}
-
-func (es Slice) getOrig() *[]otlpcommon.AnyValue {
- return internal.GetOrigSlice(internal.Slice(es))
-}
-
-func (es Slice) getState() *internal.State {
- return internal.GetSliceState(internal.Slice(es))
-}
-
-// NewSlice creates a Slice with 0 elements.
-// Can use "EnsureCapacity" to initialize with a given capacity.
-func NewSlice() Slice {
- orig := []otlpcommon.AnyValue(nil)
- state := internal.StateMutable
- return Slice(internal.NewSlice(&orig, &state))
-}
-
-// Len returns the number of elements in the slice.
-//
-// Returns "0" for a newly instance created with "NewSlice()".
-func (es Slice) Len() int {
- return len(*es.getOrig())
-}
-
-// At returns the element at the given index.
-//
-// This function is used mostly for iterating over all the values in the slice:
-//
-// for i := 0; i < es.Len(); i++ {
-// e := es.At(i)
-// ... // Do something with the element
-// }
-func (es Slice) At(ix int) Value {
- return newValue(&(*es.getOrig())[ix], es.getState())
-}
-
-// All returns an iterator over index-value pairs in the slice.
-//
-// for i, v := range es.All() {
-// ... // Do something with index-value pair
-// }
-func (es Slice) All() iter.Seq2[int, Value] {
- return func(yield func(int, Value) bool) {
- for i := 0; i < es.Len(); i++ {
- if !yield(i, es.At(i)) {
- return
- }
- }
- }
-}
-
-// CopyTo copies all elements from the current slice overriding the destination.
-func (es Slice) CopyTo(dest Slice) {
- dest.getState().AssertMutable()
- *dest.getOrig() = internal.CopyOrigSlice(*dest.getOrig(), *es.getOrig())
-}
-
-// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
-// 1. If the newCap <= cap then no change in capacity.
-// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
-//
-// Here is how a new Slice can be initialized:
-//
-// es := NewSlice()
-// es.EnsureCapacity(4)
-// for i := 0; i < 4; i++ {
-// e := es.AppendEmpty()
-// // Here should set all the values for e.
-// }
-func (es Slice) EnsureCapacity(newCap int) {
- es.getState().AssertMutable()
- oldCap := cap(*es.getOrig())
- if newCap <= oldCap {
- return
- }
-
- newOrig := make([]otlpcommon.AnyValue, len(*es.getOrig()), newCap)
- copy(newOrig, *es.getOrig())
- *es.getOrig() = newOrig
-}
-
-// AppendEmpty will append to the end of the slice an empty Value.
-// It returns the newly added Value.
-func (es Slice) AppendEmpty() Value {
- es.getState().AssertMutable()
- *es.getOrig() = append(*es.getOrig(), otlpcommon.AnyValue{})
- return es.At(es.Len() - 1)
-}
-
-// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
-// The current slice will be cleared.
-func (es Slice) MoveAndAppendTo(dest Slice) {
- es.getState().AssertMutable()
- dest.getState().AssertMutable()
- if *dest.getOrig() == nil {
- // We can simply move the entire vector and avoid any allocations.
- *dest.getOrig() = *es.getOrig()
- } else {
- *dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...)
- }
- *es.getOrig() = nil
-}
-
-// RemoveIf calls f sequentially for each element present in the slice.
-// If f returns true, the element is removed from the slice.
-func (es Slice) RemoveIf(f func(Value) bool) {
- es.getState().AssertMutable()
- newLen := 0
- for i := 0; i < len(*es.getOrig()); i++ {
- if f(es.At(i)) {
- continue
- }
- if newLen == i {
- // Nothing to move, element is at the right place.
- newLen++
- continue
- }
- (*es.getOrig())[newLen] = (*es.getOrig())[i]
- newLen++
- }
- *es.getOrig() = (*es.getOrig())[:newLen]
-}
-
// AsRaw returns a []any copy of the Slice.
func (es Slice) AsRaw() []any {
rawSlice := make([]any, 0, es.Len())
@@ -164,7 +26,7 @@ func (es Slice) FromRaw(rawSlice []any) error {
return nil
}
var errs error
- origs := make([]otlpcommon.AnyValue, len(rawSlice))
+ origs := make([]internal.AnyValue, len(rawSlice))
for ix, iv := range rawSlice {
errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv))
}
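
Note: after this change pcommon/slice.go keeps only the raw conversions; everything else moved into the generated file above. Round-trip sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        es := pcommon.NewSlice()
        if err := es.FromRaw([]any{"a", int64(1), true}); err != nil {
            panic(err)
        }
        fmt.Println(es.AsRaw()) // [a 1 true]
    }
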
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
index 63399cb58ca..853c1a2d017 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go
@@ -5,7 +5,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
- "go.opentelemetry.io/collector/pdata/internal/data"
+ "go.opentelemetry.io/collector/pdata/internal"
)
var emptySpanID = SpanID([8]byte{})
@@ -32,5 +32,5 @@ func (ms SpanID) String() string {
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms SpanID) IsEmpty() bool {
- return data.SpanID(ms).IsEmpty()
+ return internal.SpanID(ms).IsEmpty()
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
index 3bde05482f5..167f7a3278c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go
@@ -11,19 +11,18 @@ import (
//
// Must use NewTraceState function to create new instances.
// Important: zero-initialized instance is not valid for use.
-type TraceState internal.TraceState
+type TraceState internal.TraceStateWrapper
func NewTraceState() TraceState {
- state := internal.StateMutable
- return TraceState(internal.NewTraceState(new(string), &state))
+ return TraceState(internal.NewTraceStateWrapper(new(string), internal.NewState()))
}
func (ms TraceState) getOrig() *string {
- return internal.GetOrigTraceState(internal.TraceState(ms))
+ return internal.GetTraceStateOrig(internal.TraceStateWrapper(ms))
}
func (ms TraceState) getState() *internal.State {
- return internal.GetTraceStateState(internal.TraceState(ms))
+ return internal.GetTraceStateState(internal.TraceStateWrapper(ms))
}
// AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
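
Note: TraceState is only renamed to the wrapper form; its raw accessors behave as before. Sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        ts := pcommon.NewTraceState()
        ts.FromRaw("congo=t61rcWkgMzE")
        fmt.Println(ts.AsRaw()) // congo=t61rcWkgMzE
    }
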
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
index 22ad5a5af4e..fd1df45d2c2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go
@@ -6,7 +6,7 @@ package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
- "go.opentelemetry.io/collector/pdata/internal/data"
+ "go.opentelemetry.io/collector/pdata/internal"
)
var emptyTraceID = TraceID([16]byte{})
@@ -33,5 +33,5 @@ func (ms TraceID) String() string {
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms TraceID) IsEmpty() bool {
- return data.TraceID(ms).IsEmpty()
+ return internal.TraceID(ms).IsEmpty()
}
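
Note: both ID types now alias internal.SpanID/internal.TraceID instead of the removed internal/data package; IsEmpty and String behave as before. Sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        var spanID pcommon.SpanID // zero value
        traceID := pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})

        fmt.Println(spanID.IsEmpty())  // true
        fmt.Println(traceID.IsEmpty()) // false
        fmt.Println(traceID.String()) // 0102030405060708090a0b0c0d0e0f10
    }
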
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
index 4a88fe29556..f74fa79467b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
@@ -11,7 +11,6 @@ import (
"strconv"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// ValueType specifies the type of Value.
@@ -67,66 +66,85 @@ func (avt ValueType) String() string {
//
// Important: zero-initialized instance is not valid for use. All Value functions below must
// be called only on instances that are created via NewValue+ functions.
-type Value internal.Value
+type Value internal.ValueWrapper
// NewValueEmpty creates a new Value with an empty value.
func NewValueEmpty() Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{}, &state)
+ return newValue(&internal.AnyValue{}, internal.NewState())
}
// NewValueStr creates a new Value with the given string value.
func NewValueStr(v string) Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: v}}, &state)
+ ov := internal.NewAnyValueStringValue()
+ ov.StringValue = v
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueInt creates a new Value with the given int64 value.
func NewValueInt(v int64) Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: v}}, &state)
+ ov := internal.NewAnyValueIntValue()
+ ov.IntValue = v
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueDouble creates a new Value with the given float64 value.
func NewValueDouble(v float64) Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_DoubleValue{DoubleValue: v}}, &state)
+ ov := internal.NewAnyValueDoubleValue()
+ ov.DoubleValue = v
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueBool creates a new Value with the given bool value.
func NewValueBool(v bool) Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BoolValue{BoolValue: v}}, &state)
+ ov := internal.NewAnyValueBoolValue()
+ ov.BoolValue = v
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueMap creates a new Value of map type.
func NewValueMap() Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}}, &state)
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueSlice creates a new Value of array type.
func NewValueSlice() Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}}, &state)
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
// NewValueBytes creates a new empty Value of byte type.
func NewValueBytes() Value {
- state := internal.StateMutable
- return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BytesValue{BytesValue: nil}}, &state)
+ ov := internal.NewAnyValueBytesValue()
+ orig := internal.NewAnyValue()
+ orig.Value = ov
+ return newValue(orig, internal.NewState())
}
-func newValue(orig *otlpcommon.AnyValue, state *internal.State) Value {
- return Value(internal.NewValue(orig, state))
+func newValue(orig *internal.AnyValue, state *internal.State) Value {
+ return Value(internal.NewValueWrapper(orig, state))
}
-func (v Value) getOrig() *otlpcommon.AnyValue {
- return internal.GetOrigValue(internal.Value(v))
+func (v Value) getOrig() *internal.AnyValue {
+ return internal.GetValueOrig(internal.ValueWrapper(v))
}
func (v Value) getState() *internal.State {
- return internal.GetValueState(internal.Value(v))
+ return internal.GetValueState(internal.ValueWrapper(v))
}
// FromRaw sets the value from the given raw value.
@@ -181,19 +199,19 @@ func (v Value) FromRaw(iv any) error {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) Type() ValueType {
switch v.getOrig().Value.(type) {
- case *otlpcommon.AnyValue_StringValue:
+ case *internal.AnyValue_StringValue:
return ValueTypeStr
- case *otlpcommon.AnyValue_BoolValue:
+ case *internal.AnyValue_BoolValue:
return ValueTypeBool
- case *otlpcommon.AnyValue_IntValue:
+ case *internal.AnyValue_IntValue:
return ValueTypeInt
- case *otlpcommon.AnyValue_DoubleValue:
+ case *internal.AnyValue_DoubleValue:
return ValueTypeDouble
- case *otlpcommon.AnyValue_KvlistValue:
+ case *internal.AnyValue_KvlistValue:
return ValueTypeMap
- case *otlpcommon.AnyValue_ArrayValue:
+ case *internal.AnyValue_ArrayValue:
return ValueTypeSlice
- case *otlpcommon.AnyValue_BytesValue:
+ case *internal.AnyValue_BytesValue:
return ValueTypeBytes
}
return ValueTypeEmpty
@@ -232,7 +250,7 @@ func (v Value) Map() Map {
if kvlist == nil {
return Map{}
}
- return newMap(&kvlist.Values, internal.GetValueState(internal.Value(v)))
+ return newMap(&kvlist.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Slice returns the slice value associated with this Value.
@@ -243,18 +261,18 @@ func (v Value) Slice() Slice {
if arr == nil {
return Slice{}
}
- return newSlice(&arr.Values, internal.GetValueState(internal.Value(v)))
+ return newSlice(&arr.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Bytes returns the ByteSlice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes
// then returns an invalid ByteSlice object. Note that using such slice can cause panic.
func (v Value) Bytes() ByteSlice {
- bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue)
+ bv, ok := v.getOrig().GetValue().(*internal.AnyValue_BytesValue)
if !ok {
return ByteSlice{}
}
- return ByteSlice(internal.NewByteSlice(&bv.BytesValue, internal.GetValueState(internal.Value(v))))
+ return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, internal.GetValueState(internal.ValueWrapper(v))))
}
// SetStr replaces the string value associated with this Value,
@@ -264,7 +282,11 @@ func (v Value) Bytes() ByteSlice {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetStr(sv string) {
v.getState().AssertMutable()
- v.getOrig().Value = &otlpcommon.AnyValue_StringValue{StringValue: sv}
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueStringValue()
+ ov.StringValue = sv
+ v.getOrig().Value = ov
}
// SetInt replaces the int64 value associated with this Value,
@@ -272,7 +294,11 @@ func (v Value) SetStr(sv string) {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetInt(iv int64) {
v.getState().AssertMutable()
- v.getOrig().Value = &otlpcommon.AnyValue_IntValue{IntValue: iv}
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueIntValue()
+ ov.IntValue = iv
+ v.getOrig().Value = ov
}
// SetDouble replaces the float64 value associated with this Value,
@@ -280,7 +306,11 @@ func (v Value) SetInt(iv int64) {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetDouble(dv float64) {
v.getState().AssertMutable()
- v.getOrig().Value = &otlpcommon.AnyValue_DoubleValue{DoubleValue: dv}
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueDoubleValue()
+ ov.DoubleValue = dv
+ v.getOrig().Value = ov
}
// SetBool replaces the bool value associated with this Value,
@@ -288,34 +318,46 @@ func (v Value) SetDouble(dv float64) {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetBool(bv bool) {
v.getState().AssertMutable()
- v.getOrig().Value = &otlpcommon.AnyValue_BoolValue{BoolValue: bv}
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueBoolValue()
+ ov.BoolValue = bv
+ v.getOrig().Value = ov
}
// SetEmptyBytes sets value to an empty byte slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyBytes() ByteSlice {
v.getState().AssertMutable()
- bv := otlpcommon.AnyValue_BytesValue{BytesValue: nil}
- v.getOrig().Value = &bv
- return ByteSlice(internal.NewByteSlice(&bv.BytesValue, v.getState()))
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ bv := internal.NewAnyValueBytesValue()
+ v.getOrig().Value = bv
+ return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, v.getState()))
}
// SetEmptyMap sets value to an empty map and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyMap() Map {
v.getState().AssertMutable()
- kv := &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}
- v.getOrig().Value = kv
- return newMap(&kv.KvlistValue.Values, v.getState())
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueKvlistValue()
+ ov.KvlistValue = internal.NewKeyValueList()
+ v.getOrig().Value = ov
+ return newMap(&ov.KvlistValue.Values, v.getState())
}
// SetEmptySlice sets value to an empty slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptySlice() Slice {
v.getState().AssertMutable()
- av := &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}
- v.getOrig().Value = av
- return newSlice(&av.ArrayValue.Values, v.getState())
+ // Delete everything but the AnyValue object itself.
+ internal.DeleteAnyValue(v.getOrig(), false)
+ ov := internal.NewAnyValueArrayValue()
+ ov.ArrayValue = internal.NewArrayValue()
+ v.getOrig().Value = ov
+ return newSlice(&ov.ArrayValue.Values, v.getState())
}
// MoveTo moves the Value from current overriding the destination and
@@ -336,7 +378,7 @@ func (v Value) MoveTo(dest Value) {
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) CopyTo(dest Value) {
dest.getState().AssertMutable()
- internal.CopyOrigValue(dest.getOrig(), v.getOrig())
+ internal.CopyAnyValue(dest.getOrig(), v.getOrig())
}
// AsString converts an OTLP Value object of any type to its equivalent string
@@ -455,35 +497,3 @@ func (v Value) Equal(c Value) bool {
return false
}
-
-func newKeyValueString(k string, v string) otlpcommon.KeyValue {
- orig := otlpcommon.KeyValue{Key: k}
- state := internal.StateMutable
- akv := newValue(&orig.Value, &state)
- akv.SetStr(v)
- return orig
-}
-
-func newKeyValueInt(k string, v int64) otlpcommon.KeyValue {
- orig := otlpcommon.KeyValue{Key: k}
- state := internal.StateMutable
- akv := newValue(&orig.Value, &state)
- akv.SetInt(v)
- return orig
-}
-
-func newKeyValueDouble(k string, v float64) otlpcommon.KeyValue {
- orig := otlpcommon.KeyValue{Key: k}
- state := internal.StateMutable
- akv := newValue(&orig.Value, &state)
- akv.SetDouble(v)
- return orig
-}
-
-func newKeyValueBool(k string, v bool) otlpcommon.KeyValue {
- orig := otlpcommon.KeyValue{Key: k}
- state := internal.StateMutable
- akv := newValue(&orig.Value, &state)
- akv.SetBool(v)
- return orig
-}
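
Note: the practical change in value.go is that every Set* now calls internal.DeleteAnyValue on the old payload before installing the new variant, apparently so the reworked internals can reclaim the previous map/slice/bytes value. Caller-visible behavior is the same; sketch (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
        v := pcommon.NewValueStr("hello")
        fmt.Println(v.Type()) // Str

        m := v.SetEmptyMap() // old string payload is released, type switches to Map
        m.PutBool("ok", true)
        fmt.Println(v.Type(), v.AsString()) // Map {"ok":true}
    }
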
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go
index 99e6ac83653..92f3dc1ecaa 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/encoding.go
@@ -9,17 +9,17 @@ type MarshalSizer interface {
Sizer
}
-// Marshaler marshals pdata.Logs into bytes.
+// Marshaler marshals Logs into bytes.
type Marshaler interface {
- // MarshalLogs the given pdata.Logs into bytes.
+ // MarshalLogs marshals the given Logs into bytes.
// If the error is not nil, the returned bytes slice cannot be used.
MarshalLogs(ld Logs) ([]byte, error)
}
-// Unmarshaler unmarshalls bytes into pdata.Logs.
+// Unmarshaler unmarshals bytes into Logs.
type Unmarshaler interface {
- // UnmarshalLogs the given bytes into pdata.Logs.
- // If the error is not nil, the returned pdata.Logs cannot be used.
+ // UnmarshalLogs unmarshals the given bytes into Logs.
+ // If the error is not nil, the returned Logs cannot be used.
UnmarshalLogs(buf []byte) (Logs, error)
}
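
Note: the encoding.go edits are doc-only, dropping the stale pdata.Logs naming. For reference, the interface is typically satisfied by the package's own marshalers; a minimal sketch using plog's JSON marshaler (not part of this diff):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/plog"
    )

    func main() {
        ld := plog.NewLogs()
        lr := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
        lr.Body().SetStr("hello")

        var m plog.Marshaler = &plog.JSONMarshaler{}
        buf, err := m.MarshalLogs(ld)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(buf))
    }
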
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go
index e9d8fa3daf9..80910c903fc 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go
@@ -1,15 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -21,11 +19,11 @@ import (
// Must use NewLogRecord function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecord struct {
- orig *otlplogs.LogRecord
+ orig *internal.LogRecord
state *internal.State
}
-func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord {
+func newLogRecord(orig *internal.LogRecord, state *internal.State) LogRecord {
return LogRecord{orig: orig, state: state}
}
@@ -34,8 +32,7 @@ func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLogRecord() LogRecord {
- state := internal.StateMutable
- return newLogRecord(&otlplogs.LogRecord{}, &state)
+ return newLogRecord(internal.NewLogRecord(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -47,8 +44,19 @@ func (ms LogRecord) MoveTo(dest LogRecord) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlplogs.LogRecord{}
+ internal.DeleteLogRecord(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// Timestamp returns the timestamp associated with this LogRecord.
+func (ms LogRecord) Timestamp() pcommon.Timestamp {
+ return pcommon.Timestamp(ms.orig.TimeUnixNano)
+}
+
+// SetTimestamp replaces the timestamp associated with this LogRecord.
+func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) {
+ ms.state.AssertMutable()
+ ms.orig.TimeUnixNano = uint64(v)
}
// ObservedTimestamp returns the observedtimestamp associated with this LogRecord.
@@ -62,37 +70,47 @@ func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) {
ms.orig.ObservedTimeUnixNano = uint64(v)
}
-// Timestamp returns the timestamp associated with this LogRecord.
-func (ms LogRecord) Timestamp() pcommon.Timestamp {
- return pcommon.Timestamp(ms.orig.TimeUnixNano)
+// SeverityNumber returns the severitynumber associated with this LogRecord.
+func (ms LogRecord) SeverityNumber() SeverityNumber {
+ return SeverityNumber(ms.orig.SeverityNumber)
}
-// SetTimestamp replaces the timestamp associated with this LogRecord.
-func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) {
+// SetSeverityNumber replaces the severitynumber associated with this LogRecord.
+func (ms LogRecord) SetSeverityNumber(v SeverityNumber) {
ms.state.AssertMutable()
- ms.orig.TimeUnixNano = uint64(v)
+ ms.orig.SeverityNumber = internal.SeverityNumber(v)
}
-// TraceID returns the traceid associated with this LogRecord.
-func (ms LogRecord) TraceID() pcommon.TraceID {
- return pcommon.TraceID(ms.orig.TraceId)
+// SeverityText returns the severitytext associated with this LogRecord.
+func (ms LogRecord) SeverityText() string {
+ return ms.orig.SeverityText
}
-// SetTraceID replaces the traceid associated with this LogRecord.
-func (ms LogRecord) SetTraceID(v pcommon.TraceID) {
+// SetSeverityText replaces the severitytext associated with this LogRecord.
+func (ms LogRecord) SetSeverityText(v string) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.SeverityText = v
}
-// SpanID returns the spanid associated with this LogRecord.
-func (ms LogRecord) SpanID() pcommon.SpanID {
- return pcommon.SpanID(ms.orig.SpanId)
+// Body returns the body associated with this LogRecord.
+func (ms LogRecord) Body() pcommon.Value {
+ return pcommon.Value(internal.NewValueWrapper(&ms.orig.Body, ms.state))
}
-// SetSpanID replaces the spanid associated with this LogRecord.
-func (ms LogRecord) SetSpanID(v pcommon.SpanID) {
+// Attributes returns the Attributes associated with this LogRecord.
+func (ms LogRecord) Attributes() pcommon.Map {
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
+}
+
+// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord.
+func (ms LogRecord) DroppedAttributesCount() uint32 {
+ return ms.orig.DroppedAttributesCount
+}
+
+// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord.
+func (ms LogRecord) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.DroppedAttributesCount = v
}
// Flags returns the flags associated with this LogRecord.
@@ -106,76 +124,41 @@ func (ms LogRecord) SetFlags(v LogRecordFlags) {
ms.orig.Flags = uint32(v)
}
-// EventName returns the eventname associated with this LogRecord.
-func (ms LogRecord) EventName() string {
- return ms.orig.EventName
-}
-
-// SetEventName replaces the eventname associated with this LogRecord.
-func (ms LogRecord) SetEventName(v string) {
- ms.state.AssertMutable()
- ms.orig.EventName = v
-}
-
-// SeverityText returns the severitytext associated with this LogRecord.
-func (ms LogRecord) SeverityText() string {
- return ms.orig.SeverityText
+// TraceID returns the traceid associated with this LogRecord.
+func (ms LogRecord) TraceID() pcommon.TraceID {
+ return pcommon.TraceID(ms.orig.TraceId)
}
-// SetSeverityText replaces the severitytext associated with this LogRecord.
-func (ms LogRecord) SetSeverityText(v string) {
+// SetTraceID replaces the traceid associated with this LogRecord.
+func (ms LogRecord) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.SeverityText = v
+ ms.orig.TraceId = internal.TraceID(v)
}
-// SeverityNumber returns the severitynumber associated with this LogRecord.
-func (ms LogRecord) SeverityNumber() SeverityNumber {
- return SeverityNumber(ms.orig.SeverityNumber)
+// SpanID returns the spanid associated with this LogRecord.
+func (ms LogRecord) SpanID() pcommon.SpanID {
+ return pcommon.SpanID(ms.orig.SpanId)
}
-// SetSeverityNumber replaces the severitynumber associated with this LogRecord.
-func (ms LogRecord) SetSeverityNumber(v SeverityNumber) {
+// SetSpanID replaces the spanid associated with this LogRecord.
+func (ms LogRecord) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SeverityNumber = otlplogs.SeverityNumber(v)
-}
-
-// Body returns the body associated with this LogRecord.
-func (ms LogRecord) Body() pcommon.Value {
- return pcommon.Value(internal.NewValue(&ms.orig.Body, ms.state))
-}
-
-// Attributes returns the Attributes associated with this LogRecord.
-func (ms LogRecord) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ ms.orig.SpanId = internal.SpanID(v)
}
-// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord.
-func (ms LogRecord) DroppedAttributesCount() uint32 {
- return ms.orig.DroppedAttributesCount
+// EventName returns the eventname associated with this LogRecord.
+func (ms LogRecord) EventName() string {
+ return ms.orig.EventName
}
-// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord.
-func (ms LogRecord) SetDroppedAttributesCount(v uint32) {
+// SetEventName replaces the eventname associated with this LogRecord.
+func (ms LogRecord) SetEventName(v string) {
ms.state.AssertMutable()
- ms.orig.DroppedAttributesCount = v
+ ms.orig.EventName = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms LogRecord) CopyTo(dest LogRecord) {
dest.state.AssertMutable()
- copyOrigLogRecord(dest.orig, ms.orig)
-}
-
-func copyOrigLogRecord(dest, src *otlplogs.LogRecord) {
- dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano
- dest.TimeUnixNano = src.TimeUnixNano
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
- dest.Flags = src.Flags
- dest.EventName = src.EventName
- dest.SeverityText = src.SeverityText
- dest.SeverityNumber = src.SeverityNumber
- internal.CopyOrigValue(&dest.Body, &src.Body)
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
+ internal.CopyLogRecord(dest.orig, ms.orig)
}
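The getter reshuffle above is cosmetic (the generator now emits accessors in proto field order); the behavioral change is MoveTo, which releases dest's old contents and swaps the underlying structs instead of copy-and-zero. The public surface is unchanged — a minimal sketch against the published plog/pcommon API:

package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	lr := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	lr.SetSeverityNumber(plog.SeverityNumberInfo)
	lr.SetSeverityText("INFO")
	lr.Body().SetStr("hello")                 // Body is a pcommon.Value view into the record
	lr.Attributes().PutStr("service", "demo") // Attributes is a pcommon.Map view
}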
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
index bdd40031b3a..58751158d7b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// LogRecordSlice logically represents a slice of LogRecord.
@@ -22,20 +21,19 @@ import (
// Must use NewLogRecordSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecordSlice struct {
- orig *[]*otlplogs.LogRecord
+ orig *[]*internal.LogRecord
state *internal.State
}
-func newLogRecordSlice(orig *[]*otlplogs.LogRecord, state *internal.State) LogRecordSlice {
+func newLogRecordSlice(orig *[]*internal.LogRecord, state *internal.State) LogRecordSlice {
return LogRecordSlice{orig: orig, state: state}
}
-// NewLogRecordSlice creates a LogRecordSlice with 0 elements.
+// NewLogRecordSlice creates a LogRecordSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLogRecordSlice() LogRecordSlice {
- orig := []*otlplogs.LogRecord(nil)
- state := internal.StateMutable
- return newLogRecordSlice(&orig, &state)
+ orig := []*internal.LogRecord(nil)
+ return newLogRecordSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es LogRecordSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap)
+ newOrig := make([]*internal.LogRecord, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es LogRecordSlice) EnsureCapacity(newCap int) {
// It returns the newly added LogRecord.
func (es LogRecordSlice) AppendEmpty() LogRecord {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlplogs.LogRecord{})
+ *es.orig = append(*es.orig, internal.NewLogRecord())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteLogRecord((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es LogRecordSlice) CopyTo(dest LogRecordSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigLogRecordSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyLogRecordPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the LogRecord elements within LogRecordSlice given the
@@ -155,18 +161,3 @@ func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigLogRecordSlice(dest, src []*otlplogs.LogRecord) []*otlplogs.LogRecord {
- if cap(dest) < len(src) {
- dest = make([]*otlplogs.LogRecord, len(src))
- data := make([]otlplogs.LogRecord, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigLogRecord(dest[i], src[i])
- }
- return dest
-}
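RemoveIf now hands removed elements back via internal.DeleteLogRecord and nils out moved-from slots so pooled memory can be reclaimed; observable behavior should be unchanged. An illustrative sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	lrs := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords()
	for i := 0; i < 3; i++ {
		lrs.AppendEmpty().SetSeverityNumber(plog.SeverityNumber(i + 1))
	}
	// Drop everything below TRACE2; the delete/nil bookkeeping is internal.
	lrs.RemoveIf(func(lr plog.LogRecord) bool {
		return lr.SeverityNumber() < plog.SeverityNumberTrace2
	})
	fmt.Println(lrs.Len()) // 2
}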
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go
new file mode 100644
index 00000000000..238ed8764d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logs.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package plog
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Logs is the top-level struct that is propagated through the logs pipeline.
+// Use NewLogs to create new instance, zero-initialized instance is not valid for use.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewLogs function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Logs internal.LogsWrapper
+
+func newLogs(orig *internal.ExportLogsServiceRequest, state *internal.State) Logs {
+ return Logs(internal.NewLogsWrapper(orig, state))
+}
+
+// NewLogs creates a new empty Logs.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewLogs() Logs {
+ return newLogs(internal.NewExportLogsServiceRequest(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Logs) MoveTo(dest Logs) {
+ ms.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ internal.DeleteExportLogsServiceRequest(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
+}
+
+// ResourceLogs returns the ResourceLogs associated with this Logs.
+func (ms Logs) ResourceLogs() ResourceLogsSlice {
+ return newResourceLogsSlice(&ms.getOrig().ResourceLogs, ms.getState())
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Logs) CopyTo(dest Logs) {
+ dest.getState().AssertMutable()
+ internal.CopyExportLogsServiceRequest(dest.getOrig(), ms.getOrig())
+}
+
+func (ms Logs) getOrig() *internal.ExportLogsServiceRequest {
+ return internal.GetLogsOrig(internal.LogsWrapper(ms))
+}
+
+func (ms Logs) getState() *internal.State {
+ return internal.GetLogsState(internal.LogsWrapper(ms))
+}
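Logs becomes a thin wrapper over internal.LogsWrapper, with the Copy/Delete logic centralized in the internal package. A minimal sketch of the (unchanged) move semantics:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	src := plog.NewLogs()
	src.ResourceLogs().AppendEmpty()
	dest := plog.NewLogs()
	src.MoveTo(dest) // src is left empty; dest now owns the resource logs
	fmt.Println(src.ResourceLogs().Len(), dest.ResourceLogs().Len()) // 0 1
}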
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go
index bf2c916329a..b3b64ff4461 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogs.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewResourceLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogs struct {
- orig *otlplogs.ResourceLogs
+ orig *internal.ResourceLogs
state *internal.State
}
-func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) ResourceLogs {
+func newResourceLogs(orig *internal.ResourceLogs, state *internal.State) ResourceLogs {
return ResourceLogs{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) Resourc
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceLogs() ResourceLogs {
- state := internal.StateMutable
- return newResourceLogs(&otlplogs.ResourceLogs{}, &state)
+ return newResourceLogs(internal.NewResourceLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ResourceLogs) MoveTo(dest ResourceLogs) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlplogs.ResourceLogs{}
+ internal.DeleteResourceLogs(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceLogs.
func (ms ResourceLogs) Resource() pcommon.Resource {
- return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state))
+ return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
+}
+
+// ScopeLogs returns the ScopeLogs associated with this ResourceLogs.
+func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice {
+ return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceLogs.
@@ -66,19 +69,8 @@ func (ms ResourceLogs) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// ScopeLogs returns the ScopeLogs associated with this ResourceLogs.
-func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice {
- return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceLogs) CopyTo(dest ResourceLogs) {
dest.state.AssertMutable()
- copyOrigResourceLogs(dest.orig, ms.orig)
-}
-
-func copyOrigResourceLogs(dest, src *otlplogs.ResourceLogs) {
- internal.CopyOrigResource(&dest.Resource, &src.Resource)
- dest.SchemaUrl = src.SchemaUrl
- dest.ScopeLogs = copyOrigScopeLogsSlice(dest.ScopeLogs, src.ScopeLogs)
+ internal.CopyResourceLogs(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
index b016f7cdf8d..d730aed983a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// ResourceLogsSlice logically represents a slice of ResourceLogs.
@@ -22,20 +21,19 @@ import (
// Must use NewResourceLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogsSlice struct {
- orig *[]*otlplogs.ResourceLogs
+ orig *[]*internal.ResourceLogs
state *internal.State
}
-func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs, state *internal.State) ResourceLogsSlice {
+func newResourceLogsSlice(orig *[]*internal.ResourceLogs, state *internal.State) ResourceLogsSlice {
return ResourceLogsSlice{orig: orig, state: state}
}
-// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements.
+// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceLogsSlice() ResourceLogsSlice {
- orig := []*otlplogs.ResourceLogs(nil)
- state := internal.StateMutable
- return newResourceLogsSlice(&orig, &state)
+ orig := []*internal.ResourceLogs(nil)
+ return newResourceLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ResourceLogsSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap)
+ newOrig := make([]*internal.ResourceLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ResourceLogsSlice) EnsureCapacity(newCap int) {
// It returns the newly added ResourceLogs.
func (es ResourceLogsSlice) AppendEmpty() ResourceLogs {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlplogs.ResourceLogs{})
+ *es.orig = append(*es.orig, internal.NewResourceLogs())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteResourceLogs((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigResourceLogsSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyResourceLogsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceLogs elements within ResourceLogsSlice given the
@@ -155,18 +161,3 @@ func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigResourceLogsSlice(dest, src []*otlplogs.ResourceLogs) []*otlplogs.ResourceLogs {
- if cap(dest) < len(src) {
- dest = make([]*otlplogs.ResourceLogs, len(src))
- data := make([]otlplogs.ResourceLogs, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigResourceLogs(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go
index 857cc921866..9cb55863ad2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogs.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewScopeLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogs struct {
- orig *otlplogs.ScopeLogs
+ orig *internal.ScopeLogs
state *internal.State
}
-func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs {
+func newScopeLogs(orig *internal.ScopeLogs, state *internal.State) ScopeLogs {
return ScopeLogs{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeLogs() ScopeLogs {
- state := internal.StateMutable
- return newScopeLogs(&otlplogs.ScopeLogs{}, &state)
+ return newScopeLogs(internal.NewScopeLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ScopeLogs) MoveTo(dest ScopeLogs) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlplogs.ScopeLogs{}
+ internal.DeleteScopeLogs(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeLogs.
func (ms ScopeLogs) Scope() pcommon.InstrumentationScope {
- return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state))
+ return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
+}
+
+// LogRecords returns the LogRecords associated with this ScopeLogs.
+func (ms ScopeLogs) LogRecords() LogRecordSlice {
+ return newLogRecordSlice(&ms.orig.LogRecords, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeLogs.
@@ -66,19 +69,8 @@ func (ms ScopeLogs) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// LogRecords returns the LogRecords associated with this ScopeLogs.
-func (ms ScopeLogs) LogRecords() LogRecordSlice {
- return newLogRecordSlice(&ms.orig.LogRecords, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeLogs) CopyTo(dest ScopeLogs) {
dest.state.AssertMutable()
- copyOrigScopeLogs(dest.orig, ms.orig)
-}
-
-func copyOrigScopeLogs(dest, src *otlplogs.ScopeLogs) {
- internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.SchemaUrl = src.SchemaUrl
- dest.LogRecords = copyOrigLogRecordSlice(dest.LogRecords, src.LogRecords)
+ internal.CopyScopeLogs(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
index 1eff72908ba..50309c7df9e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// ScopeLogsSlice logically represents a slice of ScopeLogs.
@@ -22,20 +21,19 @@ import (
// Must use NewScopeLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogsSlice struct {
- orig *[]*otlplogs.ScopeLogs
+ orig *[]*internal.ScopeLogs
state *internal.State
}
-func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs, state *internal.State) ScopeLogsSlice {
+func newScopeLogsSlice(orig *[]*internal.ScopeLogs, state *internal.State) ScopeLogsSlice {
return ScopeLogsSlice{orig: orig, state: state}
}
-// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements.
+// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeLogsSlice() ScopeLogsSlice {
- orig := []*otlplogs.ScopeLogs(nil)
- state := internal.StateMutable
- return newScopeLogsSlice(&orig, &state)
+ orig := []*internal.ScopeLogs(nil)
+ return newScopeLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ScopeLogsSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap)
+ newOrig := make([]*internal.ScopeLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ScopeLogsSlice) EnsureCapacity(newCap int) {
// It returns the newly added ScopeLogs.
func (es ScopeLogsSlice) AppendEmpty() ScopeLogs {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlplogs.ScopeLogs{})
+ *es.orig = append(*es.orig, internal.NewScopeLogs())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteScopeLogs((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigScopeLogsSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyScopeLogsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeLogs elements within ScopeLogsSlice given the
@@ -155,18 +161,3 @@ func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigScopeLogsSlice(dest, src []*otlplogs.ScopeLogs) []*otlplogs.ScopeLogs {
- if cap(dest) < len(src) {
- dest = make([]*otlplogs.ScopeLogs, len(src))
- data := make([]otlplogs.ScopeLogs, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigScopeLogs(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go
index 5e1cdd70d5b..7d7bdaae256 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/json.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/json.go
@@ -4,134 +4,40 @@
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
- "bytes"
- "fmt"
+ "slices"
- jsoniter "github.com/json-iterator/go"
-
- "go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
-// JSONMarshaler marshals pdata.Logs to JSON bytes using the OTLP/JSON format.
+// JSONMarshaler marshals Logs to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalLogs to the OTLP/JSON format.
func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
- buf := bytes.Buffer{}
- pb := internal.LogsToProto(internal.Logs(ld))
- err := json.Marshal(&buf, &pb)
- return buf.Bytes(), err
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ ld.getOrig().MarshalJSON(dest)
+ if dest.Error() != nil {
+ return nil, dest.Error()
+ }
+ return slices.Clone(dest.Buffer()), nil
}
var _ Unmarshaler = (*JSONUnmarshaler)(nil)
-// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Logs.
+// JSONUnmarshaler unmarshals OTLP/JSON formatted bytes to Logs.
type JSONUnmarshaler struct{}
-// UnmarshalLogs from OTLP/JSON format into pdata.Logs.
+// UnmarshalLogs from OTLP/JSON format into Logs.
func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
- iter := jsoniter.ConfigFastest.BorrowIterator(buf)
- defer jsoniter.ConfigFastest.ReturnIterator(iter)
+ iter := json.BorrowIterator(buf)
+ defer json.ReturnIterator(iter)
ld := NewLogs()
- ld.unmarshalJsoniter(iter)
- if iter.Error != nil {
- return Logs{}, iter.Error
+ ld.getOrig().UnmarshalJSON(iter)
+ if iter.Error() != nil {
+ return Logs{}, iter.Error()
}
otlp.MigrateLogs(ld.getOrig().ResourceLogs)
return ld, nil
}
-
-func (ms Logs) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resource_logs", "resourceLogs":
- iter.ReadArrayCB(func(*jsoniter.Iterator) bool {
- ms.ResourceLogs().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ResourceLogs) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resource":
- json.ReadResource(iter, &ms.orig.Resource)
- case "scope_logs", "scopeLogs":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.ScopeLogs().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ScopeLogs) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "scope":
- json.ReadScope(iter, &ms.orig.Scope)
- case "log_records", "logRecords":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.LogRecords().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms LogRecord) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "observed_time_unix_nano", "observedTimeUnixNano":
- ms.orig.ObservedTimeUnixNano = json.ReadUint64(iter)
- case "severity_number", "severityNumber":
- ms.orig.SeverityNumber = otlplogs.SeverityNumber(json.ReadEnumValue(iter, otlplogs.SeverityNumber_value))
- case "severity_text", "severityText":
- ms.orig.SeverityText = iter.ReadString()
- case "event_name", "eventName":
- ms.orig.EventName = iter.ReadString()
- case "body":
- json.ReadValue(iter, &ms.orig.Body)
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- ms.orig.DroppedAttributesCount = json.ReadUint32(iter)
- case "flags":
- ms.orig.Flags = json.ReadUint32(iter)
- case "traceId", "trace_id":
- if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readLog.traceId", fmt.Sprintf("parse trace_id:%v", err))
- }
- case "spanId", "span_id":
- if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readLog.spanId", fmt.Sprintf("parse span_id:%v", err))
- }
- default:
- iter.Skip()
- }
- return true
- })
-}
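The hand-rolled jsoniter callbacks are gone; encoding now delegates to generated MarshalJSON/UnmarshalJSON on the internal structs, with the stream borrowed from the internal json package. The round trip looks the same from the outside — a minimal sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("hi")

	buf, err := (&plog.JSONMarshaler{}).MarshalLogs(ld)
	if err != nil {
		panic(err)
	}
	back, err := (&plog.JSONUnmarshaler{}).UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.LogRecordCount()) // 1
}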
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go
index 490526090f8..8fd939e7f35 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/logs.go
@@ -3,41 +3,14 @@
package plog // import "go.opentelemetry.io/collector/pdata/plog"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
-)
-
-// Logs is the top-level struct that is propagated through the logs pipeline.
-// Use NewLogs to create new instance, zero-initialized instance is not valid for use.
-type Logs internal.Logs
-
-func newLogs(orig *otlpcollectorlog.ExportLogsServiceRequest) Logs {
- state := internal.StateMutable
- return Logs(internal.NewLogs(orig, &state))
-}
-
-func (ms Logs) getOrig() *otlpcollectorlog.ExportLogsServiceRequest {
- return internal.GetOrigLogs(internal.Logs(ms))
-}
-
-func (ms Logs) getState() *internal.State {
- return internal.GetLogsState(internal.Logs(ms))
-}
-
-// NewLogs creates a new Logs struct.
-func NewLogs() Logs {
- return newLogs(&otlpcollectorlog.ExportLogsServiceRequest{})
+// MarkReadOnly marks the Logs as shared so that no further modifications can be done on it.
+func (ms Logs) MarkReadOnly() {
+ ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Logs instance is read-only.
func (ms Logs) IsReadOnly() bool {
- return *ms.getState() == internal.StateReadOnly
-}
-
-// CopyTo copies the Logs instance overriding the destination.
-func (ms Logs) CopyTo(dest Logs) {
- ms.ResourceLogs().CopyTo(dest.ResourceLogs())
+ return ms.getState().IsReadOnly()
}
// LogRecordCount calculates the total number of log records.
@@ -54,13 +27,3 @@ func (ms Logs) LogRecordCount() int {
}
return logCount
}
-
-// ResourceLogs returns the ResourceLogsSlice associated with this Logs.
-func (ms Logs) ResourceLogs() ResourceLogsSlice {
- return newResourceLogsSlice(&ms.getOrig().ResourceLogs, internal.GetLogsState(internal.Logs(ms)))
-}
-
-// MarkReadOnly marks the Logs as shared so that no further modifications can be done on it.
-func (ms Logs) MarkReadOnly() {
- internal.SetLogsState(internal.Logs(ms), internal.StateReadOnly)
-}
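MarkReadOnly and IsReadOnly now route through the State helpers rather than comparing against internal.StateMutable directly. A small sketch of the contract (mutating a read-only instance panics via AssertMutable):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	ld.MarkReadOnly()
	fmt.Println(ld.IsReadOnly()) // true
	// Any mutation now panics, e.g.:
	// ld.ResourceLogs().AppendEmpty() // panics
}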
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go
index a4cb09eb6ea..1f717928f93 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/pb.go
@@ -3,35 +3,31 @@
package plog // import "go.opentelemetry.io/collector/pdata/plog"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
-)
-
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
- pb := internal.LogsToProto(internal.Logs(ld))
- return pb.Marshal()
+ size := ld.getOrig().SizeProto()
+ buf := make([]byte, size)
+ _ = ld.getOrig().MarshalProto(buf)
+ return buf, nil
}
func (e *ProtoMarshaler) LogsSize(ld Logs) int {
- pb := internal.LogsToProto(internal.Logs(ld))
- return pb.Size()
+ return ld.getOrig().SizeProto()
}
-func (e *ProtoMarshaler) ResourceLogsSize(rl ResourceLogs) int {
- return rl.orig.Size()
+func (e *ProtoMarshaler) ResourceLogsSize(ld ResourceLogs) int {
+ return ld.orig.SizeProto()
}
-func (e *ProtoMarshaler) ScopeLogsSize(sl ScopeLogs) int {
- return sl.orig.Size()
+func (e *ProtoMarshaler) ScopeLogsSize(ld ScopeLogs) int {
+ return ld.orig.SizeProto()
}
-func (e *ProtoMarshaler) LogRecordSize(lr LogRecord) int {
- return lr.orig.Size()
+func (e *ProtoMarshaler) LogRecordSize(ld LogRecord) int {
+ return ld.orig.SizeProto()
}
var _ Unmarshaler = (*ProtoUnmarshaler)(nil)
@@ -39,7 +35,10 @@ var _ Unmarshaler = (*ProtoUnmarshaler)(nil)
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
- pb := otlplogs.LogsData{}
- err := pb.Unmarshal(buf)
- return Logs(internal.LogsFromProto(pb)), err
+ ld := NewLogs()
+ err := ld.getOrig().UnmarshalProto(buf)
+ if err != nil {
+ return Logs{}, err
+ }
+ return ld, nil
}
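ProtoMarshaler now sizes the buffer up front with SizeProto and marshals into it, instead of going through an intermediate LogsData. Since LogsSize uses the same SizeProto, callers can pre-allocate exactly — a minimal sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	var m plog.ProtoMarshaler
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	n := m.LogsSize(ld) // same SizeProto used internally by MarshalLogs
	buf, err := m.MarshalLogs(ld)
	if err != nil {
		panic(err)
	}
	fmt.Println(n == len(buf)) // true
}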
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go
index 53a9d4179c4..5df0690872b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/severity_number.go
@@ -4,38 +4,38 @@
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
- otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// SeverityNumber represents severity number of a log record.
type SeverityNumber int32
const (
- SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED)
- SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE)
- SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2)
- SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3)
- SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4)
- SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG)
- SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2)
- SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3)
- SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4)
- SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO)
- SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2)
- SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3)
- SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4)
- SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN)
- SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2)
- SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3)
- SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4)
- SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR)
- SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2)
- SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3)
- SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4)
- SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL)
- SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2)
- SeverityNumberFatal3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3)
- SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4)
+ SeverityNumberUnspecified = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED)
+ SeverityNumberTrace = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE)
+ SeverityNumberTrace2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE2)
+ SeverityNumberTrace3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE3)
+ SeverityNumberTrace4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE4)
+ SeverityNumberDebug = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG)
+ SeverityNumberDebug2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG2)
+ SeverityNumberDebug3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG3)
+ SeverityNumberDebug4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG4)
+ SeverityNumberInfo = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO)
+ SeverityNumberInfo2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO2)
+ SeverityNumberInfo3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO3)
+ SeverityNumberInfo4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO4)
+ SeverityNumberWarn = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN)
+ SeverityNumberWarn2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN2)
+ SeverityNumberWarn3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN3)
+ SeverityNumberWarn4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN4)
+ SeverityNumberError = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR)
+ SeverityNumberError2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR2)
+ SeverityNumberError3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR3)
+ SeverityNumberError4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR4)
+ SeverityNumberFatal = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL)
+ SeverityNumberFatal2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL2)
+ SeverityNumberFatal3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL3)
+ SeverityNumberFatal4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL4)
)
// String returns the string representation of the SeverityNumber.
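Only the source package of the enum values moves (otlplogs to internal); the wire values are untouched. An illustrative check:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	sn := plog.SeverityNumberWarn
	// 13: SEVERITY_NUMBER_WARN keeps its wire value across the move.
	fmt.Println(int32(sn), sn.String())
}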
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go
index 0ba2b28c29b..f2abb4f62b8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go
@@ -4,7 +4,7 @@
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// AggregationTemporality defines how a metric aggregator reports aggregated values.
@@ -13,11 +13,11 @@ type AggregationTemporality int32
const (
// AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used.
- AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
+ AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
// AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time.
- AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
+ AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
// AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
- AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
+ AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
)
// String returns the string representation of the AggregationTemporality.
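Same mechanical move for the temporality enum. A short sketch, assuming the published pmetric API:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	s := m.SetEmptySum()
	s.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	fmt.Println(s.AggregationTemporality()) // Cumulative
}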
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go
index f8f6168b20b..e0681f6d17f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go
@@ -1,15 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -24,11 +22,11 @@ import (
// Must use NewExemplar function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Exemplar struct {
- orig *otlpmetrics.Exemplar
+ orig *internal.Exemplar
state *internal.State
}
-func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar {
+func newExemplar(orig *internal.Exemplar, state *internal.State) Exemplar {
return Exemplar{orig: orig, state: state}
}
@@ -37,8 +35,7 @@ func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExemplar() Exemplar {
- state := internal.StateMutable
- return newExemplar(&otlpmetrics.Exemplar{}, &state)
+ return newExemplar(internal.NewExemplar(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -50,8 +47,13 @@ func (ms Exemplar) MoveTo(dest Exemplar) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Exemplar{}
+ internal.DeleteExemplar(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// FilteredAttributes returns the FilteredAttributes associated with this Exemplar.
+func (ms Exemplar) FilteredAttributes() pcommon.Map {
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.FilteredAttributes, ms.state))
}
// Timestamp returns the timestamp associated with this Exemplar.
@@ -69,9 +71,9 @@ func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) {
// Calling this function on zero-initialized Exemplar will cause a panic.
func (ms Exemplar) ValueType() ExemplarValueType {
switch ms.orig.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
+ case *internal.Exemplar_AsDouble:
return ExemplarValueTypeDouble
- case *otlpmetrics.Exemplar_AsInt:
+ case *internal.Exemplar_AsInt:
return ExemplarValueTypeInt
}
return ExemplarValueTypeEmpty
@@ -85,9 +87,14 @@ func (ms Exemplar) DoubleValue() float64 {
// SetDoubleValue replaces the double associated with this Exemplar.
func (ms Exemplar) SetDoubleValue(v float64) {
ms.state.AssertMutable()
- ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{
- AsDouble: v,
+ var ov *internal.Exemplar_AsDouble
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Exemplar_AsDouble{}
+ } else {
+ ov = internal.ProtoPoolExemplar_AsDouble.Get().(*internal.Exemplar_AsDouble)
}
+ ov.AsDouble = v
+ ms.orig.Value = ov
}
// IntValue returns the int associated with this Exemplar.
@@ -98,14 +105,14 @@ func (ms Exemplar) IntValue() int64 {
// SetIntValue replaces the int associated with this Exemplar.
func (ms Exemplar) SetIntValue(v int64) {
ms.state.AssertMutable()
- ms.orig.Value = &otlpmetrics.Exemplar_AsInt{
- AsInt: v,
+ var ov *internal.Exemplar_AsInt
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Exemplar_AsInt{}
+ } else {
+ ov = internal.ProtoPoolExemplar_AsInt.Get().(*internal.Exemplar_AsInt)
}
-}
-
-// FilteredAttributes returns the FilteredAttributes associated with this Exemplar.
-func (ms Exemplar) FilteredAttributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.FilteredAttributes, ms.state))
+ ov.AsInt = v
+ ms.orig.Value = ov
}
// TraceID returns the traceid associated with this Exemplar.
@@ -116,7 +123,7 @@ func (ms Exemplar) TraceID() pcommon.TraceID {
// SetTraceID replaces the traceid associated with this Exemplar.
func (ms Exemplar) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Exemplar.
@@ -127,24 +134,11 @@ func (ms Exemplar) SpanID() pcommon.SpanID {
// SetSpanID replaces the spanid associated with this Exemplar.
func (ms Exemplar) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.SpanId = internal.SpanID(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Exemplar) CopyTo(dest Exemplar) {
dest.state.AssertMutable()
- copyOrigExemplar(dest.orig, ms.orig)
-}
-
-func copyOrigExemplar(dest, src *otlpmetrics.Exemplar) {
- dest.TimeUnixNano = src.TimeUnixNano
- switch t := src.Value.(type) {
- case *otlpmetrics.Exemplar_AsDouble:
- dest.Value = &otlpmetrics.Exemplar_AsDouble{AsDouble: t.AsDouble}
- case *otlpmetrics.Exemplar_AsInt:
- dest.Value = &otlpmetrics.Exemplar_AsInt{AsInt: t.AsInt}
- }
- dest.FilteredAttributes = internal.CopyOrigMap(dest.FilteredAttributes, src.FilteredAttributes)
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
+ internal.CopyExemplar(dest.orig, ms.orig)
}
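The oneof setters now optionally draw wrapper objects from a pool behind the internal UseProtoPooling gate; when pooling is off, the old allocate-per-call path is kept. Nothing changes for callers — a minimal sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ex := pmetric.NewExemplar()
	ex.SetIntValue(42) // pooling (if enabled) is invisible at this level
	switch ex.ValueType() {
	case pmetric.ExemplarValueTypeInt:
		fmt.Println(ex.IntValue())
	case pmetric.ExemplarValueTypeDouble:
		fmt.Println(ex.DoubleValue())
	}
}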
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
index 49dd8db254f..d6fb809cf7a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -10,7 +10,6 @@ import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExemplarSlice logically represents a slice of Exemplar.
@@ -21,20 +20,19 @@ import (
// Must use NewExemplarSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExemplarSlice struct {
- orig *[]otlpmetrics.Exemplar
+ orig *[]internal.Exemplar
state *internal.State
}
-func newExemplarSlice(orig *[]otlpmetrics.Exemplar, state *internal.State) ExemplarSlice {
+func newExemplarSlice(orig *[]internal.Exemplar, state *internal.State) ExemplarSlice {
return ExemplarSlice{orig: orig, state: state}
}
-// NewExemplarSlice creates a ExemplarSlice with 0 elements.
+// NewExemplarSlice creates an ExemplarSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExemplarSlice() ExemplarSlice {
- orig := []otlpmetrics.Exemplar(nil)
- state := internal.StateMutable
- return newExemplarSlice(&orig, &state)
+ orig := []internal.Exemplar(nil)
+ return newExemplarSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -90,7 +88,7 @@ func (es ExemplarSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]otlpmetrics.Exemplar, len(*es.orig), newCap)
+ newOrig := make([]internal.Exemplar, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -99,7 +97,7 @@ func (es ExemplarSlice) EnsureCapacity(newCap int) {
// It returns the newly added Exemplar.
func (es ExemplarSlice) AppendEmpty() Exemplar {
es.state.AssertMutable()
- *es.orig = append(*es.orig, otlpmetrics.Exemplar{})
+ *es.orig = append(*es.orig, internal.Exemplar{})
return es.At(es.Len() - 1)
}
@@ -128,6 +126,7 @@ func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteExemplar(&(*es.orig)[i], false)
continue
}
if newLen == i {
@@ -136,6 +135,7 @@ func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ (*es.orig)[i].Reset()
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -144,16 +144,8 @@ func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExemplarSlice) CopyTo(dest ExemplarSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigExemplarSlice(*dest.orig, *es.orig)
-}
-
-func copyOrigExemplarSlice(dest, src []otlpmetrics.Exemplar) []otlpmetrics.Exemplar {
- if cap(dest) < len(src) {
- dest = make([]otlpmetrics.Exemplar, len(src))
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigExemplar(&dest[i], &src[i])
+ if es.orig == dest.orig {
+ return
}
- return dest
+ *dest.orig = internal.CopyExemplarSlice(*dest.orig, *es.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go
index ceddeacc357..9970cb5878c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
@@ -20,11 +19,11 @@ import (
// Must use NewExponentialHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogram struct {
- orig *otlpmetrics.ExponentialHistogram
+ orig *internal.ExponentialHistogram
state *internal.State
}
-func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *internal.State) ExponentialHistogram {
+func newExponentialHistogram(orig *internal.ExponentialHistogram, state *internal.State) ExponentialHistogram {
return ExponentialHistogram{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *inte
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogram() ExponentialHistogram {
- state := internal.StateMutable
- return newExponentialHistogram(&otlpmetrics.ExponentialHistogram{}, &state)
+ return newExponentialHistogram(internal.NewExponentialHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,8 +44,13 @@ func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.ExponentialHistogram{}
+ internal.DeleteExponentialHistogram(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// DataPoints returns the DataPoints associated with this ExponentialHistogram.
+func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice {
+ return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this ExponentialHistogram.
@@ -58,21 +61,11 @@ func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality {
// SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram.
func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
- ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
-}
-
-// DataPoints returns the DataPoints associated with this ExponentialHistogram.
-func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice {
- return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
+ ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) {
dest.state.AssertMutable()
- copyOrigExponentialHistogram(dest.orig, ms.orig)
-}
-
-func copyOrigExponentialHistogram(dest, src *otlpmetrics.ExponentialHistogram) {
- dest.AggregationTemporality = src.AggregationTemporality
- dest.DataPoints = copyOrigExponentialHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
+ internal.CopyExponentialHistogram(dest.orig, ms.orig)
}
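
The MoveTo hunk above (repeated for every generated type below) releases the destination's old contents and swaps the two backing structs instead of copy-and-zero; the caller-visible contract stays the same. A minimal sketch against the public pmetric API, for orientation only (not part of this diff):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewExponentialHistogram()
	src.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
	src.DataPoints().AppendEmpty().SetCount(7)

	dest := pmetric.NewExponentialHistogram()
	src.MoveTo(dest)

	fmt.Println(dest.DataPoints().Len()) // 1; src is left empty
}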
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go
index 299d13f38ee..168fdebb97a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -23,11 +22,11 @@ import (
// Must use NewExponentialHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPoint struct {
- orig *otlpmetrics.ExponentialHistogramDataPoint
+ orig *internal.ExponentialHistogramDataPoint
state *internal.State
}
-func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint {
+func newExponentialHistogramDataPoint(orig *internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint {
return ExponentialHistogramDataPoint{orig: orig, state: state}
}
@@ -36,8 +35,7 @@ func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramData
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint {
- state := internal.StateMutable
- return newExponentialHistogramDataPoint(&otlpmetrics.ExponentialHistogramDataPoint{}, &state)
+ return newExponentialHistogramDataPoint(internal.NewExponentialHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -49,13 +47,13 @@ func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoin
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.ExponentialHistogramDataPoint{}
+ internal.DeleteExponentialHistogramDataPoint(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint.
@@ -91,6 +89,29 @@ func (ms ExponentialHistogramDataPoint) SetCount(v uint64) {
ms.orig.Count = v
}
+// Sum returns the sum associated with this ExponentialHistogramDataPoint.
+func (ms ExponentialHistogramDataPoint) Sum() float64 {
+ return ms.orig.GetSum()
+}
+
+// HasSum returns true if the ExponentialHistogramDataPoint contains a
+// Sum value, false otherwise.
+func (ms ExponentialHistogramDataPoint) HasSum() bool {
+ return ms.orig.Sum_ != nil
+}
+
+// SetSum replaces the sum associated with this ExponentialHistogramDataPoint.
+func (ms ExponentialHistogramDataPoint) SetSum(v float64) {
+ ms.state.AssertMutable()
+ ms.orig.Sum_ = &internal.ExponentialHistogramDataPoint_Sum{Sum: v}
+}
+
+// RemoveSum removes the sum associated with this ExponentialHistogramDataPoint.
+func (ms ExponentialHistogramDataPoint) RemoveSum() {
+ ms.state.AssertMutable()
+ ms.orig.Sum_ = nil
+}
+
// Scale returns the scale associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Scale() int32 {
return ms.orig.Scale
@@ -123,11 +144,6 @@ func (ms ExponentialHistogramDataPoint) Negative() ExponentialHistogramDataPoint
return newExponentialHistogramDataPointBuckets(&ms.orig.Negative, ms.state)
}
-// Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint.
-func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice {
- return newExemplarSlice(&ms.orig.Exemplars, ms.state)
-}
-
// Flags returns the flags associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
@@ -139,27 +155,9 @@ func (ms ExponentialHistogramDataPoint) SetFlags(v DataPointFlags) {
ms.orig.Flags = uint32(v)
}
-// Sum returns the sum associated with this ExponentialHistogramDataPoint.
-func (ms ExponentialHistogramDataPoint) Sum() float64 {
- return ms.orig.GetSum()
-}
-
-// HasSum returns true if the ExponentialHistogramDataPoint contains a
-// Sum value, false otherwise.
-func (ms ExponentialHistogramDataPoint) HasSum() bool {
- return ms.orig.Sum_ != nil
-}
-
-// SetSum replaces the sum associated with this ExponentialHistogramDataPoint.
-func (ms ExponentialHistogramDataPoint) SetSum(v float64) {
- ms.state.AssertMutable()
- ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: v}
-}
-
-// RemoveSum removes the sum associated with this ExponentialHistogramDataPoint.
-func (ms ExponentialHistogramDataPoint) RemoveSum() {
- ms.state.AssertMutable()
- ms.orig.Sum_ = nil
+// Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint.
+func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice {
+ return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Min returns the min associated with this ExponentialHistogramDataPoint.
@@ -168,7 +166,7 @@ func (ms ExponentialHistogramDataPoint) Min() float64 {
}
// HasMin returns true if the ExponentialHistogramDataPoint contains a
-// Min value, false otherwise.
+// Min value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
@@ -176,7 +174,7 @@ func (ms ExponentialHistogramDataPoint) HasMin() bool {
// SetMin replaces the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
- ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: v}
+ ms.orig.Min_ = &internal.ExponentialHistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this ExponentialHistogramDataPoint.
@@ -191,7 +189,7 @@ func (ms ExponentialHistogramDataPoint) Max() float64 {
}
// HasMax returns true if the ExponentialHistogramDataPoint contains a
-// Max value, false otherwise.
+// Max value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
@@ -199,7 +197,7 @@ func (ms ExponentialHistogramDataPoint) HasMax() bool {
// SetMax replaces the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
- ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: v}
+ ms.orig.Max_ = &internal.ExponentialHistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this ExponentialHistogramDataPoint.
@@ -222,34 +220,5 @@ func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) {
dest.state.AssertMutable()
- copyOrigExponentialHistogramDataPoint(dest.orig, ms.orig)
-}
-
-func copyOrigExponentialHistogramDataPoint(dest, src *otlpmetrics.ExponentialHistogramDataPoint) {
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.StartTimeUnixNano = src.StartTimeUnixNano
- dest.TimeUnixNano = src.TimeUnixNano
- dest.Count = src.Count
- dest.Scale = src.Scale
- dest.ZeroCount = src.ZeroCount
- copyOrigExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive)
- copyOrigExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative)
- dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
- dest.Flags = src.Flags
- if src.Sum_ == nil {
- dest.Sum_ = nil
- } else {
- dest.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: src.GetSum()}
- }
- if src.Min_ == nil {
- dest.Min_ = nil
- } else {
- dest.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: src.GetMin()}
- }
- if src.Max_ == nil {
- dest.Max_ = nil
- } else {
- dest.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: src.GetMax()}
- }
- dest.ZeroThreshold = src.ZeroThreshold
+ internal.CopyExponentialHistogramDataPoint(dest.orig, ms.orig)
}
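
The Sum/Min/Max accessors above only moved within the file and switched to the internal oneof wrappers; the optional-field contract is unchanged. A short sketch of that contract using only the public API:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewExponentialHistogramDataPoint()
	dp.SetCount(10)
	dp.SetSum(123.4) // sets the optional field

	if dp.HasSum() {
		fmt.Println(dp.Sum()) // 123.4
	}

	dp.RemoveSum() // clears it again
	fmt.Println(dp.HasSum()) // false
}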
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go
index b4d0666ae54..5864cd1a3d8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewExponentialHistogramDataPointBuckets function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointBuckets struct {
- orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets
+ orig *internal.ExponentialHistogramDataPointBuckets
state *internal.State
}
-func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, state *internal.State) ExponentialHistogramDataPointBuckets {
+func newExponentialHistogramDataPointBuckets(orig *internal.ExponentialHistogramDataPointBuckets, state *internal.State) ExponentialHistogramDataPointBuckets {
return ExponentialHistogramDataPointBuckets{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistog
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets {
- state := internal.StateMutable
- return newExponentialHistogramDataPointBuckets(&otlpmetrics.ExponentialHistogramDataPoint_Buckets{}, &state)
+ return newExponentialHistogramDataPointBuckets(internal.NewExponentialHistogramDataPointBuckets(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,8 +44,8 @@ func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramD
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.ExponentialHistogramDataPoint_Buckets{}
+ internal.DeleteExponentialHistogramDataPointBuckets(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Offset returns the offset associated with this ExponentialHistogramDataPointBuckets.
@@ -61,18 +59,13 @@ func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) {
ms.orig.Offset = v
}
-// BucketCounts returns the bucketcounts associated with this ExponentialHistogramDataPointBuckets.
+// BucketCounts returns the BucketCounts associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice {
- return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state))
+ return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) {
dest.state.AssertMutable()
- copyOrigExponentialHistogramDataPointBuckets(dest.orig, ms.orig)
-}
-
-func copyOrigExponentialHistogramDataPointBuckets(dest, src *otlpmetrics.ExponentialHistogramDataPoint_Buckets) {
- dest.Offset = src.Offset
- dest.BucketCounts = internal.CopyOrigUInt64Slice(dest.BucketCounts, src.BucketCounts)
+ internal.CopyExponentialHistogramDataPointBuckets(dest.orig, ms.orig)
}
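
For orientation, the buckets type is just an offset plus a pcommon.UInt64Slice of counts; a minimal usage sketch (assuming the public pmetric/pcommon surface, not part of this diff):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	b := pmetric.NewExponentialHistogramDataPointBuckets()
	b.SetOffset(-2) // index of the first populated bucket
	b.BucketCounts().FromRaw([]uint64{1, 0, 3})

	fmt.Println(b.Offset(), b.BucketCounts().Len()) // -2 3
}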
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
index 41c194d7729..c672d5cedaa 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint.
@@ -22,20 +21,19 @@ import (
// Must use NewExponentialHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointSlice struct {
- orig *[]*otlpmetrics.ExponentialHistogramDataPoint
+ orig *[]*internal.ExponentialHistogramDataPoint
state *internal.State
}
-func newExponentialHistogramDataPointSlice(orig *[]*otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice {
+func newExponentialHistogramDataPointSlice(orig *[]*internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice {
return ExponentialHistogramDataPointSlice{orig: orig, state: state}
}
-// NewExponentialHistogramDataPointSlice creates a ExponentialHistogramDataPointSlice with 0 elements.
+// NewExponentialHistogramDataPointSlice creates an ExponentialHistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice {
- orig := []*otlpmetrics.ExponentialHistogramDataPoint(nil)
- state := internal.StateMutable
- return newExponentialHistogramDataPointSlice(&orig, &state)
+ orig := []*internal.ExponentialHistogramDataPoint(nil)
+ return newExponentialHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.ExponentialHistogramDataPoint, len(*es.orig), newCap)
+ newOrig := make([]*internal.ExponentialHistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) {
// It returns the newly added ExponentialHistogramDataPoint.
func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.ExponentialHistogramDataPoint{})
+ *es.orig = append(*es.orig, internal.NewExponentialHistogramDataPoint())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogra
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteExponentialHistogramDataPoint((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogra
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogra
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDataPointSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigExponentialHistogramDataPointSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyExponentialHistogramDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the
@@ -155,18 +161,3 @@ func (es ExponentialHistogramDataPointSlice) Sort(less func(a, b ExponentialHist
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigExponentialHistogramDataPointSlice(dest, src []*otlpmetrics.ExponentialHistogramDataPoint) []*otlpmetrics.ExponentialHistogramDataPoint {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.ExponentialHistogramDataPoint, len(src))
- data := make([]otlpmetrics.ExponentialHistogramDataPoint, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigExponentialHistogramDataPoint(dest[i], src[i])
- }
- return dest
-}
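
The RemoveIf hunks now nil out removed entries so their backing structs can be released (or pooled), and CopyTo short-circuits self-copies; neither changes what callers observe. A small sketch of the unchanged caller-side behavior:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dps := pmetric.NewExponentialHistogramDataPointSlice()
	dps.AppendEmpty().SetCount(0)
	dps.AppendEmpty().SetCount(5)

	// Drop empty points; with this change the removed elements are
	// freed internally rather than left referenced by the backing array.
	dps.RemoveIf(func(dp pmetric.ExponentialHistogramDataPoint) bool {
		return dp.Count() == 0
	})

	fmt.Println(dps.Len()) // 1
}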
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go
index 46ca09fd1b0..c05f403a6bc 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
@@ -19,11 +18,11 @@ import (
// Must use NewGauge function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Gauge struct {
- orig *otlpmetrics.Gauge
+ orig *internal.Gauge
state *internal.State
}
-func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge {
+func newGauge(orig *internal.Gauge, state *internal.State) Gauge {
return Gauge{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewGauge() Gauge {
- state := internal.StateMutable
- return newGauge(&otlpmetrics.Gauge{}, &state)
+ return newGauge(internal.NewGauge(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,8 @@ func (ms Gauge) MoveTo(dest Gauge) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Gauge{}
+ internal.DeleteGauge(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Gauge.
@@ -57,9 +55,5 @@ func (ms Gauge) DataPoints() NumberDataPointSlice {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Gauge) CopyTo(dest Gauge) {
dest.state.AssertMutable()
- copyOrigGauge(dest.orig, ms.orig)
-}
-
-func copyOrigGauge(dest, src *otlpmetrics.Gauge) {
- dest.DataPoints = copyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints)
+ internal.CopyGauge(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go
index 8926bf69837..924cd4f1d70 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.
@@ -19,11 +18,11 @@ import (
// Must use NewHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Histogram struct {
- orig *otlpmetrics.Histogram
+ orig *internal.Histogram
state *internal.State
}
-func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram {
+func newHistogram(orig *internal.Histogram, state *internal.State) Histogram {
return Histogram{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogram() Histogram {
- state := internal.StateMutable
- return newHistogram(&otlpmetrics.Histogram{}, &state)
+ return newHistogram(internal.NewHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,13 @@ func (ms Histogram) MoveTo(dest Histogram) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Histogram{}
+ internal.DeleteHistogram(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// DataPoints returns the DataPoints associated with this Histogram.
+func (ms Histogram) DataPoints() HistogramDataPointSlice {
+ return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this Histogram.
@@ -57,21 +60,11 @@ func (ms Histogram) AggregationTemporality() AggregationTemporality {
// SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram.
func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
- ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
-}
-
-// DataPoints returns the DataPoints associated with this Histogram.
-func (ms Histogram) DataPoints() HistogramDataPointSlice {
- return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
+ ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Histogram) CopyTo(dest Histogram) {
dest.state.AssertMutable()
- copyOrigHistogram(dest.orig, ms.orig)
-}
-
-func copyOrigHistogram(dest, src *otlpmetrics.Histogram) {
- dest.AggregationTemporality = src.AggregationTemporality
- dest.DataPoints = copyOrigHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
+ internal.CopyHistogram(dest.orig, ms.orig)
}
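
Here only the target of the enum conversion changes (otlpmetrics to internal); the public AggregationTemporality values round-trip as before, e.g.:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	h := pmetric.NewHistogram()
	h.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	fmt.Println(h.AggregationTemporality()) // Cumulative
}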
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go
index 0f864d2e49e..3ed9090a5c1 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPoint struct {
- orig *otlpmetrics.HistogramDataPoint
+ orig *internal.HistogramDataPoint
state *internal.State
}
-func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPoint {
+func newHistogramDataPoint(orig *internal.HistogramDataPoint, state *internal.State) HistogramDataPoint {
return HistogramDataPoint{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogramDataPoint() HistogramDataPoint {
- state := internal.StateMutable
- return newHistogramDataPoint(&otlpmetrics.HistogramDataPoint{}, &state)
+ return newHistogramDataPoint(internal.NewHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,13 @@ func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.HistogramDataPoint{}
+ internal.DeleteHistogramDataPoint(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this HistogramDataPoint.
@@ -88,14 +86,37 @@ func (ms HistogramDataPoint) SetCount(v uint64) {
ms.orig.Count = v
}
-// BucketCounts returns the bucketcounts associated with this HistogramDataPoint.
+// Sum returns the sum associated with this HistogramDataPoint.
+func (ms HistogramDataPoint) Sum() float64 {
+ return ms.orig.GetSum()
+}
+
+// HasSum returns true if the HistogramDataPoint contains a
+// Sum value, false otherwise.
+func (ms HistogramDataPoint) HasSum() bool {
+ return ms.orig.Sum_ != nil
+}
+
+// SetSum replaces the sum associated with this HistogramDataPoint.
+func (ms HistogramDataPoint) SetSum(v float64) {
+ ms.state.AssertMutable()
+ ms.orig.Sum_ = &internal.HistogramDataPoint_Sum{Sum: v}
+}
+
+// RemoveSum removes the sum associated with this HistogramDataPoint.
+func (ms HistogramDataPoint) RemoveSum() {
+ ms.state.AssertMutable()
+ ms.orig.Sum_ = nil
+}
+
+// BucketCounts returns the BucketCounts associated with this HistogramDataPoint.
func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice {
- return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state))
+ return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
-// ExplicitBounds returns the explicitbounds associated with this HistogramDataPoint.
+// ExplicitBounds returns the ExplicitBounds associated with this HistogramDataPoint.
func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice {
- return pcommon.Float64Slice(internal.NewFloat64Slice(&ms.orig.ExplicitBounds, ms.state))
+ return pcommon.Float64Slice(internal.NewFloat64SliceWrapper(&ms.orig.ExplicitBounds, ms.state))
}
// Exemplars returns the Exemplars associated with this HistogramDataPoint.
@@ -114,36 +135,13 @@ func (ms HistogramDataPoint) SetFlags(v DataPointFlags) {
ms.orig.Flags = uint32(v)
}
-// Sum returns the sum associated with this HistogramDataPoint.
-func (ms HistogramDataPoint) Sum() float64 {
- return ms.orig.GetSum()
-}
-
-// HasSum returns true if the HistogramDataPoint contains a
-// Sum value, false otherwise.
-func (ms HistogramDataPoint) HasSum() bool {
- return ms.orig.Sum_ != nil
-}
-
-// SetSum replaces the sum associated with this HistogramDataPoint.
-func (ms HistogramDataPoint) SetSum(v float64) {
- ms.state.AssertMutable()
- ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: v}
-}
-
-// RemoveSum removes the sum associated with this HistogramDataPoint.
-func (ms HistogramDataPoint) RemoveSum() {
- ms.state.AssertMutable()
- ms.orig.Sum_ = nil
-}
-
// Min returns the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Min() float64 {
return ms.orig.GetMin()
}
// HasMin returns true if the HistogramDataPoint contains a
-// Min value, false otherwise.
+// Min value, false otherwise.
func (ms HistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
@@ -151,7 +149,7 @@ func (ms HistogramDataPoint) HasMin() bool {
// SetMin replaces the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
- ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: v}
+ ms.orig.Min_ = &internal.HistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this HistogramDataPoint.
@@ -166,7 +164,7 @@ func (ms HistogramDataPoint) Max() float64 {
}
// HasMax returns true if the HistogramDataPoint contains a
-// Max value, false otherwise.
+// Max value, false otherwise.
func (ms HistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
@@ -174,7 +172,7 @@ func (ms HistogramDataPoint) HasMax() bool {
// SetMax replaces the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
- ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: v}
+ ms.orig.Max_ = &internal.HistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this HistogramDataPoint.
@@ -186,31 +184,5 @@ func (ms HistogramDataPoint) RemoveMax() {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) {
dest.state.AssertMutable()
- copyOrigHistogramDataPoint(dest.orig, ms.orig)
-}
-
-func copyOrigHistogramDataPoint(dest, src *otlpmetrics.HistogramDataPoint) {
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.StartTimeUnixNano = src.StartTimeUnixNano
- dest.TimeUnixNano = src.TimeUnixNano
- dest.Count = src.Count
- dest.BucketCounts = internal.CopyOrigUInt64Slice(dest.BucketCounts, src.BucketCounts)
- dest.ExplicitBounds = internal.CopyOrigFloat64Slice(dest.ExplicitBounds, src.ExplicitBounds)
- dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
- dest.Flags = src.Flags
- if src.Sum_ == nil {
- dest.Sum_ = nil
- } else {
- dest.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: src.GetSum()}
- }
- if src.Min_ == nil {
- dest.Min_ = nil
- } else {
- dest.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: src.GetMin()}
- }
- if src.Max_ == nil {
- dest.Max_ = nil
- } else {
- dest.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: src.GetMax()}
- }
+ internal.CopyHistogramDataPoint(dest.orig, ms.orig)
}
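
Same pattern for the explicit-bounds histogram point: the optional Sum block moved up and the slice accessors now go through the *Wrapper constructors. A brief sketch of how the unchanged public accessors fit together (bucket counts are one longer than the bounds):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewHistogramDataPoint()
	dp.ExplicitBounds().FromRaw([]float64{0.1, 1, 10})
	dp.BucketCounts().FromRaw([]uint64{5, 3, 1, 0}) // len(bounds)+1 buckets
	dp.SetCount(9)
	dp.SetSum(12.5)

	fmt.Println(dp.HasSum(), dp.BucketCounts().Len()) // true 4
}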
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
index 0268cc4a35e..296c372b433 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// HistogramDataPointSlice logically represents a slice of HistogramDataPoint.
@@ -22,20 +21,19 @@ import (
// Must use NewHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPointSlice struct {
- orig *[]*otlpmetrics.HistogramDataPoint
+ orig *[]*internal.HistogramDataPoint
state *internal.State
}
-func newHistogramDataPointSlice(orig *[]*otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPointSlice {
+func newHistogramDataPointSlice(orig *[]*internal.HistogramDataPoint, state *internal.State) HistogramDataPointSlice {
return HistogramDataPointSlice{orig: orig, state: state}
}
-// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements.
+// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewHistogramDataPointSlice() HistogramDataPointSlice {
- orig := []*otlpmetrics.HistogramDataPoint(nil)
- state := internal.StateMutable
- return newHistogramDataPointSlice(&orig, &state)
+ orig := []*internal.HistogramDataPoint(nil)
+ return newHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es HistogramDataPointSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.HistogramDataPoint, len(*es.orig), newCap)
+ newOrig := make([]*internal.HistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es HistogramDataPointSlice) EnsureCapacity(newCap int) {
// It returns the newly added HistogramDataPoint.
func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.HistogramDataPoint{})
+ *es.orig = append(*es.orig, internal.NewHistogramDataPoint())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteHistogramDataPoint((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigHistogramDataPointSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyHistogramDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the
@@ -155,18 +161,3 @@ func (es HistogramDataPointSlice) Sort(less func(a, b HistogramDataPoint) bool)
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigHistogramDataPointSlice(dest, src []*otlpmetrics.HistogramDataPoint) []*otlpmetrics.HistogramDataPoint {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.HistogramDataPoint, len(src))
- data := make([]otlpmetrics.HistogramDataPoint, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigHistogramDataPoint(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go
index 70b282a46da..04eebe8e09a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -21,11 +20,11 @@ import (
// Must use NewMetric function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Metric struct {
- orig *otlpmetrics.Metric
+ orig *internal.Metric
state *internal.State
}
-func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric {
+func newMetric(orig *internal.Metric, state *internal.State) Metric {
return Metric{orig: orig, state: state}
}
@@ -34,8 +33,7 @@ func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMetric() Metric {
- state := internal.StateMutable
- return newMetric(&otlpmetrics.Metric{}, &state)
+ return newMetric(internal.NewMetric(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -47,8 +45,8 @@ func (ms Metric) MoveTo(dest Metric) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Metric{}
+ internal.DeleteMetric(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Name returns the name associated with this Metric.
@@ -84,24 +82,19 @@ func (ms Metric) SetUnit(v string) {
ms.orig.Unit = v
}
-// Metadata returns the Metadata associated with this Metric.
-func (ms Metric) Metadata() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Metadata, ms.state))
-}
-
// Type returns the type of the data for this Metric.
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Type() MetricType {
switch ms.orig.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
+ case *internal.Metric_Gauge:
return MetricTypeGauge
- case *otlpmetrics.Metric_Sum:
+ case *internal.Metric_Sum:
return MetricTypeSum
- case *otlpmetrics.Metric_Histogram:
+ case *internal.Metric_Histogram:
return MetricTypeHistogram
- case *otlpmetrics.Metric_ExponentialHistogram:
+ case *internal.Metric_ExponentialHistogram:
return MetricTypeExponentialHistogram
- case *otlpmetrics.Metric_Summary:
+ case *internal.Metric_Summary:
return MetricTypeSummary
}
return MetricTypeEmpty
@@ -114,7 +107,7 @@ func (ms Metric) Type() MetricType {
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Gauge() Gauge {
- v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Gauge)
+ v, ok := ms.orig.GetData().(*internal.Metric_Gauge)
if !ok {
return Gauge{}
}
@@ -128,9 +121,15 @@ func (ms Metric) Gauge() Gauge {
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyGauge() Gauge {
ms.state.AssertMutable()
- val := &otlpmetrics.Gauge{}
- ms.orig.Data = &otlpmetrics.Metric_Gauge{Gauge: val}
- return newGauge(val, ms.state)
+ var ov *internal.Metric_Gauge
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Metric_Gauge{}
+ } else {
+ ov = internal.ProtoPoolMetric_Gauge.Get().(*internal.Metric_Gauge)
+ }
+ ov.Gauge = internal.NewGauge()
+ ms.orig.Data = ov
+ return newGauge(ov.Gauge, ms.state)
}
// Sum returns the sum associated with this Metric.
@@ -140,7 +139,7 @@ func (ms Metric) SetEmptyGauge() Gauge {
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Sum() Sum {
- v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Sum)
+ v, ok := ms.orig.GetData().(*internal.Metric_Sum)
if !ok {
return Sum{}
}
@@ -154,9 +153,15 @@ func (ms Metric) Sum() Sum {
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySum() Sum {
ms.state.AssertMutable()
- val := &otlpmetrics.Sum{}
- ms.orig.Data = &otlpmetrics.Metric_Sum{Sum: val}
- return newSum(val, ms.state)
+ var ov *internal.Metric_Sum
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Metric_Sum{}
+ } else {
+ ov = internal.ProtoPoolMetric_Sum.Get().(*internal.Metric_Sum)
+ }
+ ov.Sum = internal.NewSum()
+ ms.orig.Data = ov
+ return newSum(ov.Sum, ms.state)
}
// Histogram returns the histogram associated with this Metric.
@@ -166,7 +171,7 @@ func (ms Metric) SetEmptySum() Sum {
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Histogram() Histogram {
- v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Histogram)
+ v, ok := ms.orig.GetData().(*internal.Metric_Histogram)
if !ok {
return Histogram{}
}
@@ -180,9 +185,15 @@ func (ms Metric) Histogram() Histogram {
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyHistogram() Histogram {
ms.state.AssertMutable()
- val := &otlpmetrics.Histogram{}
- ms.orig.Data = &otlpmetrics.Metric_Histogram{Histogram: val}
- return newHistogram(val, ms.state)
+ var ov *internal.Metric_Histogram
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Metric_Histogram{}
+ } else {
+ ov = internal.ProtoPoolMetric_Histogram.Get().(*internal.Metric_Histogram)
+ }
+ ov.Histogram = internal.NewHistogram()
+ ms.orig.Data = ov
+ return newHistogram(ov.Histogram, ms.state)
}
// ExponentialHistogram returns the exponentialhistogram associated with this Metric.
@@ -192,7 +203,7 @@ func (ms Metric) SetEmptyHistogram() Histogram {
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) ExponentialHistogram() ExponentialHistogram {
- v, ok := ms.orig.GetData().(*otlpmetrics.Metric_ExponentialHistogram)
+ v, ok := ms.orig.GetData().(*internal.Metric_ExponentialHistogram)
if !ok {
return ExponentialHistogram{}
}
@@ -206,9 +217,15 @@ func (ms Metric) ExponentialHistogram() ExponentialHistogram {
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram {
ms.state.AssertMutable()
- val := &otlpmetrics.ExponentialHistogram{}
- ms.orig.Data = &otlpmetrics.Metric_ExponentialHistogram{ExponentialHistogram: val}
- return newExponentialHistogram(val, ms.state)
+ var ov *internal.Metric_ExponentialHistogram
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Metric_ExponentialHistogram{}
+ } else {
+ ov = internal.ProtoPoolMetric_ExponentialHistogram.Get().(*internal.Metric_ExponentialHistogram)
+ }
+ ov.ExponentialHistogram = internal.NewExponentialHistogram()
+ ms.orig.Data = ov
+ return newExponentialHistogram(ov.ExponentialHistogram, ms.state)
}
// Summary returns the summary associated with this Metric.
@@ -218,7 +235,7 @@ func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram {
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Summary() Summary {
- v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Summary)
+ v, ok := ms.orig.GetData().(*internal.Metric_Summary)
if !ok {
return Summary{}
}
@@ -232,52 +249,24 @@ func (ms Metric) Summary() Summary {
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySummary() Summary {
ms.state.AssertMutable()
- val := &otlpmetrics.Summary{}
- ms.orig.Data = &otlpmetrics.Metric_Summary{Summary: val}
- return newSummary(val, ms.state)
+ var ov *internal.Metric_Summary
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.Metric_Summary{}
+ } else {
+ ov = internal.ProtoPoolMetric_Summary.Get().(*internal.Metric_Summary)
+ }
+ ov.Summary = internal.NewSummary()
+ ms.orig.Data = ov
+ return newSummary(ov.Summary, ms.state)
+}
+
+// Metadata returns the Metadata associated with this Metric.
+func (ms Metric) Metadata() pcommon.Map {
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Metadata, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Metric) CopyTo(dest Metric) {
dest.state.AssertMutable()
- copyOrigMetric(dest.orig, ms.orig)
-}
-
-func copyOrigMetric(dest, src *otlpmetrics.Metric) {
- dest.Name = src.Name
- dest.Description = src.Description
- dest.Unit = src.Unit
- dest.Metadata = internal.CopyOrigMap(dest.Metadata, src.Metadata)
- switch t := src.Data.(type) {
- case *otlpmetrics.Metric_Gauge:
- gauge := &otlpmetrics.Gauge{}
- copyOrigGauge(gauge, t.Gauge)
- dest.Data = &otlpmetrics.Metric_Gauge{
- Gauge: gauge,
- }
- case *otlpmetrics.Metric_Sum:
- sum := &otlpmetrics.Sum{}
- copyOrigSum(sum, t.Sum)
- dest.Data = &otlpmetrics.Metric_Sum{
- Sum: sum,
- }
- case *otlpmetrics.Metric_Histogram:
- histogram := &otlpmetrics.Histogram{}
- copyOrigHistogram(histogram, t.Histogram)
- dest.Data = &otlpmetrics.Metric_Histogram{
- Histogram: histogram,
- }
- case *otlpmetrics.Metric_ExponentialHistogram:
- exponentialhistogram := &otlpmetrics.ExponentialHistogram{}
- copyOrigExponentialHistogram(exponentialhistogram, t.ExponentialHistogram)
- dest.Data = &otlpmetrics.Metric_ExponentialHistogram{
- ExponentialHistogram: exponentialhistogram,
- }
- case *otlpmetrics.Metric_Summary:
- summary := &otlpmetrics.Summary{}
- copyOrigSummary(summary, t.Summary)
- dest.Data = &otlpmetrics.Metric_Summary{
- Summary: summary,
- }
- }
+ internal.CopyMetric(dest.orig, ms.orig)
}
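
SetEmptyGauge and the other SetEmpty* methods now optionally draw the oneof wrapper from a pool (gated on internal.UseProtoPooling), but Type() and the typed getters behave as before. An orientation sketch, not part of this diff:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("queue_length")
	m.SetEmptyGauge().DataPoints().AppendEmpty().SetIntValue(3)

	switch m.Type() {
	case pmetric.MetricTypeGauge:
		fmt.Println("gauge points:", m.Gauge().DataPoints().Len()) // 1
	case pmetric.MetricTypeSum:
		fmt.Println("sum")
	default:
		fmt.Println("other:", m.Type())
	}
}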
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go
new file mode 100644
index 00000000000..9d873d5ee0b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metrics.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pmetric
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Metrics is the top-level struct that is propagated through the metrics pipeline.
+// Use NewMetrics to create new instance, zero-initialized instance is not valid for use.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewMetrics function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Metrics internal.MetricsWrapper
+
+func newMetrics(orig *internal.ExportMetricsServiceRequest, state *internal.State) Metrics {
+ return Metrics(internal.NewMetricsWrapper(orig, state))
+}
+
+// NewMetrics creates a new empty Metrics.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewMetrics() Metrics {
+ return newMetrics(internal.NewExportMetricsServiceRequest(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Metrics) MoveTo(dest Metrics) {
+ ms.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ internal.DeleteExportMetricsServiceRequest(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
+}
+
+// ResourceMetrics returns the ResourceMetrics associated with this Metrics.
+func (ms Metrics) ResourceMetrics() ResourceMetricsSlice {
+ return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, ms.getState())
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Metrics) CopyTo(dest Metrics) {
+ dest.getState().AssertMutable()
+ internal.CopyExportMetricsServiceRequest(dest.getOrig(), ms.getOrig())
+}
+
+func (ms Metrics) getOrig() *internal.ExportMetricsServiceRequest {
+ return internal.GetMetricsOrig(internal.MetricsWrapper(ms))
+}
+
+func (ms Metrics) getState() *internal.State {
+ return internal.GetMetricsState(internal.MetricsWrapper(ms))
+}
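
This new file moves the top-level Metrics type into the generated set as a thin alias over internal.MetricsWrapper; the public surface appears unchanged. A minimal sketch of the usual entry points, for orientation:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
	sm.Metrics().AppendEmpty().SetName("up")

	cp := pmetric.NewMetrics()
	md.CopyTo(cp)
	fmt.Println(cp.ResourceMetrics().Len()) // 1
}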
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
index f57ae001705..3c8d26481e8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// MetricSlice logically represents a slice of Metric.
@@ -22,20 +21,19 @@ import (
// Must use NewMetricSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type MetricSlice struct {
- orig *[]*otlpmetrics.Metric
+ orig *[]*internal.Metric
state *internal.State
}
-func newMetricSlice(orig *[]*otlpmetrics.Metric, state *internal.State) MetricSlice {
+func newMetricSlice(orig *[]*internal.Metric, state *internal.State) MetricSlice {
return MetricSlice{orig: orig, state: state}
}
-// NewMetricSlice creates a MetricSlice with 0 elements.
+// NewMetricSlice creates a MetricSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewMetricSlice() MetricSlice {
- orig := []*otlpmetrics.Metric(nil)
- state := internal.StateMutable
- return newMetricSlice(&orig, &state)
+ orig := []*internal.Metric(nil)
+ return newMetricSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es MetricSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.Metric, len(*es.orig), newCap)
+ newOrig := make([]*internal.Metric, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es MetricSlice) EnsureCapacity(newCap int) {
// It returns the newly added Metric.
func (es MetricSlice) AppendEmpty() Metric {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.Metric{})
+ *es.orig = append(*es.orig, internal.NewMetric())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es MetricSlice) RemoveIf(f func(Metric) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteMetric((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es MetricSlice) RemoveIf(f func(Metric) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es MetricSlice) RemoveIf(f func(Metric) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es MetricSlice) CopyTo(dest MetricSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigMetricSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyMetricPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Metric elements within MetricSlice given the
@@ -155,18 +161,3 @@ func (es MetricSlice) Sort(less func(a, b Metric) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigMetricSlice(dest, src []*otlpmetrics.Metric) []*otlpmetrics.Metric {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.Metric, len(src))
- data := make([]otlpmetrics.Metric, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigMetric(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go
index d1dbc66c452..e61eba8a8c5 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewNumberDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPoint struct {
- orig *otlpmetrics.NumberDataPoint
+ orig *internal.NumberDataPoint
state *internal.State
}
-func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPoint {
+func newNumberDataPoint(orig *internal.NumberDataPoint, state *internal.State) NumberDataPoint {
return NumberDataPoint{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewNumberDataPoint() NumberDataPoint {
- state := internal.StateMutable
- return newNumberDataPoint(&otlpmetrics.NumberDataPoint{}, &state)
+ return newNumberDataPoint(internal.NewNumberDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,13 @@ func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.NumberDataPoint{}
+ internal.DeleteNumberDataPoint(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this NumberDataPoint.
func (ms NumberDataPoint) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this NumberDataPoint.
@@ -81,9 +79,9 @@ func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) {
// Calling this function on zero-initialized NumberDataPoint will cause a panic.
func (ms NumberDataPoint) ValueType() NumberDataPointValueType {
switch ms.orig.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
+ case *internal.NumberDataPoint_AsDouble:
return NumberDataPointValueTypeDouble
- case *otlpmetrics.NumberDataPoint_AsInt:
+ case *internal.NumberDataPoint_AsInt:
return NumberDataPointValueTypeInt
}
return NumberDataPointValueTypeEmpty
@@ -97,9 +95,14 @@ func (ms NumberDataPoint) DoubleValue() float64 {
// SetDoubleValue replaces the double associated with this NumberDataPoint.
func (ms NumberDataPoint) SetDoubleValue(v float64) {
ms.state.AssertMutable()
- ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{
- AsDouble: v,
+ var ov *internal.NumberDataPoint_AsDouble
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.NumberDataPoint_AsDouble{}
+ } else {
+ ov = internal.ProtoPoolNumberDataPoint_AsDouble.Get().(*internal.NumberDataPoint_AsDouble)
}
+ ov.AsDouble = v
+ ms.orig.Value = ov
}
// IntValue returns the int associated with this NumberDataPoint.
@@ -110,9 +113,14 @@ func (ms NumberDataPoint) IntValue() int64 {
// SetIntValue replaces the int associated with this NumberDataPoint.
func (ms NumberDataPoint) SetIntValue(v int64) {
ms.state.AssertMutable()
- ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{
- AsInt: v,
+ var ov *internal.NumberDataPoint_AsInt
+ if !internal.UseProtoPooling.IsEnabled() {
+ ov = &internal.NumberDataPoint_AsInt{}
+ } else {
+ ov = internal.ProtoPoolNumberDataPoint_AsInt.Get().(*internal.NumberDataPoint_AsInt)
}
+ ov.AsInt = v
+ ms.orig.Value = ov
}
// Exemplars returns the Exemplars associated with this NumberDataPoint.
@@ -134,19 +142,5 @@ func (ms NumberDataPoint) SetFlags(v DataPointFlags) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) {
dest.state.AssertMutable()
- copyOrigNumberDataPoint(dest.orig, ms.orig)
-}
-
-func copyOrigNumberDataPoint(dest, src *otlpmetrics.NumberDataPoint) {
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.StartTimeUnixNano = src.StartTimeUnixNano
- dest.TimeUnixNano = src.TimeUnixNano
- switch t := src.Value.(type) {
- case *otlpmetrics.NumberDataPoint_AsDouble:
- dest.Value = &otlpmetrics.NumberDataPoint_AsDouble{AsDouble: t.AsDouble}
- case *otlpmetrics.NumberDataPoint_AsInt:
- dest.Value = &otlpmetrics.NumberDataPoint_AsInt{AsInt: t.AsInt}
- }
- dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars)
- dest.Flags = src.Flags
+ internal.CopyNumberDataPoint(dest.orig, ms.orig)
}
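The new SetDoubleValue/SetIntValue bodies show the pooling pattern this upgrade threads through pdata: when the UseProtoPooling feature gate is enabled, oneof wrappers come from a pool and are later recycled by the Delete* helpers; when it is off, they are plainly allocated as before. A self-contained sketch of the same branch, with illustrative names in place of the internal pool and wrapper types:

package main

import "sync"

type asDouble struct{ v float64 } // stand-in for internal.NumberDataPoint_AsDouble

var asDoublePool = sync.Pool{New: func() any { return new(asDouble) }}

// setDouble mirrors SetDoubleValue: take the wrapper from a pool only when
// pooling is enabled, otherwise allocate and let the GC reclaim it.
func setDouble(dst **asDouble, v float64, pooling bool) {
	var ov *asDouble
	if pooling {
		ov = asDoublePool.Get().(*asDouble)
	} else {
		ov = &asDouble{}
	}
	ov.v = v
	*dst = ov
}

func main() {
	var slot *asDouble
	setDouble(&slot, 3.14, true)
	_ = slot
}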
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
index ff7f0f9c958..0daafa7e7a4 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// NumberDataPointSlice logically represents a slice of NumberDataPoint.
@@ -22,20 +21,19 @@ import (
// Must use NewNumberDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPointSlice struct {
- orig *[]*otlpmetrics.NumberDataPoint
+ orig *[]*internal.NumberDataPoint
state *internal.State
}
-func newNumberDataPointSlice(orig *[]*otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPointSlice {
+func newNumberDataPointSlice(orig *[]*internal.NumberDataPoint, state *internal.State) NumberDataPointSlice {
return NumberDataPointSlice{orig: orig, state: state}
}
-// NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements.
+// NewNumberDataPointSlice creates a NumberDataPointSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewNumberDataPointSlice() NumberDataPointSlice {
- orig := []*otlpmetrics.NumberDataPoint(nil)
- state := internal.StateMutable
- return newNumberDataPointSlice(&orig, &state)
+ orig := []*internal.NumberDataPoint(nil)
+ return newNumberDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es NumberDataPointSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.NumberDataPoint, len(*es.orig), newCap)
+ newOrig := make([]*internal.NumberDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es NumberDataPointSlice) EnsureCapacity(newCap int) {
// It returns the newly added NumberDataPoint.
func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.NumberDataPoint{})
+ *es.orig = append(*es.orig, internal.NewNumberDataPoint())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteNumberDataPoint((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just moved the data (or pointer to data) to a different position in the slice.

+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigNumberDataPointSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyNumberDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the
@@ -155,18 +161,3 @@ func (es NumberDataPointSlice) Sort(less func(a, b NumberDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigNumberDataPointSlice(dest, src []*otlpmetrics.NumberDataPoint) []*otlpmetrics.NumberDataPoint {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.NumberDataPoint, len(src))
- data := make([]otlpmetrics.NumberDataPoint, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigNumberDataPoint(dest[i], src[i])
- }
- return dest
-}
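RemoveIf keeps its stable in-place compaction but now nils out every vacated slot: removed elements are handed to Delete* (and thus back to the pools) exactly once, while survivors that get moved leave a nil behind so no pointer stays reachable from two positions, which would otherwise risk a double release later. A generic sketch of that invariant, with a release callback standing in for the internal Delete helpers:

// removeIf compacts s in place; release is called exactly once per
// dropped element, and every vacated slot is cleared to nil.
func removeIf[T any](s []*T, drop func(*T) bool, release func(*T)) []*T {
	newLen := 0
	for i := 0; i < len(s); i++ {
		if drop(s[i]) {
			release(s[i]) // return the element to its pool
			s[i] = nil
			continue
		}
		if newLen == i {
			newLen++
			continue
		}
		s[newLen] = s[i]
		s[i] = nil // moved, not released: only one live reference remains
		newLen++
	}
	return s[:newLen]
}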
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go
index 23803d13e2d..a7c3dbc061e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewResourceMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetrics struct {
- orig *otlpmetrics.ResourceMetrics
+ orig *internal.ResourceMetrics
state *internal.State
}
-func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetrics {
+func newResourceMetrics(orig *internal.ResourceMetrics, state *internal.State) ResourceMetrics {
return ResourceMetrics{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceMetrics() ResourceMetrics {
- state := internal.StateMutable
- return newResourceMetrics(&otlpmetrics.ResourceMetrics{}, &state)
+ return newResourceMetrics(internal.NewResourceMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.ResourceMetrics{}
+ internal.DeleteResourceMetrics(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceMetrics.
func (ms ResourceMetrics) Resource() pcommon.Resource {
- return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state))
+ return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
+}
+
+// ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics.
+func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice {
+ return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceMetrics.
@@ -66,19 +69,8 @@ func (ms ResourceMetrics) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics.
-func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice {
- return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) {
dest.state.AssertMutable()
- copyOrigResourceMetrics(dest.orig, ms.orig)
-}
-
-func copyOrigResourceMetrics(dest, src *otlpmetrics.ResourceMetrics) {
- internal.CopyOrigResource(&dest.Resource, &src.Resource)
- dest.SchemaUrl = src.SchemaUrl
- dest.ScopeMetrics = copyOrigScopeMetricsSlice(dest.ScopeMetrics, src.ScopeMetrics)
+ internal.CopyResourceMetrics(dest.orig, ms.orig)
}
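MoveTo changed from copy-then-zero to delete-then-swap: the destination's old payload is released first (Delete* with nullable=false resets it without freeing the struct itself), then the two payloads are exchanged, leaving the source holding a valid empty value. Observable behavior is unchanged, as this usage sketch against the public pmetric API shows (the schema URL is an arbitrary example value):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewResourceMetrics()
	src.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")

	dest := pmetric.NewResourceMetrics()
	src.MoveTo(dest)

	fmt.Println(dest.SchemaUrl()) // https://opentelemetry.io/schemas/1.21.0
	fmt.Println(src.SchemaUrl())  // "" — source is reset but still usable
}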
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
index b46d0c3e279..d434ad3d24e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ResourceMetricsSlice logically represents a slice of ResourceMetrics.
@@ -22,20 +21,19 @@ import (
// Must use NewResourceMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetricsSlice struct {
- orig *[]*otlpmetrics.ResourceMetrics
+ orig *[]*internal.ResourceMetrics
state *internal.State
}
-func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetricsSlice {
+func newResourceMetricsSlice(orig *[]*internal.ResourceMetrics, state *internal.State) ResourceMetricsSlice {
return ResourceMetricsSlice{orig: orig, state: state}
}
-// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements.
+// NewResourceMetricsSlice creates a ResourceMetricsSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceMetricsSlice() ResourceMetricsSlice {
- orig := []*otlpmetrics.ResourceMetrics(nil)
- state := internal.StateMutable
- return newResourceMetricsSlice(&orig, &state)
+ orig := []*internal.ResourceMetrics(nil)
+ return newResourceMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ResourceMetricsSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.ResourceMetrics, len(*es.orig), newCap)
+ newOrig := make([]*internal.ResourceMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ResourceMetricsSlice) EnsureCapacity(newCap int) {
// It returns the newly added ResourceMetrics.
func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.ResourceMetrics{})
+ *es.orig = append(*es.orig, internal.NewResourceMetrics())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteResourceMetrics((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just moved the data (or pointer to data) to a different position in the slice.

+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigResourceMetricsSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyResourceMetricsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the
@@ -155,18 +161,3 @@ func (es ResourceMetricsSlice) Sort(less func(a, b ResourceMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigResourceMetricsSlice(dest, src []*otlpmetrics.ResourceMetrics) []*otlpmetrics.ResourceMetrics {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.ResourceMetrics, len(src))
- data := make([]otlpmetrics.ResourceMetrics, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigResourceMetrics(dest[i], src[i])
- }
- return dest
-}
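CopyTo on every slice type gained an aliasing guard: when source and destination wrap the same underlying slice it returns immediately. Without it, the new pooled copy path would first release the destination's elements — the very elements it is about to read. A brief usage sketch against the public pmetric API:

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	rms := pmetric.NewMetrics().ResourceMetrics()
	rms.AppendEmpty().SetSchemaUrl("a")

	other := pmetric.NewResourceMetricsSlice()
	rms.CopyTo(other) // deep copy into a distinct slice
	rms.CopyTo(rms)   // self-copy: caught by the new guard, a no-op
}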
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go
index d13626f061b..563153a491f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewScopeMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetrics struct {
- orig *otlpmetrics.ScopeMetrics
+ orig *internal.ScopeMetrics
state *internal.State
}
-func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetrics {
+func newScopeMetrics(orig *internal.ScopeMetrics, state *internal.State) ScopeMetrics {
return ScopeMetrics{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) Scop
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeMetrics() ScopeMetrics {
- state := internal.StateMutable
- return newScopeMetrics(&otlpmetrics.ScopeMetrics{}, &state)
+ return newScopeMetrics(internal.NewScopeMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.ScopeMetrics{}
+ internal.DeleteScopeMetrics(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeMetrics.
func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope {
- return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state))
+ return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
+}
+
+// Metrics returns the Metrics associated with this ScopeMetrics.
+func (ms ScopeMetrics) Metrics() MetricSlice {
+ return newMetricSlice(&ms.orig.Metrics, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeMetrics.
@@ -66,19 +69,8 @@ func (ms ScopeMetrics) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// Metrics returns the Metrics associated with this ScopeMetrics.
-func (ms ScopeMetrics) Metrics() MetricSlice {
- return newMetricSlice(&ms.orig.Metrics, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) {
dest.state.AssertMutable()
- copyOrigScopeMetrics(dest.orig, ms.orig)
-}
-
-func copyOrigScopeMetrics(dest, src *otlpmetrics.ScopeMetrics) {
- internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.SchemaUrl = src.SchemaUrl
- dest.Metrics = copyOrigMetricSlice(dest.Metrics, src.Metrics)
+ internal.CopyScopeMetrics(dest.orig, ms.orig)
}
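In ScopeMetrics (as in ResourceMetrics above) the Metrics accessor merely moved ahead of SchemaUrl to match the generator's new field order; the traversal API itself is untouched. For reference, building the hierarchy top-down still reads as follows (metric and scope names are hypothetical):

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	md := pmetric.NewMetrics()
	sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
	sm.Scope().SetName("example-scope")
	sm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")

	m := sm.Metrics().AppendEmpty()
	m.SetName("requests_total")
	m.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(42)
}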
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
index 2a1fc16c671..553e38a3c4c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ScopeMetricsSlice logically represents a slice of ScopeMetrics.
@@ -22,20 +21,19 @@ import (
// Must use NewScopeMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetricsSlice struct {
- orig *[]*otlpmetrics.ScopeMetrics
+ orig *[]*internal.ScopeMetrics
state *internal.State
}
-func newScopeMetricsSlice(orig *[]*otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetricsSlice {
+func newScopeMetricsSlice(orig *[]*internal.ScopeMetrics, state *internal.State) ScopeMetricsSlice {
return ScopeMetricsSlice{orig: orig, state: state}
}
-// NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements.
+// NewScopeMetricsSlice creates a ScopeMetricsSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeMetricsSlice() ScopeMetricsSlice {
- orig := []*otlpmetrics.ScopeMetrics(nil)
- state := internal.StateMutable
- return newScopeMetricsSlice(&orig, &state)
+ orig := []*internal.ScopeMetrics(nil)
+ return newScopeMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ScopeMetricsSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.ScopeMetrics, len(*es.orig), newCap)
+ newOrig := make([]*internal.ScopeMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ScopeMetricsSlice) EnsureCapacity(newCap int) {
// It returns the newly added ScopeMetrics.
func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.ScopeMetrics{})
+ *es.orig = append(*es.orig, internal.NewScopeMetrics())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteScopeMetrics((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just moved the data (or pointer to data) to a different position in the slice.

+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigScopeMetricsSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyScopeMetricsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the
@@ -155,18 +161,3 @@ func (es ScopeMetricsSlice) Sort(less func(a, b ScopeMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigScopeMetricsSlice(dest, src []*otlpmetrics.ScopeMetrics) []*otlpmetrics.ScopeMetrics {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.ScopeMetrics, len(src))
- data := make([]otlpmetrics.ScopeMetrics, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigScopeMetrics(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go
index 6b9c01d08d7..a56ece7dc53 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
@@ -19,11 +18,11 @@ import (
// Must use NewSum function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Sum struct {
- orig *otlpmetrics.Sum
+ orig *internal.Sum
state *internal.State
}
-func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum {
+func newSum(orig *internal.Sum, state *internal.State) Sum {
return Sum{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSum() Sum {
- state := internal.StateMutable
- return newSum(&otlpmetrics.Sum{}, &state)
+ return newSum(internal.NewSum(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,13 @@ func (ms Sum) MoveTo(dest Sum) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Sum{}
+ internal.DeleteSum(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// DataPoints returns the DataPoints associated with this Sum.
+func (ms Sum) DataPoints() NumberDataPointSlice {
+ return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this Sum.
@@ -57,7 +60,7 @@ func (ms Sum) AggregationTemporality() AggregationTemporality {
// SetAggregationTemporality replaces the aggregationtemporality associated with this Sum.
func (ms Sum) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
- ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
+ ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// IsMonotonic returns the ismonotonic associated with this Sum.
@@ -71,19 +74,8 @@ func (ms Sum) SetIsMonotonic(v bool) {
ms.orig.IsMonotonic = v
}
-// DataPoints returns the DataPoints associated with this Sum.
-func (ms Sum) DataPoints() NumberDataPointSlice {
- return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Sum) CopyTo(dest Sum) {
dest.state.AssertMutable()
- copyOrigSum(dest.orig, ms.orig)
-}
-
-func copyOrigSum(dest, src *otlpmetrics.Sum) {
- dest.AggregationTemporality = src.AggregationTemporality
- dest.IsMonotonic = src.IsMonotonic
- dest.DataPoints = copyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints)
+ internal.CopySum(dest.orig, ms.orig)
}
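SetAggregationTemporality now casts through internal.AggregationTemporality rather than the protogen enum, but the exported AggregationTemporality constants and their numeric values are unchanged, so caller code like the following compiles against both sides of this diff:

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	sum := pmetric.NewSum()
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	sum.SetIsMonotonic(true)
	sum.DataPoints().AppendEmpty().SetDoubleValue(1.5)
}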
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go
index 85c119fc7cf..c6ec844d4d6 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.
@@ -19,11 +18,11 @@ import (
// Must use NewSummary function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Summary struct {
- orig *otlpmetrics.Summary
+ orig *internal.Summary
state *internal.State
}
-func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary {
+func newSummary(orig *internal.Summary, state *internal.State) Summary {
return Summary{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummary() Summary {
- state := internal.StateMutable
- return newSummary(&otlpmetrics.Summary{}, &state)
+ return newSummary(internal.NewSummary(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,8 @@ func (ms Summary) MoveTo(dest Summary) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.Summary{}
+ internal.DeleteSummary(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Summary.
@@ -57,9 +55,5 @@ func (ms Summary) DataPoints() SummaryDataPointSlice {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Summary) CopyTo(dest Summary) {
dest.state.AssertMutable()
- copyOrigSummary(dest.orig, ms.orig)
-}
-
-func copyOrigSummary(dest, src *otlpmetrics.Summary) {
- dest.DataPoints = copyOrigSummaryDataPointSlice(dest.DataPoints, src.DataPoints)
+ internal.CopySummary(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go
index afdc32c3f57..715ef7247d1 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewSummaryDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPoint struct {
- orig *otlpmetrics.SummaryDataPoint
+ orig *internal.SummaryDataPoint
state *internal.State
}
-func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPoint {
+func newSummaryDataPoint(orig *internal.SummaryDataPoint, state *internal.State) SummaryDataPoint {
return SummaryDataPoint{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.Sta
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPoint() SummaryDataPoint {
- state := internal.StateMutable
- return newSummaryDataPoint(&otlpmetrics.SummaryDataPoint{}, &state)
+ return newSummaryDataPoint(internal.NewSummaryDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,13 @@ func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.SummaryDataPoint{}
+ internal.DeleteSummaryDataPoint(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this SummaryDataPoint.
@@ -118,15 +116,5 @@ func (ms SummaryDataPoint) SetFlags(v DataPointFlags) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) {
dest.state.AssertMutable()
- copyOrigSummaryDataPoint(dest.orig, ms.orig)
-}
-
-func copyOrigSummaryDataPoint(dest, src *otlpmetrics.SummaryDataPoint) {
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.StartTimeUnixNano = src.StartTimeUnixNano
- dest.TimeUnixNano = src.TimeUnixNano
- dest.Count = src.Count
- dest.Sum = src.Sum
- dest.QuantileValues = copyOrigSummaryDataPointValueAtQuantileSlice(dest.QuantileValues, src.QuantileValues)
- dest.Flags = src.Flags
+ internal.CopySummaryDataPoint(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
index fc2652c3a77..0a76d5ec332 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointSlice logically represents a slice of SummaryDataPoint.
@@ -22,20 +21,19 @@ import (
// Must use NewSummaryDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointSlice struct {
- orig *[]*otlpmetrics.SummaryDataPoint
+ orig *[]*internal.SummaryDataPoint
state *internal.State
}
-func newSummaryDataPointSlice(orig *[]*otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPointSlice {
+func newSummaryDataPointSlice(orig *[]*internal.SummaryDataPoint, state *internal.State) SummaryDataPointSlice {
return SummaryDataPointSlice{orig: orig, state: state}
}
-// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements.
+// NewSummaryDataPointSlice creates a SummaryDataPointSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSummaryDataPointSlice() SummaryDataPointSlice {
- orig := []*otlpmetrics.SummaryDataPoint(nil)
- state := internal.StateMutable
- return newSummaryDataPointSlice(&orig, &state)
+ orig := []*internal.SummaryDataPoint(nil)
+ return newSummaryDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es SummaryDataPointSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.SummaryDataPoint, len(*es.orig), newCap)
+ newOrig := make([]*internal.SummaryDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es SummaryDataPointSlice) EnsureCapacity(newCap int) {
// It returns the newly added SummaryDataPoint.
func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint{})
+ *es.orig = append(*es.orig, internal.NewSummaryDataPoint())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteSummaryDataPoint((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just moved the data (or pointer to data) to a different position in the slice.

+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigSummaryDataPointSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySummaryDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the
@@ -155,18 +161,3 @@ func (es SummaryDataPointSlice) Sort(less func(a, b SummaryDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigSummaryDataPointSlice(dest, src []*otlpmetrics.SummaryDataPoint) []*otlpmetrics.SummaryDataPoint {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.SummaryDataPoint, len(src))
- data := make([]otlpmetrics.SummaryDataPoint, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigSummaryDataPoint(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go
index aad8eb56c6d..8275d2ecefa 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
@@ -19,11 +18,11 @@ import (
// Must use NewSummaryDataPointValueAtQuantile function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantile struct {
- orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile
+ orig *internal.SummaryDataPointValueAtQuantile
state *internal.State
}
-func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile {
+func newSummaryDataPointValueAtQuantile(orig *internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile {
return SummaryDataPointValueAtQuantile{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_Value
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile {
- state := internal.StateMutable
- return newSummaryDataPointValueAtQuantile(&otlpmetrics.SummaryDataPoint_ValueAtQuantile{}, &state)
+ return newSummaryDataPointValueAtQuantile(internal.NewSummaryDataPointValueAtQuantile(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,8 @@ func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQua
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpmetrics.SummaryDataPoint_ValueAtQuantile{}
+ internal.DeleteSummaryDataPointValueAtQuantile(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Quantile returns the quantile associated with this SummaryDataPointValueAtQuantile.
@@ -74,10 +72,5 @@ func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) {
dest.state.AssertMutable()
- copyOrigSummaryDataPointValueAtQuantile(dest.orig, ms.orig)
-}
-
-func copyOrigSummaryDataPointValueAtQuantile(dest, src *otlpmetrics.SummaryDataPoint_ValueAtQuantile) {
- dest.Quantile = src.Quantile
- dest.Value = src.Value
+ internal.CopySummaryDataPointValueAtQuantile(dest.orig, ms.orig)
}
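SummaryDataPointValueAtQuantile keeps its two plain float64 fields; only the move/copy plumbing switched to the internal helpers. Populating a p99 entry on a summary point is unchanged (the counts and values below are arbitrary examples):

package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	sdp := pmetric.NewSummaryDataPoint()
	sdp.SetCount(100)
	sdp.SetSum(250.0)

	q := sdp.QuantileValues().AppendEmpty()
	q.SetQuantile(0.99)
	q.SetValue(12.5)
}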
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
index 32b812c66cb..0aa50533057 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile.
@@ -22,20 +21,19 @@ import (
// Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantileSlice struct {
- orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile
+ orig *[]*internal.SummaryDataPointValueAtQuantile
state *internal.State
}
-func newSummaryDataPointValueAtQuantileSlice(orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice {
+func newSummaryDataPointValueAtQuantileSlice(orig *[]*internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice {
return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state}
}
-// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements.
+// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice {
- orig := []*otlpmetrics.SummaryDataPoint_ValueAtQuantile(nil)
- state := internal.StateMutable
- return newSummaryDataPointValueAtQuantileSlice(&orig, &state)
+ orig := []*internal.SummaryDataPointValueAtQuantile(nil)
+ return newSummaryDataPointValueAtQuantileSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(*es.orig), newCap)
+ newOrig := make([]*internal.SummaryDataPointValueAtQuantile, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) {
// It returns the newly added SummaryDataPointValueAtQuantile.
func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint_ValueAtQuantile{})
+ *es.orig = append(*es.orig, internal.NewSummaryDataPointValueAtQuantile())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointV
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteSummaryDataPointValueAtQuantile((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointV
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just moved the data (or pointer to data) to a different position in the slice.

+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointV
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValueAtQuantileSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigSummaryDataPointValueAtQuantileSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySummaryDataPointValueAtQuantilePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the
@@ -155,18 +161,3 @@ func (es SummaryDataPointValueAtQuantileSlice) Sort(less func(a, b SummaryDataPo
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigSummaryDataPointValueAtQuantileSlice(dest, src []*otlpmetrics.SummaryDataPoint_ValueAtQuantile) []*otlpmetrics.SummaryDataPoint_ValueAtQuantile {
- if cap(dest) < len(src) {
- dest = make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src))
- data := make([]otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigSummaryDataPointValueAtQuantile(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go
index 394b8af7c65..cd3825f7f13 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go
@@ -4,437 +4,40 @@
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
- "bytes"
- "fmt"
+ "slices"
- jsoniter "github.com/json-iterator/go"
-
- "go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
var _ Marshaler = (*JSONMarshaler)(nil)
-// JSONMarshaler marshals pdata.Metrics to JSON bytes using the OTLP/JSON format.
+// JSONMarshaler marshals Metrics to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalMetrics to the OTLP/JSON format.
func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
- buf := bytes.Buffer{}
- pb := internal.MetricsToProto(internal.Metrics(md))
- err := json.Marshal(&buf, &pb)
- return buf.Bytes(), err
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ md.getOrig().MarshalJSON(dest)
+ if dest.Error() != nil {
+ return nil, dest.Error()
+ }
+ return slices.Clone(dest.Buffer()), nil
}
-// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Metrics.
+// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes to Metrics.
type JSONUnmarshaler struct{}
-// UnmarshalMetrics from OTLP/JSON format into pdata.Metrics.
+// UnmarshalMetrics from OTLP/JSON format into Metrics.
func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
- iter := jsoniter.ConfigFastest.BorrowIterator(buf)
- defer jsoniter.ConfigFastest.ReturnIterator(iter)
+ iter := json.BorrowIterator(buf)
+ defer json.ReturnIterator(iter)
md := NewMetrics()
- md.unmarshalJsoniter(iter)
- if iter.Error != nil {
- return Metrics{}, iter.Error
+ md.getOrig().UnmarshalJSON(iter)
+ if iter.Error() != nil {
+ return Metrics{}, iter.Error()
}
otlp.MigrateMetrics(md.getOrig().ResourceMetrics)
return md, nil
}
-
-func (ms Metrics) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resource_metrics", "resourceMetrics":
- iter.ReadArrayCB(func(*jsoniter.Iterator) bool {
- ms.ResourceMetrics().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ResourceMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resource":
- json.ReadResource(iter, &ms.orig.Resource)
- case "scopeMetrics", "scope_metrics":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.ScopeMetrics().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ScopeMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "scope":
- json.ReadScope(iter, &ms.orig.Scope)
- case "metrics":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.Metrics().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Metric) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "name":
- ms.orig.Name = iter.ReadString()
- case "description":
- ms.orig.Description = iter.ReadString()
- case "unit":
- ms.orig.Unit = iter.ReadString()
- case "metadata":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Metadata = append(ms.orig.Metadata, json.ReadAttribute(iter))
- return true
- })
- case "sum":
- ms.SetEmptySum().unmarshalJsoniter(iter)
- case "gauge":
- ms.SetEmptyGauge().unmarshalJsoniter(iter)
- case "histogram":
- ms.SetEmptyHistogram().unmarshalJsoniter(iter)
- case "exponential_histogram", "exponentialHistogram":
- ms.SetEmptyExponentialHistogram().unmarshalJsoniter(iter)
- case "summary":
- ms.SetEmptySummary().unmarshalJsoniter(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Sum) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "aggregation_temporality", "aggregationTemporality":
- ms.orig.AggregationTemporality = readAggregationTemporality(iter)
- case "is_monotonic", "isMonotonic":
- ms.orig.IsMonotonic = iter.ReadBool()
- case "data_points", "dataPoints":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Gauge) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "data_points", "dataPoints":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Histogram) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "data_points", "dataPoints":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "aggregation_temporality", "aggregationTemporality":
- ms.orig.AggregationTemporality = readAggregationTemporality(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ExponentialHistogram) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "data_points", "dataPoints":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "aggregation_temporality", "aggregationTemporality":
- ms.orig.AggregationTemporality = readAggregationTemporality(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Summary) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "data_points", "dataPoints":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms NumberDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "start_time_unix_nano", "startTimeUnixNano":
- ms.orig.StartTimeUnixNano = json.ReadUint64(iter)
- case "as_int", "asInt":
- ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{
- AsInt: json.ReadInt64(iter),
- }
- case "as_double", "asDouble":
- ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{
- AsDouble: json.ReadFloat64(iter),
- }
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "exemplars":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "flags":
- ms.orig.Flags = json.ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms HistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "start_time_unix_nano", "startTimeUnixNano":
- ms.orig.StartTimeUnixNano = json.ReadUint64(iter)
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "count":
- ms.orig.Count = json.ReadUint64(iter)
- case "sum":
- ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: json.ReadFloat64(iter)}
- case "bucket_counts", "bucketCounts":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter))
- return true
- })
- case "explicit_bounds", "explicitBounds":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.ExplicitBounds = append(ms.orig.ExplicitBounds, json.ReadFloat64(iter))
- return true
- })
- case "exemplars":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "flags":
- ms.orig.Flags = json.ReadUint32(iter)
- case "max":
- ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{
- Max: json.ReadFloat64(iter),
- }
- case "min":
- ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{
- Min: json.ReadFloat64(iter),
- }
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ExponentialHistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "start_time_unix_nano", "startTimeUnixNano":
- ms.orig.StartTimeUnixNano = json.ReadUint64(iter)
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "count":
- ms.orig.Count = json.ReadUint64(iter)
- case "sum":
- ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{
- Sum: json.ReadFloat64(iter),
- }
- case "scale":
- ms.orig.Scale = iter.ReadInt32()
- case "zero_count", "zeroCount":
- ms.orig.ZeroCount = json.ReadUint64(iter)
- case "positive":
- ms.Positive().unmarshalJsoniter(iter)
- case "negative":
- ms.Negative().unmarshalJsoniter(iter)
- case "exemplars":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "flags":
- ms.orig.Flags = json.ReadUint32(iter)
- case "max":
- ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{
- Max: json.ReadFloat64(iter),
- }
- case "min":
- ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{
- Min: json.ReadFloat64(iter),
- }
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms SummaryDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "start_time_unix_nano", "startTimeUnixNano":
- ms.orig.StartTimeUnixNano = json.ReadUint64(iter)
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "count":
- ms.orig.Count = json.ReadUint64(iter)
- case "sum":
- ms.orig.Sum = json.ReadFloat64(iter)
- case "quantile_values", "quantileValues":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.QuantileValues().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "flags":
- ms.orig.Flags = json.ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ExponentialHistogramDataPointBuckets) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "bucket_counts", "bucketCounts":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter))
- return true
- })
- case "offset":
- ms.orig.Offset = iter.ReadInt32()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms SummaryDataPointValueAtQuantile) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "quantile":
- ms.orig.Quantile = json.ReadFloat64(iter)
- case "value":
- ms.orig.Value = json.ReadFloat64(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms Exemplar) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "filtered_attributes", "filteredAttributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.orig.FilteredAttributes = append(ms.orig.FilteredAttributes, json.ReadAttribute(iter))
- return true
- })
- case "timeUnixNano", "time_unix_nano":
- ms.orig.TimeUnixNano = json.ReadUint64(iter)
- case "as_int", "asInt":
- ms.orig.Value = &otlpmetrics.Exemplar_AsInt{
- AsInt: json.ReadInt64(iter),
- }
- case "as_double", "asDouble":
- ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{
- AsDouble: json.ReadFloat64(iter),
- }
- case "traceId", "trace_id":
- if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("exemplar.traceId", fmt.Sprintf("parse trace_id:%v", err))
- }
- case "spanId", "span_id":
- if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("exemplar.spanId", fmt.Sprintf("parse span_id:%v", err))
- }
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func readAggregationTemporality(iter *jsoniter.Iterator) otlpmetrics.AggregationTemporality {
- return otlpmetrics.AggregationTemporality(json.ReadEnumValue(iter, otlpmetrics.AggregationTemporality_value))
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go
index 91195ca4dfa..2f3ac098a5b 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go
@@ -3,46 +3,14 @@
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
-)
-
-// Metrics is the top-level struct that is propagated through the metrics pipeline.
-// Use NewMetrics to create new instance, zero-initialized instance is not valid for use.
-type Metrics internal.Metrics
-
-func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest) Metrics {
- state := internal.StateMutable
- return Metrics(internal.NewMetrics(orig, &state))
-}
-
-func (ms Metrics) getOrig() *otlpcollectormetrics.ExportMetricsServiceRequest {
- return internal.GetOrigMetrics(internal.Metrics(ms))
-}
-
-func (ms Metrics) getState() *internal.State {
- return internal.GetMetricsState(internal.Metrics(ms))
-}
-
-// NewMetrics creates a new Metrics struct.
-func NewMetrics() Metrics {
- return newMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{})
+// MarkReadOnly marks the Metrics as shared so that no further modifications can be done on it.
+func (ms Metrics) MarkReadOnly() {
+ ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Metrics instance is read-only.
func (ms Metrics) IsReadOnly() bool {
- return *ms.getState() == internal.StateReadOnly
-}
-
-// CopyTo copies the Metrics instance overriding the destination.
-func (ms Metrics) CopyTo(dest Metrics) {
- ms.ResourceMetrics().CopyTo(dest.ResourceMetrics())
-}
-
-// ResourceMetrics returns the ResourceMetricsSlice associated with this Metrics.
-func (ms Metrics) ResourceMetrics() ResourceMetricsSlice {
- return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, internal.GetMetricsState(internal.Metrics(ms)))
+ return ms.getState().IsReadOnly()
}
// MetricCount calculates the total number of metrics.
@@ -86,10 +54,5 @@ func (ms Metrics) DataPointCount() (dataPointCount int) {
}
}
}
- return
-}
-
-// MarkReadOnly marks the Metrics as shared so that no further modifications can be done on it.
-func (ms Metrics) MarkReadOnly() {
- internal.SetMetricsState(internal.Metrics(ms), internal.StateReadOnly)
+ return dataPointCount
}
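Both changes in metrics.go route through state accessors instead of comparing against internal.StateMutable/StateReadOnly directly (and the naked return in DataPointCount becomes explicit). The idea behind MarkReadOnly/IsReadOnly is one state flag shared by a parent and every child view derived from it, so freezing the top-level Metrics freezes the whole tree. A simplified, hypothetical sketch — not the internal package's actual types:

package main

import "fmt"

// state is a simplified stand-in for pdata's internal.State: one flag
// shared by a parent and every child view derived from it.
type state uint32

const (
	stateMutable state = iota
	stateReadOnly
)

func (s *state) MarkReadOnly()    { *s = stateReadOnly }
func (s *state) IsReadOnly() bool { return *s == stateReadOnly }
func (s *state) AssertMutable() {
	if *s == stateReadOnly {
		panic("invalid access to shared data")
	}
}

type metrics struct {
	names []string
	st    *state
}

// Child views reuse the parent's *state, so freezing the parent
// freezes every view derived from it.
func (m metrics) name(i int) nameView { return nameView{m.names, i, m.st} }

type nameView struct {
	names []string
	i     int
	st    *state
}

func (v nameView) set(s string) {
	v.st.AssertMutable() // panics once the tree is read-only
	v.names[v.i] = s
}

func main() {
	st := stateMutable
	m := metrics{names: []string{"a"}, st: &st}
	m.name(0).set("b") // fine while mutable
	m.st.MarkReadOnly()
	fmt.Println(m.st.IsReadOnly()) // true
	// m.name(0).set("c") would now panic
}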
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
index 775a96f6a7e..bf703a764a7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
@@ -3,57 +3,56 @@
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
-)
-
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
- pb := internal.MetricsToProto(internal.Metrics(md))
- return pb.Marshal()
+ size := md.getOrig().SizeProto()
+ buf := make([]byte, size)
+ _ = md.getOrig().MarshalProto(buf)
+ return buf, nil
}
func (e *ProtoMarshaler) MetricsSize(md Metrics) int {
- pb := internal.MetricsToProto(internal.Metrics(md))
- return pb.Size()
+ return md.getOrig().SizeProto()
}
-func (e *ProtoMarshaler) ResourceMetricsSize(rm ResourceMetrics) int {
- return rm.orig.Size()
+func (e *ProtoMarshaler) ResourceMetricsSize(md ResourceMetrics) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) ScopeMetricsSize(sm ScopeMetrics) int {
- return sm.orig.Size()
+func (e *ProtoMarshaler) ScopeMetricsSize(md ScopeMetrics) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) MetricSize(m Metric) int {
- return m.orig.Size()
+func (e *ProtoMarshaler) MetricSize(md Metric) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) NumberDataPointSize(ndp NumberDataPoint) int {
- return ndp.orig.Size()
+func (e *ProtoMarshaler) NumberDataPointSize(md NumberDataPoint) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) SummaryDataPointSize(sdps SummaryDataPoint) int {
- return sdps.orig.Size()
+func (e *ProtoMarshaler) SummaryDataPointSize(md SummaryDataPoint) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) HistogramDataPointSize(hdp HistogramDataPoint) int {
- return hdp.orig.Size()
+func (e *ProtoMarshaler) HistogramDataPointSize(md HistogramDataPoint) int {
+ return md.orig.SizeProto()
}
-func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(ehdp ExponentialHistogramDataPoint) int {
- return ehdp.orig.Size()
+func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(md ExponentialHistogramDataPoint) int {
+ return md.orig.SizeProto()
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
- pb := otlpmetrics.MetricsData{}
- err := pb.Unmarshal(buf)
- return Metrics(internal.MetricsFromProto(pb)), err
+ md := NewMetrics()
+ err := md.getOrig().UnmarshalProto(buf)
+ if err != nil {
+ return Metrics{}, err
+ }
+ return md, nil
}
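The rewritten ProtoMarshaler follows the classic size-then-marshal shape: ask the message for its exact encoded size, allocate once, then encode into the caller-owned buffer — the same contract gogoproto generates as Size/MarshalTo. A toy sketch of that contract; the interface and the int return value of MarshalProto are assumptions for illustration, since the real methods live on the internal orig structs:

package main

import "fmt"

// sizerMarshaler captures the two-call contract the new ProtoMarshaler
// relies on: report the exact encoded size, then fill a caller-provided
// buffer (assumed here to return the byte count written).
type sizerMarshaler interface {
	SizeProto() int
	MarshalProto(buf []byte) int
}

// marshal allocates exactly once, using the reported size, and encodes
// in place — mirroring MarshalMetrics above.
func marshal(m sizerMarshaler) []byte {
	buf := make([]byte, m.SizeProto())
	_ = m.MarshalProto(buf)
	return buf
}

// fixed is a toy message whose wire form is a fixed byte string.
type fixed struct{ payload []byte }

func (f fixed) SizeProto() int              { return len(f.payload) }
func (f fixed) MarshalProto(buf []byte) int { return copy(buf, f.payload) }

func main() {
	out := marshal(fixed{payload: []byte{0x0a, 0x03, 'f', 'o', 'o'}})
	fmt.Printf("%x\n", out) // 0a03666f6f
}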
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go
index 70b1735efe6..df57abed3a8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetricotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
)
// ExportPartialSuccess represents the details of a partially successful export request.
@@ -19,11 +18,11 @@ import (
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
- orig *otlpcollectormetrics.ExportMetricsPartialSuccess
+ orig *internal.ExportMetricsPartialSuccess
state *internal.State
}
-func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess {
+func newExportPartialSuccess(orig *internal.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
@@ -32,8 +31,7 @@ func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSucc
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
- state := internal.StateMutable
- return newExportPartialSuccess(&otlpcollectormetrics.ExportMetricsPartialSuccess{}, &state)
+ return newExportPartialSuccess(internal.NewExportMetricsPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -45,8 +43,8 @@ func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlpcollectormetrics.ExportMetricsPartialSuccess{}
+ internal.DeleteExportMetricsPartialSuccess(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedDataPoints returns the rejecteddatapoints associated with this ExportPartialSuccess.
@@ -74,10 +72,5 @@ func (ms ExportPartialSuccess) SetErrorMessage(v string) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
- copyOrigExportPartialSuccess(dest.orig, ms.orig)
-}
-
-func copyOrigExportPartialSuccess(dest, src *otlpcollectormetrics.ExportMetricsPartialSuccess) {
- dest.RejectedDataPoints = src.RejectedDataPoints
- dest.ErrorMessage = src.ErrorMessage
+ internal.CopyExportMetricsPartialSuccess(dest.orig, ms.orig)
}
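MoveTo changes from copy-and-zero to delete-and-swap: the destination's previous contents are released first (so pooled memory is reclaimed exactly once), then the two origs are swapped, leaving the source holding the freshly emptied value rather than a zero value that would orphan the destination's old buffers. A minimal sketch, with a hypothetical release standing in for internal.Delete*:

package main

import "fmt"

// node is a toy message with an owned buffer, standing in for the
// generated orig structs.
type node struct{ buf []byte }

// release is a stand-in for internal.Delete*(n, false): reclaim the
// contents while keeping the struct itself usable.
func release(n *node) { n.buf = nil }

// moveTo mirrors the new generated MoveTo: free the destination's old
// contents, then swap, so no fresh allocation happens and nothing leaks.
func moveTo(src, dest *node) {
	if src == dest {
		return
	}
	release(dest)
	*dest, *src = *src, *dest
}

func main() {
	a := &node{buf: []byte("payload")}
	b := &node{buf: []byte("old")}
	moveTo(a, b)
	fmt.Printf("dest=%q src=%q\n", b.buf, a.buf) // dest="payload" src=""
}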
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go
new file mode 100644
index 00000000000..53323987f1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportresponse.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package pmetricotlp
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// ExportResponse represents the response for gRPC/HTTP client/server.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewExportResponse function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type ExportResponse struct {
+ orig *internal.ExportMetricsServiceResponse
+ state *internal.State
+}
+
+func newExportResponse(orig *internal.ExportMetricsServiceResponse, state *internal.State) ExportResponse {
+ return ExportResponse{orig: orig, state: state}
+}
+
+// NewExportResponse creates a new empty ExportResponse.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewExportResponse() ExportResponse {
+ return newExportResponse(internal.NewExportMetricsServiceResponse(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms ExportResponse) MoveTo(dest ExportResponse) {
+ ms.state.AssertMutable()
+ dest.state.AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.orig == dest.orig {
+ return
+ }
+ internal.DeleteExportMetricsServiceResponse(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
+}
+
+// PartialSuccess returns the partialsuccess associated with this ExportResponse.
+func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
+ return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms ExportResponse) CopyTo(dest ExportResponse) {
+ dest.state.AssertMutable()
+ internal.CopyExportMetricsServiceResponse(dest.orig, ms.orig)
+}
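ExportResponse is now emitted by pdatagen instead of being written by hand in response.go (whose hand-rolled copy is deleted further below). Usage is unchanged; a short sketch assuming the standard generated setters — SetRejectedDataPoints is not shown in this hunk but is part of the existing public API:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	// Build a response reporting a partially successful export, using
	// only accessors visible in (or adjacent to) this diff.
	rsp := pmetricotlp.NewExportResponse()
	ps := rsp.PartialSuccess()
	ps.SetRejectedDataPoints(5)
	ps.SetErrorMessage("5 data points were rejected")

	body, err := rsp.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}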
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go
index 98d864d7371..656f0e17650 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
+ "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
@@ -31,11 +31,11 @@ type GRPCClient interface {
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
- return &grpcClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)}
+ return &grpcClient{rawClient: otelgrpc.NewMetricsServiceClient(cc)}
}
type grpcClient struct {
- rawClient otlpcollectormetrics.MetricsServiceClient
+ rawClient otelgrpc.MetricsServiceClient
}
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
@@ -43,8 +43,7 @@ func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...
if err != nil {
return ExportResponse{}, err
}
- state := internal.StateMutable
- return ExportResponse{orig: rsp, state: &state}, err
+ return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
@@ -75,16 +74,15 @@ func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
- otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv})
+ otelgrpc.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv})
}
type rawMetricsServer struct {
srv GRPCServer
}
-func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) {
+func (s rawMetricsServer) Export(ctx context.Context, request *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
otlp.MigrateMetrics(request.ResourceMetrics)
- state := internal.StateMutable
- rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state})
+ rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
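The gRPC plumbing only swaps the generated service stubs for the new internal/otelgrpc package; the public GRPCServer surface is untouched. A sketch of a minimal server against that surface — the address and the handler body are illustrative:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

// server implements pmetricotlp.GRPCServer; embedding
// UnimplementedGRPCServer satisfies the unexported marker method.
type server struct {
	pmetricotlp.UnimplementedGRPCServer
}

func (s *server) Export(_ context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
	log.Printf("received %d metrics", req.Metrics().MetricCount())
	return pmetricotlp.NewExportResponse(), nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317")
	if err != nil {
		log.Fatal(err)
	}
	gs := grpc.NewServer()
	pmetricotlp.RegisterGRPCServer(gs, &server{})
	log.Fatal(gs.Serve(lis))
}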
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go
index 4cca31a609f..895c0681834 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go
@@ -4,29 +4,26 @@
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
- "bytes"
+ "slices"
"go.opentelemetry.io/collector/pdata/internal"
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
+ "go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/pmetric"
)
-var jsonUnmarshaler = &pmetric.JSONUnmarshaler{}
-
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for pmetric.Metrics data.
type ExportRequest struct {
- orig *otlpcollectormetrics.ExportMetricsServiceRequest
+ orig *internal.ExportMetricsServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
- state := internal.StateMutable
return ExportRequest{
- orig: &otlpcollectormetrics.ExportMetricsServiceRequest{},
- state: &state,
+ orig: &internal.ExportMetricsServiceRequest{},
+ state: internal.NewState(),
}
}
@@ -35,40 +32,48 @@ func NewExportRequest() ExportRequest {
// any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest {
return ExportRequest{
- orig: internal.GetOrigMetrics(internal.Metrics(md)),
- state: internal.GetMetricsState(internal.Metrics(md)),
+ orig: internal.GetMetricsOrig(internal.MetricsWrapper(md)),
+ state: internal.GetMetricsState(internal.MetricsWrapper(md)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
- return ms.orig.Marshal()
+ size := ms.orig.SizeProto()
+ buf := make([]byte, size)
+ _ = ms.orig.MarshalProto(buf)
+ return buf, nil
}
// UnmarshalProto unmarshalls ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
- return ms.orig.Unmarshal(data)
+ err := ms.orig.UnmarshalProto(data)
+ if err != nil {
+ return err
+ }
+ otlp.MigrateMetrics(ms.orig.ResourceMetrics)
+ return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- if err := json.Marshal(&buf, ms.orig); err != nil {
- return nil, err
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ ms.orig.MarshalJSON(dest)
+ if dest.Error() != nil {
+ return nil, dest.Error()
}
- return buf.Bytes(), nil
+ return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshalls ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
- md, err := jsonUnmarshaler.UnmarshalMetrics(data)
- if err != nil {
- return err
- }
- *ms.orig = *internal.GetOrigMetrics(internal.Metrics(md))
- return nil
+ iter := json.BorrowIterator(data)
+ defer json.ReturnIterator(iter)
+ ms.orig.UnmarshalJSON(iter)
+ return iter.Error()
}
func (ms ExportRequest) Metrics() pmetric.Metrics {
- return pmetric.Metrics(internal.NewMetrics(ms.orig, ms.state))
+ return pmetric.Metrics(internal.NewMetricsWrapper(ms.orig, ms.state))
}
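MarshalJSON now writes into a pooled stream and must hand back slices.Clone(dest.Buffer()) rather than the buffer itself: once ReturnStream runs, the underlying bytes belong to the next borrower. The same discipline, sketched with a plain sync.Pool in place of the internal json package:

package main

import (
	"bytes"
	"fmt"
	"slices"
	"sync"
)

// pool of reusable buffers, standing in for json.BorrowStream /
// json.ReturnStream in pdata's internal json package.
var pool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

func marshal(write func(*bytes.Buffer)) []byte {
	buf := pool.Get().(*bytes.Buffer)
	buf.Reset()
	defer pool.Put(buf) // the buffer goes back to the pool...
	write(buf)
	// ...so the caller gets a private copy: returning buf.Bytes()
	// directly would alias memory the next borrower will overwrite.
	return slices.Clone(buf.Bytes())
}

func main() {
	out := marshal(func(b *bytes.Buffer) { b.WriteString(`{"ok":true}`) })
	fmt.Println(string(out))
}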
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go
index 5942568ddb6..fe985edf2da 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go
@@ -4,84 +4,36 @@
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
- "bytes"
+ "slices"
- jsoniter "github.com/json-iterator/go"
-
- "go.opentelemetry.io/collector/pdata/internal"
- otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
)
-// ExportResponse represents the response for gRPC/HTTP client/server.
-type ExportResponse struct {
- orig *otlpcollectormetrics.ExportMetricsServiceResponse
- state *internal.State
-}
-
-// NewExportResponse returns an empty ExportResponse.
-func NewExportResponse() ExportResponse {
- state := internal.StateMutable
- return ExportResponse{
- orig: &otlpcollectormetrics.ExportMetricsServiceResponse{},
- state: &state,
- }
-}
-
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
- return ms.orig.Marshal()
+ size := ms.orig.SizeProto()
+ buf := make([]byte, size)
+ _ = ms.orig.MarshalProto(buf)
+ return buf, nil
}
// UnmarshalProto unmarshalls ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
- return ms.orig.Unmarshal(data)
+ return ms.orig.UnmarshalProto(data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- if err := json.Marshal(&buf, ms.orig); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ ms.orig.MarshalJSON(dest)
+ return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshalls ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
- iter := jsoniter.ConfigFastest.BorrowIterator(data)
- defer jsoniter.ConfigFastest.ReturnIterator(iter)
- ms.unmarshalJsoniter(iter)
- return iter.Error
-}
-
-// PartialSuccess returns the ExportLogsPartialSuccess associated with this ExportResponse.
-func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
- return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
-}
-
-func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "partial_success", "partialSuccess":
- ms.PartialSuccess().unmarshalJsoniter(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool {
- switch f {
- case "rejected_data_points", "rejectedDataPoints":
- ms.orig.RejectedDataPoints = json.ReadInt64(iter)
- case "error_message", "errorMessage":
- ms.orig.ErrorMessage = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
+ iter := json.BorrowIterator(data)
+ defer json.ReturnIterator(iter)
+ ms.orig.UnmarshalJSON(iter)
+ return iter.Error()
}
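With the hand-written unmarshalJsoniter walkers gone, decoding is symmetric with encoding: borrow a pooled iterator, delegate to the generated UnmarshalJSON on the orig, and surface iter.Error(). A round-trip sketch over the public API — ErrorMessage is the existing generated getter:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	in := pmetricotlp.NewExportResponse()
	in.PartialSuccess().SetErrorMessage("throttled")

	// Encode with the generated MarshalJSON shown above.
	data, err := in.MarshalJSON()
	if err != nil {
		panic(err)
	}

	// Decode into a fresh response and confirm the field survives.
	out := pmetricotlp.NewExportResponse()
	if err := out.UnmarshalJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(out.PartialSuccess().ErrorMessage()) // throttled
}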
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go
index 8212a03e11c..400b24659b2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/encoding.go
@@ -9,17 +9,17 @@ type MarshalSizer interface {
Sizer
}
-// Marshaler marshals pdata.Traces into bytes.
+// Marshaler marshals Traces into bytes.
type Marshaler interface {
- // MarshalTraces the given pdata.Traces into bytes.
+ // MarshalTraces the given Traces into bytes.
// If the error is not nil, the returned bytes slice cannot be used.
MarshalTraces(td Traces) ([]byte, error)
}
-// Unmarshaler unmarshalls bytes into pdata.Traces.
+// Unmarshaler unmarshalls bytes into Traces.
type Unmarshaler interface {
- // UnmarshalTraces the given bytes into pdata.Traces.
- // If the error is not nil, the returned pdata.Traces cannot be used.
+ // UnmarshalTraces the given bytes into Traces.
+ // If the error is not nil, the returned Traces cannot be used.
UnmarshalTraces(buf []byte) (Traces, error)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
index 9663d33f030..c68406cadeb 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespans.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewResourceSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpans struct {
- orig *otlptrace.ResourceSpans
+ orig *internal.ResourceSpans
state *internal.State
}
-func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans {
+func newResourceSpans(orig *internal.ResourceSpans, state *internal.State) ResourceSpans {
return ResourceSpans{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) Reso
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceSpans() ResourceSpans {
- state := internal.StateMutable
- return newResourceSpans(&otlptrace.ResourceSpans{}, &state)
+ return newResourceSpans(internal.NewResourceSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ResourceSpans) MoveTo(dest ResourceSpans) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.ResourceSpans{}
+ internal.DeleteResourceSpans(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceSpans.
func (ms ResourceSpans) Resource() pcommon.Resource {
- return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state))
+ return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
+}
+
+// ScopeSpans returns the ScopeSpans associated with this ResourceSpans.
+func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice {
+ return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceSpans.
@@ -66,19 +69,8 @@ func (ms ResourceSpans) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// ScopeSpans returns the ScopeSpans associated with this ResourceSpans.
-func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice {
- return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceSpans) CopyTo(dest ResourceSpans) {
dest.state.AssertMutable()
- copyOrigResourceSpans(dest.orig, ms.orig)
-}
-
-func copyOrigResourceSpans(dest, src *otlptrace.ResourceSpans) {
- internal.CopyOrigResource(&dest.Resource, &src.Resource)
- dest.SchemaUrl = src.SchemaUrl
- dest.ScopeSpans = copyOrigScopeSpansSlice(dest.ScopeSpans, src.ScopeSpans)
+ internal.CopyResourceSpans(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
index bf9776647a2..ed2a7a87230 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ResourceSpansSlice logically represents a slice of ResourceSpans.
@@ -22,20 +21,19 @@ import (
// Must use NewResourceSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpansSlice struct {
- orig *[]*otlptrace.ResourceSpans
+ orig *[]*internal.ResourceSpans
state *internal.State
}
-func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice {
+func newResourceSpansSlice(orig *[]*internal.ResourceSpans, state *internal.State) ResourceSpansSlice {
return ResourceSpansSlice{orig: orig, state: state}
}
-// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
+// NewResourceSpansSlice creates a ResourceSpansSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceSpansSlice() ResourceSpansSlice {
- orig := []*otlptrace.ResourceSpans(nil)
- state := internal.StateMutable
- return newResourceSpansSlice(&orig, &state)
+ orig := []*internal.ResourceSpans(nil)
+ return newResourceSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap)
+ newOrig := make([]*internal.ResourceSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
// It returns the newly added ResourceSpans.
func (es ResourceSpansSlice) AppendEmpty() ResourceSpans {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlptrace.ResourceSpans{})
+ *es.orig = append(*es.orig, internal.NewResourceSpans())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteResourceSpans((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigResourceSpansSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyResourceSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceSpans elements within ResourceSpansSlice given the
@@ -155,18 +161,3 @@ func (es ResourceSpansSlice) Sort(less func(a, b ResourceSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigResourceSpansSlice(dest, src []*otlptrace.ResourceSpans) []*otlptrace.ResourceSpans {
- if cap(dest) < len(src) {
- dest = make([]*otlptrace.ResourceSpans, len(src))
- data := make([]otlptrace.ResourceSpans, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigResourceSpans(dest[i], src[i])
- }
- return dest
-}
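RemoveIf gains two kinds of nil-ing: removed elements are deleted (returned to the internal pools) and their slot cleared, while surviving elements that shift left clear the slot they vacated, so no position past the new length keeps a pointer alive. The compaction shape, with a hypothetical release in place of internal.Delete*:

package main

import "fmt"

// item stands in for a generated element; release is a stand-in for
// internal.Delete*(el, true), which returns pooled memory.
type item struct{ id int }

func release(it *item) { /* return to pool; elided */ }

// removeIf compacts the slice in place, mirroring the generated code.
func removeIf(s *[]*item, f func(*item) bool) {
	newLen := 0
	for i := 0; i < len(*s); i++ {
		if f((*s)[i]) {
			release((*s)[i])
			(*s)[i] = nil // removed: free it and clear the slot
			continue
		}
		if newLen == i {
			// Already in place, nothing to move.
			newLen++
			continue
		}
		(*s)[newLen] = (*s)[i]
		(*s)[i] = nil // moved, not deleted: only slot newLen keeps it alive
		newLen++
	}
	*s = (*s)[:newLen]
}

func main() {
	s := []*item{{1}, {2}, {3}, {4}}
	removeIf(&s, func(it *item) bool { return it.id%2 == 0 })
	for _, it := range s {
		fmt.Println(it.id) // 1, 3
	}
}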
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
index 06837b24339..a06c1df34d9 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespans.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -20,11 +19,11 @@ import (
// Must use NewScopeSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpans struct {
- orig *otlptrace.ScopeSpans
+ orig *internal.ScopeSpans
state *internal.State
}
-func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans {
+func newScopeSpans(orig *internal.ScopeSpans, state *internal.State) ScopeSpans {
return ScopeSpans{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeSpans() ScopeSpans {
- state := internal.StateMutable
- return newScopeSpans(&otlptrace.ScopeSpans{}, &state)
+ return newScopeSpans(internal.NewScopeSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,13 +44,18 @@ func (ms ScopeSpans) MoveTo(dest ScopeSpans) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.ScopeSpans{}
+ internal.DeleteScopeSpans(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeSpans.
func (ms ScopeSpans) Scope() pcommon.InstrumentationScope {
- return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state))
+ return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
+}
+
+// Spans returns the Spans associated with this ScopeSpans.
+func (ms ScopeSpans) Spans() SpanSlice {
+ return newSpanSlice(&ms.orig.Spans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeSpans.
@@ -66,19 +69,8 @@ func (ms ScopeSpans) SetSchemaUrl(v string) {
ms.orig.SchemaUrl = v
}
-// Spans returns the Spans associated with this ScopeSpans.
-func (ms ScopeSpans) Spans() SpanSlice {
- return newSpanSlice(&ms.orig.Spans, ms.state)
-}
-
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeSpans) CopyTo(dest ScopeSpans) {
dest.state.AssertMutable()
- copyOrigScopeSpans(dest.orig, ms.orig)
-}
-
-func copyOrigScopeSpans(dest, src *otlptrace.ScopeSpans) {
- internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
- dest.SchemaUrl = src.SchemaUrl
- dest.Spans = copyOrigSpanSlice(dest.Spans, src.Spans)
+ internal.CopyScopeSpans(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
index 63c526597f3..2522d5b2b6c 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ScopeSpansSlice logically represents a slice of ScopeSpans.
@@ -22,20 +21,19 @@ import (
// Must use NewScopeSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpansSlice struct {
- orig *[]*otlptrace.ScopeSpans
+ orig *[]*internal.ScopeSpans
state *internal.State
}
-func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice {
+func newScopeSpansSlice(orig *[]*internal.ScopeSpans, state *internal.State) ScopeSpansSlice {
return ScopeSpansSlice{orig: orig, state: state}
}
-// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
+// NewScopeSpansSlice creates a ScopeSpansSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeSpansSlice() ScopeSpansSlice {
- orig := []*otlptrace.ScopeSpans(nil)
- state := internal.StateMutable
- return newScopeSpansSlice(&orig, &state)
+ orig := []*internal.ScopeSpans(nil)
+ return newScopeSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap)
+ newOrig := make([]*internal.ScopeSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
// It returns the newly added ScopeSpans.
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlptrace.ScopeSpans{})
+ *es.orig = append(*es.orig, internal.NewScopeSpans())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteScopeSpans((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigScopeSpansSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopyScopeSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the
@@ -155,18 +161,3 @@ func (es ScopeSpansSlice) Sort(less func(a, b ScopeSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigScopeSpansSlice(dest, src []*otlptrace.ScopeSpans) []*otlptrace.ScopeSpans {
- if cap(dest) < len(src) {
- dest = make([]*otlptrace.ScopeSpans, len(src))
- data := make([]otlptrace.ScopeSpans, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigScopeSpans(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
index 16e997ede99..238386c1798 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_span.go
@@ -1,15 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -22,11 +20,11 @@ import (
// Must use NewSpan function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Span struct {
- orig *otlptrace.Span
+ orig *internal.Span
state *internal.State
}
-func newSpan(orig *otlptrace.Span, state *internal.State) Span {
+func newSpan(orig *internal.Span, state *internal.State) Span {
return Span{orig: orig, state: state}
}
@@ -35,8 +33,7 @@ func newSpan(orig *otlptrace.Span, state *internal.State) Span {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpan() Span {
- state := internal.StateMutable
- return newSpan(&otlptrace.Span{}, &state)
+ return newSpan(internal.NewSpan(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -48,8 +45,8 @@ func (ms Span) MoveTo(dest Span) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.Span{}
+ internal.DeleteSpan(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this Span.
@@ -60,7 +57,7 @@ func (ms Span) TraceID() pcommon.TraceID {
// SetTraceID replaces the traceid associated with this Span.
func (ms Span) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Span.
@@ -71,12 +68,12 @@ func (ms Span) SpanID() pcommon.SpanID {
// SetSpanID replaces the spanid associated with this Span.
func (ms Span) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this Span.
func (ms Span) TraceState() pcommon.TraceState {
- return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state))
+ return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// ParentSpanID returns the parentspanid associated with this Span.
@@ -87,18 +84,7 @@ func (ms Span) ParentSpanID() pcommon.SpanID {
// SetParentSpanID replaces the parentspanid associated with this Span.
func (ms Span) SetParentSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.ParentSpanId = data.SpanID(v)
-}
-
-// Name returns the name associated with this Span.
-func (ms Span) Name() string {
- return ms.orig.Name
-}
-
-// SetName replaces the name associated with this Span.
-func (ms Span) SetName(v string) {
- ms.state.AssertMutable()
- ms.orig.Name = v
+ ms.orig.ParentSpanId = internal.SpanID(v)
}
// Flags returns the flags associated with this Span.
@@ -112,6 +98,17 @@ func (ms Span) SetFlags(v uint32) {
ms.orig.Flags = v
}
+// Name returns the name associated with this Span.
+func (ms Span) Name() string {
+ return ms.orig.Name
+}
+
+// SetName replaces the name associated with this Span.
+func (ms Span) SetName(v string) {
+ ms.state.AssertMutable()
+ ms.orig.Name = v
+}
+
// Kind returns the kind associated with this Span.
func (ms Span) Kind() SpanKind {
return SpanKind(ms.orig.Kind)
@@ -120,7 +117,7 @@ func (ms Span) Kind() SpanKind {
// SetKind replaces the kind associated with this Span.
func (ms Span) SetKind(v SpanKind) {
ms.state.AssertMutable()
- ms.orig.Kind = otlptrace.Span_SpanKind(v)
+ ms.orig.Kind = internal.SpanKind(v)
}
// StartTimestamp returns the starttimestamp associated with this Span.
@@ -147,7 +144,7 @@ func (ms Span) SetEndTimestamp(v pcommon.Timestamp) {
// Attributes returns the Attributes associated with this Span.
func (ms Span) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Span.
@@ -201,24 +198,5 @@ func (ms Span) Status() Status {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Span) CopyTo(dest Span) {
dest.state.AssertMutable()
- copyOrigSpan(dest.orig, ms.orig)
-}
-
-func copyOrigSpan(dest, src *otlptrace.Span) {
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
- internal.CopyOrigTraceState(&dest.TraceState, &src.TraceState)
- dest.ParentSpanId = src.ParentSpanId
- dest.Name = src.Name
- dest.Flags = src.Flags
- dest.Kind = src.Kind
- dest.StartTimeUnixNano = src.StartTimeUnixNano
- dest.EndTimeUnixNano = src.EndTimeUnixNano
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
- dest.Events = copyOrigSpanEventSlice(dest.Events, src.Events)
- dest.DroppedEventsCount = src.DroppedEventsCount
- dest.Links = copyOrigSpanLinkSlice(dest.Links, src.Links)
- dest.DroppedLinksCount = src.DroppedLinksCount
- copyOrigStatus(&dest.Status, &src.Status)
+ internal.CopySpan(dest.orig, ms.orig)
}
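Span's accessors are unchanged apart from the orig type moving into internal (and the Name/Flags blocks being reordered by the generator). A quick exercise of the regenerated surface — note NewSpan is documented as test-only, which suits a sketch like this:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetName("GET /api/items")
	span.SetKind(ptrace.SpanKindServer)
	span.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4}))
	span.SetSpanID(pcommon.SpanID([8]byte{5, 6, 7, 8}))
	now := time.Now()
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(now))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(42 * time.Millisecond)))
	span.Attributes().PutInt("http.status_code", 200)

	dst := ptrace.NewSpan()
	span.CopyTo(dst) // deep copy, now routed through internal.CopySpan
	fmt.Println(dst.Name(), dst.Kind())
}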
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
index 337b5327d2a..333f5d47b4f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanevent.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -21,11 +20,11 @@ import (
// Must use NewSpanEvent function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEvent struct {
- orig *otlptrace.Span_Event
+ orig *internal.SpanEvent
state *internal.State
}
-func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent {
+func newSpanEvent(orig *internal.SpanEvent, state *internal.State) SpanEvent {
return SpanEvent{orig: orig, state: state}
}
@@ -34,8 +33,7 @@ func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanEvent() SpanEvent {
- state := internal.StateMutable
- return newSpanEvent(&otlptrace.Span_Event{}, &state)
+ return newSpanEvent(internal.NewSpanEvent(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -47,8 +45,8 @@ func (ms SpanEvent) MoveTo(dest SpanEvent) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.Span_Event{}
+ internal.DeleteSpanEvent(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Timestamp returns the timestamp associated with this SpanEvent.
@@ -75,7 +73,7 @@ func (ms SpanEvent) SetName(v string) {
// Attributes returns the Attributes associated with this SpanEvent.
func (ms SpanEvent) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent.
@@ -92,12 +90,5 @@ func (ms SpanEvent) SetDroppedAttributesCount(v uint32) {
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanEvent) CopyTo(dest SpanEvent) {
dest.state.AssertMutable()
- copyOrigSpanEvent(dest.orig, ms.orig)
-}
-
-func copyOrigSpanEvent(dest, src *otlptrace.Span_Event) {
- dest.TimeUnixNano = src.TimeUnixNano
- dest.Name = src.Name
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
+ internal.CopySpanEvent(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
index cd98da1bb56..f21a4d671a8 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanEventSlice logically represents a slice of SpanEvent.
@@ -22,20 +21,19 @@ import (
// Must use NewSpanEventSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEventSlice struct {
- orig *[]*otlptrace.Span_Event
+ orig *[]*internal.SpanEvent
state *internal.State
}
-func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice {
+func newSpanEventSlice(orig *[]*internal.SpanEvent, state *internal.State) SpanEventSlice {
return SpanEventSlice{orig: orig, state: state}
}
-// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
+// NewSpanEventSlice creates a SpanEventSliceWrapper with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanEventSlice() SpanEventSlice {
- orig := []*otlptrace.Span_Event(nil)
- state := internal.StateMutable
- return newSpanEventSlice(&orig, &state)
+ orig := []*internal.SpanEvent(nil)
+ return newSpanEventSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap)
+ newOrig := make([]*internal.SpanEvent, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es SpanEventSlice) EnsureCapacity(newCap int) {
// It returns the newly added SpanEvent.
func (es SpanEventSlice) AppendEmpty() SpanEvent {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlptrace.Span_Event{})
+ *es.orig = append(*es.orig, internal.NewSpanEvent())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteSpanEvent((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanEventSlice) CopyTo(dest SpanEventSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigSpanEventSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySpanEventPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanEvent elements within SpanEventSlice given the
@@ -155,18 +161,3 @@ func (es SpanEventSlice) Sort(less func(a, b SpanEvent) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigSpanEventSlice(dest, src []*otlptrace.Span_Event) []*otlptrace.Span_Event {
- if cap(dest) < len(src) {
- dest = make([]*otlptrace.Span_Event, len(src))
- data := make([]otlptrace.Span_Event, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigSpanEvent(dest[i], src[i])
- }
- return dest
-}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
index 85a5350b1eb..ab585857a56 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlink.go
@@ -1,15 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- "go.opentelemetry.io/collector/pdata/internal/data"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
@@ -23,11 +21,11 @@ import (
// Must use NewSpanLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLink struct {
- orig *otlptrace.Span_Link
+ orig *internal.SpanLink
state *internal.State
}
-func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink {
+func newSpanLink(orig *internal.SpanLink, state *internal.State) SpanLink {
return SpanLink{orig: orig, state: state}
}
@@ -36,8 +34,7 @@ func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanLink() SpanLink {
- state := internal.StateMutable
- return newSpanLink(&otlptrace.Span_Link{}, &state)
+ return newSpanLink(internal.NewSpanLink(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -49,8 +46,8 @@ func (ms SpanLink) MoveTo(dest SpanLink) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.Span_Link{}
+ internal.DeleteSpanLink(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this SpanLink.
@@ -61,7 +58,7 @@ func (ms SpanLink) TraceID() pcommon.TraceID {
// SetTraceID replaces the traceid associated with this SpanLink.
func (ms SpanLink) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
- ms.orig.TraceId = data.TraceID(v)
+ ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this SpanLink.
@@ -72,28 +69,17 @@ func (ms SpanLink) SpanID() pcommon.SpanID {
// SetSpanID replaces the spanid associated with this SpanLink.
func (ms SpanLink) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
- ms.orig.SpanId = data.SpanID(v)
+ ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this SpanLink.
func (ms SpanLink) TraceState() pcommon.TraceState {
- return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state))
-}
-
-// Flags returns the flags associated with this SpanLink.
-func (ms SpanLink) Flags() uint32 {
- return ms.orig.Flags
-}
-
-// SetFlags replaces the flags associated with this SpanLink.
-func (ms SpanLink) SetFlags(v uint32) {
- ms.state.AssertMutable()
- ms.orig.Flags = v
+ return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// Attributes returns the Attributes associated with this SpanLink.
func (ms SpanLink) Attributes() pcommon.Map {
- return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state))
+ return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink.
@@ -107,17 +93,19 @@ func (ms SpanLink) SetDroppedAttributesCount(v uint32) {
ms.orig.DroppedAttributesCount = v
}
+// Flags returns the flags associated with this SpanLink.
+func (ms SpanLink) Flags() uint32 {
+ return ms.orig.Flags
+}
+
+// SetFlags replaces the flags associated with this SpanLink.
+func (ms SpanLink) SetFlags(v uint32) {
+ ms.state.AssertMutable()
+ ms.orig.Flags = v
+}
+
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanLink) CopyTo(dest SpanLink) {
dest.state.AssertMutable()
- copyOrigSpanLink(dest.orig, ms.orig)
-}
-
-func copyOrigSpanLink(dest, src *otlptrace.Span_Link) {
- dest.TraceId = src.TraceId
- dest.SpanId = src.SpanId
- internal.CopyOrigTraceState(&dest.TraceState, &src.TraceState)
- dest.Flags = src.Flags
- dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes)
- dest.DroppedAttributesCount = src.DroppedAttributesCount
+ internal.CopySpanLink(dest.orig, ms.orig)
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
index af205452e78..65b8b473591 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanLinkSlice logically represents a slice of SpanLink.
@@ -22,20 +21,19 @@ import (
// Must use NewSpanLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLinkSlice struct {
- orig *[]*otlptrace.Span_Link
+ orig *[]*internal.SpanLink
state *internal.State
}
-func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice {
+func newSpanLinkSlice(orig *[]*internal.SpanLink, state *internal.State) SpanLinkSlice {
return SpanLinkSlice{orig: orig, state: state}
}
-// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
+// NewSpanLinkSlice creates a new SpanLinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanLinkSlice() SpanLinkSlice {
- orig := []*otlptrace.Span_Link(nil)
- state := internal.StateMutable
- return newSpanLinkSlice(&orig, &state)
+ orig := []*internal.SpanLink(nil)
+ return newSpanLinkSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap)
+ newOrig := make([]*internal.SpanLink, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es SpanLinkSlice) EnsureCapacity(newCap int) {
// It returns the newly added SpanLink.
func (es SpanLinkSlice) AppendEmpty() SpanLink {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlptrace.Span_Link{})
+ *es.orig = append(*es.orig, internal.NewSpanLink())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteSpanLink((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigSpanLinkSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySpanLinkPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanLink elements within SpanLinkSlice given the
@@ -155,18 +161,3 @@ func (es SpanLinkSlice) Sort(less func(a, b SpanLink) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigSpanLinkSlice(dest, src []*otlptrace.Span_Link) []*otlptrace.Span_Link {
- if cap(dest) < len(src) {
- dest = make([]*otlptrace.Span_Link, len(src))
- data := make([]otlptrace.Span_Link, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigSpanLink(dest[i], src[i])
- }
- return dest
-}
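
`RemoveIf` now hands removed elements back to the internal pool (`internal.DeleteSpanLink`) and nils out their slots, so callers should not retain `SpanLink` handles across the call. The caller-visible filtering behavior is unchanged; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	links := ptrace.NewSpanLinkSlice()
	for i := 0; i < 3; i++ {
		links.AppendEmpty().SetDroppedAttributesCount(uint32(i))
	}

	// Drop every link that reported dropped attributes.
	links.RemoveIf(func(l ptrace.SpanLink) bool {
		return l.DroppedAttributesCount() > 0
	})

	fmt.Println(links.Len()) // 1
}
```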
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
index 306e59158af..9cbc685062d 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
@@ -11,7 +11,6 @@ import (
"sort"
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanSlice logically represents a slice of Span.
@@ -22,20 +21,19 @@ import (
// Must use NewSpanSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanSlice struct {
- orig *[]*otlptrace.Span
+ orig *[]*internal.Span
state *internal.State
}
-func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice {
+func newSpanSlice(orig *[]*internal.Span, state *internal.State) SpanSlice {
return SpanSlice{orig: orig, state: state}
}
-// NewSpanSlice creates a SpanSlice with 0 elements.
+// NewSpanSlice creates a new SpanSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanSlice() SpanSlice {
- orig := []*otlptrace.Span(nil)
- state := internal.StateMutable
- return newSpanSlice(&orig, &state)
+ orig := []*internal.Span(nil)
+ return newSpanSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
@@ -91,7 +89,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) {
return
}
- newOrig := make([]*otlptrace.Span, len(*es.orig), newCap)
+ newOrig := make([]*internal.Span, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
@@ -100,7 +98,7 @@ func (es SpanSlice) EnsureCapacity(newCap int) {
// It returns the newly added Span.
func (es SpanSlice) AppendEmpty() Span {
es.state.AssertMutable()
- *es.orig = append(*es.orig, &otlptrace.Span{})
+ *es.orig = append(*es.orig, internal.NewSpan())
return es.At(es.Len() - 1)
}
@@ -129,6 +127,9 @@ func (es SpanSlice) RemoveIf(f func(Span) bool) {
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
+ internal.DeleteSpan((*es.orig)[i], true)
+ (*es.orig)[i] = nil
+
continue
}
if newLen == i {
@@ -137,6 +138,8 @@ func (es SpanSlice) RemoveIf(f func(Span) bool) {
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
+ // Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
+ (*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
@@ -145,7 +148,10 @@ func (es SpanSlice) RemoveIf(f func(Span) bool) {
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanSlice) CopyTo(dest SpanSlice) {
dest.state.AssertMutable()
- *dest.orig = copyOrigSpanSlice(*dest.orig, *es.orig)
+ if es.orig == dest.orig {
+ return
+ }
+ *dest.orig = internal.CopySpanPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Span elements within SpanSlice given the
@@ -155,18 +161,3 @@ func (es SpanSlice) Sort(less func(a, b Span) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
-
-func copyOrigSpanSlice(dest, src []*otlptrace.Span) []*otlptrace.Span {
- if cap(dest) < len(src) {
- dest = make([]*otlptrace.Span, len(src))
- data := make([]otlptrace.Span, len(src))
- for i := range src {
- dest[i] = &data[i]
- }
- }
- dest = dest[:len(src)]
- for i := range src {
- copyOrigSpan(dest[i], src[i])
- }
- return dest
-}
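
`CopyTo` now short-circuits when source and destination share the same backing slice, making self-copies safe no-ops; the pooled element copy is otherwise delegated to `internal.CopySpanPtrSlice`. A sketch (the span name is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	spans := ptrace.NewSpanSlice()
	spans.AppendEmpty().SetName("query")

	spans.CopyTo(spans) // self-copy: now an explicit no-op

	dest := ptrace.NewSpanSlice()
	spans.CopyTo(dest)
	fmt.Println(dest.Len(), dest.At(0).Name()) // 1 query
}
```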
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
index 2040674d6cb..da1a8272601 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_status.go
@@ -1,14 +1,13 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// Status is an optional final status for this span. Semantically, when Status was not
@@ -20,11 +19,11 @@ import (
// Must use NewStatus function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Status struct {
- orig *otlptrace.Status
+ orig *internal.Status
state *internal.State
}
-func newStatus(orig *otlptrace.Status, state *internal.State) Status {
+func newStatus(orig *internal.Status, state *internal.State) Status {
return Status{orig: orig, state: state}
}
@@ -33,8 +32,7 @@ func newStatus(orig *otlptrace.Status, state *internal.State) Status {
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStatus() Status {
- state := internal.StateMutable
- return newStatus(&otlptrace.Status{}, &state)
+ return newStatus(internal.NewStatus(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
@@ -46,19 +44,8 @@ func (ms Status) MoveTo(dest Status) {
if ms.orig == dest.orig {
return
}
- *dest.orig = *ms.orig
- *ms.orig = otlptrace.Status{}
-}
-
-// Code returns the code associated with this Status.
-func (ms Status) Code() StatusCode {
- return StatusCode(ms.orig.Code)
-}
-
-// SetCode replaces the code associated with this Status.
-func (ms Status) SetCode(v StatusCode) {
- ms.state.AssertMutable()
- ms.orig.Code = otlptrace.Status_StatusCode(v)
+ internal.DeleteStatus(dest.orig, false)
+ *dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Message returns the message associated with this Status.
@@ -72,13 +59,19 @@ func (ms Status) SetMessage(v string) {
ms.orig.Message = v
}
+// Code returns the code associated with this Status.
+func (ms Status) Code() StatusCode {
+ return StatusCode(ms.orig.Code)
+}
+
+// SetCode replaces the code associated with this Status.
+func (ms Status) SetCode(v StatusCode) {
+ ms.state.AssertMutable()
+ ms.orig.Code = internal.StatusCode(v)
+}
+
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Status) CopyTo(dest Status) {
dest.state.AssertMutable()
- copyOrigStatus(dest.orig, ms.orig)
-}
-
-func copyOrigStatus(dest, src *otlptrace.Status) {
- dest.Code = src.Code
- dest.Message = src.Message
+ internal.CopyStatus(dest.orig, ms.orig)
}
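
Apart from the accessors being reordered, the `Status` surface is unchanged; a sketch (the message text is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	st := ptrace.NewStatus()
	st.SetCode(ptrace.StatusCodeError)
	st.SetMessage("upstream timeout")

	fmt.Println(st.Code(), st.Message()) // Error upstream timeout
}
```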
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go
new file mode 100644
index 00000000000..5e69bc03f27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_traces.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
+// To regenerate this file run "make genpdata".
+
+package ptrace
+
+import (
+ "go.opentelemetry.io/collector/pdata/internal"
+)
+
+// Traces is the top-level struct that is propagated through the traces pipeline.
+// Use NewTraces to create a new instance; a zero-initialized instance is not valid for use.
+//
+// This is a reference type, if passed by value and callee modifies it the
+// caller will see the modification.
+//
+// Must use NewTraces function to create new instances.
+// Important: zero-initialized instance is not valid for use.
+type Traces internal.TracesWrapper
+
+func newTraces(orig *internal.ExportTraceServiceRequest, state *internal.State) Traces {
+ return Traces(internal.NewTracesWrapper(orig, state))
+}
+
+// NewTraces creates a new empty Traces.
+//
+// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
+// OR directly access the member if this is embedded in another struct.
+func NewTraces() Traces {
+ return newTraces(internal.NewExportTraceServiceRequest(), internal.NewState())
+}
+
+// MoveTo moves all properties from the current struct overriding the destination and
+// resetting the current instance to its zero value
+func (ms Traces) MoveTo(dest Traces) {
+ ms.getState().AssertMutable()
+ dest.getState().AssertMutable()
+ // If they point to the same data, they are the same, nothing to do.
+ if ms.getOrig() == dest.getOrig() {
+ return
+ }
+ internal.DeleteExportTraceServiceRequest(dest.getOrig(), false)
+ *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
+}
+
+// ResourceSpans returns the ResourceSpans associated with this Traces.
+func (ms Traces) ResourceSpans() ResourceSpansSlice {
+ return newResourceSpansSlice(&ms.getOrig().ResourceSpans, ms.getState())
+}
+
+// CopyTo copies all properties from the current struct overriding the destination.
+func (ms Traces) CopyTo(dest Traces) {
+ dest.getState().AssertMutable()
+ internal.CopyExportTraceServiceRequest(dest.getOrig(), ms.getOrig())
+}
+
+func (ms Traces) getOrig() *internal.ExportTraceServiceRequest {
+ return internal.GetTracesOrig(internal.TracesWrapper(ms))
+}
+
+func (ms Traces) getState() *internal.State {
+ return internal.GetTracesState(internal.TracesWrapper(ms))
+}
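
The generated wrapper keeps the familiar construction and traversal path; a sketch (the span name is illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().
		Spans().AppendEmpty()
	span.SetName("GET /api")

	fmt.Println(td.SpanCount()) // 1
}
```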
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
index 2e35a95913a..a6f0dd36f41 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go
@@ -4,214 +4,38 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
- "bytes"
- "fmt"
+ "slices"
- jsoniter "github.com/json-iterator/go"
-
- "go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
-// JSONMarshaler marshals pdata.Traces to JSON bytes using the OTLP/JSON format.
+// JSONMarshaler marshals Traces to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalTraces to the OTLP/JSON format.
func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) {
- buf := bytes.Buffer{}
- pb := internal.TracesToProto(internal.Traces(td))
- err := json.Marshal(&buf, &pb)
- return buf.Bytes(), err
+ dest := json.BorrowStream(nil)
+ defer json.ReturnStream(dest)
+ td.getOrig().MarshalJSON(dest)
+ if dest.Error() != nil {
+ return nil, dest.Error()
+ }
+ return slices.Clone(dest.Buffer()), nil
}
-// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Traces.
+// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to Traces.
type JSONUnmarshaler struct{}
-// UnmarshalTraces from OTLP/JSON format into pdata.Traces.
+// UnmarshalTraces from OTLP/JSON format into Traces.
func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
- iter := jsoniter.ConfigFastest.BorrowIterator(buf)
- defer jsoniter.ConfigFastest.ReturnIterator(iter)
+ iter := json.BorrowIterator(buf)
+ defer json.ReturnIterator(iter)
td := NewTraces()
- td.unmarshalJsoniter(iter)
- if iter.Error != nil {
- return Traces{}, iter.Error
+ td.getOrig().UnmarshalJSON(iter)
+ if iter.Error() != nil {
+ return Traces{}, iter.Error()
}
otlp.MigrateTraces(td.getOrig().ResourceSpans)
return td, nil
}
-
-func (ms Traces) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resourceSpans", "resource_spans":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.ResourceSpans().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ResourceSpans) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "resource":
- json.ReadResource(iter, internal.GetOrigResource(internal.Resource(ms.Resource())))
- case "scopeSpans", "scope_spans":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.ScopeSpans().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (ms ScopeSpans) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "scope":
- json.ReadScope(iter, &ms.orig.Scope)
- case "spans":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- ms.Spans().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "schemaUrl", "schema_url":
- ms.orig.SchemaUrl = iter.ReadString()
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (dest Span) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "traceId", "trace_id":
- if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readSpan.traceId", fmt.Sprintf("parse trace_id:%v", err))
- }
- case "spanId", "span_id":
- if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readSpan.spanId", fmt.Sprintf("parse span_id:%v", err))
- }
- case "traceState", "trace_state":
- dest.TraceState().FromRaw(iter.ReadString())
- case "parentSpanId", "parent_span_id":
- if err := dest.orig.ParentSpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readSpan.parentSpanId", fmt.Sprintf("parse parent_span_id:%v", err))
- }
- case "flags":
- dest.orig.Flags = json.ReadUint32(iter)
- case "name":
- dest.orig.Name = iter.ReadString()
- case "kind":
- dest.orig.Kind = otlptrace.Span_SpanKind(json.ReadEnumValue(iter, otlptrace.Span_SpanKind_value))
- case "startTimeUnixNano", "start_time_unix_nano":
- dest.orig.StartTimeUnixNano = json.ReadUint64(iter)
- case "endTimeUnixNano", "end_time_unix_nano":
- dest.orig.EndTimeUnixNano = json.ReadUint64(iter)
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- dest.orig.DroppedAttributesCount = json.ReadUint32(iter)
- case "events":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- dest.Events().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "droppedEventsCount", "dropped_events_count":
- dest.orig.DroppedEventsCount = json.ReadUint32(iter)
- case "links":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- dest.Links().AppendEmpty().unmarshalJsoniter(iter)
- return true
- })
- case "droppedLinksCount", "dropped_links_count":
- dest.orig.DroppedLinksCount = json.ReadUint32(iter)
- case "status":
- dest.Status().unmarshalJsoniter(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (dest Status) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "message":
- dest.orig.Message = iter.ReadString()
- case "code":
- dest.orig.Code = otlptrace.Status_StatusCode(json.ReadEnumValue(iter, otlptrace.Status_StatusCode_value))
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (dest SpanLink) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "traceId", "trace_id":
- if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readSpanLink", fmt.Sprintf("parse trace_id:%v", err))
- }
- case "spanId", "span_id":
- if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil {
- iter.ReportError("readSpanLink", fmt.Sprintf("parse span_id:%v", err))
- }
- case "traceState", "trace_state":
- dest.orig.TraceState = iter.ReadString()
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- dest.orig.DroppedAttributesCount = json.ReadUint32(iter)
- case "flags":
- dest.orig.Flags = json.ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
-
-func (dest SpanEvent) unmarshalJsoniter(iter *jsoniter.Iterator) {
- iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
- switch f {
- case "timeUnixNano", "time_unix_nano":
- dest.orig.TimeUnixNano = json.ReadUint64(iter)
- case "name":
- dest.orig.Name = iter.ReadString()
- case "attributes":
- iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
- dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter))
- return true
- })
- case "droppedAttributesCount", "dropped_attributes_count":
- dest.orig.DroppedAttributesCount = json.ReadUint32(iter)
- default:
- iter.Skip()
- }
- return true
- })
-}
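
The hand-rolled jsoniter decoder is gone; marshaling now streams through the generated `MarshalJSON`/`UnmarshalJSON` methods on the internal structs. From a caller's perspective the round trip is unchanged; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().
		Spans().AppendEmpty().SetName("checkout") // illustrative

	var m ptrace.JSONMarshaler
	data, err := m.MarshalTraces(td)
	if err != nil {
		panic(err)
	}

	var u ptrace.JSONUnmarshaler
	back, err := u.UnmarshalTraces(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.SpanCount()) // 1
}
```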
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
index a3c78be27c1..c9a02b2899a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
@@ -3,41 +3,40 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
-)
-
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) {
- pb := internal.TracesToProto(internal.Traces(td))
- return pb.Marshal()
+ size := td.getOrig().SizeProto()
+ buf := make([]byte, size)
+ _ = td.getOrig().MarshalProto(buf)
+ return buf, nil
}
func (e *ProtoMarshaler) TracesSize(td Traces) int {
- pb := internal.TracesToProto(internal.Traces(td))
- return pb.Size()
+ return td.getOrig().SizeProto()
}
-func (e *ProtoMarshaler) ResourceSpansSize(rs ResourceSpans) int {
- return rs.orig.Size()
+func (e *ProtoMarshaler) ResourceSpansSize(td ResourceSpans) int {
+ return td.orig.SizeProto()
}
-func (e *ProtoMarshaler) ScopeSpansSize(ss ScopeSpans) int {
- return ss.orig.Size()
+func (e *ProtoMarshaler) ScopeSpansSize(td ScopeSpans) int {
+ return td.orig.SizeProto()
}
-func (e *ProtoMarshaler) SpanSize(span Span) int {
- return span.orig.Size()
+func (e *ProtoMarshaler) SpanSize(td Span) int {
+ return td.orig.SizeProto()
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
- pb := otlptrace.TracesData{}
- err := pb.Unmarshal(buf)
- return Traces(internal.TracesFromProto(pb)), err
+ td := NewTraces()
+ err := td.getOrig().UnmarshalProto(buf)
+ if err != nil {
+ return Traces{}, err
+ }
+ return td, nil
}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
index 561d82cfffa..bb1702ffe15 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/span_kind.go
@@ -4,7 +4,7 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// SpanKind is the type of span. Can be used to specify additional relationships between spans
@@ -13,25 +13,25 @@ type SpanKind int32
const (
// SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used.
- SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED)
+ SpanKindUnspecified = SpanKind(internal.SpanKind_SPAN_KIND_UNSPECIFIED)
// SpanKindInternal indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
- SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL)
+ SpanKindInternal = SpanKind(internal.SpanKind_SPAN_KIND_INTERNAL)
// SpanKindServer indicates that the span covers server-side handling of an RPC or other
// remote network request.
- SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER)
+ SpanKindServer = SpanKind(internal.SpanKind_SPAN_KIND_SERVER)
// SpanKindClient indicates that the span describes a request to some remote service.
- SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT)
+ SpanKindClient = SpanKind(internal.SpanKind_SPAN_KIND_CLIENT)
// SpanKindProducer indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans.
// A PRODUCER span ends when the message was accepted by the broker while the logical processing of
// the message might span a much longer time.
- SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER)
+ SpanKindProducer = SpanKind(internal.SpanKind_SPAN_KIND_PRODUCER)
// SpanKindConsumer indicates that the span describes consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship between
// producer and consumer spans.
- SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER)
+ SpanKindConsumer = SpanKind(internal.SpanKind_SPAN_KIND_CONSUMER)
)
// String returns the string representation of the SpanKind.
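
Only the enum's home package changes here; the constant values and `String()` output stay the same, so existing callers compile unchanged. Sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetKind(ptrace.SpanKindServer)

	fmt.Println(span.Kind()) // Server
}
```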
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
index 18a21f56ba8..d1da464363e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/status_code.go
@@ -4,7 +4,7 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
- otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
+ "go.opentelemetry.io/collector/pdata/internal"
)
// StatusCode mirrors the codes defined at
@@ -12,9 +12,9 @@ import (
type StatusCode int32
const (
- StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET)
- StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK)
- StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR)
+ StatusCodeUnset = StatusCode(internal.StatusCode_STATUS_CODE_UNSET)
+ StatusCodeOk = StatusCode(internal.StatusCode_STATUS_CODE_OK)
+ StatusCodeError = StatusCode(internal.StatusCode_STATUS_CODE_ERROR)
)
// String returns the string representation of the StatusCode.
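
Same story for the status codes: only the underlying enum source moves. Sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.Status().SetCode(ptrace.StatusCodeOk)

	fmt.Println(span.Status().Code() == ptrace.StatusCodeOk) // true
}
```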
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go
index a4b71e17853..90833ee6385 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/traces.go
@@ -3,41 +3,14 @@
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
-import (
- "go.opentelemetry.io/collector/pdata/internal"
- otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
-)
-
-// Traces is the top-level struct that is propagated through the traces pipeline.
-// Use NewTraces to create new instance, zero-initialized instance is not valid for use.
-type Traces internal.Traces
-
-func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest) Traces {
- state := internal.StateMutable
- return Traces(internal.NewTraces(orig, &state))
-}
-
-func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest {
- return internal.GetOrigTraces(internal.Traces(ms))
-}
-
-func (ms Traces) getState() *internal.State {
- return internal.GetTracesState(internal.Traces(ms))
-}
-
-// NewTraces creates a new Traces struct.
-func NewTraces() Traces {
- return newTraces(&otlpcollectortrace.ExportTraceServiceRequest{})
+// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
+func (ms Traces) MarkReadOnly() {
+ ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Traces instance is read-only.
func (ms Traces) IsReadOnly() bool {
- return *ms.getState() == internal.StateReadOnly
-}
-
-// CopyTo copies the Traces instance overriding the destination.
-func (ms Traces) CopyTo(dest Traces) {
- ms.ResourceSpans().CopyTo(dest.ResourceSpans())
+ return ms.getState().IsReadOnly()
}
// SpanCount calculates the total number of spans.
@@ -53,13 +26,3 @@ func (ms Traces) SpanCount() int {
}
return spanCount
}
-
-// ResourceSpans returns the ResourceSpansSlice associated with this Metrics.
-func (ms Traces) ResourceSpans() ResourceSpansSlice {
- return newResourceSpansSlice(&ms.getOrig().ResourceSpans, internal.GetTracesState(internal.Traces(ms)))
-}
-
-// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
-func (ms Traces) MarkReadOnly() {
- internal.SetTracesState(internal.Traces(ms), internal.StateReadOnly)
-}
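
`MarkReadOnly` and `IsReadOnly` now go through `State` methods rather than writing or comparing the state value directly; the observable behavior is unchanged. Sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty()

	td.MarkReadOnly()
	fmt.Println(td.IsReadOnly()) // true

	// Mutating td now panics via AssertMutable; copy into a fresh
	// instance first when a mutable view is needed.
	cp := ptrace.NewTraces()
	td.CopyTo(cp)
	cp.ResourceSpans().AppendEmpty()
	fmt.Println(cp.ResourceSpans().Len()) // 2
}
```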
diff --git a/vendor/go.opentelemetry.io/collector/processor/README.md b/vendor/go.opentelemetry.io/collector/processor/README.md
index f5636e0d748..c7f9ff5539c 100644
--- a/vendor/go.opentelemetry.io/collector/processor/README.md
+++ b/vendor/go.opentelemetry.io/collector/processor/README.md
@@ -1,9 +1,7 @@
# General Information
Processors are used at various stages of a pipeline. Generally, a processor
-pre-processes data before it is exported (e.g. modify attributes or sample) or
-helps ensure that data makes it through a pipeline successfully (e.g.
-batch/retry).
+pre-processes data before it is exported (e.g. modify attributes or sample).
Some important aspects of pipelines and processors to be aware of:
- [Recommended Processors](#recommended-processors)
@@ -32,7 +30,7 @@ processor documentation for more information.
1. [memory_limiter](memorylimiterprocessor/README.md)
2. Any sampling or initial filtering processors
3. Any processor relying on sending source from `Context` (e.g. `k8sattributes`)
-3. [batch](batchprocessor/README.md)
+3. [batch](batchprocessor/README.md), although prefer using the exporter's batching capabilities
4. Any other processors
## Data Ownership
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE
deleted file mode 100644
index 261eeb9e9f8..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md
deleted file mode 100644
index 5565260ae55..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# OpenTelemetry Zap Log Bridge
-
-[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/contrib/bridges/otelzap.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib/bridges/otelzap)
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go
deleted file mode 100644
index 7ea50c5bb4b..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/logutil/convert.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
-
-import (
- "fmt"
- "math"
- "reflect"
- "strconv"
- "time"
-
- "go.opentelemetry.io/otel/log"
-)
-
-// convertValue converts various types to log.Value.
-func convertValue(v any) log.Value {
- // Handling the most common types without reflect is a small perf win.
- switch val := v.(type) {
- case bool:
- return log.BoolValue(val)
- case string:
- return log.StringValue(val)
- case int:
- return log.Int64Value(int64(val))
- case int8:
- return log.Int64Value(int64(val))
- case int16:
- return log.Int64Value(int64(val))
- case int32:
- return log.Int64Value(int64(val))
- case int64:
- return log.Int64Value(val)
- case uint:
- return convertUintValue(uint64(val))
- case uint8:
- return log.Int64Value(int64(val))
- case uint16:
- return log.Int64Value(int64(val))
- case uint32:
- return log.Int64Value(int64(val))
- case uint64:
- return convertUintValue(val)
- case uintptr:
- return convertUintValue(uint64(val))
- case float32:
- return log.Float64Value(float64(val))
- case float64:
- return log.Float64Value(val)
- case time.Duration:
- return log.Int64Value(val.Nanoseconds())
- case complex64:
- r := log.Float64("r", real(complex128(val)))
- i := log.Float64("i", imag(complex128(val)))
- return log.MapValue(r, i)
- case complex128:
- r := log.Float64("r", real(val))
- i := log.Float64("i", imag(val))
- return log.MapValue(r, i)
- case time.Time:
- return log.Int64Value(val.UnixNano())
- case []byte:
- return log.BytesValue(val)
- case error:
- return log.StringValue(val.Error())
- }
-
- t := reflect.TypeOf(v)
- if t == nil {
- return log.Value{}
- }
- val := reflect.ValueOf(v)
- switch t.Kind() {
- case reflect.Struct:
- return log.StringValue(fmt.Sprintf("%+v", v))
- case reflect.Slice, reflect.Array:
- items := make([]log.Value, 0, val.Len())
- for i := 0; i < val.Len(); i++ {
- items = append(items, convertValue(val.Index(i).Interface()))
- }
- return log.SliceValue(items...)
- case reflect.Map:
- kvs := make([]log.KeyValue, 0, val.Len())
- for _, k := range val.MapKeys() {
- var key string
- switch k.Kind() {
- case reflect.String:
- key = k.String()
- default:
- key = fmt.Sprintf("%+v", k.Interface())
- }
- kvs = append(kvs, log.KeyValue{
- Key: key,
- Value: convertValue(val.MapIndex(k).Interface()),
- })
- }
- return log.MapValue(kvs...)
- case reflect.Ptr, reflect.Interface:
- if val.IsNil() {
- return log.Value{}
- }
- return convertValue(val.Elem().Interface())
- }
-
- // Try to handle this as gracefully as possible.
- //
- // Don't panic here. it is preferable to have user's open issue
- // asking why their attributes have a "unhandled: " prefix than
- // say that their code is panicking.
- return log.StringValue(fmt.Sprintf("unhandled: (%s) %+v", t, v))
-}
-
-// convertUintValue converts a uint64 to a log.Value.
-// If the value is too large to fit in an int64, it is converted to a string.
-func convertUintValue(v uint64) log.Value {
- if v > math.MaxInt64 {
- return log.StringValue(strconv.FormatUint(v, 10))
- }
- return log.Int64Value(int64(v))
-}
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
deleted file mode 100644
index f3624042790..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package otelzap provides a bridge between the [go.uber.org/zap] and
-// [OpenTelemetry].
-//
-// # Record Conversion
-//
-// The [zapcore.Entry] and [zapcore.Field] are converted to OpenTelemetry [log.Record] in the following
-// way:
-//
-// - Time is set as the Timestamp.
-// - Message is set as the Body using a [log.StringValue].
-// - Level is transformed and set as the Severity. The SeverityText is also
-// set.
-// - Fields are transformed and set as the Attributes.
-// - Field value of type [context.Context] is used as context when emitting log records.
-// - For named loggers, LoggerName is used to access [log.Logger] from [log.LoggerProvider]
-//
-// The Level is transformed to the OpenTelemetry Severity types in the following way.
-//
-// - [zapcore.DebugLevel] is transformed to [log.SeverityDebug]
-// - [zapcore.InfoLevel] is transformed to [log.SeverityInfo]
-// - [zapcore.WarnLevel] is transformed to [log.SeverityWarn]
-// - [zapcore.ErrorLevel] is transformed to [log.SeverityError]
-// - [zapcore.DPanicLevel] is transformed to [log.SeverityFatal1]
-// - [zapcore.PanicLevel] is transformed to [log.SeverityFatal2]
-// - [zapcore.FatalLevel] is transformed to [log.SeverityFatal3]
-//
-// Fields are transformed based on their type into log attributes, or
-// into a string value encoded using [fmt.Sprintf] if there is no matching type.
-//
-// [OpenTelemetry]: https://opentelemetry.io/docs/concepts/signals/logs/
-package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
-
-import (
- "context"
- "slices"
-
- "go.uber.org/zap/zapcore"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/log"
- "go.opentelemetry.io/otel/log/global"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type config struct {
- provider log.LoggerProvider
- version string
- schemaURL string
- attributes []attribute.KeyValue
-}
-
-func newConfig(options []Option) config {
- var c config
- for _, opt := range options {
- c = opt.apply(c)
- }
-
- if c.provider == nil {
- c.provider = global.GetLoggerProvider()
- }
-
- return c
-}
-
-// Option configures a [Core].
-type Option interface {
- apply(config) config
-}
-
-type optFunc func(config) config
-
-func (f optFunc) apply(c config) config { return f(c) }
-
-// WithVersion returns an [Option] that configures the version of the
-// [log.Logger] used by a [Core]. The version should be the version of the
-// package that is being logged.
-func WithVersion(version string) Option {
- return optFunc(func(c config) config {
- c.version = version
- return c
- })
-}
-
-// WithSchemaURL returns an [Option] that configures the semantic convention
-// schema URL of the [log.Logger] used by a [Core]. The schemaURL should be
-// the schema URL for the semantic conventions used in log records.
-func WithSchemaURL(schemaURL string) Option {
- return optFunc(func(c config) config {
- c.schemaURL = schemaURL
- return c
- })
-}
-
-// WithAttributes returns an [Option] that configures the instrumentation scope
-// attributes of the [log.Logger] used by a [Core].
-func WithAttributes(attributes ...attribute.KeyValue) Option {
- return optFunc(func(c config) config {
- c.attributes = attributes
- return c
- })
-}
-
-// WithLoggerProvider returns an [Option] that configures [log.LoggerProvider]
-// used by a [Core] to create its [log.Logger].
-//
-// By default if this Option is not provided, the Handler will use the global
-// LoggerProvider.
-func WithLoggerProvider(provider log.LoggerProvider) Option {
- return optFunc(func(c config) config {
- c.provider = provider
- return c
- })
-}
-
-// Core is a [zapcore.Core] that sends logging records to OpenTelemetry.
-type Core struct {
- provider log.LoggerProvider
- logger log.Logger
- opts []log.LoggerOption
- attr []log.KeyValue
- ctx context.Context
-}
-
-// Compile-time check *Core implements zapcore.Core.
-var _ zapcore.Core = (*Core)(nil)
-
-// NewCore creates a new [zapcore.Core] that can be used with [go.uber.org/zap.New].
-// The name should be the package import path that is being logged.
-// The name is ignored for named loggers created using [go.uber.org/zap.Logger.Named].
-func NewCore(name string, opts ...Option) *Core {
- cfg := newConfig(opts)
-
- var loggerOpts []log.LoggerOption
- if cfg.version != "" {
- loggerOpts = append(loggerOpts, log.WithInstrumentationVersion(cfg.version))
- }
- if cfg.schemaURL != "" {
- loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL))
- }
- if cfg.attributes != nil {
- loggerOpts = append(loggerOpts, log.WithInstrumentationAttributes(cfg.attributes...))
- }
-
- logger := cfg.provider.Logger(name, loggerOpts...)
-
- return &Core{
- provider: cfg.provider,
- logger: logger,
- opts: loggerOpts,
- ctx: context.Background(),
- }
-}
-
-// Enabled decides whether a given logging level is enabled when logging a message.
-func (o *Core) Enabled(level zapcore.Level) bool {
- param := log.EnabledParameters{Severity: convertLevel(level)}
- return o.logger.Enabled(context.Background(), param)
-}
-
-// With adds structured context to the Core.
-func (o *Core) With(fields []zapcore.Field) zapcore.Core {
- cloned := o.clone()
- if len(fields) > 0 {
- ctx, attrbuf := convertField(fields)
- if ctx != nil {
- cloned.ctx = ctx
- }
- cloned.attr = append(cloned.attr, attrbuf...)
- }
- return cloned
-}
-
-func (o *Core) clone() *Core {
- return &Core{
- provider: o.provider,
- opts: o.opts,
- logger: o.logger,
- attr: slices.Clone(o.attr),
- ctx: o.ctx,
- }
-}
-
-// Sync flushes buffered logs (if any).
-func (o *Core) Sync() error {
- return nil
-}
-
-// Check determines whether the supplied Entry should be logged.
-// If the entry should be logged, the Core adds itself to the CheckedEntry and returns the result.
-func (o *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
- param := log.EnabledParameters{Severity: convertLevel(ent.Level)}
-
- logger := o.logger
- if ent.LoggerName != "" {
- logger = o.provider.Logger(ent.LoggerName, o.opts...)
- }
-
- if logger.Enabled(context.Background(), param) {
- return ce.AddCore(ent, o)
- }
- return ce
-}
-
-// Write method encodes zap fields to OTel logs and emits them.
-func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error {
- r := log.Record{}
- r.SetTimestamp(ent.Time)
- r.SetBody(log.StringValue(ent.Message))
- r.SetSeverity(convertLevel(ent.Level))
- r.SetSeverityText(ent.Level.String())
-
- r.AddAttributes(o.attr...)
- if ent.Caller.Defined {
- r.AddAttributes(
- log.String(string(semconv.CodeFilepathKey), ent.Caller.File),
- log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line),
- log.String(string(semconv.CodeFunctionKey), ent.Caller.Function),
- )
- }
- if ent.Stack != "" {
- r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack))
- }
- if len(fields) > 0 {
- ctx, attrbuf := convertField(fields)
- if ctx != nil {
- o.ctx = ctx
- }
- r.AddAttributes(attrbuf...)
- }
-
- logger := o.logger
- if ent.LoggerName != "" {
- logger = o.provider.Logger(ent.LoggerName, o.opts...)
- }
- logger.Emit(o.ctx, r)
- return nil
-}
-
-func convertField(fields []zapcore.Field) (context.Context, []log.KeyValue) {
- var ctx context.Context
- enc := newObjectEncoder(len(fields))
- for _, field := range fields {
- if ctxFld, ok := field.Interface.(context.Context); ok {
- ctx = ctxFld
- continue
- }
- field.AddTo(enc)
- }
-
- enc.calculate(enc.root)
- return ctx, enc.root.attrs
-}
-
-func convertLevel(level zapcore.Level) log.Severity {
- switch level {
- case zapcore.DebugLevel:
- return log.SeverityDebug
- case zapcore.InfoLevel:
- return log.SeverityInfo
- case zapcore.WarnLevel:
- return log.SeverityWarn
- case zapcore.ErrorLevel:
- return log.SeverityError
- case zapcore.DPanicLevel:
- return log.SeverityFatal1
- case zapcore.PanicLevel:
- return log.SeverityFatal2
- case zapcore.FatalLevel:
- return log.SeverityFatal3
- default:
- return log.SeverityUndefined
- }
-}
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go
deleted file mode 100644
index 8147576ae77..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
-
-import (
- "time"
-
- "go.uber.org/zap/zapcore"
-
- "go.opentelemetry.io/otel/log"
-)
-
-var (
- _ zapcore.ObjectEncoder = (*objectEncoder)(nil)
- _ zapcore.ArrayEncoder = (*arrayEncoder)(nil)
-)
-
-type namespace struct {
- name string
- attrs []log.KeyValue
- next *namespace
-}
-
-// objectEncoder implements zapcore.ObjectEncoder.
-// It encodes given fields to OTel key-values.
-type objectEncoder struct {
- // root is a pointer to the default namespace
- root *namespace
- // cur is a pointer to the namespace we're currently writing to.
- cur *namespace
-}
-
-func newObjectEncoder(n int) *objectEncoder {
- keyval := make([]log.KeyValue, 0, n)
- m := &namespace{
- attrs: keyval,
- }
- return &objectEncoder{
- root: m,
- cur: m,
- }
-}
-
-// It iterates to the end of the linked list and appends namespace data.
-// Run this function before accessing complete result.
-func (m *objectEncoder) calculate(o *namespace) {
- if o.next == nil {
- return
- }
- m.calculate(o.next)
- o.attrs = append(o.attrs, log.Map(o.next.name, o.next.attrs...))
-}
-
-func (m *objectEncoder) AddArray(key string, v zapcore.ArrayMarshaler) error {
- arr := newArrayEncoder()
- err := v.MarshalLogArray(arr)
- m.cur.attrs = append(m.cur.attrs, log.Slice(key, arr.elems...))
- return err
-}
-
-func (m *objectEncoder) AddObject(k string, v zapcore.ObjectMarshaler) error {
- // Similar to console_encoder which uses capacity of 2:
- // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33.
- newobj := newObjectEncoder(2)
- err := v.MarshalLogObject(newobj)
- newobj.calculate(newobj.root)
- m.cur.attrs = append(m.cur.attrs, log.Map(k, newobj.root.attrs...))
- return err
-}
-
-func (m *objectEncoder) AddBinary(k string, v []byte) {
- m.cur.attrs = append(m.cur.attrs, log.Bytes(k, v))
-}
-
-func (m *objectEncoder) AddByteString(k string, v []byte) {
- m.cur.attrs = append(m.cur.attrs, log.String(k, string(v)))
-}
-
-func (m *objectEncoder) AddBool(k string, v bool) {
- m.cur.attrs = append(m.cur.attrs, log.Bool(k, v))
-}
-
-func (m *objectEncoder) AddDuration(k string, v time.Duration) {
- m.AddInt64(k, v.Nanoseconds())
-}
-
-func (m *objectEncoder) AddComplex128(k string, v complex128) {
- r := log.Float64("r", real(v))
- i := log.Float64("i", imag(v))
- m.cur.attrs = append(m.cur.attrs, log.Map(k, r, i))
-}
-
-func (m *objectEncoder) AddFloat64(k string, v float64) {
- m.cur.attrs = append(m.cur.attrs, log.Float64(k, v))
-}
-
-func (m *objectEncoder) AddInt64(k string, v int64) {
- m.cur.attrs = append(m.cur.attrs, log.Int64(k, v))
-}
-
-func (m *objectEncoder) AddInt(k string, v int) {
- m.cur.attrs = append(m.cur.attrs, log.Int(k, v))
-}
-
-func (m *objectEncoder) AddString(k string, v string) {
- m.cur.attrs = append(m.cur.attrs, log.String(k, v))
-}
-
-func (m *objectEncoder) AddUint64(k string, v uint64) {
- m.cur.attrs = append(m.cur.attrs,
- log.KeyValue{
- Key: k,
- Value: assignUintValue(v),
- })
-}
-
-func (m *objectEncoder) AddReflected(k string, v interface{}) error {
- m.cur.attrs = append(m.cur.attrs,
- log.KeyValue{
- Key: k,
- Value: convertValue(v),
- })
- return nil
-}
-
-// OpenNamespace opens an isolated namespace where all subsequent fields will
-// be added.
-func (m *objectEncoder) OpenNamespace(k string) {
- keyValue := make([]log.KeyValue, 0, 5)
- s := &namespace{
- name: k,
- attrs: keyValue,
- }
- m.cur.next = s
- m.cur = s
-}
-
-func (m *objectEncoder) AddComplex64(k string, v complex64) {
- m.AddComplex128(k, complex128(v))
-}
-
-func (m *objectEncoder) AddTime(k string, v time.Time) {
- m.AddInt64(k, v.UnixNano())
-}
-
-func (m *objectEncoder) AddFloat32(k string, v float32) {
- m.AddFloat64(k, float64(v))
-}
-
-func (m *objectEncoder) AddInt32(k string, v int32) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddInt16(k string, v int16) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddInt8(k string, v int8) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddUint(k string, v uint) {
- m.AddUint64(k, uint64(v))
-}
-
-func (m *objectEncoder) AddUint32(k string, v uint32) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddUint16(k string, v uint16) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddUint8(k string, v uint8) {
- m.AddInt64(k, int64(v))
-}
-
-func (m *objectEncoder) AddUintptr(k string, v uintptr) {
- m.AddUint64(k, uint64(v))
-}
-
-func assignUintValue(v uint64) log.Value {
- const maxInt64 = ^uint64(0) >> 1
- if v > maxInt64 {
- return log.Float64Value(float64(v))
- }
- return log.Int64Value(int64(v)) // nolint:gosec // Overflow checked above.
-}
-
-// arrayEncoder implements [zapcore.ArrayEncoder].
-type arrayEncoder struct {
- elems []log.Value
-}
-
-func newArrayEncoder() *arrayEncoder {
- return &arrayEncoder{
- // Similar to console_encoder which uses capacity of 2:
- // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33.
- elems: make([]log.Value, 0, 2),
- }
-}
-
-func (a *arrayEncoder) AppendArray(v zapcore.ArrayMarshaler) error {
- arr := newArrayEncoder()
- err := v.MarshalLogArray(arr)
- a.elems = append(a.elems, log.SliceValue(arr.elems...))
- return err
-}
-
-func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error {
- // Similar to console_encoder which uses capacity of 2:
- // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33.
- m := newObjectEncoder(2)
- err := v.MarshalLogObject(m)
- m.calculate(m.root)
- a.elems = append(a.elems, log.MapValue(m.root.attrs...))
- return err
-}
-
-func (a *arrayEncoder) AppendReflected(v interface{}) error {
- a.elems = append(a.elems, convertValue(v))
- return nil
-}
-
-func (a *arrayEncoder) AppendByteString(v []byte) {
- a.elems = append(a.elems, log.StringValue(string(v)))
-}
-
-func (a *arrayEncoder) AppendBool(v bool) {
- a.elems = append(a.elems, log.BoolValue(v))
-}
-
-func (a *arrayEncoder) AppendFloat64(v float64) {
- a.elems = append(a.elems, log.Float64Value(v))
-}
-
-func (a *arrayEncoder) AppendFloat32(v float32) {
- a.AppendFloat64(float64(v))
-}
-
-func (a *arrayEncoder) AppendInt(v int) {
- a.elems = append(a.elems, log.IntValue(v))
-}
-
-func (a *arrayEncoder) AppendInt64(v int64) {
- a.elems = append(a.elems, log.Int64Value(v))
-}
-
-func (a *arrayEncoder) AppendString(v string) {
- a.elems = append(a.elems, log.StringValue(v))
-}
-
-func (a *arrayEncoder) AppendComplex128(v complex128) {
- r := log.Float64("r", real(v))
- i := log.Float64("i", imag(v))
- a.elems = append(a.elems, log.MapValue(r, i))
-}
-
-func (a *arrayEncoder) AppendUint64(v uint64) {
- a.elems = append(a.elems, assignUintValue(v))
-}
-
-func (a *arrayEncoder) AppendComplex64(v complex64) { a.AppendComplex128(complex128(v)) }
-func (a *arrayEncoder) AppendDuration(v time.Duration) { a.AppendInt64(v.Nanoseconds()) }
-func (a *arrayEncoder) AppendInt32(v int32) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendInt16(v int16) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendInt8(v int8) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendTime(v time.Time) { a.AppendInt64(v.UnixNano()) }
-func (a *arrayEncoder) AppendUint(v uint) { a.AppendUint64(uint64(v)) }
-func (a *arrayEncoder) AppendUint32(v uint32) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendUint16(v uint16) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendUint8(v uint8) { a.AppendInt64(int64(v)) }
-func (a *arrayEncoder) AppendUintptr(v uintptr) { a.AppendUint64(uint64(v)) }
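
One detail worth calling out in the deleted encoder is `assignUintValue`: a `uint64` above `math.MaxInt64` cannot be stored losslessly in an `Int64Value`, so it degrades to a `Float64Value`, trading precision for range. A minimal standalone sketch of the same guard, assuming only `go.opentelemetry.io/otel/log`:

```go
package main

import (
	"fmt"
	"math"

	"go.opentelemetry.io/otel/log"
)

// toLogValue mirrors assignUintValue above: values that fit in an
// int64 stay lossless integers; larger values fall back to float64.
func toLogValue(v uint64) log.Value {
	if v > math.MaxInt64 {
		return log.Float64Value(float64(v))
	}
	return log.Int64Value(int64(v)) // safe: overflow checked above
}

func main() {
	fmt.Println(toLogValue(42).Kind() == log.KindInt64)               // true
	fmt.Println(toLogValue(math.MaxUint64).Kind() == log.KindFloat64) // true
}
```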
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go
deleted file mode 100644
index 5c8b2eea7e4..00000000000
--- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
-
-// Generate convert:
-//go:generate gotmpl --body=../../internal/shared/logutil/convert_test.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert_test.go
-//go:generate gotmpl --body=../../internal/shared/logutil/convert.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE
index 261eeb9e9f8..f1aee0f1100 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go
index 222b35871fa..942566e6550 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go
@@ -11,11 +11,12 @@ import (
"strings"
"sync"
- "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv"
)
// ScopeName is the instrumentation scope name.
@@ -360,7 +361,10 @@ func (ct *clientTracer) got100Continue() {
if ct.useSpans {
span = ct.span("http.receive")
}
- span.AddEvent("GOT 100 - Continue")
+ // It's possible that Got100Continue is called before GotFirstResponseByte, at which point span can be `nil`.
+ if span != nil {
+ span.AddEvent("GOT 100 - Continue")
+ }
}
func (ct *clientTracer) wait100Continue() {
@@ -368,7 +372,10 @@ func (ct *clientTracer) wait100Continue() {
if ct.useSpans {
span = ct.span("http.send")
}
- span.AddEvent("GOT 100 - Wait")
+ // It's possible that Wait100Continue is called before GotFirstResponseByte, at which point span can be `nil`.
+ if span != nil {
+ span.AddEvent("GOT 100 - Wait")
+ }
}
func (ct *clientTracer) got1xxResponse(code int, header textproto.MIMEHeader) error {
@@ -376,10 +383,13 @@ func (ct *clientTracer) got1xxResponse(code int, header textproto.MIMEHeader) er
if ct.useSpans {
span = ct.span("http.receive")
}
- span.AddEvent("GOT 1xx", trace.WithAttributes(
- HTTPStatus.Int(code),
- HTTPHeaderMIME.String(sm2s(header)),
- ))
+ // It's possible that Got1xxResponse is called before GotFirstResponseByte, at which point span can be `nil`.
+ if span != nil {
+ span.AddEvent("GOT 1xx", trace.WithAttributes(
+ HTTPStatus.Int(code),
+ HTTPHeaderMIME.String(sm2s(header)),
+ ))
+ }
return nil
}
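
The three guards added above all address the same race: `net/http` can invoke the 100-continue and 1xx hooks before `GotFirstResponseByte` has created the receive span, so the span must be nil-checked. A minimal sketch of the pattern against the stdlib `httptrace` hooks; the target URL is a placeholder, and a `*string` stands in for the span:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	// A span-like value that may still be nil when a 1xx hook fires,
	// mirroring the guards in the otelhttptrace patch above.
	var current *string

	trace := &httptrace.ClientTrace{
		GotFirstResponseByte: func() {
			s := "http.receive"
			current = &s
		},
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			// 1xx responses can arrive before GotFirstResponseByte,
			// so the "span" must be nil-checked before use.
			if current != nil {
				fmt.Printf("GOT %d on %s\n", code, *current)
			}
			return nil
		},
	}

	req, _ := http.NewRequestWithContext(
		httptrace.WithClientTrace(context.Background(), trace),
		http.MethodGet, "https://example.com", nil) // placeholder URL
	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}
```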
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go
index 0186b0afb8e..549494192a2 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go
@@ -9,12 +9,13 @@ import (
"context"
"net/http"
- "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/baggage"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv"
)
// Option allows configuration of the httptrace Extract()
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/env.go
index 631466ae6af..040f88f4b8b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/env.go
@@ -10,13 +10,13 @@ import (
"context"
"fmt"
"net/http"
- "os"
"strings"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)
// OTelSemConvStabilityOptIn is an environment variable.
@@ -32,17 +32,9 @@ type ResponseTelemetry struct {
}
type HTTPServer struct {
- duplicate bool
-
- // Old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
-
- // New metrics
- requestBodySizeHistogram metric.Int64Histogram
- responseBodySizeHistogram metric.Int64Histogram
- requestDurationHistogram metric.Float64Histogram
+ requestBodySizeHistogram httpconv.ServerRequestBodySize
+ responseBodySizeHistogram httpconv.ServerResponseBodySize
+ requestDurationHistogram httpconv.ServerRequestDuration
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -62,20 +54,10 @@ type HTTPServer struct {
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
- if s.duplicate {
- return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
}
func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
- if s.duplicate {
- return []attribute.KeyValue{
- OldHTTPServer{}.NetworkTransportAttr(network),
- CurrentHTTPServer{}.NetworkTransportAttr(network),
- }
- }
return []attribute.KeyValue{
CurrentHTTPServer{}.NetworkTransportAttr(network),
}
@@ -85,11 +67,7 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
//
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp)
- if s.duplicate {
- return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.ResponseTraceAttrs(resp)
}
// Route returns the attribute for the route.
@@ -133,42 +111,28 @@ type MetricData struct {
var (
metricAddOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.AddOption{}
},
}
metricRecordOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.RecordOption{}
},
}
)
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
- if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
- attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
- *recordOpts = append(*recordOpts, o)
- s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
- s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
- s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o)
- *recordOpts = (*recordOpts)[:0]
- metricRecordOptionPool.Put(recordOpts)
- }
-
- if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
- attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
- *addOpts = append(*addOpts, o)
- s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
- s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
- *addOpts = (*addOpts)[:0]
- metricAddOptionPool.Put(addOpts)
- }
+ attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
+ *recordOpts = append(*recordOpts, o)
+ s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...)
+ s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...)
+ s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o)
+ *recordOpts = (*recordOpts)[:0]
+ metricRecordOptionPool.Put(recordOpts)
}
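
The rewritten `RecordMetrics` keeps the `sync.Pool` of `[]metric.RecordOption` so the per-request option slice is reused rather than reallocated; note the length reset (`(*recordOpts)[:0]`) before the slice goes back to the pool, which keeps the backing array but drops the stale contents. A minimal sketch of that pooling pattern, with a plain `[]int` standing in for the option slice:

```go
package main

import (
	"fmt"
	"sync"
)

// optionPool mirrors metricRecordOptionPool above: it hands out
// reusable slices whose backing arrays survive across requests.
var optionPool = sync.Pool{
	New: func() any { return new([]int) },
}

func record(opts ...int) {
	fmt.Println("recording with", len(opts), "options")
}

func handleRequest(o int) {
	s := optionPool.Get().(*[]int)
	*s = append(*s, o)
	record(*s...)
	*s = (*s)[:0] // reset length, keep capacity
	optionPool.Put(s)
}

func main() {
	for i := 0; i < 3; i++ {
		handleRequest(i)
	}
}
```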
// hasOptIn returns true if the comma-separated version string contains the
@@ -183,61 +147,55 @@ func hasOptIn(version, optIn string) bool {
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- server := HTTPServer{
- duplicate: duplicate,
- }
- server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
- if duplicate {
- server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
- }
+ server := HTTPServer{}
+
+ var err error
+ server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter)
+ handleErr(err)
+
+ server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter)
+ handleErr(err)
+
+ server.requestDurationHistogram, err = httpconv.NewServerRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
+ 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
+ ),
+ )
+ handleErr(err)
return server
}
type HTTPClient struct {
- duplicate bool
-
- // old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- latencyMeasure metric.Float64Histogram
-
- // new metrics
- requestBodySize metric.Int64Histogram
- requestDuration metric.Float64Histogram
+ requestBodySize httpconv.ClientRequestBodySize
+ requestDuration httpconv.ClientRequestDuration
}
func NewHTTPClient(meter metric.Meter) HTTPClient {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- client := HTTPClient{
- duplicate: duplicate,
- }
- client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
- if duplicate {
- client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
- }
+ client := HTTPClient{}
+
+ var err error
+ client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter)
+ handleErr(err)
+
+ client.requestDuration, err = httpconv.NewClientRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+ )
+ handleErr(err)
return client
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.RequestTraceAttrs(req)
- if c.duplicate {
- return OldHTTPClient{}.RequestTraceAttrs(req, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp)
- if c.duplicate {
- return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
@@ -277,47 +235,14 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
addOptions: set,
}
- if c.duplicate {
- attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
- set := metric.WithAttributeSet(attribute.NewSet(attributes...))
- opts["old"] = MetricOpts{
- measurement: set,
- addOptions: set,
- }
- }
-
return opts
}
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
- if s.requestBodySize == nil || s.requestDuration == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
- s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
-
- if s.duplicate {
- s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
- s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
- }
-}
-
-func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
- if s.responseBytesCounter == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
+ s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
+ s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
}
func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.TraceAttributes(host)
- if s.duplicate {
- return OldHTTPClient{}.TraceAttributes(host, attrs)
- }
-
- return attrs
+ return CurrentHTTPClient{}.TraceAttributes(host)
}
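
The generated `httpconv` wrappers replace the hand-built instruments, but the explicit duration buckets survive as a plain option (and `md.ElapsedTime/1000.0` converts the millisecond measurement into the seconds unit the new convention uses). A hedged sketch of creating an equivalent histogram directly from the `metric` API; the meter name is hypothetical:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// Hypothetical instrumentation scope name.
	meter := otel.Meter("example/httpmetrics")

	// Same explicit boundaries the patch passes to
	// httpconv.NewServerRequestDuration above, in seconds.
	dur, err := meter.Float64Histogram(
		"http.server.request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(
			0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
			0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
		),
	)
	if err != nil {
		panic(err)
	}
	dur.Record(context.Background(), 0.042) // elapsed time in seconds
}
```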
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/gen.go
index fd9b8f6ac8f..c1c580a830f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/gen.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/gen.go
@@ -5,10 +5,11 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/
// Generate semconv package:
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=bench_test.go
+//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=common_test.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=env.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=env_test.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=httpconv.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=httpconvtest_test.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=util.go
//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=util_test.go
-//go:generate gotmpl --body=../../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace\" }" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/httpconv.go
index e016f0176fc..a82bdbd0a46 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/httpconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/httpconv.go
@@ -17,9 +17,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type RequestTraceAttrsOpts struct {
@@ -196,7 +194,7 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter
if https {
return semconvNew.URLScheme("https")
}
@@ -247,36 +245,6 @@ func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
-func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
- )
- handleErr(err)
-
- responseBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerResponseBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
- )
- handleErr(err)
- requestDurationHistogram, err := meter.Float64Histogram(
- semconvNew.HTTPServerRequestDurationName,
- metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
-}
-
func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 3
var host string
@@ -472,30 +440,6 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySize, err := meter.Int64Histogram(
- semconvNew.HTTPClientRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
- )
- handleErr(err)
-
- requestDuration, err := meter.Float64Histogram(
- semconvNew.HTTPClientRequestDurationName,
- metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySize, requestDuration
-}
-
func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 2
var h string
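
With `createMeasures` gone, what remains of `CurrentHTTPServer`/`CurrentHTTPClient` is attribute assembly against the bumped semconv package. A small sketch using the v1.37.0 typed constructors visible in this diff (`HTTPRequestMethodGet`, `URLScheme`, `HTTPRoute`); the route value is illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// requestAttrs sketches the kind of attribute set CurrentHTTPServer
// assembles: typed constructors instead of hand-written keys.
func requestAttrs(req *http.Request, route string) []attribute.KeyValue {
	scheme := semconv.URLScheme("http")
	if req.TLS != nil {
		scheme = semconv.URLScheme("https")
	}
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodGet, // assumes a GET; see method() above
		scheme,
		semconv.HTTPRoute(route),
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://localhost/users/42", nil)
	for _, kv := range requestAttrs(req, "/users/{id}") {
		fmt.Println(kv.Key, "=", kv.Value.Emit())
	}
}
```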
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/util.go
index 18dc1c77f45..f058139d1a6 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/util.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/util.go
@@ -14,7 +14,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
// SplitHostPort splits a network address hostport of the form "host",
@@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p) // nolint: gosec // Byte size checked 16 above.
+ return host, int(p) //nolint:gosec // Byte size checked 16 above.
}
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
+func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter
if https {
if port > 0 && port != 443 {
return port
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/v1.20.0.go
deleted file mode 100644
index d5fc58708a2..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv/v1.20.0.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/v120.0.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv"
-
-import (
- "errors"
- "io"
- "net/http"
- "slices"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type OldHTTPServer struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs)
-}
-
-func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
- return semconvutil.NetTransport(network)
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
-//
-// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue {
- if resp.ReadBytes > 0 {
- attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
- }
- if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
- }
- if resp.WriteBytes > 0 {
- attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
- }
- if resp.StatusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
- }
- if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
- }
-
- return attributes
-}
-
-// Route returns the attribute for the route.
-func (o OldHTTPServer) Route(route string) attribute.KeyValue {
- return semconv.HTTPRoute(route)
-}
-
-// HTTPStatusCode returns the attribute for the HTTP status code.
-// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
-func HTTPStatusCode(status int) attribute.KeyValue {
- return semconv.HTTPStatusCode(status)
-}
-
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
- serverResponseSize = "http.server.response.size" // Incoming response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- var err error
- requestBytesCounter, err := meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- serverLatencyMeasure, err := meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
-}
-
-func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- n := len(additionalAttributes) + 3
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- o.scheme(req.TLS != nil),
- semconv.NetHostName(host))
-
- if hostPort > 0 {
- attributes = append(attributes, semconv.NetHostPort(hostPort))
- }
- if protoName != "" {
- attributes = append(attributes, semconv.NetProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return semconv.HTTPSchemeHTTPS
- }
- return semconv.HTTPSchemeHTTP
-}
-
-type OldHTTPClient struct{}
-
-func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientRequest(req, attrs)
-}
-
-func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientResponse(resp, attrs)
-}
-
-func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.status_code int
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{h, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if port > 0 {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- semconv.NetPeerName(requestHost),
- )
-
- if port > 0 {
- attributes = append(attributes, semconv.NetPeerPort(port))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-// Client HTTP metrics.
-const (
- clientRequestSize = "http.client.request.size" // Incoming request bytes total
- clientResponseSize = "http.client.response.size" // Incoming response bytes total
- clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds
-)
-
-func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- requestBytesCounter, err := meter.Int64Counter(
- clientRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- clientResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- latencyMeasure, err := meter.Float64Histogram(
- clientDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of outbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, latencyMeasure
-}
-
-// TraceAttributes returns attributes for httptrace.
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue {
- return append(attrs, semconv.NetHostName(host))
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/gen.go
deleted file mode 100644
index 4e9c361102e..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil"
-
-// Generate semconvutil package:
-//go:generate gotmpl --body=../../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
-//go:generate gotmpl --body=../../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/httpconv.go
deleted file mode 100644
index 76004b734bf..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/httpconv.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconvutil provides OpenTelemetry semantic convention utilities.
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil"
-
-import (
- "fmt"
- "net/http"
- "slices"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type HTTPServerRequestOptions struct {
- // If set, this is used as value for the "http.client_ip" attribute.
- HTTPClientIP string
-}
-
-// HTTPClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status.code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// HTTPClientResponse(resp, ClientRequest(resp.Request)))
-func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientResponse(resp, attrs)
-}
-
-// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length".
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientRequest(req, attrs)
-}
-
-// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the
-// related values are defined in req: "net.peer.port".
-func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequestMetrics(req)
-}
-
-// HTTPClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func HTTPClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// HTTPServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if
-// they related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip".
-func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ServerRequest(server, req, opts, attrs)
-}
-
-// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequestMetrics(server, req)
-}
-
-// HTTPServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func HTTPServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// httpConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type httpConv struct {
- NetConv *netConv
-
- HTTPClientIPKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-var hc = &httpConv{
- NetConv: nc,
-
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status.code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// ClientResponse(resp, ClientRequest(resp.Request))
-func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.status_code int
- http.response_content_length int
- */
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
- if n == 0 {
- return attrs
- }
-
- attrs = slices.Grow(attrs, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length", "user_agent.original".
-func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- user_agent.original string
- http.url string
- net.peer.name string
- net.peer.port int
- http.request_content_length int
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response. See ClientResponse.
- http.response_content_length This requires the response. See ClientResponse.
- net.sock.family This requires the socket used.
- net.sock.peer.addr This requires the socket used.
- net.sock.peer.name This requires the socket used.
- net.sock.peer.port This requires the socket used.
- http.resend_count This is something outside of a single request.
- net.protocol.name The value is the Request is ignored, and the go client will always use "http".
- net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0.
- */
- n := 3 // URL, peer name, proto, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
- attrs = append(attrs, c.method(req.Method))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- return attrs
-}
-
-// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the related values
-// are defined in req: "net.peer.port".
-func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
-
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if they
-// related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip",
-// "net.protocol.name", "net.protocol.version".
-func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.scheme string
- net.host.name string
- net.host.port int
- net.sock.peer.addr string
- net.sock.peer.port int
- user_agent.original string
- http.client_ip string
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- http.target string Note: doesn't include the query parameter.
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response.
- http.request_content_length This requires the len() of body, which can mutate it.
- http.response_content_length This requires the response.
- http.route This is not available.
- net.sock.peer.name This would require a DNS lookup.
- net.sock.host.addr The request doesn't have access to the underlying socket.
- net.sock.host.port The request doesn't have access to the underlying socket.
-
- */
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
-
- // For client IP, use, in order:
- // 1. The value passed in the options
- // 2. The value in the X-Forwarded-For header
- // 3. The peer address
- clientIP := opts.HTTPClientIP
- if clientIP == "" {
- clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP == "" {
- clientIP = peer
- }
- }
- if clientIP != "" {
- n++
- }
-
- var target string
- if req.URL != nil {
- target = req.URL.Path
- if target != "" {
- n++
- }
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- if target != "" {
- attrs = append(attrs, c.HTTPTargetKey.String(target))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-// ServerRequestMetrics returns metric attributes for an HTTP request received
-// by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.scheme string
- http.route string
- http.method string
- http.status_code int
- net.host.name string
- net.host.port int
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- */
-
- n := 3 // Method, scheme, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.methodMetric(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
- if protoName != "" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-func (c *httpConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) methodMetric(method string) attribute.KeyValue {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
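
Reviewer note: the only behavioral nuance in the status helpers deleted above is the client/server asymmetry in span status mapping (4xx is an error for a client span but not for a server span). A minimal standalone sketch of that decision table; spanStatus is a hypothetical name used for illustration, not part of any otel API:

package main

import "fmt"

// spanStatus mirrors the removed ClientStatus/ServerStatus mapping: codes
// outside 100-599 are invalid, clients error from 400 up, servers from 500 up.
func spanStatus(code int, server bool) (isError bool, msg string) {
	if code < 100 || code >= 600 {
		return true, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	threshold := 400
	if server {
		threshold = 500
	}
	return code >= threshold, ""
}

func main() {
	fmt.Println(spanStatus(404, false)) // true: a client treats 404 as an error
	fmt.Println(spanStatus(404, true))  // false: a server does not
}
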
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/netconv.go
deleted file mode 100644
index 1cb0e4a245e..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil/netconv.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/netconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-// NetTransport returns a trace attribute describing the transport protocol of the
-// passed network. See the net.Dial for information about acceptable network
-// values.
-func NetTransport(network string) attribute.KeyValue {
- return nc.Transport(network)
-}
-
-// netConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type netConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetProtocolName attribute.Key
- NetProtocolVersion attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-var nc = &netConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetProtocolName: semconv.NetProtocolNameKey,
- NetProtocolVersion: semconv.NetProtocolVersionKey,
- NetSockFamilyKey: semconv.NetSockFamilyKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetSockHostAddrKey: semconv.NetSockHostAddrKey,
- NetSockHostPortKey: semconv.NetSockHostPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
-}
-
-func (c *netConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *netConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-func (c *netConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *netConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-// Peer returns attributes for a network peer address.
-func (c *netConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *netConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *netConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
-}
-
-func netProtocol(proto string) (name string, version string) {
- name, version, _ = strings.Cut(proto, "/")
- switch name {
- case "HTTP":
- name = "http"
- case "QUIC":
- name = "quic"
- case "SPDY":
- name = "spdy"
- default:
- name = strings.ToLower(name)
- }
- return name, version
-}
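
Reviewer note: splitHostPort (deleted above) encodes its "not provided or unparsable" results as an empty host and a -1 port. A few illustrative calls against the implementation above, shown as comments since the function itself is being removed:

// host, port := splitHostPort("[::1]:8080")  // "::1", 8080
// host, port  = splitHostPort("example.com") // "example.com", -1 (no port)
// host, port  = splitHostPort(":9090")       // "", 9090 (no host)
// host, port  = splitHostPort("[::1")        // "", -1 (unbalanced bracket)
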
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go
index 74bc899de2c..f3be3d052ef 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go
@@ -5,6 +5,6 @@ package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net
// Version is the current release version of the httptrace instrumentation.
func Version() string {
- return "0.61.0"
+ return "0.63.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
index 261eeb9e9f8..f1aee0f1100 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
index b25641c55d3..521daa25dbf 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
// Get is a convenient replacement for http.Get that adds a span around the request.
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody)
if err != nil {
return nil, err
}
@@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error)
// Head is a convenient replacement for http.Head that adds a span around the request.
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody)
if err != nil {
return nil, err
}
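
Reviewer note: the nil -> http.NoBody swap above is behavior-preserving; both forms build a GET/HEAD request with no payload. http.NoBody just makes the intent explicit and leaves req.Body non-nil, which reads more uniformly for middleware. A quick self-contained check:

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	a, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://example.com", nil)
	b, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://example.com", http.NoBody)
	fmt.Println(a.Body == nil, a.ContentLength) // true 0
	fmt.Println(b.Body == nil, b.ContentLength) // false 0; http.NoBody returns EOF immediately
}
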
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index 6bd50d4c9b4..38fb79c0328 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -8,9 +8,8 @@ import (
"net/http"
"net/http/httptrace"
- "go.opentelemetry.io/otel/attribute"
-
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index 937f9b4e737..fef83b42fe1 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -8,13 +8,13 @@ import (
"time"
"github.com/felixge/httpsnoop"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
)
// middleware is an http middleware which wraps the next handler in a span.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 7cb9693d984..821b80ec41c 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -10,13 +10,13 @@ import (
"context"
"fmt"
"net/http"
- "os"
"strings"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)
// OTelSemConvStabilityOptIn is an environment variable.
@@ -32,17 +32,9 @@ type ResponseTelemetry struct {
}
type HTTPServer struct {
- duplicate bool
-
- // Old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
-
- // New metrics
- requestBodySizeHistogram metric.Int64Histogram
- responseBodySizeHistogram metric.Int64Histogram
- requestDurationHistogram metric.Float64Histogram
+ requestBodySizeHistogram httpconv.ServerRequestBodySize
+ responseBodySizeHistogram httpconv.ServerResponseBodySize
+ requestDurationHistogram httpconv.ServerRequestDuration
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -62,20 +54,10 @@ type HTTPServer struct {
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
- if s.duplicate {
- return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
}
func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
- if s.duplicate {
- return []attribute.KeyValue{
- OldHTTPServer{}.NetworkTransportAttr(network),
- CurrentHTTPServer{}.NetworkTransportAttr(network),
- }
- }
return []attribute.KeyValue{
CurrentHTTPServer{}.NetworkTransportAttr(network),
}
@@ -85,11 +67,7 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
//
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp)
- if s.duplicate {
- return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPServer{}.ResponseTraceAttrs(resp)
}
// Route returns the attribute for the route.
@@ -133,42 +111,28 @@ type MetricData struct {
var (
metricAddOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.AddOption{}
},
}
metricRecordOptionPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &[]metric.RecordOption{}
},
}
)
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
- if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
- attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
- *recordOpts = append(*recordOpts, o)
- s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
- s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
- s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o)
- *recordOpts = (*recordOpts)[:0]
- metricRecordOptionPool.Put(recordOpts)
- }
-
- if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
- attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
- *addOpts = append(*addOpts, o)
- s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
- s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
- *addOpts = (*addOpts)[:0]
- metricAddOptionPool.Put(addOpts)
- }
+ attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
+ *recordOpts = append(*recordOpts, o)
+ s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...)
+ s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...)
+ s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o)
+ *recordOpts = (*recordOpts)[:0]
+ metricRecordOptionPool.Put(recordOpts)
}
// hasOptIn returns true if the comma-separated version string contains the
@@ -183,61 +147,55 @@ func hasOptIn(version, optIn string) bool {
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- server := HTTPServer{
- duplicate: duplicate,
- }
- server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
- if duplicate {
- server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
- }
+ server := HTTPServer{}
+
+ var err error
+ server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter)
+ handleErr(err)
+
+ server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter)
+ handleErr(err)
+
+ server.requestDurationHistogram, err = httpconv.NewServerRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
+ 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10,
+ ),
+ )
+ handleErr(err)
return server
}
type HTTPClient struct {
- duplicate bool
-
- // old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- latencyMeasure metric.Float64Histogram
-
- // new metrics
- requestBodySize metric.Int64Histogram
- requestDuration metric.Float64Histogram
+ requestBodySize httpconv.ClientRequestBodySize
+ requestDuration httpconv.ClientRequestDuration
}
func NewHTTPClient(meter metric.Meter) HTTPClient {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- client := HTTPClient{
- duplicate: duplicate,
- }
- client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
- if duplicate {
- client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
- }
+ client := HTTPClient{}
+
+ var err error
+ client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter)
+ handleErr(err)
+
+ client.requestDuration, err = httpconv.NewClientRequestDuration(
+ meter,
+ metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
+ )
+ handleErr(err)
return client
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.RequestTraceAttrs(req)
- if c.duplicate {
- return OldHTTPClient{}.RequestTraceAttrs(req, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp)
- if c.duplicate {
- return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
+ return CurrentHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
@@ -277,47 +235,14 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
addOptions: set,
}
- if c.duplicate {
- attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
- set := metric.WithAttributeSet(attribute.NewSet(attributes...))
- opts["old"] = MetricOpts{
- measurement: set,
- addOptions: set,
- }
- }
-
return opts
}
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
- if s.requestBodySize == nil || s.requestDuration == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
- s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
-
- if s.duplicate {
- s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
- s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
- }
-}
-
-func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
- if s.responseBytesCounter == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
+ s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
+ s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
}
func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.TraceAttributes(host)
- if s.duplicate {
- return OldHTTPClient{}.TraceAttributes(host, attrs)
- }
-
- return attrs
+ return CurrentHTTPClient{}.TraceAttributes(host)
}
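
Reviewer note: everything in this env.go hunk is internal to otelhttp; callers keep the same public surface. What changes is that metrics are now emitted only through the generated semconv v1.37.0 httpconv instruments, and the http/dup duplication path in this package is gone. A minimal consumer, unchanged by this upgrade:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// otelhttp.NewHandler wires up the HTTPServer metrics reworked above;
	// no caller-side changes are needed for this dependency bump.
	h := otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	}), "hello")
	_ = http.ListenAndServe(":8080", h)
}
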
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
index f2cf8a152d3..1bb207b8092 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
@@ -5,10 +5,11 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/
// Generate semconv package:
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
index 53976b0d5a6..28c51a3b389 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -17,9 +17,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type RequestTraceAttrsOpts struct {
@@ -196,7 +194,7 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter
if https {
return semconvNew.URLScheme("https")
}
@@ -247,36 +245,6 @@ func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
-func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
- )
- handleErr(err)
-
- responseBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerResponseBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
- )
- handleErr(err)
- requestDurationHistogram, err := meter.Float64Histogram(
- semconvNew.HTTPServerRequestDurationName,
- metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
-}
-
func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 3
var host string
@@ -472,30 +440,6 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.
return semconvNew.HTTPRequestMethodGet, orig
}
-func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySize, err := meter.Int64Histogram(
- semconvNew.HTTPClientRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
- )
- handleErr(err)
-
- requestDuration, err := meter.Float64Histogram(
- semconvNew.HTTPClientRequestDurationName,
- metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySize, requestDuration
-}
-
func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 2
var h string
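
Reviewer note: with the createMeasures helpers gone, instruments come from the generated httpconv constructors shown in the env.go hunk above. A sketch of the pattern, assuming the imports "context", "go.opentelemetry.io/otel", "go.opentelemetry.io/otel/attribute", "go.opentelemetry.io/otel/metric", and "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" are in scope:

func recordDuration(ctx context.Context, meter metric.Meter, elapsedSeconds float64, attrs []attribute.KeyValue) {
	dur, err := httpconv.NewServerRequestDuration(
		meter,
		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
	)
	if err != nil {
		otel.Handle(err) // stand-in for this package's handleErr
	}
	// The typed wrapper exposes the underlying histogram via Inst().
	dur.Inst().Record(ctx, elapsedSeconds, metric.WithAttributeSet(attribute.NewSet(attrs...)))
}
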
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
index bc1f7751dbc..96422ad1ed2 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -14,7 +14,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0"
)
// SplitHostPort splits a network address hostport of the form "host",
@@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p) // nolint: gosec // Byte size checked 16 above.
+ return host, int(p) //nolint:gosec // Byte size checked 16 above.
}
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
+func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter
if https {
if port > 0 && port != 443 {
return port
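
Reviewer note: requiredHTTPPort (touched above only for nolint comment style) reports a port only when it is not the scheme default; -1 means "omit the attribute". Self-contained copy for illustration:

package main

import "fmt"

// requiredHTTPPort returns -1 unless the port is set and non-default
// for the scheme (443 for https, 80 for http).
func requiredHTTPPort(https bool, port int) int {
	if https {
		if port > 0 && port != 443 {
			return port
		}
	} else if port > 0 && port != 80 {
		return port
	}
	return -1
}

func main() {
	fmt.Println(requiredHTTPPort(true, 443))  // -1: default https port
	fmt.Println(requiredHTTPPort(true, 8443)) // 8443
	fmt.Println(requiredHTTPPort(false, 80))  // -1: default http port
}
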
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
deleted file mode 100644
index ba7fccf1efd..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/v120.0.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "errors"
- "io"
- "net/http"
- "slices"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type OldHTTPServer struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs)
-}
-
-func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
- return semconvutil.NetTransport(network)
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
-//
-// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue {
- if resp.ReadBytes > 0 {
- attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
- }
- if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
- }
- if resp.WriteBytes > 0 {
- attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
- }
- if resp.StatusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
- }
- if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
- }
-
- return attributes
-}
-
-// Route returns the attribute for the route.
-func (o OldHTTPServer) Route(route string) attribute.KeyValue {
- return semconv.HTTPRoute(route)
-}
-
-// HTTPStatusCode returns the attribute for the HTTP status code.
-// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
-func HTTPStatusCode(status int) attribute.KeyValue {
- return semconv.HTTPStatusCode(status)
-}
-
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
-	serverResponseSize = "http.server.response.size" // Outgoing response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- var err error
- requestBytesCounter, err := meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- serverLatencyMeasure, err := meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
-}
-
-func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- n := len(additionalAttributes) + 3
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- o.scheme(req.TLS != nil),
- semconv.NetHostName(host))
-
- if hostPort > 0 {
- attributes = append(attributes, semconv.NetHostPort(hostPort))
- }
- if protoName != "" {
- attributes = append(attributes, semconv.NetProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return semconv.HTTPSchemeHTTPS
- }
- return semconv.HTTPSchemeHTTP
-}
-
-type OldHTTPClient struct{}
-
-func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientRequest(req, attrs)
-}
-
-func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientResponse(resp, attrs)
-}
-
-func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.status_code int
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{h, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if port > 0 {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- semconv.NetPeerName(requestHost),
- )
-
- if port > 0 {
- attributes = append(attributes, semconv.NetPeerPort(port))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-// Client HTTP metrics.
-const (
-	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
-	clientResponseSize = "http.client.response.size" // Incoming response bytes total
-	clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
-)
-
-func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- requestBytesCounter, err := meter.Int64Counter(
- clientRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- clientResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- latencyMeasure, err := meter.Float64Histogram(
- clientDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of outbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, latencyMeasure
-}
-
-// TraceAttributes returns attributes for httptrace.
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue {
- return append(attrs, semconv.NetHostName(host))
-}
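
Reviewer note: deleting v1.20.0.go removes the old duplicated instruments for good. Dashboards and alerts still reading the pre-stability names need to migrate; the rough correspondence below uses the semconv v1.37.0 names, an assumption worth double-checking against the generated httpconv package:

// old (removed here)                           new
// http.server.request.size   counter, By  ->  http.server.request.body.size   histogram, By
// http.server.response.size  counter, By  ->  http.server.response.body.size  histogram, By
// http.server.duration       hist, ms     ->  http.server.request.duration    histogram, s
// http.client.request.size   counter, By  ->  http.client.request.body.size   histogram, By
// http.client.response.size  counter, By  ->  (nothing is recorded in its place in this diff)
// http.client.duration       hist, ms     ->  http.client.request.duration    histogram, s
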
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
deleted file mode 100644
index 7aa5f99e815..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-// Generate semconvutil package:
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
deleted file mode 100644
index b9973547931..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconvutil provides OpenTelemetry semantic convention utilities.
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "fmt"
- "net/http"
- "slices"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type HTTPServerRequestOptions struct {
- // If set, this is used as value for the "http.client_ip" attribute.
- HTTPClientIP string
-}
-
-// HTTPClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status_code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event;
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. A complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-//	HTTPClientResponse(resp, ClientRequest(resp.Request))
-func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientResponse(resp, attrs)
-}
-
-// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length".
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientRequest(req, attrs)
-}
-
-// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the
-// related values are defined in req: "net.peer.port".
-func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequestMetrics(req)
-}
-
-// HTTPClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func HTTPClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// HTTPServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if
-// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip".
-func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ServerRequest(server, req, opts, attrs)
-}
-
-// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequestMetrics(server, req)
-}
-
-// HTTPServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func HTTPServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// httpConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type httpConv struct {
- NetConv *netConv
-
- HTTPClientIPKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-var hc = &httpConv{
- NetConv: nc,
-
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status_code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event;
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. A complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// ClientResponse(resp, ClientRequest(resp.Request))
-func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.status_code int
- http.response_content_length int
- */
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
- if n == 0 {
- return attrs
- }
-
- attrs = slices.Grow(attrs, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length", "user_agent.original".
-func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- user_agent.original string
- http.url string
- net.peer.name string
- net.peer.port int
- http.request_content_length int
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response. See ClientResponse.
- http.response_content_length This requires the response. See ClientResponse.
- net.sock.family This requires the socket used.
- net.sock.peer.addr This requires the socket used.
- net.sock.peer.name This requires the socket used.
- net.sock.peer.port This requires the socket used.
- http.resend_count This is something outside of a single request.
-	   net.protocol.name The value in the Request is ignored, and the Go client will always use "http".
-	   net.protocol.version The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
- */
-	n := 3 // URL, peer name, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
- attrs = append(attrs, c.method(req.Method))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- return attrs
-}
-
-// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the related values
-// are defined in req: "net.peer.port".
-func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
-
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if they
-// related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip",
-// "net.protocol.name", "net.protocol.version".
-func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.scheme string
- net.host.name string
- net.host.port int
- net.sock.peer.addr string
- net.sock.peer.port int
- user_agent.original string
- http.client_ip string
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- http.target string Note: doesn't include the query parameter.
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response.
- http.request_content_length This requires the len() of body, which can mutate it.
- http.response_content_length This requires the response.
- http.route This is not available.
- net.sock.peer.name This would require a DNS lookup.
- net.sock.host.addr The request doesn't have access to the underlying socket.
- net.sock.host.port The request doesn't have access to the underlying socket.
-
- */
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
-
- // For client IP, use, in order:
- // 1. The value passed in the options
- // 2. The value in the X-Forwarded-For header
- // 3. The peer address
- clientIP := opts.HTTPClientIP
- if clientIP == "" {
- clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP == "" {
- clientIP = peer
- }
- }
- if clientIP != "" {
- n++
- }
-
- var target string
- if req.URL != nil {
- target = req.URL.Path
- if target != "" {
- n++
- }
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- if target != "" {
- attrs = append(attrs, c.HTTPTargetKey.String(target))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-// ServerRequestMetrics returns metric attributes for an HTTP request received
-// by a server.
-//
-// The server must be the primary server name if it is known. For example, this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and, if a port is used to route to the
-// server, that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if they related
-// values are defined in req: "net.host.port".
-func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.scheme string
- http.route string
- http.method string
- http.status_code int
- net.host.name string
- net.host.port int
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- */
-
- n := 3 // Method, scheme, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.methodMetric(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
- if protoName != "" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-func (c *httpConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) methodMetric(method string) attribute.KeyValue {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
deleted file mode 100644
index df97255e418..00000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/netconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-// NetTransport returns a trace attribute describing the transport protocol of the
-// passed network. See net.Dial for information about acceptable network
-// values.
-func NetTransport(network string) attribute.KeyValue {
- return nc.Transport(network)
-}
-
-// netConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type netConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetProtocolName attribute.Key
- NetProtocolVersion attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-var nc = &netConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetProtocolName: semconv.NetProtocolNameKey,
- NetProtocolVersion: semconv.NetProtocolVersionKey,
- NetSockFamilyKey: semconv.NetSockFamilyKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetSockHostAddrKey: semconv.NetSockHostAddrKey,
- NetSockHostPortKey: semconv.NetSockHostPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
-}
-
-func (c *netConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *netConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-func (c *netConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *netConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-// Peer returns attributes for a network peer address.
-func (c *netConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *netConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *netConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
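-//
-// For example (illustrative): splitHostPort("example.com:8080") returns
-// ("example.com", 8080), while splitHostPort("example.com") returns
-// ("example.com", -1).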
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
-}
-
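-// netProtocol splits a proto string such as "HTTP/1.1" into a lowercase name
-// and a version, e.g. ("http", "1.1"), or ("http", "2") for "HTTP/2".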
-func netProtocol(proto string) (name string, version string) {
- name, version, _ = strings.Cut(proto, "/")
- switch name {
- case "HTTP":
- name = "http"
- case "QUIC":
- name = "quic"
- case "SPDY":
- name = "spdy"
- default:
- name = strings.ToLower(name)
- }
- return name, version
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index 44b86ad8609..514ae6753b7 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -11,14 +11,14 @@ import (
"sync/atomic"
"time"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
-
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
)
// Transport implements the http.RoundTripper interface and wraps
@@ -129,6 +129,37 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r)
+
+ // Record metrics in a deferred function so they are reported on both success and error.
+ defer func() {
+ metricAttributes := semconv.MetricAttributes{
+ Req: r,
+ AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
+ }
+
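+ // res is nil when RoundTrip returns an error, so read the status code only on success.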
+ if err == nil {
+ metricAttributes.StatusCode = res.StatusCode
+ }
+
+ metricOpts := t.semconv.MetricOptions(metricAttributes)
+
+ metricData := semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ }
+
+ if err == nil {
+ readRecordFunc := func(int64) {}
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+ }
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ metricData.ElapsedTime = elapsedTime
+
+ t.semconv.RecordMetrics(ctx, metricData, metricOpts)
+ }()
+
if err != nil {
// set error type attribute if the error is part of the predefined
// error types.
@@ -141,35 +172,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
span.SetStatus(codes.Error, err.Error())
span.End()
- return res, err
- }
-
- // metrics
- metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
- Req: r,
- StatusCode: res.StatusCode,
- AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
- })
- // For handling response bytes we leverage a callback when the client reads the http response
- readRecordFunc := func(n int64) {
- t.semconv.RecordResponseSize(ctx, n, metricOpts)
+ return res, err
}
// traces
span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
span.SetStatus(t.semconv.Status(res.StatusCode))
- res.Body = newWrappedBody(span, readRecordFunc, res.Body)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- t.semconv.RecordMetrics(ctx, semconv.MetricData{
- RequestSize: bw.BytesRead(),
- ElapsedTime: elapsedTime,
- }, metricOpts)
-
return res, nil
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index 6be4c1fde2c..dfb53cf1f3a 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.61.0"
+ return "0.63.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
index 261eeb9e9f8..f1aee0f1100 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
index 3f0a518ae0f..30446bd28b6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter {
}
// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
-func (e *Exporter) MarshalLog() interface{} {
+func (e *Exporter) MarshalLog() any {
return struct {
Type string
Client Client
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
index ca4544f0dae..d9bfd6e1765 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -6,9 +6,10 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
- commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
index 2e7690e43a2..43359c89449 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -4,8 +4,9 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
- "go.opentelemetry.io/otel/sdk/instrumentation"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+
+ "go.opentelemetry.io/otel/sdk/instrumentation"
)
func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
index db7b698a566..526bb5e070b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -4,8 +4,9 @@
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
- "go.opentelemetry.io/otel/sdk/resource"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+
+ "go.opentelemetry.io/otel/sdk/resource"
)
// Resource transforms a Resource into an OTLP Resource.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
index bf27ef0220e..379bc8170df 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr
import (
"math"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/instrumentation"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
@@ -154,7 +155,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
for _, otLink := range links {
// This redefinition is necessary to prevent otLink.*ID[:] copies
// being reused -- in short we need a new otLink per iteration.
- otLink := otLink
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
@@ -189,7 +189,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
events := make([]*tracepb.Span_Event, len(es))
// Transform message events
- for i := 0; i < len(es); i++ {
+ for i := range es {
events[i] = &tracepb.Span_Event{
Name: es[i].Name,
TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
index 261eeb9e9f8..f1aee0f1100 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 8409b5f8f95..4b4cc76f4a9 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -9,6 +9,8 @@ import (
"sync"
"time"
+ coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -20,8 +22,6 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
- coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
- tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type client struct {
@@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
)
if c.exportTimeout > 0 {
- ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
} else {
ctx, cancel = context.WithCancel(parent)
}
@@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
-func (c *client) MarshalLog() interface{} {
+func (c *client) MarshalLog() any {
return struct {
Type string
Endpoint string
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 506ca00b61f..4f47117a58a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}
-// cleanPath returns a path with all spaces trimmed and all redundancies
-// removed. If urlPath is empty or cleaning it results in an empty string,
+// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
- tmp := path.Clean(strings.TrimSpace(urlPath))
- if tmp == "." {
+ tmp := strings.TrimSpace(urlPath)
+ if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 777e68a7bbd..259a898ae77 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
- return ctx.Err()
+ return context.Cause(ctx)
}
case <-timer.C:
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index 5f78bfdfb06..3b79c1a0b5c 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.36.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/log/DESIGN.md
deleted file mode 100644
index 47d39d34bf4..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/DESIGN.md
+++ /dev/null
@@ -1,634 +0,0 @@
-# Logs API
-
-## Abstract
-
-`go.opentelemetry.io/otel/log` provides
-[Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/).
-
-The prototype was created in
-[#4725](https://github.com/open-telemetry/opentelemetry-go/pull/4725).
-
-## Background
-
-The key challenge is to create a performant API compliant with the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/)
-with an intuitive and user-friendly design.
-Performance is seen as one of the most important characteristics of logging libraries in Go.
-
-## Design
-
-This proposed design aims to:
-
-- be specification compliant,
-- be similar to Trace and Metrics API,
-- take advantage of both OpenTelemetry and `slog` experience to achieve acceptable performance.
-
-### Module structure
-
-The API is published as a single `go.opentelemetry.io/otel/log` Go module.
-
-The package structure is similar to Trace API and Metrics API.
-The Go module consists of the following packages:
-
-- `go.opentelemetry.io/otel/log`
-- `go.opentelemetry.io/otel/log/embedded`
-- `go.opentelemetry.io/otel/log/logtest`
-- `go.opentelemetry.io/otel/log/noop`
-
-Rejected alternative:
-
-- [Reuse slog](#reuse-slog)
-
-### LoggerProvider
-
-The [`LoggerProvider` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#loggerprovider)
-is defined as `LoggerProvider` interface in [provider.go](provider.go).
-
-The specification may add new operations to `LoggerProvider`.
-The interface may have methods added without a package major version bump.
-The interface embeds `embedded.LoggerProvider` to inform API implementation
-authors about this non-standard API evolution.
-This approach is already used in Trace API and Metrics API.
-
-#### LoggerProvider.Logger
-
-The `Logger` method implements the [`Get a Logger` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#get-a-logger).
-
-The required `name` parameter is accepted as a `string` method argument.
-
-The `LoggerOption` options are defined to support optional parameters.
-
-Implementation requirements:
-
-- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements)
- the method to be safe to be called concurrently.
-
-- The method should use some default name if the passed name is empty
- in order to meet the [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logger-creation)
- to return a working logger when an invalid name is passed
- as well as to resemble the behavior of getting tracers and meters.
-
-`Logger` can be extended by adding new `LoggerOption` options
-and adding new exported fields to the `LoggerConfig` struct.
-This design is already used in Trace API for getting tracers
-and in Metrics API for getting meters.
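-
-As an illustrative sketch (the `WithInstrumentationVersion` option name is an
-assumption, mirroring the Trace and Metrics APIs), acquiring a logger could
-look like:
-
-```go
-logger := provider.Logger(
-	"github.com/example/bridge", // instrumentation scope name
-	log.WithInstrumentationVersion("0.1.0"),
-)
-```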
-
-Rejected alternative:
-
-- [Passing struct as parameter to LoggerProvider.Logger](#passing-struct-as-parameter-to-loggerproviderlogger).
-
-### Logger
-
-The [`Logger` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#logger)
-is defined as `Logger` interface in [logger.go](logger.go).
-
-The specification may add new operations to `Logger`.
-The interface may have methods added without a package major version bump.
-The interface embeds `embedded.Logger` to inform API implementation
-authors about this non-standard API evolution.
-This approach is already used in Trace API and Metrics API.
-
-### Logger.Emit
-
-The `Emit` method implements the [`Emit a LogRecord` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord).
-
-[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
-is accepted as a `context.Context` method argument.
-
-Calls to `Emit` are supposed to be on the hot path.
-Therefore, in order to reduce the number of heap allocations,
-the [`LogRecord` abstraction](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord),
-is defined as `Record` struct in [record.go](record.go).
-
-[`Timestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-timestamp)
-is accessed using the following methods:
-
-```go
-func (r *Record) Timestamp() time.Time
-func (r *Record) SetTimestamp(t time.Time)
-```
-
-[`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp)
-is accessed using the following methods:
-
-```go
-func (r *Record) ObservedTimestamp() time.Time
-func (r *Record) SetObservedTimestamp(t time.Time)
-```
-
-[`EventName`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-eventname)
-is accessed using the following methods:
-
-```go
-func (r *Record) EventName() string
-func (r *Record) SetEventName(s string)
-```
-
-[`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber)
-is accessed using the following methods:
-
-```go
-func (r *Record) Severity() Severity
-func (r *Record) SetSeverity(s Severity)
-```
-
-The `Severity` type is defined in [severity.go](severity.go).
-The constants are based on the
-[Displaying Severity recommendation](https://opentelemetry.io/docs/specs/otel/logs/data-model/#displaying-severity).
-Additionally, `Severity[Level]` constants are defined to make the API more readable and user-friendly.
-
-[`SeverityText`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitytext)
-is accessed using the following methods:
-
-```go
-func (r *Record) SeverityText() string
-func (r *Record) SetSeverityText(s string)
-```
-
-[`Body`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
-is accessed using the following methods:
-
-```go
-func (r *Record) Body() Value
-func (r *Record) SetBody(v Value)
-```
-
-[Log record attributes](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes)
-are accessed using the following methods:
-
-```go
-func (r *Record) WalkAttributes(f func(KeyValue) bool)
-func (r *Record) AddAttributes(attrs ...KeyValue)
-```
-
-`Record` has an `AttributesLen` method that returns
-the number of attributes to allow slice preallocation
-when converting records to a different representation:
-
-```go
-func (r *Record) AttributesLen() int
-```
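-
-As a sketch of the resulting usage (record setters as defined above, factories
-from [keyvalue.go](keyvalue.go)), a bridge could emit a record like this:
-
-```go
-var record log.Record
-record.SetTimestamp(time.Now())
-record.SetSeverity(log.SeverityInfo)
-record.SetBody(log.StringValue("request processed"))
-record.AddAttributes(log.String("path", "/v1/logs"))
-logger.Emit(ctx, record)
-```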
-
-The record attributes design and implementation are based on
-[`slog.Record`](https://pkg.go.dev/log/slog#Record).
-It allows high-performance access and manipulation of the attributes
-while keeping the API user-friendly.
-It relieves users from implementing their own optimizations
-to reduce the number of allocations when passing attributes.
-
-The abstractions described in
-[the specification](https://opentelemetry.io/docs/specs/otel/logs/#new-first-party-application-logs)
-are defined in [keyvalue.go](keyvalue.go).
-
-`Value` represents `any`.
-`KeyValue` represents a key (`string`) and value (`any`) pair.
-
-`Kind` is an enumeration used for specifying the underlying value type.
-`KindEmpty` is used for an empty (zero) value.
-`KindBool` is used for a boolean value.
-`KindFloat64` is used for a double precision floating point (IEEE 754-1985) value.
-`KindInt64` is used for a signed integer value.
-`KindString` is used for a string value.
-`KindBytes` is used for a slice of bytes (in spec: A byte array).
-`KindSlice` is used for a slice of values (in spec: an array (a list) of any values).
-`KindMap` is used for a slice of key-value pairs (in spec: `map`).
-
-These types are defined in `go.opentelemetry.io/otel/log` package
-as they are tightly coupled with the API and different from common attributes.
-
-The internal implementation of `Value` is based on
-[`slog.Value`](https://pkg.go.dev/log/slog#Value)
-and the API is mostly inspired by
-[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
-The benchmarks[^1] show that the implementation is more performant than
-[`attribute.Value`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute#Value).
-
-The value accessors (`func (v Value) As[Kind]` methods) must not panic,
-as it would violate the [specification](https://opentelemetry.io/docs/specs/otel/error-handling/):
-
-> API methods MUST NOT throw unhandled exceptions when used incorrectly by end
-> users. The API and SDK SHOULD provide safe defaults for missing or invalid
-> arguments. [...] Whenever the library suppresses an error that would otherwise
-> have been exposed to the user, the library SHOULD log the error using
-> language-specific conventions.
-
-Therefore, the value accessors should return a zero value
-and log an error when a bad accessor is called.
-
-The `Severity`, `Kind`, `Value`, `KeyValue` may implement
-the [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer) interface.
-However, it is not needed for the first stable release
-and the `String` methods can be added later.
-
-The caller must not subsequently mutate the record passed to `Emit`.
-This would allow the implementation to not clone the record,
-but simply retain, modify or discard it.
-The implementation may still choose to clone the record or copy its attributes
-if it needs to retain or modify it,
-e.g. in case of asynchronous processing to eliminate the possibility of data races,
-because the user can technically reuse the record and add new attributes
-after the call (even when the documentation says that the caller must not do it).
-
-Implementation requirements:
-
-- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#concurrency-requirements)
- the method to be safe to be called concurrently.
-
-- The method must not interrupt the record processing if the context is canceled
- per ["ignoring context cancellation" guideline](../CONTRIBUTING.md#ignoring-context-cancellation).
-
-- The [specification requires](https://opentelemetry.io/docs/specs/otel/logs/api/#emit-a-logrecord)
- using the current time as the observed timestamp if the passed one is empty.
-
-- The method should handle the trace context passed via `ctx` argument in order to meet the
- [specification's SDK requirement](https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord)
- to populate the trace context fields from the resolved context.
-
-`Emit` can be extended by adding new exported fields to the `Record` struct.
-
-Rejected alternatives:
-
-- [Record as interface](#record-as-interface)
-- [Options as parameter to Logger.Emit](#options-as-parameter-to-loggeremit)
-- [Passing record as pointer to Logger.Emit](#passing-record-as-pointer-to-loggeremit)
-- [Logger.WithAttributes](#loggerwithattributes)
-- [Record attributes as slice](#record-attributes-as-slice)
-- [Use any instead of defining Value](#use-any-instead-of-defining-value)
-- [Severity type encapsulating number and text](#severity-type-encapsulating-number-and-text)
-- [Reuse attribute package](#reuse-attribute-package)
-- [Mix receiver types for Record](#mix-receiver-types-for-record)
-- [Add XYZ method to Logger](#add-xyz-method-to-logger)
-- [Rename KeyValue to Attr](#rename-keyvalue-to-attr)
-
-### Logger.Enabled
-
-The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/api/#enabled).
-
-[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/)
-is accepted as a `context.Context` method argument.
-
-Calls to `Enabled` are supposed to be on the hot path and the list of arguments
-can be extended in the future. Therefore, in order to reduce the number of heap
-allocations and make it possible to handle new arguments, `Enabled` accepts
-an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second
-method argument.
-
-The `EnabledParameters` struct uses fields instead of getters and setters, which
-allows configuring the parameters on the same line where `Enabled` is called.
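-
-For illustration (assuming a `Severity` field on `EnabledParameters`), a bridge
-can guard expensive record construction:
-
-```go
-params := log.EnabledParameters{Severity: log.SeverityDebug}
-if logger.Enabled(ctx, params) {
-	// Construct and emit the record only when it would be processed.
-}
-```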
-
-### noop package
-
-The `go.opentelemetry.io/otel/log/noop` package provides
-[Logs API No-Op Implementation](https://opentelemetry.io/docs/specs/otel/logs/noop/).
-
-### Trace context correlation
-
-The bridge implementation should do its best to pass
-the `ctx` containing the trace context from the caller
-so it can later be passed via `Logger.Emit`.
-
-It is not expected that users (caller or bridge implementation) reconstruct
-a `context.Context`. Reconstructing a `context.Context` with
-[`trace.ContextWithSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#ContextWithSpanContext)
-and [`trace.NewSpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#NewSpanContext)
-would usually involve more memory allocations.
-
-Logging libraries whose recording methods accept a `context.Context`,
-such as [`slog`](https://pkg.go.dev/log/slog),
-[`logrus`](https://pkg.go.dev/github.com/sirupsen/logrus),
-and [`zerolog`](https://pkg.go.dev/github.com/rs/zerolog),
-make passing the trace context trivial.
-
-However, some libraries do not accept a `context.Context` in their recording methods.
-Structured logging libraries,
-such as [`logr`](https://pkg.go.dev/github.com/go-logr/logr)
-and [`zap`](https://pkg.go.dev/go.uber.org/zap),
-offer passing `any` type as a log attribute/field.
-Therefore, their bridge implementations can define a "special" log attribute/field
-that will be used to capture the trace context.
-
-[The prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725)
-has bridge implementations that handle trace context correlation efficiently.
-
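-As an illustrative sketch (the field name and helper are assumptions, not part
-of the API), such a bridge could document a well-known field and recover the
-context from it:
-
-```go
-const contextField = "context"
-
-func contextFromFields(fields map[string]any) context.Context {
-	if ctx, ok := fields[contextField].(context.Context); ok {
-		return ctx
-	}
-	return context.Background()
-}
-```
-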
-## Benchmarking
-
-The benchmarks take inspiration from [`slog`](https://pkg.go.dev/log/slog),
-because for the Go team it was also critical to create API that would be fast
-and interoperable with existing logging packages.[^2][^3]
-
-The benchmark results can be found in [the prototype](https://github.com/open-telemetry/opentelemetry-go/pull/4725).
-
-## Rejected alternatives
-
-### Reuse slog
-
-The API must not be coupled to [`slog`](https://pkg.go.dev/log/slog),
-nor any other logging library.
-
-The API needs to evolve orthogonally to `slog`.
-
-`slog` is not compliant with the [Logs API](https://opentelemetry.io/docs/specs/otel/logs/api/),
-and we cannot expect the Go team to make `slog` compliant with it.
-
-The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge).
-
-You can read more about OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/).
-
-### Record as interface
-
-`Record` is defined as a `struct` because of the following reasons.
-
-Log record is a value object without any behavior.
-It is used as data input for Logger methods.
-
-The log record resembles the instrument config structs like [metric.Float64CounterConfig](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Float64CounterConfig).
-
-Using a `struct` instead of an `interface` improves performance: for example,
-indirect calls are less optimized,
-and the use of interfaces tends to increase heap allocations.[^3]
-
-### Options as parameter to Logger.Emit
-
-One of the initial ideas was to have:
-
-```go
-type Logger interface{
- embedded.Logger
- Emit(ctx context.Context, options ...RecordOption)
-}
-```
-
-The main reason was that this design would be similar
-to the [Meter API](https://pkg.go.dev/go.opentelemetry.io/otel/metric#Meter)
-for creating instruments.
-
-However, passing `Record` directly, instead of using options,
-is more performant as it reduces heap allocations.[^4]
-
-Another advantage of passing `Record` is that API would not have functions like `NewRecord(options...)`,
-which would be used by the SDK and not by the users.
-
-Finally, the definition would be similar to [`slog.Handler.Handle`](https://pkg.go.dev/log/slog#Handler)
-that was designed to provide optimization opportunities.[^2]
-
-### Passing record as pointer to Logger.Emit
-
-So far the benchmarks do not show differences that would
-favor passing the record via pointer (and vice versa).
-
-Passing via value feels safer because of the following reasons.
-
-The user would not be able to pass `nil`.
-Therefore, it reduces the possibility to have a nil pointer dereference.
-
-It should reduce the possibility of a heap allocation.
-
-It follows the design of [`slog.Handler`](https://pkg.go.dev/log/slog#Handler).
-
-It follows one of Google's Go Style Decisions
-to prefer [passing values](https://google.github.io/styleguide/go/decisions#pass-values).
-
-### Passing struct as parameter to LoggerProvider.Logger
-
-Similarly to `Logger.Emit`, we could have something like:
-
-```go
-type LoggerProvider interface{
- embedded.LoggerProvider
- Logger(name string, config LoggerConfig)
-}
-```
-
-The drawback of this idea would be that this would be
-a different design from Trace and Metrics API.
-
-The performance of acquiring a logger is not as critical
-as the performance of emitting a log record. While a single
-HTTP/RPC handler could write hundreds of logs, it should not
-create a new logger for each log entry.
-The bridge implementation should reuse loggers whenever possible.
-
-### Logger.WithAttributes
-
-We could add `WithAttributes` to the `Logger` interface.
-Then `Record` could be a simple struct with only exported fields.
-The idea was that the SDK would implement the performance improvements
-instead of doing it in the API.
-This would allow having different optimization strategies.
-
-During the analysis[^5], it became apparent that the main problem with this proposal
-is that the variadic slice passed to an interface method is always heap allocated.
-
-Moreover, the logger returned by `WithAttributes` was allocated on the heap.
-
-Lastly, the proposal was not specification compliant.
-
-### Record attributes as slice
-
-One of the proposals[^6] was to have `Record` as a simple struct:
-
-```go
-type Record struct {
- Timestamp time.Time
- ObservedTimestamp time.Time
- EventName string
- Severity Severity
- SeverityText string
- Body Value
- Attributes []KeyValue
-}
-```
-
-The bridge implementations could use [`sync.Pool`](https://pkg.go.dev/sync#Pool)
-for reducing the number of allocations when passing attributes.
-
-The benchmark results were better.
-
-In such a design, most bridges would have a `sync.Pool`
-to reduce the number of heap allocations.
-However, the `sync.Pool` will not work correctly with API implementations
-that would take ownership of the record
-(e.g. implementations that do not copy records for asynchronous processing).
-The current design, even in case of improper API implementation,
-has lower chances of encountering a bug as most bridges would
-create a record, pass it, and forget about it.
-
-For reference, here is the reason why `slog` does not use `sync.Pool`[^3]
-as well:
-
-> We can use a sync pool for records though we decided not to.
-You can but it's a bad idea for us. Why?
-Because users have control of Records.
-Handler writers can get their hands on a record
-and we'd have to ask them to free it
-or try to free it magically at some point.
-But either way, they could get themselves in trouble by freeing it twice
-or holding on to one after they free it.
-That's a use after free bug and that's why `zerolog` was problematic for us.
-`zerolog`, as part of its speed, exposes a pool-allocated value to users;
-if you use `zerolog` the normal way, that you'll see in all the examples,
-you will never encounter a problem.
-But if you do something a little out of the ordinary you can get
-use after free bugs and we just didn't want to put that in the standard library.
-
-Therefore, we decided not to follow the proposal, as it is
-less user-friendly (users and bridges would need e.g. a `sync.Pool` to reduce
-the number of heap allocations), less safe (more prone to use-after-free bugs
-and race conditions), and the benchmark differences were not significant.
-
-### Use any instead of defining Value
-
-[Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
-defines Body to be `any`.
-One could propose to define `Body` (and attribute values) as `any`
-instead of defining a new type (`Value`).
-
-First of all, [`any` type defined in the specification](https://opentelemetry.io/docs/specs/otel/logs/data-model/#type-any)
-is not the same as `any` (`interface{}`) in Go.
-
-Moreover, using `any` as a field would decrease the performance.[^7]
-
-Notice that it would still be possible to add the following kind and factories
-in a backwards-compatible way:
-
-```go
-const KindMap Kind
-
-func AnyValue(value any) KeyValue
-
-func Any(key string, value any) KeyValue
-```
-
-However, currently, it would not be specification compliant.
-
-### Severity type encapsulating number and text
-
-We could combine severity into a single field defining a type:
-
-```go
-type Severity struct {
- Number SeverityNumber
- Text string
-}
-```
-
-However, the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition)
-defines them as independent fields.
-It should be more user-friendly to have them separated,
-especially with getter and setter methods, where setting one value
-when the other is already set would be unpleasant.
-
-### Reuse attribute package
-
-It was tempting to reuse the existing
-[`attribute`](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) package
-for defining log attributes and body.
-
-However, this would be wrong because [the log attribute definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-attributes)
-is different from [the common attribute definition](https://opentelemetry.io/docs/specs/otel/common/#attribute).
-
-Moreover, there is nothing indicating that [the body definition](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-body)
-has anything in common with a common attribute value.
-
-Therefore, we define new types representing the abstract types defined
-in the [Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#definitions-used-in-this-document).
-
-### Mix receiver types for Record
-
-Methods of [`slog.Record`](https://pkg.go.dev/log/slog#Record)
-have different receiver types.
-
-In the `log/slog` GitHub issue, we can only find that the reason is:[^8]
-
->> some receiver of Record struct is by value
-> Passing Records by value means they incur no heap allocation.
-> That improves performance overall, even though they are copied.
-
-However, the benchmarks do not show any noticeable differences.[^9]
-
-The compiler is smart enough not to make a heap allocation for any of these methods.
-The use of a pointer receiver does not cause any heap allocation.
-From Go FAQ:[^10]
-
-> In the current compilers, if a variable has its address taken,
-> that variable is a candidate for allocation on the heap.
-> However, a basic escape analysis recognizes some cases
-> when such variables will not live past the return from the function
-> and can reside on the stack.
-
-The [Understanding Allocations: the Stack and the Heap](https://www.youtube.com/watch?v=ZMZpH4yT7M0)
-presentation by Jacob Walker describes the escape analysis with details.
-
-Moreover, also from Go FAQ:[^10]
-
-> Also, if a local variable is very large,
-> it might make more sense to store it on the heap rather than the stack.
-
-Therefore, even if we use a value receiver and the value is very large,
-it may be heap allocated.
-
-Both [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments#receiver-type)
-and [Google's Go Style Decisions](https://google.github.io/styleguide/go/decisions#receiver-type)
-highly recommend making the methods for a type either all pointer methods
-or all value methods. Google's Go Style Decisions even goes further and says:
-
-> There is a lot of misinformation about whether passing a value or a pointer
-> to a function can affect performance.
-> The compiler can choose to pass pointers to values on the stack
-> as well as copying values on the stack,
-> but these considerations should not outweigh the readability
-> and correctness of the code in most circumstances.
-> When the performance does matter, it is important to profile both approaches
-> with a realistic benchmark before deciding that one approach outperforms the other.
-
-Because the benchmarks[^9] do not show any performance difference
-and the general recommendation is not to mix receiver types,
-we decided to use pointer receivers for all `Record` methods.
-
-### Add XYZ method to Logger
-
-The `Logger` does not have methods like `SetSeverity`, etc.
-as the Logs API needs to follow (be compliant with)
-the [specification](https://opentelemetry.io/docs/specs/otel/logs/api/).
-
-### Rename KeyValue to Attr
-
-There was a proposal to rename `KeyValue` to `Attr` (or `Attribute`).[^11]
-New developers may not intuitively know that `log.KeyValue` is an attribute in
-the OpenTelemetry parlance.
-
-During the discussion we agreed to keep the `KeyValue` name.
-
-The type is used in multiple semantics:
-
-- as a log attribute,
-- as a map item,
-- as a log record Body.
-
-As for map item semantics, this type is a key-value pair, not an attribute.
-Naming the type `Attr` would convey a semantic meaning
-that would not be correct for a map.
-
-We expect that most of the Logs API users will be OpenTelemetry contributors.
-We plan to implement bridges for the most popular logging libraries ourselves.
-Given we will all have the context needed to disambiguate these overlapping
-names, developers' confusion should not be an issue.
-
-For bridges not developed by us,
-developers will likely look at our existing bridges for inspiration.
-Our correct use of these types will be a reference to them.
-
-Lastly, we provide `ValueFromAttribute` and `KeyValueFromAttribute`
-to offer reuse of `attribute.Value` and `attribute.KeyValue`.
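-
-For example (a sketch; the exact signatures are assumptions):
-
-```go
-kv := log.KeyValueFromAttribute(attribute.String("server.address", "localhost"))
-val := log.ValueFromAttribute(attribute.Int64Value(42))
-```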
-
-[^1]: [Handle structured body and attributes](https://github.com/pellared/opentelemetry-go/pull/7)
-[^2]: Jonathan Amsterdam, [The Go Blog: Structured Logging with slog](https://go.dev/blog/slog)
-[^3]: Jonathan Amsterdam, [GopherCon Europe 2023: A Fast Structured Logging Package](https://www.youtube.com/watch?v=tC4Jt3i62ns)
-[^4]: [Emit definition discussion with benchmarks](https://github.com/open-telemetry/opentelemetry-go/pull/4725#discussion_r1400869566)
-[^5]: [Logger.WithAttributes analysis](https://github.com/pellared/opentelemetry-go/pull/3)
-[^6]: [Record attributes as field and use sync.Pool for reducing allocations](https://github.com/pellared/opentelemetry-go/pull/4) and [Record attributes based on slog.Record](https://github.com/pellared/opentelemetry-go/pull/6)
-[^7]: [Record.Body as any](https://github.com/pellared/opentelemetry-go/pull/5)
-[^8]: [log/slog: structured, leveled logging](https://github.com/golang/go/issues/56345#issuecomment-1302563756)
-[^9]: [Record with pointer receivers only](https://github.com/pellared/opentelemetry-go/pull/8)
-[^10]: [Go FAQ: Stack or heap](https://go.dev/doc/faq#stack_or_heap)
-[^11]: [Rename KeyValue to Attr discussion](https://github.com/open-telemetry/opentelemetry-go/pull/4809#discussion_r1476080093)
diff --git a/vendor/go.opentelemetry.io/otel/log/LICENSE b/vendor/go.opentelemetry.io/otel/log/LICENSE
deleted file mode 100644
index 261eeb9e9f8..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/log/README.md b/vendor/go.opentelemetry.io/otel/log/README.md
deleted file mode 100644
index 3f714271191..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Log API
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log)](https://pkg.go.dev/go.opentelemetry.io/otel/log)
diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go
deleted file mode 100644
index b7a085c63d7..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/doc.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package log provides the OpenTelemetry Logs API.
-
-This API is separate from its implementation so the instrumentation built from
-it is reusable. See [go.opentelemetry.io/otel/sdk/log] for the official
-OpenTelemetry implementation of this API.
-
-The log package provides the OpenTelemetry Logs API, which serves as a standard
-interface for generating and managing log records within the OpenTelemetry ecosystem.
-This package allows users to emit LogRecords, enabling structured, context-rich logging
-that can be easily integrated with observability tools. It ensures that log data is captured
-in a way that is consistent with OpenTelemetry's data model.
-
-This package can be used to create bridges between existing logging libraries and OpenTelemetry.
-Log bridges allow integrating existing logging setups with OpenTelemetry.
-Log bridges can be found in the [registry].
-
-# API Implementations
-
-This package does not conform to the standard Go versioning policy; all of its
-interfaces may have methods added to them without a package major version bump.
-This non-standard API evolution could surprise an uninformed implementation
-author. They could unknowingly build their implementation in a way that would
-result in a runtime panic for their users that update to the new API.
-
-The API is designed to help inform an instrumentation author about this
-non-standard API evolution. It requires them to choose a default behavior for
-unimplemented interface methods. There are three behavior choices they can
-make:
-
- - Compilation failure
- - Panic
- - Default to another implementation
-
-All interfaces in this API embed a corresponding interface from
-[go.opentelemetry.io/otel/log/embedded]. If an author wants the default
-behavior of their implementations to be a compilation failure, signaling to
-their users they need to update to the latest version of that implementation,
-they need to embed the corresponding interface from
-[go.opentelemetry.io/otel/log/embedded] in their implementation. For example,
-
- import "go.opentelemetry.io/otel/log/embedded"
-
- type LoggerProvider struct {
- embedded.LoggerProvider
- // ...
- }
-
-If an author wants the default behavior of their implementations to be a panic,
-they need to embed the API interface directly.
-
- import "go.opentelemetry.io/otel/log"
-
- type LoggerProvider struct {
- log.LoggerProvider
- // ...
- }
-
-This is not a recommended behavior as it could lead to publishing packages that
-contain runtime panics when users update other packages that use newer versions
-of [go.opentelemetry.io/otel/log].
-
-Finally, an author can embed another implementation in theirs. The embedded
-implementation will be used for methods not defined by the author. For example,
-an author who wants to default to silently dropping the call can use
-[go.opentelemetry.io/otel/log/noop]:
-
- import "go.opentelemetry.io/otel/log/noop"
-
- type LoggerProvider struct {
- noop.LoggerProvider
- // ...
- }
-
-It is strongly recommended that authors only embed
-go.opentelemetry.io/otel/log/noop if they choose this default behavior. That
-implementation is the only one OpenTelemetry authors can guarantee will fully
-implement all the API interfaces when a user updates their API.
-
-[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge
-*/
-package log // import "go.opentelemetry.io/otel/log"
diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/README.md b/vendor/go.opentelemetry.io/otel/log/embedded/README.md
deleted file mode 100644
index bae4ac68f0b..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/embedded/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Log Embedded
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/log/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
deleted file mode 100644
index 9b401b2b17f..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package embedded provides interfaces embedded within the [OpenTelemetry
-// Logs API].
-//
-// Implementers of the [OpenTelemetry Logs API] can embed the relevant
-// type from this package into their implementation directly. Doing so will
-// result in a compilation error for users when the [OpenTelemetry Logs
-// API] is extended (which is something that can happen without a major version
-// bump of the API package).
-//
-// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
-package embedded // import "go.opentelemetry.io/otel/log/embedded"
-
-// LoggerProvider is embedded in the [Logs API LoggerProvider].
-//
-// Embed this interface in your implementation of the [Logs API
-// LoggerProvider] if you want users to experience a compilation error,
-// signaling they need to update to your latest implementation, when the [Logs
-// Bridge API LoggerProvider] interface is extended (which is something that
-// can happen without a major version bump of the API package).
-//
-// [Logs API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider
-type LoggerProvider interface{ loggerProvider() }
-
-// Logger is embedded in [Logs API Logger].
-//
-// Embed this interface in your implementation of the [Logs API Logger]
-// if you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the [Logs API Logger]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-//
-// [Logs API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger
-type Logger interface{ logger() }
diff --git a/vendor/go.opentelemetry.io/otel/log/global/README.md b/vendor/go.opentelemetry.io/otel/log/global/README.md
deleted file mode 100644
index 11e5afefc01..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/global/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Log Global
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/global)](https://pkg.go.dev/go.opentelemetry.io/otel/log/global)
diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go
deleted file mode 100644
index bfdb1847908..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/global/log.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-/*
-Package global provides access to a global implementation of the OpenTelemetry
-Logs API.
-
-This package is experimental. It will be deprecated and removed when the [log]
-package becomes stable. Its functionality will be migrated to
-go.opentelemetry.io/otel.
-*/
-package global // import "go.opentelemetry.io/otel/log/global"
-
-import (
- "go.opentelemetry.io/otel/log"
- "go.opentelemetry.io/otel/log/internal/global"
-)
-
-// Logger returns a [log.Logger] configured with the provided name and options
-// from the globally configured [log.LoggerProvider].
-//
-// If this is called before a global LoggerProvider is configured, the returned
-// Logger will be a No-Op implementation of a Logger. When a global
-// LoggerProvider is registered for the first time, the returned Logger is
-// updated in-place to report to this new LoggerProvider. There is no need to
-// call this function again for an updated instance.
-//
-// This is a convenience function. It is equivalent to:
-//
-// GetLoggerProvider().Logger(name, options...)
-func Logger(name string, options ...log.LoggerOption) log.Logger {
- return GetLoggerProvider().Logger(name, options...)
-}
-
-// GetLoggerProvider returns the globally configured [log.LoggerProvider].
-//
-// If a global LoggerProvider has not been configured with [SetLoggerProvider],
-// the returned LoggerProvider will be a No-Op implementation. When
-// a global LoggerProvider is registered for the first time, the returned
-// LoggerProvider and all of its created Loggers are updated in-place. There is
-// no need to call this function again for an updated instance.
-func GetLoggerProvider() log.LoggerProvider {
- return global.GetLoggerProvider()
-}
-
-// SetLoggerProvider configures provider as the global [log.LoggerProvider].
-func SetLoggerProvider(provider log.LoggerProvider) {
- global.SetLoggerProvider(provider)
-}
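-
-// Usage sketch (illustrative, not part of the upstream source): loggers
-// obtained before a provider is installed are no-ops and are updated
-// in-place once SetLoggerProvider is called.
-//
-//	l := global.Logger("my/pkg")       // no-op until a provider is set
-//	global.SetLoggerProvider(provider) // e.g. an SDK LoggerProvider
-//	l.Emit(ctx, rec)                   // now delegates to the real provider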
diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go
deleted file mode 100644
index e463acbf193..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package global is the internal implementation of the OpenTelemetry global
-// Logs API.
-package global // import "go.opentelemetry.io/otel/log/internal/global"
-
-import (
- "context"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/log"
- "go.opentelemetry.io/otel/log/embedded"
-)
-
-// instLib defines the instrumentation library a logger is created for.
-//
-// Do not use sdk/instrumentation (API cannot depend on the SDK).
-type instLib struct {
- name string
- version string
- schemaURL string
- attrs attribute.Set
-}
-
-type loggerProvider struct {
- embedded.LoggerProvider
-
- mu sync.Mutex
- loggers map[instLib]*logger
- delegate log.LoggerProvider
-}
-
-// Compile-time guarantee loggerProvider implements LoggerProvider.
-var _ log.LoggerProvider = (*loggerProvider)(nil)
-
-func (p *loggerProvider) Logger(name string, options ...log.LoggerOption) log.Logger {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Logger(name, options...)
- }
-
- cfg := log.NewLoggerConfig(options...)
- key := instLib{
- name: name,
- version: cfg.InstrumentationVersion(),
- schemaURL: cfg.SchemaURL(),
- attrs: cfg.InstrumentationAttributes(),
- }
-
- if p.loggers == nil {
- l := &logger{name: name, options: options}
- p.loggers = map[instLib]*logger{key: l}
- return l
- }
-
- if l, ok := p.loggers[key]; ok {
- return l
- }
-
- l := &logger{name: name, options: options}
- p.loggers[key] = l
- return l
-}
-
-func (p *loggerProvider) setDelegate(provider log.LoggerProvider) {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- p.delegate = provider
- for _, l := range p.loggers {
- l.setDelegate(provider)
- }
- p.loggers = nil // Only set logger delegates once.
-}
-
-type logger struct {
- embedded.Logger
-
- name string
- options []log.LoggerOption
-
- delegate atomic.Value // log.Logger
-}
-
-// Compile-time guarantee logger implements Logger.
-var _ log.Logger = (*logger)(nil)
-
-func (l *logger) Emit(ctx context.Context, r log.Record) {
- if del, ok := l.delegate.Load().(log.Logger); ok {
- del.Emit(ctx, r)
- }
-}
-
-func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool {
- var enabled bool
- if del, ok := l.delegate.Load().(log.Logger); ok {
- enabled = del.Enabled(ctx, param)
- }
- return enabled
-}
-
-func (l *logger) setDelegate(provider log.LoggerProvider) {
- l.delegate.Store(provider.Logger(l.name, l.options...))
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go
deleted file mode 100644
index dbe1c2fbfb6..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package global // import "go.opentelemetry.io/otel/log/internal/global"
-
-import (
- "errors"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/log"
-)
-
-var (
- globalLoggerProvider = defaultLoggerProvider()
-
- delegateLoggerOnce sync.Once
-)
-
-func defaultLoggerProvider() *atomic.Value {
- v := &atomic.Value{}
- v.Store(loggerProviderHolder{provider: &loggerProvider{}})
- return v
-}
-
-type loggerProviderHolder struct {
- provider log.LoggerProvider
-}
-
-// GetLoggerProvider returns the global LoggerProvider.
-func GetLoggerProvider() log.LoggerProvider {
- return globalLoggerProvider.Load().(loggerProviderHolder).provider
-}
-
-// SetLoggerProvider sets the global LoggerProvider.
-func SetLoggerProvider(provider log.LoggerProvider) {
- current := GetLoggerProvider()
- if _, cOk := current.(*loggerProvider); cOk {
- if _, mpOk := provider.(*loggerProvider); mpOk && current == provider {
- err := errors.New("invalid delegation: LoggerProvider self-delegation")
- global.Error(err, "No delegate will be configured")
- return
- }
- }
-
- delegateLoggerOnce.Do(func() {
- if def, ok := current.(*loggerProvider); ok {
- def.setDelegate(provider)
- }
- })
- globalLoggerProvider.Store(loggerProviderHolder{provider: provider})
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go
deleted file mode 100644
index 87d1a827554..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:generate stringer -type=Kind -trimprefix=Kind
-
-package log // import "go.opentelemetry.io/otel/log"
-
-import (
- "bytes"
- "cmp"
- "errors"
- "fmt"
- "math"
- "slices"
- "strconv"
- "unsafe"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// errKind is logged when a Value is decoded to an incompatible type.
-var errKind = errors.New("invalid Kind")
-
-// Kind is the kind of a [Value].
-type Kind int
-
-// Kind values.
-const (
- KindEmpty Kind = iota
- KindBool
- KindFloat64
- KindInt64
- KindString
- KindBytes
- KindSlice
- KindMap
-)
-
-// A Value represents a structured log value.
-// A zero value is valid and represents an empty value.
-type Value struct {
- // Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
-
- // num holds the value for Int64, Float64, and Bool. It holds the length
- // for String, Bytes, Slice, Map.
- num uint64
- // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
- // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
- // then the value of Value is in num as described above. Otherwise, it
- // contains the value wrapped in the appropriate type.
- any any
-}
-
-type (
-	// stringptr represents a value in Value.any for KindString Values.
- stringptr *byte
- // bytesptr represents a value in Value.any for KindBytes Values.
- bytesptr *byte
- // sliceptr represents a value in Value.any for KindSlice Values.
- sliceptr *Value
- // mapptr represents a value in Value.any for KindMap Values.
- mapptr *KeyValue
-)
-
-// StringValue returns a new [Value] for a string.
-func StringValue(v string) Value {
- return Value{
- num: uint64(len(v)),
- any: stringptr(unsafe.StringData(v)),
- }
-}
-
-// IntValue returns a [Value] for an int.
-func IntValue(v int) Value { return Int64Value(int64(v)) }
-
-// Int64Value returns a [Value] for an int64.
-func Int64Value(v int64) Value {
- // This can be later converted back to int64 (overflow not checked).
- return Value{num: uint64(v), any: KindInt64} // nolint:gosec
-}
-
-// Float64Value returns a [Value] for a float64.
-func Float64Value(v float64) Value {
- return Value{num: math.Float64bits(v), any: KindFloat64}
-}
-
-// BoolValue returns a [Value] for a bool.
-func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
- var n uint64
- if v {
- n = 1
- }
- return Value{num: n, any: KindBool}
-}
-
-// BytesValue returns a [Value] for a byte slice. The passed slice must not be
-// changed after it is passed.
-func BytesValue(v []byte) Value {
- return Value{
- num: uint64(len(v)),
- any: bytesptr(unsafe.SliceData(v)),
- }
-}
-
-// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
-// not be changed after it is passed.
-func SliceValue(vs ...Value) Value {
- return Value{
- num: uint64(len(vs)),
- any: sliceptr(unsafe.SliceData(vs)),
- }
-}
-
-// MapValue returns a new [Value] for a slice of key-value pairs. The passed
-// slice must not be changed after it is passed.
-func MapValue(kvs ...KeyValue) Value {
- return Value{
- num: uint64(len(kvs)),
- any: mapptr(unsafe.SliceData(kvs)),
- }
-}
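-
-// Illustration (not part of the upstream source): the constructors above
-// compose into structured bodies and attribute values, e.g.
-//
-//	body := MapValue(
-//		String("user", "alice"),
-//		Slice("ids", IntValue(1), IntValue(2)),
-//	)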
-
-// AsString returns the value held by v as a string.
-func (v Value) AsString() string {
- if sp, ok := v.any.(stringptr); ok {
- return unsafe.String(sp, v.num)
- }
- global.Error(errKind, "AsString", "Kind", v.Kind())
- return ""
-}
-
-// asString returns the value held by v as a string. It will panic if the Value
-// is not KindString.
-func (v Value) asString() string {
- return unsafe.String(v.any.(stringptr), v.num)
-}
-
-// AsInt64 returns the value held by v as an int64.
-func (v Value) AsInt64() int64 {
- if v.Kind() != KindInt64 {
- global.Error(errKind, "AsInt64", "Kind", v.Kind())
- return 0
- }
- return v.asInt64()
-}
-
-// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
-// this will return garbage.
-func (v Value) asInt64() int64 {
- // Assumes v.num was a valid int64 (overflow not checked).
- return int64(v.num) // nolint: gosec
-}
-
-// AsBool returns the value held by v as a bool.
-func (v Value) AsBool() bool {
- if v.Kind() != KindBool {
- global.Error(errKind, "AsBool", "Kind", v.Kind())
- return false
- }
- return v.asBool()
-}
-
-// asBool returns the value held by v as a bool. If v is not of KindBool, this
-// will return garbage.
-func (v Value) asBool() bool { return v.num == 1 }
-
-// AsFloat64 returns the value held by v as a float64.
-func (v Value) AsFloat64() float64 {
- if v.Kind() != KindFloat64 {
- global.Error(errKind, "AsFloat64", "Kind", v.Kind())
- return 0
- }
- return v.asFloat64()
-}
-
-// asFloat64 returns the value held by v as a float64. If v is not of
-// KindFloat64, this will return garbage.
-func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
-
-// AsBytes returns the value held by v as a []byte.
-func (v Value) AsBytes() []byte {
- if sp, ok := v.any.(bytesptr); ok {
- return unsafe.Slice((*byte)(sp), v.num)
- }
- global.Error(errKind, "AsBytes", "Kind", v.Kind())
- return nil
-}
-
-// asBytes returns the value held by v as a []byte. It will panic if the Value
-// is not KindBytes.
-func (v Value) asBytes() []byte {
- return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
-}
-
-// AsSlice returns the value held by v as a []Value.
-func (v Value) AsSlice() []Value {
- if sp, ok := v.any.(sliceptr); ok {
- return unsafe.Slice((*Value)(sp), v.num)
- }
- global.Error(errKind, "AsSlice", "Kind", v.Kind())
- return nil
-}
-
-// asSlice returns the value held by v as a []Value. It will panic if the Value
-// is not KindSlice.
-func (v Value) asSlice() []Value {
- return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
-}
-
-// AsMap returns the value held by v as a []KeyValue.
-func (v Value) AsMap() []KeyValue {
- if sp, ok := v.any.(mapptr); ok {
- return unsafe.Slice((*KeyValue)(sp), v.num)
- }
- global.Error(errKind, "AsMap", "Kind", v.Kind())
- return nil
-}
-
-// asMap returns the value held by v as a []KeyValue. It will panic if the
-// Value is not KindMap.
-func (v Value) asMap() []KeyValue {
- return unsafe.Slice((*KeyValue)(v.any.(mapptr)), v.num)
-}
-
-// Kind returns the Kind of v.
-func (v Value) Kind() Kind {
- switch x := v.any.(type) {
- case Kind:
- return x
- case stringptr:
- return KindString
- case bytesptr:
- return KindBytes
- case sliceptr:
- return KindSlice
- case mapptr:
- return KindMap
- default:
- return KindEmpty
- }
-}
-
-// Empty reports whether v holds no value.
-func (v Value) Empty() bool { return v.Kind() == KindEmpty }
-
-// Equal reports whether v is equal to w.
-func (v Value) Equal(w Value) bool {
- k1 := v.Kind()
- k2 := w.Kind()
- if k1 != k2 {
- return false
- }
- switch k1 {
- case KindInt64, KindBool:
- return v.num == w.num
- case KindString:
- return v.asString() == w.asString()
- case KindFloat64:
- return v.asFloat64() == w.asFloat64()
- case KindSlice:
- return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
- case KindMap:
- sv := sortMap(v.asMap())
- sw := sortMap(w.asMap())
- return slices.EqualFunc(sv, sw, KeyValue.Equal)
- case KindBytes:
- return bytes.Equal(v.asBytes(), w.asBytes())
- case KindEmpty:
- return true
- default:
- global.Error(errKind, "Equal", "Kind", k1)
- return false
- }
-}
-
-func sortMap(m []KeyValue) []KeyValue {
- sm := make([]KeyValue, len(m))
- copy(sm, m)
- slices.SortFunc(sm, func(a, b KeyValue) int {
- return cmp.Compare(a.Key, b.Key)
- })
-
- return sm
-}
-
-// String returns Value's value as a string, formatted like [fmt.Sprint].
-//
-// The returned string is meant for debugging;
-// the string representation is not stable.
-func (v Value) String() string {
- switch v.Kind() {
- case KindString:
- return v.asString()
- case KindInt64:
- // Assumes v.num was a valid int64 (overflow not checked).
- return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
- case KindFloat64:
- return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
- case KindBool:
- return strconv.FormatBool(v.asBool())
- case KindBytes:
- return fmt.Sprint(v.asBytes()) // nolint:staticcheck // Use fmt.Sprint to encode as slice.
- case KindMap:
- return fmt.Sprint(v.asMap())
- case KindSlice:
- return fmt.Sprint(v.asSlice())
- case KindEmpty:
- return ""
- default:
- // Try to handle this as gracefully as possible.
- //
-		// Don't panic here. The goal here is to have developers find this
-		// first if a Kind is not handled. It is preferable to have users
-		// open an issue asking why their attributes have an "unhandled: "
-		// prefix than to say that their code is panicking.
-		return fmt.Sprintf("<unhandled: %v>", v.Kind())
- }
-}
-
-// A KeyValue is a key-value pair used to represent a log attribute (a
-// superset of [go.opentelemetry.io/otel/attribute.KeyValue]) and map item.
-type KeyValue struct {
- Key string
- Value Value
-}
-
-// Equal reports whether a is equal to b.
-func (a KeyValue) Equal(b KeyValue) bool {
- return a.Key == b.Key && a.Value.Equal(b.Value)
-}
-
-// String returns a KeyValue for a string value.
-func String(key, value string) KeyValue {
- return KeyValue{key, StringValue(value)}
-}
-
-// Int64 returns a KeyValue for an int64 value.
-func Int64(key string, value int64) KeyValue {
- return KeyValue{key, Int64Value(value)}
-}
-
-// Int returns a KeyValue for an int value.
-func Int(key string, value int) KeyValue {
- return KeyValue{key, IntValue(value)}
-}
-
-// Float64 returns a KeyValue for a float64 value.
-func Float64(key string, value float64) KeyValue {
- return KeyValue{key, Float64Value(value)}
-}
-
-// Bool returns a KeyValue for a bool value.
-func Bool(key string, value bool) KeyValue {
- return KeyValue{key, BoolValue(value)}
-}
-
-// Bytes returns a KeyValue for a []byte value.
-// The passed slice must not be changed after it is passed.
-func Bytes(key string, value []byte) KeyValue {
- return KeyValue{key, BytesValue(value)}
-}
-
-// Slice returns a KeyValue for a []Value value.
-// The passed slice must not be changed after it is passed.
-func Slice(key string, value ...Value) KeyValue {
- return KeyValue{key, SliceValue(value...)}
-}
-
-// Map returns a KeyValue for a map value.
-// The passed slice must not be changed after it is passed.
-func Map(key string, value ...KeyValue) KeyValue {
- return KeyValue{key, MapValue(value...)}
-}
-
-// Empty returns a KeyValue with an empty value.
-func Empty(key string) KeyValue {
- return KeyValue{key, Value{}}
-}
-
-// String returns key-value pair as a string, formatted like "key:value".
-//
-// The returned string is meant for debugging;
-// the string representation is not stable.
-func (a KeyValue) String() string {
- return fmt.Sprintf("%s:%s", a.Key, a.Value)
-}
-
-// ValueFromAttribute converts [attribute.Value] to [Value].
-func ValueFromAttribute(value attribute.Value) Value {
- switch value.Type() {
- case attribute.INVALID:
- return Value{}
- case attribute.BOOL:
- return BoolValue(value.AsBool())
- case attribute.BOOLSLICE:
- val := value.AsBoolSlice()
- res := make([]Value, 0, len(val))
- for _, v := range val {
- res = append(res, BoolValue(v))
- }
- return SliceValue(res...)
- case attribute.INT64:
- return Int64Value(value.AsInt64())
- case attribute.INT64SLICE:
- val := value.AsInt64Slice()
- res := make([]Value, 0, len(val))
- for _, v := range val {
- res = append(res, Int64Value(v))
- }
- return SliceValue(res...)
- case attribute.FLOAT64:
- return Float64Value(value.AsFloat64())
- case attribute.FLOAT64SLICE:
- val := value.AsFloat64Slice()
- res := make([]Value, 0, len(val))
- for _, v := range val {
- res = append(res, Float64Value(v))
- }
- return SliceValue(res...)
- case attribute.STRING:
- return StringValue(value.AsString())
- case attribute.STRINGSLICE:
- val := value.AsStringSlice()
- res := make([]Value, 0, len(val))
- for _, v := range val {
- res = append(res, StringValue(v))
- }
- return SliceValue(res...)
- }
- // This code should never be reached
- // as log attributes are a superset of standard attributes.
- panic("unknown attribute type")
-}
-
-// KeyValueFromAttribute converts [attribute.KeyValue] to [KeyValue].
-func KeyValueFromAttribute(kv attribute.KeyValue) KeyValue {
- return KeyValue{
- Key: string(kv.Key),
- Value: ValueFromAttribute(kv.Value),
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/kind_string.go b/vendor/go.opentelemetry.io/otel/log/kind_string.go
deleted file mode 100644
index bdfaa18665c..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/kind_string.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Code generated by "stringer -type=Kind -trimprefix=Kind"; DO NOT EDIT.
-
-package log
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[KindEmpty-0]
- _ = x[KindBool-1]
- _ = x[KindFloat64-2]
- _ = x[KindInt64-3]
- _ = x[KindString-4]
- _ = x[KindBytes-5]
- _ = x[KindSlice-6]
- _ = x[KindMap-7]
-}
-
-const _Kind_name = "EmptyBoolFloat64Int64StringBytesSliceMap"
-
-var _Kind_index = [...]uint8{0, 5, 9, 16, 21, 27, 32, 37, 40}
-
-func (i Kind) String() string {
- if i < 0 || i >= Kind(len(_Kind_index)-1) {
- return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go
deleted file mode 100644
index 1205f08e2c9..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/logger.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package log // import "go.opentelemetry.io/otel/log"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/log/embedded"
-)
-
-// Logger emits log records.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Logger interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Logger
-
- // Emit emits a log record.
- //
-	// The record may be held by the implementation. Callers should not mutate
-	// the record after it is passed.
- //
- // Implementations of this method need to be safe for a user to call
- // concurrently.
- Emit(ctx context.Context, record Record)
-
- // Enabled returns whether the Logger emits for the given context and
- // param.
- //
- // This is useful for users that want to know if a [Record]
- // will be processed or dropped before they perform complex operations to
- // construct the [Record].
- //
-	// The passed param is likely to contain only partial record information
-	// (e.g. a param with only the Severity set).
- // If a Logger needs more information than is provided, it
- // is said to be in an indeterminate state (see below).
- //
- // The returned value will be true when the Logger will emit for the
- // provided context and param, and will be false if the Logger will not
- // emit. The returned value may be true or false in an indeterminate state.
- // An implementation should default to returning true for an indeterminate
- // state, but may return false if valid reasons in particular circumstances
- // exist (e.g. performance, correctness).
- //
- // The param should not be held by the implementation. A copy should be
- // made if the param needs to be held after the call returns.
- //
- // Implementations of this method need to be safe for a user to call
- // concurrently.
- Enabled(ctx context.Context, param EnabledParameters) bool
-}
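-
-// Usage sketch (illustrative, not part of the upstream source): check Enabled
-// before constructing an expensive Record.
-//
-//	param := EnabledParameters{Severity: SeverityDebug}
-//	if logger.Enabled(ctx, param) {
-//		var r Record
-//		r.SetSeverity(SeverityDebug)
-//		r.SetBody(StringValue("expensive debug info"))
-//		logger.Emit(ctx, r)
-//	}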
-
-// LoggerOption applies configuration options to a [Logger].
-type LoggerOption interface {
- // applyLogger is used to set a LoggerOption value of a LoggerConfig.
- applyLogger(LoggerConfig) LoggerConfig
-}
-
-// LoggerConfig contains options for a [Logger].
-type LoggerConfig struct {
- // Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
-
- version string
- schemaURL string
- attrs attribute.Set
-}
-
-// NewLoggerConfig returns a new [LoggerConfig] with all the options applied.
-func NewLoggerConfig(options ...LoggerOption) LoggerConfig {
- var c LoggerConfig
- for _, opt := range options {
- c = opt.applyLogger(c)
- }
- return c
-}
-
-// InstrumentationVersion returns the version of the library providing
-// instrumentation.
-func (cfg LoggerConfig) InstrumentationVersion() string {
- return cfg.version
-}
-
-// InstrumentationAttributes returns the attributes associated with the library
-// providing instrumentation.
-func (cfg LoggerConfig) InstrumentationAttributes() attribute.Set {
- return cfg.attrs
-}
-
-// SchemaURL returns the schema URL of the library providing instrumentation.
-func (cfg LoggerConfig) SchemaURL() string {
- return cfg.schemaURL
-}
-
-type loggerOptionFunc func(LoggerConfig) LoggerConfig
-
-func (fn loggerOptionFunc) applyLogger(cfg LoggerConfig) LoggerConfig {
- return fn(cfg)
-}
-
-// WithInstrumentationVersion returns a [LoggerOption] that sets the
-// instrumentation version of a [Logger].
-func WithInstrumentationVersion(version string) LoggerOption {
- return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
- config.version = version
- return config
- })
-}
-
-// WithInstrumentationAttributes returns a [LoggerOption] that sets the
-// instrumentation attributes of a [Logger].
-//
-// The passed attributes will be de-duplicated.
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption {
- return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
- config.attrs = attribute.NewSet(attr...)
- return config
- })
-}
-
-// WithSchemaURL returns a [LoggerOption] that sets the schema URL for a
-// [Logger].
-func WithSchemaURL(schemaURL string) LoggerOption {
- return loggerOptionFunc(func(config LoggerConfig) LoggerConfig {
- config.schemaURL = schemaURL
- return config
- })
-}
-
-// EnabledParameters represents payload for [Logger]'s Enabled method.
-type EnabledParameters struct {
- Severity Severity
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/provider.go b/vendor/go.opentelemetry.io/otel/log/provider.go
deleted file mode 100644
index 5c8ca328f87..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/provider.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package log // import "go.opentelemetry.io/otel/log"
-
-import "go.opentelemetry.io/otel/log/embedded"
-
-// LoggerProvider provides access to [Logger].
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type LoggerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.LoggerProvider
-
- // Logger returns a new [Logger] with the provided name and configuration.
- //
- // The name needs to uniquely identify the source of logged code. It is
- // recommended that name is the Go package name of the library using a log
- // bridge (note: this is not the name of the bridge package). Most
- // commonly, this means a bridge will need to accept this value from its
- // users.
- //
- // If name is empty, implementations need to provide a default name.
- //
- // The version of the packages using a bridge can be critical information
- // to include when logging. The bridge should accept this version
- // information and use the [WithInstrumentationVersion] option to configure
- // the Logger appropriately.
- //
- // Implementations of this method need to be safe for a user to call
- // concurrently.
- Logger(name string, options ...LoggerOption) Logger
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go
deleted file mode 100644
index 4d2f32d0fb0..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/record.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package log // import "go.opentelemetry.io/otel/log"
-
-import (
- "slices"
- "time"
-)
-
-// attributesInlineCount is the number of attributes that are efficiently
-// stored in an array within a Record. This value is borrowed from slog which
-// performed a quantitative survey of log library use and found this value to
-// cover 95% of all use-cases (https://go.dev/blog/slog#performance).
-const attributesInlineCount = 5
-
-// Record represents a log record.
-// A log record with non-empty event name is interpreted as an event record.
-type Record struct {
- // Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
-
- eventName string
- timestamp time.Time
- observedTimestamp time.Time
- severity Severity
- severityText string
- body Value
-
- // The fields below are for optimizing the implementation of Attributes and
- // AddAttributes. This design is borrowed from the slog Record type:
- // https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/log/slog/record.go;l=20
-
- // Allocation optimization: an inline array sized to hold
- // the majority of log calls (based on examination of open-source
- // code). It holds the start of the list of attributes.
- front [attributesInlineCount]KeyValue
-
- // The number of attributes in front.
- nFront int
-
- // The list of attributes except for those in front.
- // Invariants:
- // - len(back) > 0 if nFront == len(front)
- // - Unused array elements are zero-ed. Used to detect mistakes.
- back []KeyValue
-}
-
-// EventName returns the event name.
-// A log record with non-empty event name is interpreted as an event record.
-func (r *Record) EventName() string {
- return r.eventName
-}
-
-// SetEventName sets the event name.
-// A log record with non-empty event name is interpreted as an event record.
-func (r *Record) SetEventName(s string) {
- r.eventName = s
-}
-
-// Timestamp returns the time when the log record occurred.
-func (r *Record) Timestamp() time.Time {
- return r.timestamp
-}
-
-// SetTimestamp sets the time when the log record occurred.
-func (r *Record) SetTimestamp(t time.Time) {
- r.timestamp = t
-}
-
-// ObservedTimestamp returns the time when the log record was observed.
-func (r *Record) ObservedTimestamp() time.Time {
- return r.observedTimestamp
-}
-
-// SetObservedTimestamp sets the time when the log record was observed.
-func (r *Record) SetObservedTimestamp(t time.Time) {
- r.observedTimestamp = t
-}
-
-// Severity returns the [Severity] of the log record.
-func (r *Record) Severity() Severity {
- return r.severity
-}
-
-// SetSeverity sets the [Severity] level of the log record.
-func (r *Record) SetSeverity(level Severity) {
- r.severity = level
-}
-
-// SeverityText returns severity (also known as log level) text. This is the
-// original string representation of the severity as it is known at the source.
-func (r *Record) SeverityText() string {
- return r.severityText
-}
-
-// SetSeverityText sets severity (also known as log level) text. This is the
-// original string representation of the severity as it is known at the source.
-func (r *Record) SetSeverityText(text string) {
- r.severityText = text
-}
-
-// Body returns the body of the log record.
-func (r *Record) Body() Value {
- return r.body
-}
-
-// SetBody sets the body of the log record.
-func (r *Record) SetBody(v Value) {
- r.body = v
-}
-
-// WalkAttributes walks all attributes the log record holds by calling f
-// for each [KeyValue] in the [Record]. Iteration stops if f returns false.
-func (r *Record) WalkAttributes(f func(KeyValue) bool) {
- for i := 0; i < r.nFront; i++ {
- if !f(r.front[i]) {
- return
- }
- }
- for _, a := range r.back {
- if !f(a) {
- return
- }
- }
-}
-
-// AddAttributes adds attributes to the log record.
-func (r *Record) AddAttributes(attrs ...KeyValue) {
- var i int
- for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
- a := attrs[i]
- r.front[r.nFront] = a
- r.nFront++
- }
-
- r.back = slices.Grow(r.back, len(attrs[i:]))
- r.back = append(r.back, attrs[i:]...)
-}
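-
-// Illustration (not part of the upstream source): adding 7 attributes to a
-// fresh Record stores the first 5 in front and the remaining 2 in back, so
-// the common case (<= 5 attributes, per the slog survey above) needs no
-// separate heap allocation for the attribute list.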
-
-// AttributesLen returns the number of attributes in the log record.
-func (r *Record) AttributesLen() int {
- return r.nFront + len(r.back)
-}
diff --git a/vendor/go.opentelemetry.io/otel/log/severity.go b/vendor/go.opentelemetry.io/otel/log/severity.go
deleted file mode 100644
index 0240fd5acbd..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/severity.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-//go:generate stringer -type=Severity -linecomment
-
-package log // import "go.opentelemetry.io/otel/log"
-
-// Severity represents a log record severity (also known as log level). Smaller
-// numerical values correspond to less severe log records (such as debug
-// events), larger numerical values correspond to more severe log records (such
-// as errors and critical events).
-type Severity int
-
-// Severity values defined by OpenTelemetry.
-const (
- // SeverityUndefined represents an unset Severity.
- SeverityUndefined Severity = 0 // UNDEFINED
-
- // A fine-grained debugging log record. Typically disabled in default
- // configurations.
- SeverityTrace1 Severity = 1 // TRACE
- SeverityTrace2 Severity = 2 // TRACE2
- SeverityTrace3 Severity = 3 // TRACE3
- SeverityTrace4 Severity = 4 // TRACE4
-
- // A debugging log record.
- SeverityDebug1 Severity = 5 // DEBUG
- SeverityDebug2 Severity = 6 // DEBUG2
- SeverityDebug3 Severity = 7 // DEBUG3
- SeverityDebug4 Severity = 8 // DEBUG4
-
- // An informational log record. Indicates that an event happened.
- SeverityInfo1 Severity = 9 // INFO
- SeverityInfo2 Severity = 10 // INFO2
- SeverityInfo3 Severity = 11 // INFO3
- SeverityInfo4 Severity = 12 // INFO4
-
- // A warning log record. Not an error but is likely more important than an
- // informational event.
- SeverityWarn1 Severity = 13 // WARN
- SeverityWarn2 Severity = 14 // WARN2
- SeverityWarn3 Severity = 15 // WARN3
- SeverityWarn4 Severity = 16 // WARN4
-
- // An error log record. Something went wrong.
- SeverityError1 Severity = 17 // ERROR
- SeverityError2 Severity = 18 // ERROR2
- SeverityError3 Severity = 19 // ERROR3
- SeverityError4 Severity = 20 // ERROR4
-
-	// A fatal log record such as an application or system crash.
- SeverityFatal1 Severity = 21 // FATAL
- SeverityFatal2 Severity = 22 // FATAL2
- SeverityFatal3 Severity = 23 // FATAL3
- SeverityFatal4 Severity = 24 // FATAL4
-
- // Convenience definitions for the base severity of each level.
- SeverityTrace = SeverityTrace1
- SeverityDebug = SeverityDebug1
- SeverityInfo = SeverityInfo1
- SeverityWarn = SeverityWarn1
- SeverityError = SeverityError1
- SeverityFatal = SeverityFatal1
-)
diff --git a/vendor/go.opentelemetry.io/otel/log/severity_string.go b/vendor/go.opentelemetry.io/otel/log/severity_string.go
deleted file mode 100644
index 4c20fa5e8aa..00000000000
--- a/vendor/go.opentelemetry.io/otel/log/severity_string.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Code generated by "stringer -type=Severity -linecomment"; DO NOT EDIT.
-
-package log
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[SeverityUndefined-0]
- _ = x[SeverityTrace1-1]
- _ = x[SeverityTrace2-2]
- _ = x[SeverityTrace3-3]
- _ = x[SeverityTrace4-4]
- _ = x[SeverityDebug1-5]
- _ = x[SeverityDebug2-6]
- _ = x[SeverityDebug3-7]
- _ = x[SeverityDebug4-8]
- _ = x[SeverityInfo1-9]
- _ = x[SeverityInfo2-10]
- _ = x[SeverityInfo3-11]
- _ = x[SeverityInfo4-12]
- _ = x[SeverityWarn1-13]
- _ = x[SeverityWarn2-14]
- _ = x[SeverityWarn3-15]
- _ = x[SeverityWarn4-16]
- _ = x[SeverityError1-17]
- _ = x[SeverityError2-18]
- _ = x[SeverityError3-19]
- _ = x[SeverityError4-20]
- _ = x[SeverityFatal1-21]
- _ = x[SeverityFatal2-22]
- _ = x[SeverityFatal3-23]
- _ = x[SeverityFatal4-24]
-}
-
-const _Severity_name = "UNDEFINEDTRACETRACE2TRACE3TRACE4DEBUGDEBUG2DEBUG3DEBUG4INFOINFO2INFO3INFO4WARNWARN2WARN3WARN4ERRORERROR2ERROR3ERROR4FATALFATAL2FATAL3FATAL4"
-
-var _Severity_index = [...]uint8{0, 9, 14, 20, 26, 32, 37, 43, 49, 55, 59, 64, 69, 74, 78, 83, 88, 93, 98, 104, 110, 116, 121, 127, 133, 139}
-
-func (i Severity) String() string {
- if i < 0 || i >= Severity(len(_Severity_index)-1) {
- return "Severity(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Severity_name[_Severity_index[i]:_Severity_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
deleted file mode 100644
index 82e1f46b4ea..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
deleted file mode 100644
index 6685c392b50..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
+++ /dev/null
@@ -1,1198 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes HTTP attributes.
-const (
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
-	// semantic conventions. It represents the HTTP request method.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions. It represents the [HTTP
- // response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If and only if one was
- // received/sent.)
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions. It represents the HTTP request method.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions. It represents the [HTTP response
-// status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTP Server spans attributes
-const (
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions. It represents the URI scheme identifying the used
- // protocol.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route (path template in
- // the format used by the respective server framework). See note below
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if it's available)
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
- // if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions. It represents the URI scheme identifying the used
-// protocol.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route (path template in the
-// format used by the respective server framework). See note below
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the name that identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'click', 'exception'
- EventNameKey = attribute.Key("event.name")
-
- // EventDomainKey is the attribute Key conforming to the "event.domain"
- // semantic conventions. It represents the domain that identifies the
- // business context for the events.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: Events across different domains may have same `event.name`, yet be
- // unrelated events.
- EventDomainKey = attribute.Key("event.domain")
-)
-
-var (
- // Events from browser apps
- EventDomainBrowser = EventDomainKey.String("browser")
- // Events from mobile apps
- EventDomainDevice = EventDomainKey.String("device")
- // Events from Kubernetes
- EventDomainK8S = EventDomainKey.String("k8s")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the name that identifies the event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions. It represents the transport protocol used. See
- // note below.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions. It represents the application
- // layer protocol used. The value SHOULD be normalized to lowercase.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions. It represents the version
- // of the application layer protocol used. See note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `net.protocol.version` refers to the version of the protocol used
- // and might be different from the protocol client's version. If the HTTP
- // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
- // this attribute should be set to `1.1`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions. It represents the remote
- // socket peer name.
- //
- // Type: string
- // RequirementLevel: Recommended (If available and different from
- // `net.peer.name` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 'proxy.example.com'
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions. It represents the remote
- // socket peer address: IPv4 or IPv6 for internet protocols, path for local
- // communication,
- // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '127.0.0.1', '/tmp/mysql.sock'
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions. It represents the remote
- // socket peer port.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 16456
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions. It represents the protocol
- // [address
- // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
- // which is used for communication.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If different than `inet` and if
- // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
- // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
- // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
- // instrumentations that follow previous versions of this document.)
- // Stability: stable
- // Examples: 'inet6', 'bluetooth'
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions. It represents the logical remote hostname, see
- // note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
- // extra DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions. It represents the logical remote port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions. It represents the logical local hostname or
- // similar, see note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions. It represents the logical local port number,
- // preferably the one that the peer used to connect
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 8080
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions. It represents the local
- // socket address. Useful in case of a multi-IP host.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '192.168.0.1'
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions. It represents the local
- // socket port number.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If defined for the address
- // family and if different than `net.host.port` and if `net.sock.host.addr`
- // is set. In other cases, it is still recommended to set this.)
- // Stability: stable
- // Examples: 35555
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // IPv4 address
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions. It represents the application
-// layer protocol used. The value SHOULD be normalized to lowercase.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions. It represents the version of
-// the application layer protocol used. See note below.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions. It represents the remote socket
-// peer name.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions. It represents the remote socket
-// peer address: IPv4 or IPv6 for internet protocols, path for local
-// communication,
-// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions. It represents the remote socket
-// peer port.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions. It represents the logical remote
-// hostname, see note below.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions. It represents the logical remote port
-// number
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions. It represents the logical local
-// hostname or similar, see note below.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions. It represents the logical local port
-// number, preferably the one that the peer used to connect
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions. It represents the local socket
-// address. Useful in case of a multi-IP host.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions. It represents the local socket
-// port number.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostConnectionTypeKey is the attribute Key conforming to the
- // "net.host.connection.type" semantic conventions. It represents the
- // internet connection type currently being used by the host.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
-
- // NetHostConnectionSubtypeKey is the attribute Key conforming to the
- // "net.host.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
-
- // NetHostCarrierNameKey is the attribute Key conforming to the
- // "net.host.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
-
- // NetHostCarrierMccKey is the attribute Key conforming to the
- // "net.host.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
-
- // NetHostCarrierMncKey is the attribute Key conforming to the
- // "net.host.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
-
- // NetHostCarrierIccKey is the attribute Key conforming to the
- // "net.host.carrier.icc" semantic conventions. It represents the ISO
- // 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// NetHostCarrierName returns an attribute KeyValue conforming to the
-// "net.host.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetHostCarrierName(val string) attribute.KeyValue {
- return NetHostCarrierNameKey.String(val)
-}
-
-// NetHostCarrierMcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mcc" semantic conventions. It represents the mobile
-// carrier country code.
-func NetHostCarrierMcc(val string) attribute.KeyValue {
- return NetHostCarrierMccKey.String(val)
-}
-
-// NetHostCarrierMnc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mnc" semantic conventions. It represents the mobile
-// carrier network code.
-func NetHostCarrierMnc(val string) attribute.KeyValue {
- return NetHostCarrierMncKey.String(val)
-}
-
-// NetHostCarrierIcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetHostCarrierIcc(val string) attribute.KeyValue {
- return NetHostCarrierIccKey.String(val)
-}
-
-// Semantic conventions for HTTP client and server Spans.
-const (
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions. It represents the
- // size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions. It represents the
- // size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-)
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions. It represents the size
-// of the request payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions. It represents the size
-// of the response payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// Semantic convention describing per-message attributes populated on messaging
-// spans or links.
-const (
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the [conversation ID](#conversations) identifying the conversation to
- // which the message belongs, represented as a string. Sometimes called
- // "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
- // the "messaging.message.payload_size_bytes" semantic conventions. It
- // represents the (uncompressed) size of the message payload in bytes. Also
- // use this attribute if it is unknown whether the compressed or
- // uncompressed payload size is reported.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
-
- // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
- // conforming to the "messaging.message.payload_compressed_size_bytes"
- // semantic conventions. It represents the compressed size of the message
- // payload in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
-)
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the [conversation ID](#conversations) identifying the
-// conversation to which the message belongs, represented as a string.
-// Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
-// to the "messaging.message.payload_size_bytes" semantic conventions. It
-// represents the (uncompressed) size of the message payload in bytes. Also use
-// this attribute if it is unknown whether the compressed or uncompressed
-// payload size is reported.
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadSizeBytesKey.Int(val)
-}
-
-// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
-// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
-// conventions. It represents the compressed size of the message payload in
-// bytes.
-func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
-}
-
-// Semantic convention for attributes that describe messaging destination on
-// broker
-const (
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker does not have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-)
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// Semantic convention for attributes that describe messaging source on broker
-const (
- // MessagingSourceNameKey is the attribute Key conforming to the
- // "messaging.source.name" semantic conventions. It represents the message
- // source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Source name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker does not have such notion, the source name SHOULD uniquely
- // identify the broker.
- MessagingSourceNameKey = attribute.Key("messaging.source.name")
-
- // MessagingSourceTemplateKey is the attribute Key conforming to the
- // "messaging.source.template" semantic conventions. It represents the low
- // cardinality representation of the messaging source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Source names could be constructed from templates. An example would
- // be a source name involving a user name or product id. Although the
- // source name in this case is of high cardinality, the underlying template
- // is of low cardinality and can be effectively used for grouping and
- // aggregation.
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
-
- // MessagingSourceTemporaryKey is the attribute Key conforming to the
- // "messaging.source.temporary" semantic conventions. It represents a
- // boolean that is true if the message source is temporary and might not
- // exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
-
- // MessagingSourceAnonymousKey is the attribute Key conforming to the
- // "messaging.source.anonymous" semantic conventions. It represents a
- // boolean that is true if the message source is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
-)
-
-// MessagingSourceName returns an attribute KeyValue conforming to the
-// "messaging.source.name" semantic conventions. It represents the message
-// source name
-func MessagingSourceName(val string) attribute.KeyValue {
- return MessagingSourceNameKey.String(val)
-}
-
-// MessagingSourceTemplate returns an attribute KeyValue conforming to the
-// "messaging.source.template" semantic conventions. It represents the low
-// cardinality representation of the messaging source name
-func MessagingSourceTemplate(val string) attribute.KeyValue {
- return MessagingSourceTemplateKey.String(val)
-}
-
-// MessagingSourceTemporary returns an attribute KeyValue conforming to the
-// "messaging.source.temporary" semantic conventions. It represents a boolean
-// that is true if the message source is temporary and might not exist anymore
-// after messages are processed.
-func MessagingSourceTemporary(val bool) attribute.KeyValue {
- return MessagingSourceTemporaryKey.Bool(val)
-}
-
-// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
-// "messaging.source.anonymous" semantic conventions. It represents a boolean
-// that is true if the message source is anonymous (could be unnamed or have
-// auto-generated name).
-func MessagingSourceAnonymous(val bool) attribute.KeyValue {
- return MessagingSourceAnonymousKey.Bool(val)
-}
-
-// Attributes for RabbitMQ
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If not empty.)
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// Attributes for Apache Kafka
-const (
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaClientIDKey is the attribute Key conforming to the
- // "messaging.kafka.client_id" semantic conventions. It represents the
- // client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
- // "messaging.kafka.source.partition" semantic conventions. It represents
- // the partition the message is received from.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If value is `true`. When
- // missing, the value is assumed to be `false`.)
- // Stability: stable
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaClientID returns an attribute KeyValue conforming to the
-// "messaging.kafka.client_id" semantic conventions. It represents the client
-// ID for the Consumer or Producer that is handling the message.
-func MessagingKafkaClientID(val string) attribute.KeyValue {
- return MessagingKafkaClientIDKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
-// the "messaging.kafka.source.partition" semantic conventions. It represents
-// the partition the message is received from.
-func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
- return MessagingKafkaSourcePartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// Attributes for Apache RocketMQ
-const (
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqClientIDKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_id" semantic conventions. It represents the
- // unique identifier for each client.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delay time level is not specified.)
- // Stability: stable
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delivery timestamp is not specified.)
- // Stability: stable
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the it is essential for FIFO message. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
- // Stability: stable
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
-// "messaging.rocketmq.client_id" semantic conventions. It represents the
-// unique identifier for each client.
-func MessagingRocketmqClientID(val string) attribute.KeyValue {
- return MessagingRocketmqClientIDKey.String(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the it is essential for FIFO message. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
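For context on what the deleted helpers above did at call sites: each typed constructor wraps an `attribute.Key` so instrumentation sets span attributes with consistent names and value types. A minimal, hedged sketch of how a caller typically used the v1.20.0 package before this removal (the tracer name and values are placeholders, not from this repository):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func handle(ctx context.Context) {
	// The typed helpers return attribute.KeyValue, keeping attribute
	// names and value types consistent across instrumentation.
	_, span := otel.Tracer("example").Start(ctx, "HTTP GET")
	defer span.End()

	span.SetAttributes(
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
		semconv.NetPeerName("example.com"),
		semconv.NetPeerPort(443),
	)
}

Any remaining importer of this path would have to move to a newer semconv version; the deletion here suggests nothing in the module references v1.20.0 anymore.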
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
deleted file mode 100644
index 0d1f55a8fe9..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.20.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
deleted file mode 100644
index 63776393217..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents whether this is a received or
- // sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It MUST be calculated as two
- // different counters starting from `1`, one for sent messages and one for
- // received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It MUST be calculated as two different
-// counters starting from `1`, one for sent messages and one for received
-// messages.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
-
-// The attributes used to report a single exception associated with a span.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example above](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
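The `exception.escaped` note above is easiest to see at a span boundary: the attribute is set just before the span ends, when it is known the error is still propagating. A hedged sketch of that pattern (tracer name and error are illustrative):

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func do(ctx context.Context) error {
	ctx, span := otel.Tracer("example").Start(ctx, "do")
	defer span.End()

	if err := work(ctx); err != nil {
		// The error is still "in flight" when the span ends, so the
		// convention says to mark the exception event as escaped.
		span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}

func work(context.Context) error { return errors.New("boom") }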
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
deleted file mode 100644
index f40c97825aa..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
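`ExceptionEventName` is the event name the SDK's `RecordError` uses; instrumentation can also attach the event by hand when it needs full control over the attributes. A short, hedged sketch (it uses the generic `attribute` package for fields not shown in this diff):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func record(ctx context.Context, err error) {
	_, span := otel.Tracer("example").Start(ctx, "record")
	defer span.End()

	// Emitting the "exception" event manually instead of via RecordError.
	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
		attribute.String("exception.message", err.Error()),
		semconv.ExceptionEscaped(false),
	))
}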
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
deleted file mode 100644
index 9c1840631b6..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
deleted file mode 100644
index 3d44dae2750..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
+++ /dev/null
@@ -1,2060 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. Cloud regions often
- // have multiple, isolated locations known as zones to increase
- // availability. This attribute represents the zone where the resource is
- // running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running in.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
-// on Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. Cloud regions often have
-// multiple, isolated locations known as zones to increase availability. This
-// attribute represents the zone where the resource is running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
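These helpers are typically folded into an SDK resource at process startup. A minimal sketch with hypothetical values (resource.NewWithAttributes and semconv.SchemaURL are real APIs from the SDK and this package):

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

var cloudRes = resource.NewWithAttributes(
	semconv.SchemaURL, // ties the attributes to the v1.20.0 schema
	semconv.CloudProviderAWS,
	semconv.CloudPlatformAWSLambda, // prefix matches cloud.provider, per the note above
	semconv.CloudRegion("us-east-1"),
	semconv.CloudAccountID("111111111111"),
	semconv.CloudAvailabilityZone("us-east-1c"),
)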
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
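All four helpers above are variadic and produce string-slice attributes, so the sidecar case from the note is a single call per attribute. A minimal sketch reusing the example values from the docs:

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

var logAttrs = []attribute.KeyValue{
	// One call can name every log group a multi-container app writes to.
	semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
	semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
}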
-
-// Heroku dyno metadata
-const (
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-)
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageTagKey is the attribute Key conforming to the
- // "container.image.tag" semantic conventions. It represents the container
- // image tag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageTag returns an attribute KeyValue conforming to the
-// "container.image.tag" semantic conventions. It represents the container
-// image tag.
-func ContainerImageTag(val string) attribute.KeyValue {
- return ContainerImageTagKey.String(val)
-}
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment
-// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
-// deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// The device on which the process represented by this resource is running.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of
- // the device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, which will potentially be reused for other invocations of the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, which will potentially be reused for other
-// invocations of the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
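On AWS Lambda the note above amounts to a unit conversion: AWS_LAMBDA_FUNCTION_MEMORY_SIZE reports MiB and the attribute wants bytes. A minimal sketch (faasMaxMemoryFromEnv is a hypothetical helper):

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func faasMaxMemoryFromEnv() (attribute.KeyValue, bool) {
	mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
	if err != nil {
		return attribute.KeyValue{}, false // unset, or not running on Lambda
	}
	// e.g. 128 MiB * 1,048,576 = 134217728 bytes, matching the example above.
	return semconv.FaaSMaxMemory(mib * 1_048_576), true
}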
-
-// A host is defined as a general computing instance.
-const (
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID. For Cloud, this
- // value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
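A minimal sketch of mapping Go's runtime.GOARCH onto this enum; the mapping choices are an assumption of this sketch, not something the package provides:

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func hostArch() attribute.KeyValue {
	switch runtime.GOARCH {
	case "amd64":
		return semconv.HostArchAMD64
	case "arm64":
		return semconv.HostArchARM64
	case "386":
		return semconv.HostArchX86
	case "ppc64":
		return semconv.HostArchPPC64
	case "s390x":
		return semconv.HostArchS390x
	default:
		// Fall back to the raw GOARCH string for unmapped architectures.
		return semconv.HostArchKey.String(runtime.GOARCH)
	}
}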
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID. For
-// Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image as defined in [Version
-// Attributes](README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// A Kubernetes Cluster.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// A Kubernetes Node object.
-const (
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// A Kubernetes Namespace.
-const (
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// A Kubernetes Pod object.
-const (
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// A container in a
-// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from the Pod specification; it must be unique within a Pod. The
- // container runtime usually uses a different, globally unique name
- // (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name
-// (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
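Pod-scoped values are commonly injected through the Kubernetes downward API and read from the environment at startup. A minimal sketch (the POD_NAME/POD_NAMESPACE variable names are a deployment convention assumed here, not something this package defines):

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func k8sAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")), // downward API
		semconv.K8SPodName(os.Getenv("POD_NAME")),            // downward API
		semconv.K8SContainerName("redis"),                    // from the pod spec
	}
}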
-
-// A Kubernetes ReplicaSet object.
-const (
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// A Kubernetes Deployment object.
-const (
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// A Kubernetes StatefulSet object.
-const (
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// A Kubernetes DaemonSet object.
-const (
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// A Kubernetes Job object.
-const (
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// A Kubernetes CronJob object.
-const (
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, as reported by, e.g., the `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by, e.g., the
-// `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string. On Windows, can
- // be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-k8s-pod-deployment-1',
- // '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetryAutoVersionKey is the attribute Key conforming to the
- // "telemetry.auto.version" semantic conventions. It represents the version
- // string of the auto instrumentation agent, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-// TelemetryAutoVersion returns an attribute KeyValue conforming to the
-// "telemetry.auto.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent, if used.
-func TelemetryAutoVersion(val string) attribute.KeyValue {
- return TelemetryAutoVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It represents the deprecated,
- // use the `otel.scope.name` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It represents the
- // deprecated, use the `otel.scope.version` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It represents the deprecated, use
-// the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It represents the deprecated,
-// use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
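With the v1.20.0 resource helpers gone from the vendor tree, any call sites built on them have to point at a newer semconv package. A minimal sketch of the usage pattern the deleted helpers supported; the v1.21.0 import path and the attribute values here are assumptions for illustration, not necessarily what this PR migrates to:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed replacement version
)

func main() {
	// Each helper is a thin wrapper over attribute.Key.String/Int, so moving
	// between semconv versions usually only changes the import path.
	res := resource.NewWithAttributes(
		semconv.SchemaURL, // pins the resource to this semconv version
		semconv.ServiceName("cortex"),
		semconv.ServiceVersion("1.0.0"),
		semconv.OSTypeLinux,
	)
	fmt.Println(res)
}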
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
deleted file mode 100644
index 95d0210e38f..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
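The SchemaURL constant deleted above is how instrumentation declares which semantic-convention schema its telemetry follows, so backends can translate attributes across versions rather than guess. A hedged sketch of where it typically gets threaded through, again assuming v1.21.0 as a stand-in replacement and a hypothetical scope name:

package main

import (
	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed replacement version
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Attaching the schema URL to a tracer's instrumentation scope records
	// which semconv version the emitted span attributes conform to.
	tracer := otel.Tracer(
		"github.com/cortexproject/cortex/example", // hypothetical scope name
		trace.WithSchemaURL(semconv.SchemaURL),
	)
	_ = tracer
}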
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452cc..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` response, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` response, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // that uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // that identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// that uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// that identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the name of the database being accessed. For
- // commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of the database being accessed. For
-// commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // being connected to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// being connected to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If other than the default
- // database (`0`).)
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// Call-level attributes for MongoDB
-const (
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the collection
-// being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// Call-level attributes for SQL databases
-const (
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
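-
-// Illustrative sketch, not part of the upstream semconv file: per the note
-// above, db.sql.table should come from the driver or ORM rather than from
-// client-side parsing of db.statement; the table name here is hypothetical.
-func exampleSQLTableAttribute() attribute.KeyValue {
- return DBSQLTable("public.users") // schema-qualified table name
-}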
-
-// Call-level attributes for Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // Cosmos DB operation type.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (when performing one of the
- // operations in this list)
- // Stability: stable
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // Cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (if not `direct` (or pick
- // `gateway` as the default))
- // Stability: stable
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBContainerKey is the attribute Key conforming to the
- // "db.cosmosdb.container" semantic conventions. It represents the cosmos
- // DB container name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if available)
- // Stability: stable
- // Examples: 'anystring'
- DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (if response was received)
- // Stability: stable
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // Cosmos DB sub-status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (when response was received and
- // contained sub-code.)
- // Stability: stable
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: ConditionallyRequired (when available)
- // Stability: stable
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes.
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
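-
-// Illustrative sketch, not part of the upstream semconv file: attributes a
-// Cosmos DB client might record for a successful point read. The container
-// name and request charge are hypothetical.
-func exampleCosmosDBReadAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  DBCosmosDBOperationTypeRead,
-  DBCosmosDBConnectionModeGateway,
-  DBCosmosDBContainer("orders"),
-  DBCosmosDBStatusCode(200),
-  DBCosmosDBRequestCharge(2.83),
- }
-}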
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
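-
-// Illustrative sketch, not part of the upstream semconv file: how a non-OTLP
-// exporter might map a span error onto these attributes. Note that neither
-// attribute is set while the status code is UNSET.
-func exampleStatusAttributes(err error) []attribute.KeyValue {
- if err == nil {
-  return []attribute.KeyValue{OTelStatusCodeOk}
- }
- return []attribute.KeyValue{
-  OTelStatusCodeError,
-  OTelStatusDescription(err.Error()),
- }
-}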
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that the corresponding incoming span would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
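-
-// Illustrative sketch, not part of the upstream semconv file: attributes for
-// the server span of an HTTP-triggered invocation; the invocation ID is the
-// documented example value.
-func exampleFaaSHTTPTriggerAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  FaaSTriggerHTTP, // faas.trigger MUST be set on the incoming side
-  FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
- }
-}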
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 this corresponds to the bucket name, and
- // in Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 this is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
-// to the database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// this is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
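-
-// Illustrative sketch, not part of the upstream semconv file: attributes for
-// a function triggered by an object being written to a bucket; the bucket and
-// file names are the documented example values.
-func exampleFaaSDocumentAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  FaaSTriggerDatasource,
-  FaaSDocumentOperationInsert,
-  FaaSDocumentCollection("myBucketName"),
-  FaaSDocumentName("myFile.txt"),
-  FaaSDocumentTime("2020-01-23T13:47:06Z"),
- }
-}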
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
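-
-// Illustrative sketch, not part of the upstream semconv file: attributes for
-// a timer-triggered invocation, using the documented example values.
-func exampleFaaSTimerAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  FaaSTriggerTimer,
-  FaaSTime("2020-01-23T13:47:06Z"),
-  FaaSCron("0/5 * * * ? *"),
- }
-}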
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
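-
-// Illustrative sketch, not part of the upstream semconv file: client-side
-// attributes when invoking a serverless function on AWS; the name and region
-// are hypothetical and should mirror the callee's resource attributes.
-func exampleFaaSInvokeAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  FaaSInvokedName("my-function"),
-  FaaSInvokedProviderAWS,
-  FaaSInvokedRegion("eu-central-1"),
- }
-}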
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](../../resource/semantic_conventions/README.md#service)
- // of the remote service. SHOULD be equal to the actual `service.name`
- // resource attribute of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](../../resource/semantic_conventions/README.md#service) of
-// the remote service. SHOULD be equal to the actual `service.name` resource
-// attribute of the remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under, extracted from the token or application
- // security context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses, extracted from the token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from the token or
-// application security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
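-
-// Illustrative sketch, not part of the upstream semconv file: Go exposes no
-// stable "managed" thread ID, so an instrumentation would typically record a
-// logical worker ID instead; both values here are hypothetical.
-func exampleThreadAttributes(workerID int) []attribute.KeyValue {
- return []attribute.KeyValue{
-  ThreadID(workerID),
-  ThreadName("worker"),
- }
-}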
-
-// These attributes allow reporting this unit of code and therefore provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-)
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
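-
-// Illustrative sketch, not part of the upstream semconv file: filling the
-// code.* attributes from the Go runtime (assumes "runtime" and "strings" are
-// imported, which this file does not do).
-func exampleCodeAttributes() []attribute.KeyValue {
- pc, file, line, ok := runtime.Caller(1) // caller of this helper
- if !ok {
-  return nil
- }
- name := runtime.FuncForPC(pc).Name() // e.g. "pkg/path.Func"
- attrs := []attribute.KeyValue{CodeFilepath(file), CodeLineNumber(line)}
- if dot := strings.LastIndexByte(name, '.'); dot >= 0 {
-  attrs = append(attrs, CodeNamespace(name[:dot]), CodeFunction(name[dot+1:]))
- } else {
-  attrs = append(attrs, CodeFunction(name))
- }
- return attrs
-}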
-
-// Semantic Convention for HTTP Client
-const (
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions. It represents the full HTTP request URL in the form
- // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
- // not transmitted over HTTP, but if it is known, it should be included
- // nevertheless.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the
- // attribute's value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPResendCountKey is the attribute Key conforming to the
- // "http.resend_count" semantic conventions. It represents the ordinal
- // number of the request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Recommended (if and only if request was retried.)
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPResendCountKey = attribute.Key("http.resend_count")
-)
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions. It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of the request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
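-
-// Illustrative sketch, not part of the upstream semconv file: per the note on
-// `http.url`, credentials must be stripped before the URL is recorded
-// (assumes "net/url" is imported, which this file does not do).
-func exampleHTTPURLAttribute(raw string) attribute.KeyValue {
- u, err := url.Parse(raw)
- if err != nil {
-  return HTTPURL(raw) // unparsable; a real implementation should still scrub credentials
- }
- u.User = nil // drop any username:password userinfo
- return HTTPURL(u.String())
-}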
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // an HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr` is available, even if that
- // other source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in
-// an HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
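-
-// Illustrative sketch, not part of the upstream semconv file: server-side
-// attributes derived from an incoming request; the X-Forwarded-For handling
-// is deliberately naive (assumes "strings" is imported).
-func exampleHTTPServerAttributes(target, forwardedFor string) []attribute.KeyValue {
- attrs := []attribute.KeyValue{HTTPTarget(target)}
- if forwardedFor != "" {
-  // The first entry is the original client; later entries are proxies.
-  clientIP := strings.TrimSpace(strings.Split(forwardedFor, ",")[0])
-  attrs = append(attrs, HTTPClientIP(clientIP))
- }
- return attrs
-}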
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
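-
-// Illustrative sketch, not part of the upstream semconv file: attributes a
-// DynamoDB instrumentation might record for a Query call; the parameter
-// values are hypothetical.
-func exampleDynamoDBQueryAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  AWSDynamoDBTableNames("Users"),
-  AWSDynamoDBIndexName("name_to_group"),
-  AWSDynamoDBProjection("Title, Price, Color"),
-  AWSDynamoDBConsistentRead(false),
-  AWSDynamoDBLimit(10),
-  AWSDynamoDBSelect("ALL_ATTRIBUTES"),
- }
-}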
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
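-
-// Illustrative sketch, not part of the upstream semconv file: attributes for
-// one segment of a parallel Scan; the request values are hypothetical and the
-// counts would come from the response.
-func exampleDynamoDBScanAttributes(count, scanned int) []attribute.KeyValue {
- return []attribute.KeyValue{
-  AWSDynamoDBSegment(2),
-  AWSDynamoDBTotalSegments(4),
-  AWSDynamoDBCount(count),
-  AWSDynamoDBScannedCount(scanned),
- }
-}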
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
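Taken together, these helpers keep instrumentation code free of raw attribute keys. A minimal sketch of how they might annotate a multipart-upload span; the tracer name, the example values, and the semconv version path are illustrative assumptions, not part of this diff:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "S3.UploadPart")
	defer span.End()

	// Annotate the span with the S3 request parameters described above.
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}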
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
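Note the split between constructor helpers for free-form values and the predeclared enum members above. A hedged sketch, reusing the example values from the comments (the helper name and version path are assumptions):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
	"go.opentelemetry.io/otel/trace"
)

// annotateGraphQLQuery is a hypothetical helper: free-form values go through
// the constructors, while the operation type is a ready-made enum KeyValue.
func annotateGraphQLQuery(span trace.Span, name, document string) {
	span.SetAttributes(
		semconv.GraphqlOperationName(name), // e.g. "findBookByID"
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlDocument(document), // sanitize sensitive values first
	)
}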
-
-// General attributes used in messaging systems.
-const (
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents a string
- // identifying the messaging system.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation as defined in the [Operation
- // names](#operation-names) section above.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the span describes an
- // operation on a batch of messages.)
- // Stability: stable
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-)
-
-var (
- // publish
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// MessagingSystem returns an attribute KeyValue conforming to the
-// "messaging.system" semantic conventions. It represents a string identifying
-// the messaging system.
-func MessagingSystem(val string) attribute.KeyValue {
- return MessagingSystemKey.String(val)
-}
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
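The batch-count rule above is easy to get wrong. A sketch of one way to respect it; the helper name is hypothetical, and `MessagingSystem` taking a free-form string is specific to the older semconv version deleted here:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
	"go.opentelemetry.io/otel/trace"
)

// annotatePublish sets messaging.batch.message_count only for real batches,
// per the SHOULD NOT guidance above for single-message APIs.
func annotatePublish(span trace.Span, messageCount int) {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystem("kafka"),
		semconv.MessagingOperationPublish,
	}
	if messageCount > 1 {
		attrs = append(attrs, semconv.MessagingBatchMessageCount(messageCount))
	}
	span.SetAttributes(attrs...)
}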
-
-// Semantic convention for a consumer of messages received from a messaging
-// system
-const (
- // MessagingConsumerIDKey is the attribute Key conforming to the
- // "messaging.consumer.id" semantic conventions. It represents the
- // identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
- // both are present, or only `messaging.kafka.consumer.group`. For brokers,
- // such as RabbitMQ and Artemis, set it to the `client_id` of the client
- // consuming the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
-)
-
-// MessagingConsumerID returns an attribute KeyValue conforming to the
-// "messaging.consumer.id" semantic conventions. It represents the identifier
-// for the consumer receiving a message. For Kafka, set it to
-// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
-// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
-// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
-// message.
-func MessagingConsumerID(val string) attribute.KeyValue {
- return MessagingConsumerIDKey.String(val)
-}
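A hedged sketch of the Kafka formatting rule spelled out above (the function name is hypothetical):

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
)

// kafkaConsumerID applies the documented rule: "{group} - {client_id}" when
// both are present, otherwise just the consumer group.
func kafkaConsumerID(group, clientID string) attribute.KeyValue {
	if clientID != "" {
		return semconv.MessagingConsumerID(group + " - " + clientID)
	}
	return semconv.MessagingConsumerID(group)
}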
-
-// Semantic conventions for remote procedure calls.
-const (
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called. It must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called. It must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
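Since `rpc.method` must match the `$method` part of the span name, it is safest to derive both from the same values. A sketch under that assumption (helper name and version path are illustrative):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
	"go.opentelemetry.io/otel/trace"
)

// startRPCSpan names the span "$service/$method" and sets matching rpc.*
// attributes so the two can never drift apart.
func startRPCSpan(ctx context.Context, service, method string) (context.Context, trace.Span) {
	ctx, span := otel.Tracer("example").Start(ctx, service+"/"+method)
	span.SetAttributes(
		semconv.RPCSystemGRPC,
		semconv.RPCService(service), // e.g. "myservice.EchoService"
		semconv.RPCMethod(method),   // equals the $method part of the span name
	)
	return ctx, span
}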
-
-// Tech-specific attributes for gRPC.
-const (
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
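These enum values mirror `google.golang.org/grpc/codes` one-for-one, so an instrumentation can usually set the attribute straight from a status error. A hedged sketch (the helper name is hypothetical):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc/status"
)

// recordGRPCStatus records rpc.grpc.status_code from a call's returned error.
func recordGRPCStatus(span trace.Span, err error) {
	// status.FromError yields codes.OK for a nil error and codes.Unknown
	// for errors that carry no gRPC status.
	s, _ := status.FromError(err)
	span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())))
}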
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // does not specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If other than the default
- // version (`1.0`))
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of the request or response. Since the protocol allows the id
- // to be an int, a string, `null`, or missing (for notifications), the
- // value is expected to be cast to a string for simplicity. Use an empty
- // string for a `null` value, and omit the attribute entirely for a
- // notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If response is not successful.)
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version, as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0
-// does not specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of the request or response. Since the protocol allows the id to be
-// an int, a string, `null`, or missing (for notifications), the value is
-// expected to be cast to a string for simplicity. Use an empty string for a
-// `null` value, and omit the attribute entirely for a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
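The id-casting rule above (ints and strings become strings, `null` becomes the empty string, notifications get nothing) maps naturally onto a small helper. A hedged sketch:

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
)

// jsonrpcRequestID applies the casting rule for rpc.jsonrpc.request_id.
func jsonrpcRequestID(id any, isNotification bool) []attribute.KeyValue {
	if isNotification {
		return nil // omit the attribute entirely
	}
	switch v := id.(type) {
	case nil:
		return []attribute.KeyValue{semconv.RPCJsonrpcRequestID("")}
	case string:
		return []attribute.KeyValue{semconv.RPCJsonrpcRequestID(v)}
	default: // numeric ids
		return []attribute.KeyValue{semconv.RPCJsonrpcRequestID(fmt.Sprintf("%v", v))}
	}
}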
-
-// Tech-specific attributes for Connect RPC.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If response is not successful
- // and if the error code is available.)
- // Stability: stable
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
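Unlike the numeric gRPC codes above, Connect error codes are lowercase strings, which is why these enum members wrap `.String(...)`. A sketch assuming the code name is already known (the helper and the fallback behavior are illustrative):

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // illustrative version path
)

// connectErrorCode picks a predeclared enum value where one exists and falls
// back to building the same string KeyValue by hand.
func connectErrorCode(code string) attribute.KeyValue {
	switch code {
	case "deadline_exceeded":
		return semconv.RPCConnectRPCErrorCodeDeadlineExceeded
	case "unavailable":
		return semconv.RPCConnectRPCErrorCodeUnavailable
	default:
		return semconv.RPCConnectRPCErrorCodeKey.String(code)
	}
}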
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c6be..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b263..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result: whether the lease was acquired or, if not,
- // the rejection reason.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: stable
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
- // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
- // It represents the ASP.NET Core exception middleware handling result.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'handled', 'unhandled'
- AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating whether the request was handled by the application pipeline.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-
- // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
- // "aspnetcore.routing.match_status" semantic conventions. It represents
- // the match result: success or failure.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'success', 'failure'
- AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-var (
- // Exception was handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
- // Exception was not handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
- // Exception handling was skipped because the response had started
- AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
- // Exception handling didn't run because the request was aborted
- AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
-)
-
-var (
- // Match succeeded
- AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
- // Match failed
- AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating whether the request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// Generic attributes for AWS services.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes for AWS DynamoDB.
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
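Most of the DynamoDB helpers are thin wrappers like the ones above; the slice-valued ones are variadic. A hedged sketch of annotating a Query call (the helper name and the table/index values are illustrative):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery sets the request-side attributes described above.
func annotateDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"), // variadic: one entry per table
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
	)
}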
-
-// Attributes for AWS Elastic Container Service (ECS).
-const (
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID
- // MUST be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
- // populated.)
- // Stability: experimental
- // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
- // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
- // running [ECS
- // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
- // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family
- // name of the [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
- // used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the
-// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
-// ECS task. The ID MUST be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS
-// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
-// used to create the ECS task.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
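The `aws.ecs.task.id` note requires extracting the ID from the task ARN; judging by the examples above, the ID is the final path segment. A sketch under that assumption (the helper name is hypothetical):

package example

import (
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// ecsTaskAttrs derives aws.ecs.task.id from the task ARN, as the convention
// requires, and tags the launch type with a predeclared enum value.
func ecsTaskAttrs(taskARN string) []attribute.KeyValue {
	segments := strings.Split(taskARN, "/")
	return []attribute.KeyValue{
		semconv.AWSECSTaskARN(taskARN),
		semconv.AWSECSTaskID(segments[len(segments)-1]),
		semconv.AWSECSLaunchtypeFargate,
	}
}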
-
-// Attributes for AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Attributes for AWS Logs.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
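// Editor's sketch, not part of the patch: the variadic helpers above return a
// single string-slice KeyValue, so several log groups or streams fit in one
// attribute. Assumes this package is imported as semconv together with
// go.opentelemetry.io/otel/attribute; the names are illustrative.
var logAttrs = []attribute.KeyValue{
	semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
	semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
}
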
-// Attributes for AWS Lambda.
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` endpoint, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` endpoint, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
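// Editor's sketch, not part of the patch: recording the invoked ARN on the
// handler span. Assumes imports of go.opentelemetry.io/otel/trace and this
// package as semconv; the helper name and its arguments are hypothetical.
func recordInvokedARN(span trace.Span, invokedARN string) {
	span.SetAttributes(semconv.AWSLambdaInvokedARN(invokedARN))
}
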
-// Attributes for AWS S3.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
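// Editor's sketch, not part of the patch: annotating a client span for an
// upload-part call with the S3 helpers above. Assumes imports of
// go.opentelemetry.io/otel/trace and this package as semconv; the helper and
// its arguments are hypothetical.
func annotateUploadPart(span trace.Span, bucket, key, uploadID string, part int) {
	span.SetAttributes(
		semconv.AWSS3Bucket(bucket),
		semconv.AWSS3Key(key),
		semconv.AWSS3UploadID(uploadID),
		semconv.AWSS3PartNumber(part), // positive integer between 1 and 10,000
	)
}
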
-// The web browser attributes
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
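// Editor's sketch, not part of the patch: browser attributes as they might be
// mirrored from navigator.userAgentData on the client. Assumes imports of
// go.opentelemetry.io/otel/attribute and this package as semconv; all values
// are placeholders.
var browserAttrs = []attribute.KeyValue{
	semconv.BrowserBrands("Chromium 99", "Chrome 99"),
	semconv.BrowserLanguage("en-US"),
	semconv.BrowserMobile(false),
	semconv.BrowserPlatform("macOS"),
}
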
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
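// Editor's sketch, not part of the patch: stamping the stable client.address
// and client.port pair on a server-side span, preferring the peer behind any
// intermediary when it is known. Assumes go.opentelemetry.io/otel/trace and
// this package as semconv; the helper is hypothetical.
func recordClientPeer(span trace.Span, addr string, port int) {
	span.SetAttributes(
		semconv.ClientAddress(addr),
		semconv.ClientPort(port),
	)
}
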
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running in.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
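// Editor's sketch, not part of the patch: the enum members defined above (for
// example CloudProviderAWS) are ready-made KeyValues and combine directly with
// the string helpers. Assumes go.opentelemetry.io/otel/attribute and this
// package as semconv; the values are illustrative.
var cloudAttrs = []attribute.KeyValue{
	semconv.CloudProviderAWS,
	semconv.CloudPlatformAWSEKS, // platform prefix matches the provider, per the note above
	semconv.CloudRegion("us-east-1"),
	semconv.CloudAccountID("111111111111"),
}
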
-// Attributes for CloudEvents.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
- // which uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
- // which identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
- // which contains a value describing the type of event related to the
- // originating occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
-// which uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
-// which identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
-// which contains a value describing the type of event related to the
-// originating occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
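// Editor's sketch, not part of the patch: attaching CloudEvents context to a
// processing span. Assumes go.opentelemetry.io/otel/trace and this package as
// semconv; the helper and its field values are hypothetical.
func annotateCloudEvent(span trace.Span, id, source, eventType string) {
	span.SetAttributes(
		semconv.CloudeventsEventID(id),
		semconv.CloudeventsEventSource(source),
		semconv.CloudeventsEventType(eventType),
		semconv.CloudeventsEventSpecVersion("1.0"),
	)
}
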
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
-
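// Editor's sketch, not part of the patch: deriving code.* attributes from the
// Go runtime. Assumes the standard runtime package plus
// go.opentelemetry.io/otel/attribute and this package as semconv. Note that
// runtime.FuncForPC returns a fully qualified name, so a real implementation
// would split it into code.namespace and code.function.
func codeLocation() []attribute.KeyValue {
	pc, file, line, _ := runtime.Caller(1) // report the caller of this helper
	return []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
		semconv.CodeFunction(runtime.FuncForPC(pc).Name()),
	}
}
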
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerCPUStateKey is the attribute Key conforming to the
- // "container.cpu.state" semantic conventions. It represents the CPU state
- // for this data point.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'user', 'kernel'
- ContainerCPUStateKey = attribute.Key("container.cpu.state")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `<tag>` section of the full name for example from
- // `registry.example.com/my-org/my-image:<tag>`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-var (
- // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
- ContainerCPUStateUser = ContainerCPUStateKey.String("user")
- // When CPU is used by the system (host OS)
- ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
- // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
- ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
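// Editor's sketch, not part of the patch: container identity expressed as
// resource attributes with the helpers above. Assumes
// go.opentelemetry.io/otel/attribute and this package as semconv; the values
// are the documentation examples, not real identifiers.
var containerAttrs = []attribute.KeyValue{
	semconv.ContainerID("a3bf90e006b2"),
	semconv.ContainerName("opentelemetry-autoconf"),
	semconv.ContainerRuntime("containerd"),
	semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
	semconv.ContainerImageTags("v1.27.1"),
}
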
-// This group defines the attributes used to describe telemetry in the context
-// of databases.
-const (
- // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
- // "db.client.connections.pool.name" semantic conventions. It represents
- // the name of the connection pool; unique within the instrumented
- // application. In case the connection pool implementation doesn't provide
- // a name, instrumentation should use a combination of `server.address` and
- // `server.port` attributes formatted as `server.address:server.port`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myDataSource'
- DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
-
- // DBClientConnectionsStateKey is the attribute Key conforming to the
- // "db.client.connections.state" semantic conventions. It represents the
- // state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle'
- DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
-
- // DBCollectionNameKey is the attribute Key conforming to the
- // "db.collection.name" semantic conventions. It represents the name of a
- // collection (table, container) within the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: If the collection name is parsed from the query, it SHOULD match
- // the value provided in the query and may be qualified with the schema and
- // database name.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBCollectionNameKey = attribute.Key("db.collection.name")
-
- // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
- // semantic conventions. It represents the name of the database, fully
- // qualified within the server address and port.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'test.users'
- // Note: If a database system has multiple namespace components, they
- // SHOULD be concatenated (potentially using database system specific
- // conventions) from most general to most specific namespace component, and
- // more specific namespaces SHOULD NOT be captured without the more general
- // namespaces, to ensure that "startswith" queries for the more general
- // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document
- // what `db.namespace` means in the context of that system.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBNamespaceKey = attribute.Key("db.namespace")
-
- // DBOperationNameKey is the attribute Key conforming to the
- // "db.operation.name" semantic conventions. It represents the name of the
- // operation or command being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: It is RECOMMENDED to capture the value as provided by the
- // application without attempting to do any case normalization.
- DBOperationNameKey = attribute.Key("db.operation.name")
-
- // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
- // semantic conventions. It represents the database query being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
- // "WuValue"'
- DBQueryTextKey = attribute.Key("db.query.text")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents the database management system (DBMS) product
- // as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual DBMS may differ from the one identified by the client.
- // For example, when using PostgreSQL client libraries to connect to a
- // CockroachDB, the `db.system` is set to `postgresql` based on the
- // instrumentation's best knowledge.
- DBSystemKey = attribute.Key("db.system")
-)
-
-var (
- // idle
- DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
- // used
- DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
-// the "db.client.connections.pool.name" semantic conventions. It represents
-// the name of the connection pool; unique within the instrumented application.
-// In case the connection pool implementation doesn't provide a name,
-// instrumentation should use a combination of `server.address` and
-// `server.port` attributes formatted as `server.address:server.port`.
-func DBClientConnectionsPoolName(val string) attribute.KeyValue {
- return DBClientConnectionsPoolNameKey.String(val)
-}
-
-// DBCollectionName returns an attribute KeyValue conforming to the
-// "db.collection.name" semantic conventions. It represents the name of a
-// collection (table, container) within the database.
-func DBCollectionName(val string) attribute.KeyValue {
- return DBCollectionNameKey.String(val)
-}
-
-// DBNamespace returns an attribute KeyValue conforming to the
-// "db.namespace" semantic conventions. It represents the name of the database,
-// fully qualified within the server address and port.
-func DBNamespace(val string) attribute.KeyValue {
- return DBNamespaceKey.String(val)
-}
-
-// DBOperationName returns an attribute KeyValue conforming to the
-// "db.operation.name" semantic conventions. It represents the name of the
-// operation or command being executed.
-func DBOperationName(val string) attribute.KeyValue {
- return DBOperationNameKey.String(val)
-}
-
-// DBQueryText returns an attribute KeyValue conforming to the
-// "db.query.text" semantic conventions. It represents the database query being
-// executed.
-func DBQueryText(val string) attribute.KeyValue {
- return DBQueryTextKey.String(val)
-}
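
Taken together, the constructors above describe one database call. A minimal sketch of how a client instrumentation might combine them on a span; the `v1.25.0` import path is a guess at the vendored version being deleted here, and the tracer name is illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// otel.Tracer returns a no-op tracer unless an SDK is installed,
	// so this sketch runs as-is.
	tracer := otel.Tracer("db-example")
	_, span := tracer.Start(context.Background(), "SELECT shop.users",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,
			semconv.DBNamespace("shop"),
			semconv.DBCollectionName("users"),
			semconv.DBOperationName("SELECT"),
			semconv.DBQueryText("SELECT * FROM users WHERE id = $1"),
		),
	)
	defer span.End()
}
```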
-
-// This group defines attributes for Cassandra.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
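
A sketch of the Cassandra-specific attributes layered on top of the generic DB ones. The values mirror the examples in the comments above, and the `semconv` import is the same assumed version as in the earlier sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

func main() {
	// Attributes a Cassandra driver instrumentation might attach to a
	// query span, using the enum values and helpers defined above.
	attrs := []attribute.KeyValue{
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraCoordinatorDC("us-west-2"),
		semconv.DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
		semconv.DBCassandraIdempotence(true),
		semconv.DBCassandraPageSize(5000),
	}
	for _, kv := range attrs {
		fmt.Println(kv.Key, "=", kv.Value.Emit())
	}
}
```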
-
-// This group defines attributes for Azure Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // Cosmos DB operation type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
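
The Cosmos DB group mixes enum values (connection mode, operation type) with response metadata (request charge, status codes). A hedged sketch of recording a query result on an existing span; package and function names are illustrative, and the semconv import path is the assumed version used throughout:

```go
package cosmosattrs

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordCosmosResult records Cosmos DB response metadata on a span after
// the call returns, using the enum values and helpers defined above.
func recordCosmosResult(span trace.Span, ru float64, status, subStatus int) {
	span.SetAttributes(
		semconv.DBCosmosDBConnectionModeDirect,
		semconv.DBCosmosDBOperationTypeQuery,
		semconv.DBCosmosDBRequestCharge(ru),
		semconv.DBCosmosDBStatusCode(status),
		semconv.DBCosmosDBSubStatusCode(subStatus),
	)
}
```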
-
-// This group defines attributes for Elasticsearch.
-const (
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
- // "db.elasticsearch.cluster.name" semantic conventions. It represents the
- // identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
- // "db.elasticsearch.node.name" semantic conventions. It represents the
- // human-readable identifier of the node/instance to which a
- // request was routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
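
Both Elasticsearch attributes are plain strings; a tiny sketch (illustrative package and function names, same assumed semconv import):

```go
package esattrs

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// tagESTarget records which cluster and node served the request,
// using the two helpers defined above.
func tagESTarget(span trace.Span, cluster, node string) {
	span.SetAttributes(
		semconv.DBElasticsearchClusterName(cluster),
		semconv.DBElasticsearchNodeName(node),
	)
}
```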
-
-// Attributes for software deployments.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
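
`deployment.environment` is a resource-level attribute. A sketch using the OTel Go SDK's resource package; `ServiceName` comes from elsewhere in this same semconv package, and the import version is assumed:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

func main() {
	// Per the note above, two resources that differ only in
	// deployment.environment still identify the same service.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("production"),
	)
	fmt.Println(res)
}
```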
-
-// Attributes that represents an occurrence of a lifecycle transition on the
-// Android platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. It is deprecated; use the
- // `device.app.lifecycle` event definition, including `android.state` as a
- // payload field, instead.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
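
A sketch for the packet-level case the group comment describes, such as a unidirectional UDP flow (illustrative names, same assumed semconv import):

```go
package netattrs

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

// destinationAttrs describes the far side of a network exchange when no
// client/server relationship is known (e.g. a unidirectional UDP flow).
func destinationAttrs(addr string, port int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DestinationAddress(addr),
		semconv.DestinationPort(port),
	}
}
```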
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
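
Device attributes are also resource-level; a sketch with values echoing the examples in the comments above (the device.id value is a placeholder, not a real vendor identifier, and the semconv import version is assumed):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"), // placeholder
		semconv.DeviceManufacturer("Samsung"),
		semconv.DeviceModelIdentifier("SM-G920F"),
		semconv.DeviceModelName("Samsung Galaxy S6"),
	)
	fmt.Println(res)
}
```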
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report a DNS query.
-const (
- // DNSQuestionNameKey is the attribute Key conforming to the
- // "dns.question.name" semantic conventions. It represents the name being
- // queried.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.example.com', 'opentelemetry.io'
- // Note: If the name field contains non-printable characters (below 32 or
- // above 126), those characters should be represented as escaped base 10
- // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
- // carriage returns, and line feeds should be converted to \t, \r, and \n
- // respectively.
- DNSQuestionNameKey = attribute.Key("dns.question.name")
-)
-
-// DNSQuestionName returns an attribute KeyValue conforming to the
-// "dns.question.name" semantic conventions. It represents the name being
-// queried.
-func DNSQuestionName(val string) attribute.KeyValue {
- return DNSQuestionNameKey.String(val)
-}
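
A minimal sketch of wrapping a DNS query in a span carrying `dns.question.name` (illustrative tracer and function names, same assumed semconv import):

```go
package dnsattrs

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// lookupSpan starts a span for a DNS query against the given name.
func lookupSpan(ctx context.Context, name string) (context.Context, trace.Span) {
	return otel.Tracer("dns-example").Start(ctx, "dns.lookup",
		trace.WithAttributes(semconv.DNSQuestionName(name)))
}
```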
-
-// Attributes for operations with an authenticated and/or authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
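
A sketch of recording the authenticated caller on a server span, e.g. from claims in an access token (illustrative names, same assumed semconv import). Note these values can be personal data, so apply your own data-protection rules before emitting them:

```go
package authattrs

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// tagEnduser records the authenticated caller on the server span,
// using the three helpers defined above.
func tagEnduser(span trace.Span, id, role, scope string) {
	span.SetAttributes(
		semconv.EnduserID(id),
		semconv.EnduserRole(role),
		semconv.EnduserScope(scope),
	)
}
```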
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
- // semantic conventions. It describes a class of error the
- // operation ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable, and SHOULD have low
- // cardinality.
- //
- // When `error.type` is set to a type (e.g., an exception type), its
- // canonical class name identifying the type within the artifact SHOULD be
- // used.
- //
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
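
There is no string-typed constructor for `error.type` because values should come from a known, low-cardinality set; instrumentations use a domain value via `ErrorTypeKey.String(...)` or fall back to `_OTHER`. A hedged sketch of that mapping (illustrative names, same assumed semconv import):

```go
package errattrs

import (
	"errors"
	"net"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

// errorType maps an error to a low-cardinality error.type value,
// falling back to the spec's "_OTHER" when nothing better is known.
func errorType(err error) attribute.KeyValue {
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		return semconv.ErrorTypeKey.String("timeout")
	}
	return semconv.ErrorTypeOther
}
```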
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It identifies the class / type of an
- // event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of an
-// event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may actually be "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
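
`RecordError` already emits `exception.type` and `exception.message`; the helpers above add the rest. A sketch of the end-of-span check that the `exception.escaped` note describes (illustrative function name, same assumed semconv import):

```go
package excattrs

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// endWithError records err as an escaping exception just before the span
// ends, the one point where "escaped" is knowable (see the note above).
func endWithError(span trace.Span, err error) {
	if err != nil {
		span.RecordError(err,
			trace.WithAttributes(semconv.ExceptionEscaped(true)),
			trace.WithStackTrace(true), // fills exception.stacktrace
		)
	}
	span.End()
}
```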
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string that will potentially be reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string that will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
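
A sketch of a span for one serverless invocation, combining a coldstart flag, the HTTP trigger enum, and the invocation ID (illustrative names, same assumed semconv import):

```go
package faasattrs

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

var invoked atomic.Bool

// startInvocation opens a span for one function invocation; the first
// call in this execution environment is reported as a cold start.
func startInvocation(ctx context.Context, invocationID string) (context.Context, trace.Span) {
	cold := !invoked.Swap(true)
	return otel.Tracer("faas-example").Start(ctx, "my-function",
		trace.WithAttributes(
			semconv.FaaSColdstart(cold),
			semconv.FaaSTriggerHTTP,
			semconv.FaaSInvocationID(invocationID),
		))
}
```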
-
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
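
Flag evaluations are typically modelled as span events carrying these three attributes; the event name `feature_flag` follows the semantic conventions this file encodes. A sketch (illustrative names, same assumed semconv import):

```go
package ffattrs

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation attaches a flag evaluation to the given span as a
// "feature_flag" event, e.g. key "logo-color" with variant "red".
func recordFlagEvaluation(span trace.Span, key, provider, variant string) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey(key),
		semconv.FeatureFlagProviderName(provider),
		semconv.FeatureFlagVariant(variant),
	))
}
```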
-
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- FileSizeKey = attribute.Key("file.size")
-)
-
-// FileDirectory returns an attribute KeyValue conforming to the
-// "file.directory" semantic conventions. It represents the directory where the
-// file is located. It should include the drive letter, when appropriate.
-func FileDirectory(val string) attribute.KeyValue {
- return FileDirectoryKey.String(val)
-}
-
-// FileExtension returns an attribute KeyValue conforming to the
-// "file.extension" semantic conventions. It represents the file extension,
-// excluding the leading dot.
-func FileExtension(val string) attribute.KeyValue {
- return FileExtensionKey.String(val)
-}
-
-// FileName returns an attribute KeyValue conforming to the "file.name"
-// semantic conventions. It represents the name of the file including the
-// extension, without the directory.
-func FileName(val string) attribute.KeyValue {
- return FileNameKey.String(val)
-}
-
-// FilePath returns an attribute KeyValue conforming to the "file.path"
-// semantic conventions. It represents the full path to the file, including the
-// file name. It should include the drive letter, when appropriate.
-func FilePath(val string) attribute.KeyValue {
- return FilePathKey.String(val)
-}
-
-// FileSize returns an attribute KeyValue conforming to the "file.size"
-// semantic conventions. It represents the file size in bytes.
-func FileSize(val int) attribute.KeyValue {
- return FileSizeKey.Int(val)
-}
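
A sketch of deriving the `file.*` attributes above from a path on disk (illustrative names, same assumed semconv import):

```go
package fileattrs

import (
	"os"
	"path/filepath"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed version
)

// fileAttrs builds the file.* attributes for a file on disk.
func fileAttrs(path string) ([]attribute.KeyValue, error) {
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	return []attribute.KeyValue{
		semconv.FilePath(path),
		semconv.FileDirectory(filepath.Dir(path)),
		semconv.FileName(filepath.Base(path)),
		// Only the last extension, without the leading dot (see note above).
		semconv.FileExtension(strings.TrimPrefix(filepath.Ext(path), ".")),
		semconv.FileSize(int(info.Size())),
	}, nil
}
```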
-
-// Attributes for Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
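A usage sketch (again, not part of the generated file): the doc comments above name the `CLOUD_RUN_EXECUTION` and `CLOUD_RUN_TASK_INDEX` environment variables as the source of these values, so a detector could read them directly. Everything else here (package and function names) is illustrative.

package cloudrunattrs

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

// jobAttrs derives the two Cloud Run job attributes from the environment
// variables documented above; both may be absent outside a Cloud Run job.
func jobAttrs() []attribute.KeyValue {
	var attrs []attribute.KeyValue
	if exec := os.Getenv("CLOUD_RUN_EXECUTION"); exec != "" {
		attrs = append(attrs, attribute.Key("gcp.cloud_run.job.execution").String(exec))
	}
	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
		attrs = append(attrs, attribute.Key("gcp.cloud_run.job.task_index").Int(idx))
	}
	return attrs
}
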
-// Attributes for Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
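These GCE attributes are resource-level, so a sketch of wiring them into an SDK resource looks like the following. The schema URL is deliberately omitted (via `NewSchemaless`) rather than guessing the exact semconv version this vendored file was generated from; the function name is illustrative.

package gceattrs

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// gceResource builds a schemaless resource carrying the two attributes above.
func gceResource(hostname, name string) *resource.Resource {
	return resource.NewSchemaless(
		attribute.Key("gcp.gce.instance.hostname").String(hostname),
		attribute.Key("gcp.gce.instance.name").String(name),
	)
}
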
-// The attributes used to describe telemetry in the context of LLM (Large
-// Language Models) requests and responses.
-const (
- // GenAiCompletionKey is the attribute Key conforming to the
- // "gen_ai.completion" semantic conventions. It represents the full
- // response received from the LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
- // Paris.'}]"
- // Note: It's RECOMMENDED to format completions as a JSON string matching
- // [OpenAI messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiCompletionKey = attribute.Key("gen_ai.completion")
-
- // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
- // semantic conventions. It represents the full prompt sent to an LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'user', 'content': 'What is the capital of
- // France?'}]"
- // Note: It's RECOMMENDED to format prompts as a JSON string matching [OpenAI
- // messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiPromptKey = attribute.Key("gen_ai.prompt")
-
- // GenAiRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the
- // maximum number of tokens the LLM generates for a request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAiRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of
- // the LLM a request is being made to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4'
- GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAiRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0.0
- GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAiRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p
- // sampling setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0
- GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to
- // each generation received.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'stop'
- GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAiResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'chatcmpl-123'
- GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAiResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of
- // the LLM a response was generated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4-0613'
- GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as
- // identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'openai'
- // Note: The actual GenAI product may differ from the one identified by the
- // client. For example, when using OpenAI client libraries to communicate
- // with Mistral, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge.
- GenAiSystemKey = attribute.Key("gen_ai.system")
-
- // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM response (completion).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 180
- GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
-
- // GenAiUsagePromptTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM prompt.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
-)
-
-var (
- // OpenAI
- GenAiSystemOpenai = GenAiSystemKey.String("openai")
-)
-
-// GenAiCompletion returns an attribute KeyValue conforming to the
-// "gen_ai.completion" semantic conventions. It represents the full response
-// received from the LLM.
-func GenAiCompletion(val string) attribute.KeyValue {
- return GenAiCompletionKey.String(val)
-}
-
-// GenAiPrompt returns an attribute KeyValue conforming to the
-// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
-// an LLM.
-func GenAiPrompt(val string) attribute.KeyValue {
- return GenAiPromptKey.String(val)
-}
-
-// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
-// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
-// number of tokens the LLM generates for a request.
-func GenAiRequestMaxTokens(val int) attribute.KeyValue {
- return GenAiRequestMaxTokensKey.Int(val)
-}
-
-// GenAiRequestModel returns an attribute KeyValue conforming to the
-// "gen_ai.request.model" semantic conventions. It represents the name of the
-// LLM a request is being made to.
-func GenAiRequestModel(val string) attribute.KeyValue {
- return GenAiRequestModelKey.String(val)
-}
-
-// GenAiRequestTemperature returns an attribute KeyValue conforming to the
-// "gen_ai.request.temperature" semantic conventions. It represents the
-// temperature setting for the LLM request.
-func GenAiRequestTemperature(val float64) attribute.KeyValue {
- return GenAiRequestTemperatureKey.Float64(val)
-}
-
-// GenAiRequestTopP returns an attribute KeyValue conforming to the
-// "gen_ai.request.top_p" semantic conventions. It represents the top_p
-// sampling setting for the LLM request.
-func GenAiRequestTopP(val float64) attribute.KeyValue {
- return GenAiRequestTopPKey.Float64(val)
-}
-
-// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
-// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
-// array of reasons the model stopped generating tokens, corresponding to each
-// generation received.
-func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
- return GenAiResponseFinishReasonsKey.StringSlice(val)
-}
-
-// GenAiResponseID returns an attribute KeyValue conforming to the
-// "gen_ai.response.id" semantic conventions. It represents the unique
-// identifier for the completion.
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
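The notes above recommend formatting prompts and completions as JSON strings matching the OpenAI messages format. A minimal sketch of doing that with the standard library (the `message` struct is an illustrative shape, not something this file defines):

package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

type message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

func main() {
	// Error ignored for brevity in this sketch.
	prompt, _ := json.Marshal([]message{
		{Role: "user", Content: "What is the capital of France?"},
	})
	kv := attribute.Key("gen_ai.prompt").String(string(prompt)) // == GenAiPrompt(...)
	fmt.Println(kv.Key, "=", kv.Value.Emit())
}
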
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
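As a sketch of how the enum members and free-form attributes above combine (function and parameter names are illustrative; per the note on `graphql.document`, the caller is expected to sanitize the document first):

package graphqlattrs

import "go.opentelemetry.io/otel/attribute"

// operationAttrs labels a GraphQL execution; opType should be one of
// "query", "mutation", or "subscription", matching the enum members above.
func operationAttrs(opType, name, sanitizedDoc string) []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.Key("graphql.operation.type").String(opType),
		attribute.Key("graphql.operation.name").String(name),
		attribute.Key("graphql.document").String(sanitizedDoc),
	}
}
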
-// Attributes for the Heroku platform on which the application is running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application.
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release.
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created.
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
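A sketch of populating these on Heroku: the environment variable names below are an assumption based on Heroku's dyno metadata feature, not something this file specifies.

package herokuattrs

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
)

// fromDynoMetadata reads the (assumed) dyno metadata environment variables
// and maps them onto the heroku.* attributes defined above.
func fromDynoMetadata() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.Key("heroku.app.id").String(os.Getenv("HEROKU_APP_ID")),
		attribute.Key("heroku.release.commit").String(os.Getenv("HEROKU_SLUG_COMMIT")),
		attribute.Key("heroku.release.creation_timestamp").String(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
	}
}
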
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the `hostname` command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the `hostname` command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
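The `host.arch` enum members above map fairly directly onto Go's `runtime.GOARCH` values. A sketch of that mapping (the fallback of passing unknown values through verbatim is a judgment call, not something this file mandates):

package hostattrs

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
)

// archAttr maps runtime.GOARCH onto the host.arch enum members defined above.
func archAttr() attribute.KeyValue {
	key := attribute.Key("host.arch")
	switch runtime.GOARCH {
	case "amd64":
		return key.String("amd64") // == HostArchAMD64
	case "arm64":
		return key.String("arm64") // == HostArchARM64
	case "386":
		return key.String("x86") // == HostArchX86
	case "s390x":
		return key.String("s390x") // == HostArchS390x
	default:
		return key.String(runtime.GOARCH)
	}
}
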
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
- // "http.request.method" semantic conventions. It represents the hTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known methods; it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case-insensitive SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of the request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Service Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework, as the route attribute should have low cardinality and
- // the URI path cannot substitute for it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // active state
- HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
- // idle state
- HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of the request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
- return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
- return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
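The long note on `http.request.method` above describes a small protocol: unknown methods MUST be recorded as `_OTHER`, with the wire value preserved in `http.request.method_original`. A sketch of that logic against the default RFC 9110 + PATCH method list (names are illustrative):

package httpattrs

import "go.opentelemetry.io/otel/attribute"

// knownMethods mirrors the default "known" list described in the note above.
var knownMethods = map[string]bool{
	"CONNECT": true, "DELETE": true, "GET": true, "HEAD": true, "OPTIONS": true,
	"PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// methodAttrs records known methods as-is; anything else becomes _OTHER with
// the original value kept in http.request.method_original.
func methodAttrs(method string) []attribute.KeyValue {
	if knownMethods[method] {
		return []attribute.KeyValue{attribute.Key("http.request.method").String(method)}
	}
	return []attribute.KeyValue{
		attribute.Key("http.request.method").String("_OTHER"),
		attribute.Key("http.request.method_original").String(method),
	}
}
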
-// Java Virtual Machine (JVM) related attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
- // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
- // semantic conventions. It represents the name of the garbage collector
- // action.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'end of minor GC', 'end of major GC'
- // Note: Garbage collector action is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
- JvmGcActionKey = attribute.Key("jvm.gc.action")
-
- // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
- // semantic conventions. It represents the name of the garbage collector.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Young Generation', 'G1 Old Generation'
- // Note: Garbage collector name is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
- JvmGcNameKey = attribute.Key("jvm.gc.name")
-
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
- // "jvm.thread.daemon" semantic conventions. It represents the whether the
- // thread is daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents the whether the
-// thread is daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
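A brief sketch of labelling a JVM memory reading (e.g. one scraped from JMX) with the enum and pool-name attributes above; the function shape is illustrative.

package jvmattrs

import "go.opentelemetry.io/otel/attribute"

// memoryAttrs tags a memory measurement with jvm.memory.type and
// jvm.memory.pool.name, using the enum values defined above.
func memoryAttrs(heap bool, pool string) []attribute.KeyValue {
	memType := "non_heap" // == JvmMemoryTypeNonHeap
	if heap {
		memType = "heap" // == JvmMemoryTypeHeap
	}
	return []attribute.KeyValue{
		attribute.Key("jvm.memory.type").String(memType),
		attribute.Key("jvm.memory.pool.name").String(pool), // == JvmMemoryPoolName(pool)
	}
}
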
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from the Pod specification; it must be unique within a Pod. The
- // container runtime usually uses a different, globally unique name
- // (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
- // conforming to the "k8s.container.status.last_terminated_reason" semantic
- // conventions. It represents the last terminated reason of the Container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Evicted', 'Error'
- K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name
-// (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
- return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
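
The helpers above only construct attribute.KeyValue pairs; they still have to be attached to telemetry. As a minimal sketch of how these K8s helpers are typically consumed (the v1.26.0 semconv import path and all literal values are assumptions for illustration), they can be folded into an OpenTelemetry resource:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
    // Each helper returns an attribute.KeyValue with the conventional key
    // pre-filled, so describing the workload's place in the cluster is just
    // a matter of listing values (all hypothetical here).
    res := resource.NewWithAttributes(
        semconv.SchemaURL,
        semconv.K8SClusterName("prod-cluster"),
        semconv.K8SNamespaceName("ingest"),
        semconv.K8SPodName("ingester-0"),
        semconv.K8SContainerName("ingester"),
    )
    fmt.Println(res.Attributes())
}
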
-
-// Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
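
Following the uniqueness note above, here is a hedged sketch of tagging a log record with these attributes (assuming the github.com/google/uuid module is available; the note explicitly allows UUIDs where ULIDs are not):

package main

import (
    "fmt"

    "github.com/google/uuid"
    "go.opentelemetry.io/otel/attribute"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
    // A fresh UUID per record satisfies the requirement that two
    // distinguishable log records MUST have different uid values.
    attrs := []attribute.KeyValue{
        semconv.LogRecordUID(uuid.NewString()),
        semconv.LogFileName("audit.log"),
        semconv.LogFilePath("/var/log/mysql/audit.log"),
        semconv.LogIostreamStdout, // enum values are ready-made KeyValues
    }
    for _, kv := range attrs {
        fmt.Println(kv.Key, kv.Value.Emit())
    }
}
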
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have an auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such a notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationPartitionIDKey is the attribute Key conforming to
- // the "messaging.destination.partition.id" semantic conventions. It
- // represents the identifier of the partition messages are sent to or
- // received from, unique within the `messaging.destination.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1'
- MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have an auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such a notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to either the compressed or the uncompressed body
- // size. If both sizes are known, the uncompressed body size should be
- // used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to either the compressed or the uncompressed size.
- // If both sizes are known, the uncompressed size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationNameKey is the attribute Key conforming to the
- // "messaging.operation.name" semantic conventions. It represents the
- // system-specific name of the messaging operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack', 'nack', 'send'
- MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
- // MessagingOperationTypeKey is the attribute Key conforming to the
- // "messaging.operation.type" semantic conventions. It represents a string
- // identifying the type of the messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents the messaging
- // system as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual messaging system may differ from the one known by the
- // client. For example, when using Kafka client libraries to communicate
- // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
- // the instrumentation's best knowledge.
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
- // One or more messages are delivered to or processed by a consumer
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
- // One or more messages are settled
- MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
- // Azure Event Hubs
- MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
- // Azure Service Bus
- MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
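
To make the operation-type and system enums above concrete, here is a hedged sketch of a single-message publish span (the tracer name, topic, and v1.26.0 import path are illustrative assumptions, not part of the vendored code):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    "go.opentelemetry.io/otel/trace"
)

func publish(ctx context.Context, topic string, payload []byte) {
    tracer := otel.Tracer("example/messaging")
    // Single-message publish: per the note on
    // messaging.batch.message_count above, the batch counter is
    // deliberately not set here.
    _, span := tracer.Start(ctx, topic+" publish", trace.WithAttributes(
        semconv.MessagingSystemKafka,
        semconv.MessagingOperationTypePublish,
        semconv.MessagingDestinationName(topic),
        semconv.MessagingMessageBodySize(len(payload)),
    ))
    defer span.End()
    // ... hand the message to the producer here ...
}

func main() {
    publish(context.Background(), "MyTopic", []byte("hello"))
}
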
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client.id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationPartitionID returns an attribute KeyValue conforming
-// to the "messaging.destination.partition.id" semantic conventions. It
-// represents the identifier of the partition messages are sent to or received
-// from, unique within the `messaging.destination.name`.
-func MessagingDestinationPartitionID(val string) attribute.KeyValue {
- return MessagingDestinationPartitionIDKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message key in Kafka; keys are used to group alike messages so they're
- // processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message key in Kafka; keys are used to group alike messages so they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
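
The `null`-key rule in the Kafka comments above is easy to get wrong; a small sketch of applying it (the helper name and values are hypothetical):

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/attribute"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// kafkaAttrs builds the per-message Kafka attributes; the key is only
// recorded when it is non-nil, as the convention requires.
func kafkaAttrs(key []byte, offset int, tombstone bool) []attribute.KeyValue {
    attrs := []attribute.KeyValue{
        semconv.MessagingKafkaMessageOffset(offset),
        semconv.MessagingKafkaMessageTombstone(tombstone),
    }
    if key != nil {
        attrs = append(attrs, semconv.MessagingKafkaMessageKey(string(key)))
    }
    return attrs
}

func main() {
    fmt.Println(kafkaAttrs(nil, 42, false)) // no key attribute recorded
    fmt.Println(kafkaAttrs([]byte("myKey"), 43, true))
}
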
-
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
- // It represents the RabbitMQ message delivery tag.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the RabbitMQ message delivery tag.
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for a delay message,
- // which determines the message's delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds at
- // which the delay message is expected to be delivered to the consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the message group, which is essential for FIFO messages. Messages that
- // belong to the same message group are always processed one by one within
- // the same consumer group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of the message, another way to mark a message besides its
- // message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources; resources in different namespaces are
- // independent of one another.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for a delay message, which
-// determines the message's delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds at which the delay
-// message is expected to be delivered to the consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group, which is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within the
-// same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides its
-// message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// independent of one another.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// This group describes attributes specific to GCP Pub/Sub.
-const (
- // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
- // It represents the ack deadline in seconds set for the modify ack
- // deadline request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
-
- // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
- // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
- // represents the ack id for a given message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack_id'
- MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
-
- // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
- // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
- // semantic conventions. It represents the delivery attempt for a given
- // message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-)
-
-// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
-// conventions. It represents the ack deadline in seconds set for the modify
-// ack deadline request.
-func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
-// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
-// represents the ack id for a given message.
-func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckIDKey.String(val)
-}
-
-// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
-// conventions. It represents the delivery attempt for a given message.
-func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
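
The ordering-key comment above carries the same "absent means unset" semantics as the Kafka key; a sketch under that assumption (function name and values hypothetical):

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/attribute"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// pubsubAttrs records the ordering key only when the message carries one.
func pubsubAttrs(ackID, orderingKey string, attempt int) []attribute.KeyValue {
    attrs := []attribute.KeyValue{
        semconv.MessagingSystemGCPPubsub,
        semconv.MessagingGCPPubsubMessageAckID(ackID),
        semconv.MessagingGCPPubsubMessageDeliveryAttempt(attempt),
    }
    if orderingKey != "" {
        attrs = append(attrs, semconv.MessagingGCPPubsubMessageOrderingKey(orderingKey))
    }
    return attrs
}

func main() {
    fmt.Println(pubsubAttrs("ack_id", "", 2)) // no ordering key recorded
}
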
-
-// This group describes attributes specific to Azure Service Bus.
-const (
- // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
- // conforming to the "messaging.servicebus.destination.subscription_name"
- // semantic conventions. It represents the name of the subscription in the
- // topic messages are received from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mySubscription'
- MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
- // MessagingServicebusDispositionStatusKey is the attribute Key conforming
- // to the "messaging.servicebus.disposition_status" semantic conventions.
- // It represents the [settlement
- // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
- // MessagingServicebusMessageDeliveryCountKey is the attribute Key
- // conforming to the "messaging.servicebus.message.delivery_count" semantic
- // conventions. It represents the number of deliveries that have been
- // attempted for this message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
- // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
- // conforming to the "messaging.servicebus.message.enqueued_time" semantic
- // conventions. It represents the UTC epoch seconds at which the message
- // has been accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
- // Message is completed
- MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
- // Message is abandoned
- MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
- // Message is sent to dead letter queue
- MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
- // Message is deferred
- MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
- return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
- return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
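
Because the enqueued-time attributes above are defined as UTC epoch seconds rather than time.Time values, the conversion has to happen at the call site. A hedged sketch (timestamp and subscription name are illustrative):

package main

import (
    "fmt"
    "time"

    "go.opentelemetry.io/otel/attribute"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
    enqueued := time.Date(2023, time.December, 1, 0, 42, 10, 0, time.UTC)
    attrs := []attribute.KeyValue{
        semconv.MessagingSystemServicebus,
        semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
        // Unix() yields the UTC epoch seconds the convention expects.
        semconv.MessagingServicebusMessageEnqueuedTime(int(enqueued.Unix())),
        semconv.MessagingServicebusDispositionStatusComplete,
    }
    fmt.Println(attrs)
}
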
-
-// This group describes attributes specific to Azure Event Hubs.
-const (
- // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
- // the "messaging.eventhubs.consumer.group" semantic conventions. It
- // represents the name of the consumer group the event consumer is
- // associated with.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'indexer'
- MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
- // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
- // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
- // It represents the UTC epoch seconds at which the message has been
- // accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
- return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
- // "network.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // actual version of the protocol used for network communication.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.1', '2'
- // Note: If protocol version is subject to negotiation (for example using
- // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
- // SHOULD be set to the negotiated version. If the actual protocol version
- // is not known, this attribute SHOULD NOT be set.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
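-
-// Usage sketch (illustrative, not part of the generated file): the enum
-// values and helper constructors above compose into span attributes. This
-// assumes an existing trace.Span named span and this package imported as
-// semconv:
-//
-//	span.SetAttributes(
-//		semconv.NetworkTransportTCP,
-//		semconv.NetworkTypeIpv4,
-//		semconv.NetworkPeerAddress("10.1.2.80"),
-//		semconv.NetworkPeerPort(8080),
-//		semconv.NetworkProtocolName("http"),
-//		semconv.NetworkProtocolVersion("1.1"),
-//	)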
-
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
- // the OCI image manifest. For container images specifically, this is the
- // digest by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest
-// by which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
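-
-// For example (illustrative; the digest below is the documented example
-// value, and span is an assumed trace.Span):
-//
-//	span.SetAttributes(semconv.OciManifestDigest(
-//		"sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"))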
-
-// Attributes used by the OpenTracing Shim layer.
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, as reported by, for example, the
- // `ver` or `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by, for example,
-// the `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
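-
-// A minimal sketch of how the OS attributes are typically attached to a
-// resource (illustrative only; assumes go.opentelemetry.io/otel/sdk/resource
-// imported as resource):
-//
-//	res := resource.NewWithAttributes(
-//		semconv.SchemaURL,
-//		semconv.OSTypeLinux,
-//		semconv.OSName("Ubuntu"),
-//		semconv.OSVersion("18.04.1"),
-//		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
-//	)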
-
-// Attributes reserved for OpenTelemetry
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
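-
-// In instrumented code the status is typically recorded through the span
-// status API rather than by setting these attributes directly. A sketch,
-// assuming go.opentelemetry.io/otel/codes imported as codes and an existing
-// span:
-//
-//	span.SetStatus(codes.Error, "resource not found")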
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
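-
-// The scope name and version are normally supplied when obtaining a tracer
-// rather than set as attributes by hand. A sketch, assuming the global otel
-// package and go.opentelemetry.io/otel/trace:
-//
-//	tracer := otel.Tracer(
-//		"io.opentelemetry.contrib.mongodb",
-//		trace.WithInstrumentationVersion("1.0.0"),
-//	)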
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
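-
-// For example (illustrative; span is an assumed trace.Span):
-//
-//	span.SetAttributes(semconv.PeerService("AuthTokenCache"))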
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessContextSwitchTypeKey is the attribute Key conforming to the
- // "process.context_switch_type" semantic conventions. It represents the
- // specifies whether the context switches for this data point were
- // voluntary or involuntary.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
- // ProcessCreationTimeKey is the attribute Key conforming to the
- // "process.creation.time" semantic conventions. It represents the date and
- // time the process was created, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:25:34.853Z'
- ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessExitCodeKey is the attribute Key conforming to the
- // "process.exit.code" semantic conventions. It represents the exit code of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 127
- ProcessExitCodeKey = attribute.Key("process.exit.code")
-
- // ProcessExitTimeKey is the attribute Key conforming to the
- // "process.exit.time" semantic conventions. It represents the date and
- // time the process exited, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:26:12.315Z'
- ProcessExitTimeKey = attribute.Key("process.exit.time")
-
- // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
- // "process.group_leader.pid" semantic conventions. It represents the PID
- // of the process's group leader. This is also the process group ID (PGID)
- // of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 23
- ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
- // ProcessInteractiveKey is the attribute Key conforming to the
- // "process.interactive" semantic conventions. It represents the whether
- // the process is connected to an interactive shell.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessInteractiveKey = attribute.Key("process.interactive")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessPagingFaultTypeKey is the attribute Key conforming to the
- // "process.paging.fault_type" semantic conventions. It represents the type
- // of page fault for this data point. Type `major` is for major/hard page
- // faults, and `minor` is for minor/soft page faults.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRealUserIDKey is the attribute Key conforming to the
- // "process.real_user.id" semantic conventions. It represents the real user
- // ID (RUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000
- ProcessRealUserIDKey = attribute.Key("process.real_user.id")
-
- // ProcessRealUserNameKey is the attribute Key conforming to the
- // "process.real_user.name" semantic conventions. It represents the
- // username of the real user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessRealUserNameKey = attribute.Key("process.real_user.name")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessSavedUserIDKey is the attribute Key conforming to the
- // "process.saved_user.id" semantic conventions. It represents the saved
- // user ID (SUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1002
- ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
-
- // ProcessSavedUserNameKey is the attribute Key conforming to the
- // "process.saved_user.name" semantic conventions. It represents the
- // username of the saved user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
- // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
- // "process.session_leader.pid" semantic conventions. It represents the PID
- // of the process's session leader. This is also the session ID (SID) of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 14
- ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
- // ProcessUserIDKey is the attribute Key conforming to the
- // "process.user.id" semantic conventions. It represents the effective user
- // ID (EUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1001
- ProcessUserIDKey = attribute.Key("process.user.id")
-
- // ProcessUserNameKey is the attribute Key conforming to the
- // "process.user.name" semantic conventions. It represents the username of
- // the effective user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessUserNameKey = attribute.Key("process.user.name")
-
- // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
- // semantic conventions. It represents the virtual process identifier.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12
- // Note: The process ID within a PID namespace. This is not necessarily
- // unique across all processes on the host but it is unique within the
- // process namespace that the process exists within.
- ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
- // voluntary
- ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
- // involuntary
- ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
- // major
- ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
- // minor
- ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
- return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
- return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
- return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
- return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents the whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
- return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
- return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
- return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
- return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
- return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
- return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
- return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
- return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
- return ProcessVpidKey.Int(val)
-}
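-
-// A sketch of populating process attributes from the standard library
-// (illustrative only; assumes the os package and a resource import as in the
-// earlier sketches):
-//
-//	res := resource.NewWithAttributes(
-//		semconv.SchemaURL,
-//		semconv.ProcessPID(os.Getpid()),
-//		semconv.ProcessParentPID(os.Getppid()),
-//		semconv.ProcessCommandArgs(os.Args...),
-//	)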
-
-// Attributes for process CPU
-const (
- // ProcessCPUStateKey is the attribute Key conforming to the
- // "process.cpu.state" semantic conventions. It represents the CPU state of
- // the process.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
- // system
- ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
- // user
- ProcessCPUStateUser = ProcessCPUStateKey.String("user")
- // wait
- ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
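-
-// These enum values are intended as metric attributes. A sketch, assuming a
-// Float64Counter named cpuTime and go.opentelemetry.io/otel/metric imported
-// as metric:
-//
-//	cpuTime.Add(ctx, seconds, metric.WithAttributes(semconv.ProcessCPUStateUser))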
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since the protocol allows the id to be
- // an int, a string, `null`, or missing (for notifications), the value is
- // expected to be cast to a string for simplicity. Use an empty string for
- // a `null` value. Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in the `jsonrpc` property of the request/response. Since
- // JSON-RPC 1.0 doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMessageCompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
- // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
- // semantic conventions. It represents the message ID, which MUST be
- // calculated as two different counters starting from `1`: one for sent
- // messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- RPCMessageIDKey = attribute.Key("rpc.message.id")
-
- // RPCMessageTypeKey is the attribute Key conforming to the
- // "rpc.message.type" semantic conventions. It represents the whether this
- // is a received or sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
- // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called; it must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // sent
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- // received
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since the protocol allows the id to be an
-// int, a string, `null`, or missing (for notifications), the value is expected
-// to be cast to a string for simplicity. Use an empty string for a `null`
-// value. Omit entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in the `jsonrpc` property of the request/response. Since
-// JSON-RPC 1.0 doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
- return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It represents the mUST be calculated
-// as two different counters starting from `1` one for sent messages and one
-// for received message.
-func RPCMessageID(val int) attribute.KeyValue {
- return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
- return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called; it must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
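-
-// A sketch of a typical gRPC client or server span, combining the helpers and
-// enum values above (illustrative; span is an assumed trace.Span):
-//
-//	span.SetAttributes(
-//		semconv.RPCSystemGRPC,
-//		semconv.RPCService("myservice.EchoService"),
-//		semconv.RPCMethod("exampleMethod"),
-//		semconv.RPCGRPCStatusCodeOk,
-//	)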
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
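-
-// For example, on a client span for an HTTPS call (illustrative; span is an
-// assumed trace.Span):
-//
-//	span.SetAttributes(
-//		semconv.ServerAddress("example.com"),
-//		semconv.ServerPort(443),
-//	)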
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words the
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service).
- //
- // Implementations, such as SDKs, are recommended to generate a random
- // Version 1 or Version 4 [RFC 4122](https://www.ietf.org/rfc/rfc4122.txt)
- // UUID, but are free to use an inherent unique ID as the source of this
- // value if stability is desirable. In that case, the ID SHOULD be used as
- // the source of a UUID Version 5 and SHOULD use the following UUID as the
- // namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
- //
- // UUIDs are typically recommended, as only an opaque value for the
- // purposes of identifying a service instance is needed. Similar to what
- // can be seen in the man page for the
- // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
- // file, the underlying data, such as pod name and namespace, should be
- // treated as confidential, it being the user's choice to expose it or not
- // via another resource attribute.
- //
- // For applications running behind an application server (like unicorn), we
- // do not recommend using one identifier for all processes participating in
- // the application. Instead, it's recommended that each division (e.g. a
- // worker thread in unicorn) have its own instance.id.
- //
- // It's not recommended for a Collector to set `service.instance.id` if it
- // can't unambiguously determine the service instance that is generating
- // that telemetry. For instance, creating a UUID based on `pod.name` will
- // likely be wrong, as the Collector might not know from which container
- // within that pod the telemetry originated. However, Collectors can set
- // the `service.instance.id` if they can unambiguously determine the
- // service instance for that telemetry. This is typically the case for
- // scraping receivers, as they know the target address and port.
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
- // `process.executable.name` is not available, the value MUST be set to
- // `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). A zero-length namespace string is assumed equal to an
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
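-
-// A sketch of the usual way these attributes end up on a resource
-// (illustrative only; resource import as in the earlier sketches, example
-// values taken from the docs above):
-//
-//	res := resource.NewWithAttributes(
-//		semconv.SchemaURL,
-//		semconv.ServiceName("shoppingcart"),
-//		semconv.ServiceNamespace("Shop"),
-//		semconv.ServiceVersion("2.0.0"),
-//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
-//	)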
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
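-
-// When a session rolls over, the old and new identifiers can be linked as
-// described above (illustrative; newID and oldID are hypothetical string
-// values, and span is an assumed trace.Span):
-//
-//	span.SetAttributes(
-//		semconv.SessionID(newID),
-//		semconv.SessionPreviousID(oldID),
-//	)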
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // SignalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number.
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Describes System attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
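-
-// A minimal usage sketch (hypothetical helper, not part of the generated
-// conventions): recording a CPU-time counter tagged with the attributes
-// above, assuming "context", "go.opentelemetry.io/otel" and
-// "go.opentelemetry.io/otel/metric" are imported.
-func recordCPUTimeExample(ctx context.Context, seconds float64, cpu int) error {
- counter, err := otel.Meter("example").Float64Counter(
-  "system.cpu.time", metric.WithUnit("s"))
- if err != nil {
-  return err
- }
- counter.Add(ctx, seconds, metric.WithAttributes(
-  SystemCPULogicalNumber(cpu), // which logical CPU the sample is for
-  SystemCPUStateUser,          // time spent in user mode
- ))
- return nil
-}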
-
-// Describes System Memory attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents the state of
- // a network connection; a stateless protocol MUST NOT set this attribute.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
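-
-// A minimal usage sketch (hypothetical helper): mapping a raw TCP state, for
-// example as read from /proc/net/tcp on Linux, onto the enum values above;
-// assumes the standard library "strings" package is imported.
-func networkStateExample(raw string) attribute.KeyValue {
- switch raw {
- case "ESTABLISHED":
-  return SystemNetworkStateEstablished
- case "LISTEN":
-  return SystemNetworkStateListen
- case "TIME_WAIT":
-  return SystemNetworkStateTimeWait
- default:
-  // Fall back to the lowercased raw state for values not listed here.
-  return SystemNetworkStateKey.String(strings.ToLower(raw))
- }
-}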
-
-// Describes System Process attributes
-const (
- // SystemProcessStatusKey is the attribute Key conforming to the
- // "system.process.status" semantic conventions. It represents the process
- // state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
- // running
- SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
- // sleeping
- SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
- // stopped
- SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
- // defunct
- SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
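-
-// A minimal usage sketch (hypothetical helper): the telemetry.sdk.*
-// attributes are normally populated by the SDK itself, but a distribution
-// could describe itself on the resource like this, assuming
-// "go.opentelemetry.io/otel/sdk/resource" is imported. The name and version
-// reuse the example values from the comments above.
-func distroResourceExample() (*resource.Resource, error) {
- return resource.Merge(resource.Default(), resource.NewSchemaless(
-  TelemetryDistroName("parts-unlimited-java"),
-  TelemetryDistroVersion("1.2.3"),
- ))
-}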
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually exclusive with `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually exclusive with
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of the DER-encoded version
- // of the certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of the DER-encoded version
- // of the certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of the DER-encoded
- // version of the certificate offered by the client. For consistency with
- // other hash values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/Time indicating when client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually exclusive with `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually exclusive with
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of the DER-encoded version
- // of the certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of the DER-encoded version
- // of the certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of the DER-encoded
- // version of the certificate offered by the server. For consistency with
- // other hash values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/Time
- // indicating when server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/Time indicating when server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually exclusive with `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually exclusive with `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the server.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
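-
-// A minimal usage sketch (hypothetical helper, not part of the generated
-// conventions): deriving a few of the attributes above from a completed
-// handshake's tls.ConnectionState, assuming the standard library
-// "crypto/tls" package is imported.
-func tlsAttributesExample(cs tls.ConnectionState) []attribute.KeyValue {
- return []attribute.KeyValue{
-  TLSEstablished(cs.HandshakeComplete),
-  TLSResumed(cs.DidResume),
-  TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
-  TLSNextProtocol(cs.NegotiatedProtocol), // ALPN values are already lowercase
-  TLSClientServerName(cs.ServerName),     // SNI, as observed on the server side
- }
-}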
-
-// Attributes describing URL.
-const (
- // URLDomainKey is the attribute Key conforming to the "url.domain"
- // semantic conventions. It represents the domain extracted from the
- // `url.full`, such as "opentelemetry.io".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every url has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case password and
- // username SHOULD NOT be redacted and attribute's value SHOULD remain the
- // same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered url domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
- // qualified domain name, which includes all of the names except the host
- // name under the registered_domain. In a partially qualified domain, or if
- // the qualification level of the full name cannot be determined, the
- // subdomain contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
- // top level domain (eTLD), also known as the domain suffix, which is the
- // last part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered url domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name includes all of the names except the host name
-// under the registered_domain. In a partially qualified domain, or if the
-// qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, which is the last
-// part of the domain name. For example, the top level domain for example.com
-// is `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
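-
-// A minimal usage sketch (hypothetical helper, not part of the generated
-// conventions): decomposing a parsed URL into the component attributes above,
-// assuming the standard library "net/url" and "strconv" packages are
-// imported.
-func urlAttributesExample(u *url.URL) []attribute.KeyValue {
- attrs := []attribute.KeyValue{
-  URLScheme(u.Scheme),
-  URLDomain(u.Hostname()),
-  URLPath(u.Path),
-  URLQuery(u.RawQuery),
-  URLFragment(u.Fragment),
- }
- // u.Port() is empty when the URL carries no explicit port.
- if port, err := strconv.Atoi(u.Port()); err == nil {
-  attrs = append(attrs, URLPort(port))
- }
- return attrs
-}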
-
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from the original. Usually refers to the
- // browser's version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string. In the case of using a user-agent for
- // non-browser products, such as microservices with multiple names/versions
- // inside the `user_agent.original`, the most significant version SHOULD be
- // selected. In such a scenario it should align with `user_agent.name`
- UserAgentVersionKey = attribute.Key("user_agent.version")
-)
-
-// UserAgentName returns an attribute KeyValue conforming to the
-// "user_agent.name" semantic conventions. It represents the name of the
-// user-agent extracted from the original. Usually refers to the browser's name.
-func UserAgentName(val string) attribute.KeyValue {
- return UserAgentNameKey.String(val)
-}
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// UserAgentVersion returns an attribute KeyValue conforming to the
-// "user_agent.version" semantic conventions. It represents the version of the
-// user-agent extracted from the original. Usually refers to the browser's
-// version.
-func UserAgentVersion(val string) attribute.KeyValue {
- return UserAgentVersionKey.String(val)
-}
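-
-// A minimal usage sketch (hypothetical helper, not part of the generated
-// conventions): capturing the raw User-Agent header from an incoming request,
-// assuming the standard library "net/http" package is imported. Deriving
-// user_agent.name and user_agent.version from it is left to a dedicated
-// user-agent parser, which this package does not provide.
-func userAgentExample(r *http.Request) attribute.KeyValue {
- return UserAgentOriginal(r.UserAgent())
-}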
-
-// The attributes used to describe the packaged software running the
-// application code.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
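
For reference, the helper functions deleted above were consumed by attaching their attribute.KeyValue results to spans. A minimal sketch, assuming the OTel trace API and the now-removed v1.26.0 import path; the tracer name and attribute values are illustrative:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func handle(ctx context.Context, userAgent string) {
        // Start a span and attach semantic-convention attributes via the
        // generated helpers; each returns an attribute.KeyValue carrying the
        // well-known key ("user_agent.original", "webengine.name", ...).
        _, span := otel.Tracer("example").Start(ctx, "handle")
        defer span.End()

        span.SetAttributes(
            semconv.UserAgentOriginal(userAgent),
            semconv.WebEngineName("WildFly"),
            semconv.WebEngineVersion("21.0.0"),
        )
    }
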
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
deleted file mode 100644
index d031bbea784..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.26.0
-// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
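
The generated identifiers in this package were typically passed straight to instrument constructors; the metric.go constants deleted below follow a Name/Unit/Description triple pattern for exactly that purpose. A minimal sketch of that usage, assuming the OTel metric API; the meter name is illustrative:

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func newRequestDuration() (metric.Float64Histogram, error) {
        // Name, unit, and description come from the generated constants, so
        // the instrument matches the semantic conventions exactly.
        return otel.Meter("example").Float64Histogram(
            semconv.HTTPServerRequestDurationName,
            metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
            metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
        )
    }
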
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
deleted file mode 100644
index bfaee0d56e3..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
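
The constant removed here names the span event that carries exception details. A minimal sketch of how it was typically consumed, assuming the OTel trace API; the error-type string is illustrative, and span.RecordError provides the same behavior without referencing the constant directly:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
        "go.opentelemetry.io/otel/trace"
    )

    func recordFailure(ctx context.Context, err error) {
        _, span := otel.Tracer("example").Start(ctx, "op")
        defer span.End()

        // Emit the well-known "exception" event with exception.* attributes,
        // mirroring what span.RecordError does internally.
        span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
            semconv.ExceptionTypeKey.String("timeout"),
            semconv.ExceptionMessageKey.String(err.Error()),
        ))
    }
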
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
deleted file mode 100644
index fcdb9f48596..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
-
- // ContainerCPUTime is the metric conforming to the "container.cpu.time"
- // semantic conventions. It represents the total CPU time consumed.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ContainerCPUTimeName = "container.cpu.time"
- ContainerCPUTimeUnit = "s"
- ContainerCPUTimeDescription = "Total CPU time consumed"
-
- // ContainerMemoryUsage is the metric conforming to the
- // "container.memory.usage" semantic conventions. It represents the memory
- // usage of the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerMemoryUsageName = "container.memory.usage"
- ContainerMemoryUsageUnit = "By"
- ContainerMemoryUsageDescription = "Memory usage of the container."
-
- // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
- // conventions. It represents the disk bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerDiskIoName = "container.disk.io"
- ContainerDiskIoUnit = "By"
- ContainerDiskIoDescription = "Disk bytes for the container."
-
- // ContainerNetworkIo is the metric conforming to the "container.network.io"
- // semantic conventions. It represents the network bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerNetworkIoName = "container.network.io"
- ContainerNetworkIoUnit = "By"
- ContainerNetworkIoDescription = "Network bytes for the container."
-
- // DBClientOperationDuration is the metric conforming to the
- // "db.client.operation.duration" semantic conventions. It represents the
- // duration of database client operations.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientOperationDurationName = "db.client.operation.duration"
- DBClientOperationDurationUnit = "s"
- DBClientOperationDurationDescription = "Duration of database client operations."
-
- // DBClientConnectionCount is the metric conforming to the
- // "db.client.connection.count" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionCountName = "db.client.connection.count"
- DBClientConnectionCountUnit = "{connection}"
- DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionIdleMax is the metric conforming to the
- // "db.client.connection.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
- DBClientConnectionIdleMaxUnit = "{connection}"
- DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionIdleMin is the metric conforming to the
- // "db.client.connection.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMinName = "db.client.connection.idle.min"
- DBClientConnectionIdleMinUnit = "{connection}"
- DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionMax is the metric conforming to the
- // "db.client.connection.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionMaxName = "db.client.connection.max"
- DBClientConnectionMaxUnit = "{connection}"
- DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionPendingRequests is the metric conforming to the
- // "db.client.connection.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
- DBClientConnectionPendingRequestsUnit = "{request}"
- DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionTimeouts is the metric conforming to the
- // "db.client.connection.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It represents the
- // deprecated, use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It represents
- // the deprecated, use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It represents the
- // deprecated, use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Stable
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Stable
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets).
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It measures the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It measures the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the CPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It represents the
- // measures the duration of process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It represents the
- // measures the number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
deleted file mode 100644
index 4c87c7adcc7..00000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
new file mode 100644
index 00000000000..55bde895ddd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
@@ -0,0 +1,1641 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "http" namespace.
+package httpconv
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+ addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+ recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
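+
+// These pools recycle the option slices built by the Add and Record helpers
+// below: each helper appends its attributes into a pooled slice, passes it to
+// the instrument, then truncates the slice back to zero length and returns it
+// to the pool, avoiding a per-measurement allocation.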
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It describes a class of error the operation ended with.
+type ErrorTypeAttr string
+
+var (
+ // ErrorTypeOther is a fallback error value to be used when the instrumentation
+ // doesn't define a custom value.
+ ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ConnectionStateAttr is an attribute conforming to the http.connection.state
+// semantic conventions. It represents the state of the HTTP connection in the
+// HTTP connection pool.
+type ConnectionStateAttr string
+
+var (
+ // ConnectionStateActive is the active state.
+ ConnectionStateActive ConnectionStateAttr = "active"
+ // ConnectionStateIdle is the idle state.
+ ConnectionStateIdle ConnectionStateAttr = "idle"
+)
+
+// RequestMethodAttr is an attribute conforming to the http.request.method
+// semantic conventions. It represents the HTTP request method.
+type RequestMethodAttr string
+
+var (
+ // RequestMethodConnect is the CONNECT method.
+ RequestMethodConnect RequestMethodAttr = "CONNECT"
+ // RequestMethodDelete is the DELETE method.
+ RequestMethodDelete RequestMethodAttr = "DELETE"
+ // RequestMethodGet is the GET method.
+ RequestMethodGet RequestMethodAttr = "GET"
+ // RequestMethodHead is the HEAD method.
+ RequestMethodHead RequestMethodAttr = "HEAD"
+ // RequestMethodOptions is the OPTIONS method.
+ RequestMethodOptions RequestMethodAttr = "OPTIONS"
+ // RequestMethodPatch is the PATCH method.
+ RequestMethodPatch RequestMethodAttr = "PATCH"
+ // RequestMethodPost is the POST method.
+ RequestMethodPost RequestMethodAttr = "POST"
+ // RequestMethodPut is the PUT method.
+ RequestMethodPut RequestMethodAttr = "PUT"
+ // RequestMethodTrace is the TRACE method.
+ RequestMethodTrace RequestMethodAttr = "TRACE"
+ // RequestMethodOther is any HTTP method that the instrumentation has no
+ // prior knowledge of.
+ RequestMethodOther RequestMethodAttr = "_OTHER"
+)
+
+// UserAgentSyntheticTypeAttr is an attribute conforming to the
+// user_agent.synthetic.type semantic conventions. It specifies the category of
+// synthetic traffic, such as tests or bots.
+type UserAgentSyntheticTypeAttr string
+
+var (
+ // UserAgentSyntheticTypeBot is the bot source.
+ UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot"
+ // UserAgentSyntheticTypeTest is the synthetic test source.
+ UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test"
+)
+
+// ClientActiveRequests is an instrument used to record metric values conforming
+// to the "http.client.active_requests" semantic conventions. It represents the
+// number of active HTTP requests.
+type ClientActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientActiveRequests returns a new ClientActiveRequests instrument.
+func NewClientActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientActiveRequests) Name() string {
+ return "http.client.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientActiveRequests) Description() string {
+ return "Number of active HTTP requests."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrRequestMethod returns an optional attribute for the "http.request.method"
+// semantic convention. It represents the HTTP request method.
+func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue {
+ return attribute.String("http.request.method", string(val))
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
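+
+// Example (illustrative sketch, not generated code; assumes a meter from
+// otel.Meter("example") and a context ctx): track in-flight requests around a
+// round trip.
+//
+//	reqs, _ := NewClientActiveRequests(otel.Meter("example"))
+//	reqs.Add(ctx, 1, "example.com", 443)
+//	defer reqs.Add(ctx, -1, "example.com", 443)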
+
+// ClientConnectionDuration is an instrument used to record metric values
+// conforming to the "http.client.connection.duration" semantic conventions. It
+// represents the duration of the successfully established outbound HTTP
+// connections.
+type ClientConnectionDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
+func NewClientConnectionDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientConnectionDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.connection.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientConnectionDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientConnectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientConnectionDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientConnectionDuration) Name() string {
+ return "http.client.connection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientConnectionDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientConnectionDuration) Description() string {
+ return "The duration of the successfully established outbound HTTP connections."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientConnectionDuration) Record(
+ ctx context.Context,
+ val float64,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
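+
+// Example (illustrative sketch, not generated code): the unit is seconds, so a
+// caller converts from time.Duration before recording.
+//
+//	connDur, _ := NewClientConnectionDuration(otel.Meter("example"))
+//	start := time.Now()
+//	// ... establish the connection ...
+//	connDur.Record(ctx, time.Since(start).Seconds(), "example.com", 443)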
+
+// ClientOpenConnections is an instrument used to record metric values conforming
+// to the "http.client.open_connections" semantic conventions. It represents the
+// number of outbound HTTP connections that are currently active or idle on the
+// client.
+type ClientOpenConnections struct {
+ metric.Int64UpDownCounter
+}
+
+// NewClientOpenConnections returns a new ClientOpenConnections instrument.
+func NewClientOpenConnections(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ClientOpenConnections, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.client.open_connections",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
+ metric.WithUnit("{connection}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
+ }
+ return ClientOpenConnections{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientOpenConnections) Name() string {
+ return "http.client.open_connections"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientOpenConnections) Unit() string {
+ return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientOpenConnections) Description() string {
+ return "Number of outbound HTTP connections that are currently active or idle on the client."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The connectionState is the state of the HTTP connection in the HTTP connection
+// pool.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientOpenConnections) Add(
+ ctx context.Context,
+ incr int64,
+ connectionState ConnectionStateAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.connection.state", string(connectionState)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+ return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
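+
+// Example (illustrative sketch, not generated code): connection-pool
+// instrumentation can move a connection between states by pairing a decrement
+// of one ConnectionStateAttr value with an increment of the other.
+//
+//	open, _ := NewClientOpenConnections(otel.Meter("example"))
+//	open.Add(ctx, -1, ConnectionStateIdle, "example.com", 443)
+//	open.Add(ctx, 1, ConnectionStateActive, "example.com", 443)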
+
+// ClientRequestBodySize is an instrument used to record metric values conforming
+// to the "http.client.request.body.size" semantic conventions. It represents the
+// size of HTTP client request bodies.
+type ClientRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
+func NewClientRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestBodySize) Name() string {
+ return "http.client.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientRequestBodySize) Description() string {
+ return "Size of HTTP client request bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ClientRequestDuration is an instrument used to record metric values conforming
+// to the "http.client.request.duration" semantic conventions. It represents the
+// duration of HTTP client requests.
+type ClientRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewClientRequestDuration returns a new ClientRequestDuration instrument.
+func NewClientRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ClientRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.client.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP client requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ClientRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestDuration) Name() string {
+ return "http.client.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientRequestDuration) Description() string {
+ return "Duration of HTTP client requests."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
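+
+// Example (illustrative sketch, not generated code; assumes an *http.Request
+// req): time a round trip and attach the optional status-code or error-type
+// attribute.
+//
+//	reqDur, _ := NewClientRequestDuration(otel.Meter("example"))
+//	start := time.Now()
+//	resp, err := http.DefaultClient.Do(req)
+//	elapsed := time.Since(start).Seconds()
+//	if err != nil {
+//		reqDur.Record(ctx, elapsed, RequestMethodGet, "example.com", 443,
+//			reqDur.AttrErrorType(ErrorTypeOther))
+//	} else {
+//		reqDur.Record(ctx, elapsed, RequestMethodGet, "example.com", 443,
+//			reqDur.AttrResponseStatusCode(resp.StatusCode))
+//	}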
+
+// ClientResponseBodySize is an instrument used to record metric values
+// conforming to the "http.client.response.body.size" semantic conventions. It
+// represents the size of HTTP client response bodies.
+type ClientResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
+func NewClientResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ClientResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.client.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ClientResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ClientResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientResponseBodySize) Name() string {
+ return "http.client.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientResponseBodySize) Description() string {
+ return "Size of HTTP client response bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ serverAddress string,
+ serverPort int,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("server.address", serverAddress),
+ attribute.Int("server.port", serverPort),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrURLTemplate returns an optional attribute for the "url.template" semantic
+// convention. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue {
+ return attribute.String("url.template", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrURLScheme returns an optional attribute for the "url.scheme" semantic
+// convention. It represents the [URI scheme] component identifying the used
+// protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue {
+ return attribute.String("url.scheme", val)
+}
+
+// ServerActiveRequests is an instrument used to record metric values conforming
+// to the "http.server.active_requests" semantic conventions. It represents the
+// number of active HTTP server requests.
+type ServerActiveRequests struct {
+ metric.Int64UpDownCounter
+}
+
+// NewServerActiveRequests returns a new ServerActiveRequests instrument.
+func NewServerActiveRequests(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (ServerActiveRequests, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "http.server.active_requests",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP server requests."),
+ metric.WithUnit("{request}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
+ }
+ return ServerActiveRequests{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerActiveRequests) Name() string {
+ return "http.server.active_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerActiveRequests) Unit() string {
+ return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerActiveRequests) Description() string {
+ return "Number of active HTTP server requests."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerActiveRequests) Add(
+ ctx context.Context,
+ incr int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
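+
+// Example (illustrative sketch, not generated code; assumes an *http.Request r
+// inside server middleware): bracket each request with a +1/-1 pair.
+//
+//	active, _ := NewServerActiveRequests(otel.Meter("example"))
+//	active.Add(r.Context(), 1, RequestMethodGet, "https")
+//	defer active.Add(r.Context(), -1, RequestMethodGet, "https")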
+
+// ServerRequestBodySize is an instrument used to record metric values conforming
+// to the "http.server.request.body.size" semantic conventions. It represents the
+// size of HTTP server request bodies.
+type ServerRequestBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
+func NewServerRequestBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerRequestBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.request.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server request bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerRequestBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestBodySize) Name() string {
+ return "http.server.request.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerRequestBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerRequestBodySize) Description() string {
+ return "Size of HTTP server request bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
+
+// ServerRequestDuration is an instrument used to record metric values conforming
+// to the "http.server.request.duration" semantic conventions. It represents the
+// duration of HTTP server requests.
+type ServerRequestDuration struct {
+ metric.Float64Histogram
+}
+
+// NewServerRequestDuration returns a new ServerRequestDuration instrument.
+func NewServerRequestDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (ServerRequestDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "http.server.request.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP server requests."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerRequestDuration{noop.Float64Histogram{}}, err
+ }
+ return ServerRequestDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerRequestDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestDuration) Name() string {
+ return "http.server.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerRequestDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerRequestDuration) Description() string {
+ return "Duration of HTTP server requests."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerRequestDuration) Record(
+ ctx context.Context,
+ val float64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the specifies
+// the category of synthetic traffic, such as tests or bots.
+func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
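+
+// Example (illustrative sketch, not generated code; assumes a wrapped
+// http.Handler next and an incoming *http.Request r): record the duration with
+// the matched route as an optional attribute.
+//
+//	srvDur, _ := NewServerRequestDuration(otel.Meter("example"))
+//	start := time.Now()
+//	next.ServeHTTP(w, r)
+//	srvDur.Record(r.Context(), time.Since(start).Seconds(),
+//		RequestMethodGet, "https", srvDur.AttrRoute("/users/{id}"))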
+
+// ServerResponseBodySize is an instrument used to record metric values
+// conforming to the "http.server.response.body.size" semantic conventions. It
+// represents the size of HTTP server response bodies.
+type ServerResponseBodySize struct {
+ metric.Int64Histogram
+}
+
+// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
+func NewServerResponseBodySize(
+ m metric.Meter,
+ opt ...metric.Int64HistogramOption,
+) (ServerResponseBodySize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, nil
+ }
+
+ i, err := m.Int64Histogram(
+ "http.server.response.body.size",
+ append([]metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server response bodies."),
+ metric.WithUnit("By"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return ServerResponseBodySize{noop.Int64Histogram{}}, err
+ }
+ return ServerResponseBodySize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerResponseBodySize) Inst() metric.Int64Histogram {
+ return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerResponseBodySize) Name() string {
+ return "http.server.response.body.size"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerResponseBodySize) Unit() string {
+ return "By"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerResponseBodySize) Description() string {
+ return "Size of HTTP server response bodies."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) Record(
+ ctx context.Context,
+ val int64,
+ requestMethod RequestMethodAttr,
+ urlScheme string,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ append(
+ attrs,
+ attribute.String("http.request.method", string(requestMethod)),
+ attribute.String("url.scheme", urlScheme),
+ )...,
+ ),
+ )
+
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue {
+ return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.name", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue {
+ return attribute.String("network.protocol.version", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the local HTTP server that
+// received the request.
+func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the port of the local HTTP server that received the
+// request.
+func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// AttrUserAgentSyntheticType returns an optional attribute for the
+// "user_agent.synthetic.type" semantic convention. It represents the category
+// of synthetic traffic, such as tests or bots.
+func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
+ return attribute.String("user_agent.synthetic.type", string(val))
+}
\ No newline at end of file
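A minimal usage sketch for the generated instrument above (not part of the diff): the import path and the RequestMethodGet constant are assumptions based on the generated package's naming, and a nil meter is used deliberately because, per the constructor, it falls back to a noop instrument.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" // assumed import path
)

func main() {
	ctx := context.Background()

	// A nil meter is tolerated: NewServerResponseBodySize returns a noop
	// instrument. Real code would obtain a meter from a MeterProvider.
	var meter metric.Meter

	size, err := httpconv.NewServerResponseBodySize(meter)
	if err != nil {
		panic(err)
	}

	// Required attributes are positional; optional ones use the Attr* helpers.
	size.Record(ctx, 1024,
		httpconv.RequestMethodGet, // assumed RequestMethodAttr constant
		"https",
		size.AttrRoute("/api/v1/query"),
		size.AttrResponseStatusCode(200),
	)
}
```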
diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json
index 1949c777bf2..7d8b3f719f1 100644
--- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json
+++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json
@@ -16,10 +16,35 @@
"discoveryVersion": "v1",
"documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials",
"endpoints": [
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://iamcredentials.asia-east1.rep.googleapis.com/",
+ "location": "asia-east1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://iamcredentials.europe-west1.rep.googleapis.com/",
+ "location": "europe-west1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://iamcredentials.us-central1.rep.googleapis.com/",
+ "location": "us-central1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://iamcredentials.us-east1.rep.googleapis.com/",
+ "location": "us-east1"
+ },
{
"description": "Regional Endpoint",
"endpointUrl": "https://iamcredentials.us-east7.rep.googleapis.com/",
"location": "us-east7"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://iamcredentials.us-west1.rep.googleapis.com/",
+ "location": "us-west1"
}
],
"fullyEncodeReservedExpansion": true,
@@ -315,7 +340,7 @@
}
}
},
- "revision": "20250417",
+ "revision": "20250911",
"rootUrl": "https://iamcredentials.googleapis.com/",
"schemas": {
"GenerateAccessTokenRequest": {
diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go
index d74fe2a2998..91108d3273f 100644
--- a/vendor/google.golang.org/api/internal/gensupport/resumable.go
+++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go
@@ -126,39 +126,162 @@ func (rx *ResumableUpload) reportProgress(old, updated int64) {
}
}
-// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
-func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
- chunk, off, size, err := rx.Media.Chunk()
-
- done := err == io.EOF
- if !done && err != nil {
- return nil, err
+// transferChunk performs a single HTTP request to upload a single chunk.
+// It uses a goroutine to perform the upload and a timer to enforce ChunkTransferTimeout.
+func (rx *ResumableUpload) transferChunk(ctx context.Context, chunk io.Reader, off, size int64, done bool) (*http.Response, error) {
+ // If no timeout is specified, perform the request synchronously without a timer.
+ if rx.ChunkTransferTimeout == 0 {
+ res, err := rx.doUploadRequest(ctx, chunk, off, size, done)
+ if err != nil {
+ return res, err
+ }
+ return res, nil
}
- res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
- if err != nil {
- return res, err
+ // Start a timer for the ChunkTransferTimeout duration.
+ timer := time.NewTimer(rx.ChunkTransferTimeout)
+
+ // A struct to hold the result from the goroutine.
+ type uploadResult struct {
+ res *http.Response
+ err error
}
- // We sent "X-GUploader-No-308: yes" (see comment elsewhere in
- // this file), so we don't expect to get a 308.
- if res.StatusCode == 308 {
- return nil, errors.New("unexpected 308 response status code")
+ // A buffered channel to receive the result of the upload.
+ resultCh := make(chan uploadResult, 1)
+
+ // Create a cancellable context for the upload request. This allows us to
+ // abort the request if the timer fires first.
+ rCtx, cancel := context.WithCancel(ctx)
+ // NOTE: We do NOT use `defer cancel()` here. The context must remain valid
+ // for the caller to read the response body of a successful request.
+ // Cancellation is handled manually on timeout paths.
+
+ // Starting the chunk upload in parallel.
+ go func() {
+ res, err := rx.doUploadRequest(rCtx, chunk, off, size, done)
+ resultCh <- uploadResult{res: res, err: err}
+ }()
+
+ // Wait for timer to fire or result channel to have the uploadResult or ctx to be cancelled.
+ select {
+ // Note: Calling cancel() will guarantee that the goroutine finishes,
+ // so these two cases will never block forever on draining the resultCh.
+ case <-ctx.Done():
+ // Context is cancelled for the overall upload.
+ cancel()
+ // Drain resultCh.
+ <-resultCh
+ return nil, ctx.Err()
+ case <-timer.C:
+ // Chunk Transfer timer fired before resultCh so we return context.DeadlineExceeded.
+ cancel()
+ // Drain resultCh.
+ <-resultCh
+ return nil, context.DeadlineExceeded
+ case result := <-resultCh:
+ // Handle the result from the upload.
+ if result.err != nil {
+ return result.res, result.err
+ }
+ return result.res, nil
}
+}
- if res.StatusCode == http.StatusOK {
- rx.reportProgress(off, off+int64(size))
+// uploadChunkWithRetries attempts to upload a single chunk, retrying within
+// ChunkRetryDeadline on retryable errors and, when ChunkTransferTimeout is
+// non-zero, also on per-chunk timeouts.
+func (rx *ResumableUpload) uploadChunkWithRetries(ctx context.Context, chunk io.Reader, off, size int64, done bool) (*http.Response, error) {
+ // Configure error retryable criteria.
+ shouldRetry := rx.Retry.errorFunc()
+
+ // Configure single chunk retry deadline.
+ chunkRetryDeadline := defaultRetryDeadline
+ if rx.ChunkRetryDeadline != 0 {
+ chunkRetryDeadline = rx.ChunkRetryDeadline
}
- if statusResumeIncomplete(res) {
- rx.Media.Next()
+ // Each chunk gets its own initialized-at-zero backoff and invocation ID.
+ bo := rx.Retry.backoff()
+ quitAfterTimer := time.NewTimer(chunkRetryDeadline)
+ defer quitAfterTimer.Stop()
+ rx.attempts = 1
+ rx.invocationID = uuid.New().String()
+
+ var pause time.Duration
+ var resp *http.Response
+ var err error
+
+ // Retry loop for a single chunk.
+ for {
+ // Wait for the backoff period, unless the context is canceled or the
+ // retry deadline is hit.
+ backoffPauseTimer := time.NewTimer(pause)
+ select {
+ case <-ctx.Done():
+ backoffPauseTimer.Stop()
+ if err == nil {
+ err = ctx.Err()
+ }
+ return resp, err
+ case <-backoffPauseTimer.C:
+ case <-quitAfterTimer.C:
+ backoffPauseTimer.Stop()
+ return resp, err
+ }
+ backoffPauseTimer.Stop()
+
+ // Check for context cancellation or timeout once more. If more than one
+ // case in the select statement above was satisfied at the same time, Go
+ // will choose one arbitrarily.
+ // That can cause an operation to go through even if the context was
+ // canceled before or the timeout was reached.
+ select {
+ case <-ctx.Done():
+ if err == nil {
+ err = ctx.Err()
+ }
+ return resp, err
+ case <-quitAfterTimer.C:
+ return resp, err
+ default:
+ }
+
+ // We close the response's body here, since we definitely will not
+ // return `resp` now. If we close it before the select case above, a
+ // timer may fire and cause us to return a response with a closed body
+ // (in which case, the caller will not get the error message in the body).
+ if resp != nil && resp.Body != nil {
+ // Read the body to EOF - if the Body is not both read to EOF and closed,
+ // the Client's underlying RoundTripper may not be able to re-use the
+ // persistent TCP connection to the server for a subsequent "keep-alive" request.
+ // See https://pkg.go.dev/net/http#Client.Do
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+
+ resp, err = rx.transferChunk(ctx, chunk, off, size, done)
+ status := 0
+ if resp != nil {
+ status = resp.StatusCode
+ }
+ // We sent "X-GUploader-No-308: yes" (see comment elsewhere in
+ // this file), so we don't expect to get a 308.
+ if status == 308 {
+ return nil, errors.New("unexpected 308 response status code")
+ }
+ // Chunk upload should be retried if the ChunkTransferTimeout is non-zero and err is context deadline exceeded
+ // or we encounter a retryable error.
+ if (rx.ChunkTransferTimeout != 0 && errors.Is(err, context.DeadlineExceeded)) || shouldRetry(status, err) {
+ rx.attempts++
+ pause = bo.Pause()
+ chunk, _, _, _ = rx.Media.Chunk()
+ continue
+ }
+ return resp, err
}
- return res, nil
}
// Upload starts the process of a resumable upload with a cancellable context.
-// It retries using the provided back off strategy until cancelled or the
-// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// Before sending an HTTP request, Upload calls any registered hook functions,
// and calls the returned functions after the request returns (see send.go).
@@ -166,144 +289,49 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
// Upload does not parse the response into the error on a non 200 response;
// it is the caller's responsibility to call resp.Body.Close.
-func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
+func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
+ for {
+ chunk, off, size, err := rx.Media.Chunk()
+ done := err == io.EOF
+ if !done && err != nil {
+ return nil, err
+ }
- // There are a couple of cases where it's possible for err and resp to both
- // be non-nil. However, we expose a simpler contract to our callers: exactly
- // one of resp and err will be non-nil. This means that any response body
- // must be closed here before returning a non-nil error.
- var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) {
+ resp, err := rx.uploadChunkWithRetries(ctx, chunk, off, int64(size), done)
+ // There are a couple of cases where it's possible for err and resp to both
+ // be non-nil. However, we expose a simpler contract to our callers: exactly
+ // one of resp and err will be non-nil. This means that any response body
+ // must be closed here before returning a non-nil error.
if err != nil {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
// If there were retries, indicate this in the error message and wrap the final error.
if rx.attempts > 1 {
- return nil, fmt.Errorf("chunk upload failed after %d attempts;, final error: %w", rx.attempts, err)
+ return nil, fmt.Errorf("chunk upload failed after %d attempts, final error: %w", rx.attempts, err)
}
return nil, err
}
+
// This case is very unlikely but possible only if rx.ChunkRetryDeadline is
// set to a very small value, in which case no requests will be sent before
// the deadline. Return an error to avoid causing a panic.
if resp == nil {
- return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDealine", rx.URI)
+ return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDeadline", rx.URI)
}
- return resp, nil
- }
- // Configure retryable error criteria.
- errorFunc := rx.Retry.errorFunc()
-
- // Configure per-chunk retry deadline.
- var retryDeadline time.Duration
- if rx.ChunkRetryDeadline != 0 {
- retryDeadline = rx.ChunkRetryDeadline
- } else {
- retryDeadline = defaultRetryDeadline
- }
-
- // Send all chunks.
- for {
- var pause time.Duration
-
- // Each chunk gets its own initialized-at-zero backoff and invocation ID.
- bo := rx.Retry.backoff()
- quitAfterTimer := time.NewTimer(retryDeadline)
- rx.attempts = 1
- rx.invocationID = uuid.New().String()
-
- // Retry loop for a single chunk.
- for {
- pauseTimer := time.NewTimer(pause)
- select {
- case <-ctx.Done():
- quitAfterTimer.Stop()
- pauseTimer.Stop()
- if err == nil {
- err = ctx.Err()
- }
- return prepareReturn(resp, err)
- case <-pauseTimer.C:
- case <-quitAfterTimer.C:
- pauseTimer.Stop()
- return prepareReturn(resp, err)
- }
- pauseTimer.Stop()
-
- // Check for context cancellation or timeout once more. If more than one
- // case in the select statement above was satisfied at the same time, Go
- // will choose one arbitrarily.
- // That can cause an operation to go through even if the context was
- // canceled before or the timeout was reached.
- select {
- case <-ctx.Done():
- quitAfterTimer.Stop()
- if err == nil {
- err = ctx.Err()
- }
- return prepareReturn(resp, err)
- case <-quitAfterTimer.C:
- return prepareReturn(resp, err)
- default:
- }
-
- // rCtx is derived from a context with a defined transferTimeout with non-zero value.
- // If a particular request exceeds this transfer time for getting response, the rCtx deadline will be exceeded,
- // triggering a retry of the request.
- var rCtx context.Context
- var cancel context.CancelFunc
-
- rCtx = ctx
- if rx.ChunkTransferTimeout != 0 {
- rCtx, cancel = context.WithTimeout(ctx, rx.ChunkTransferTimeout)
- }
-
- // We close the response's body here, since we definitely will not
- // return `resp` now. If we close it before the select case above, a
- // timer may fire and cause us to return a response with a closed body
- // (in which case, the caller will not get the error message in the body).
- if resp != nil && resp.Body != nil {
- // Read the body to EOF - if the Body is not both read to EOF and closed,
- // the Client's underlying RoundTripper may not be able to re-use the
- // persistent TCP connection to the server for a subsequent "keep-alive" request.
- // See https://pkg.go.dev/net/http#Client.Do
+ if resp.StatusCode == http.StatusOK {
+ rx.reportProgress(off, off+int64(size))
+ }
+ if statusResumeIncomplete(resp) {
+ // The upload is not yet complete, but the server has acknowledged this chunk.
+ // We don't have anything to do with the response body.
+ if resp.Body != nil {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
- resp, err = rx.transferChunk(rCtx)
-
- var status int
- if resp != nil {
- status = resp.StatusCode
- }
-
- // The upload should be retried if the rCtx is canceled due to a timeout.
- select {
- case <-rCtx.Done():
- if rx.ChunkTransferTimeout != 0 && errors.Is(rCtx.Err(), context.DeadlineExceeded) {
- // Cancel the context for rCtx
- cancel()
- continue
- }
- default:
- }
-
- // Check if we should retry the request.
- if !errorFunc(status, err) {
- quitAfterTimer.Stop()
- break
- }
-
- rx.attempts++
- pause = bo.Pause()
- }
-
- // If the chunk was uploaded successfully, but there's still
- // more to go, upload the next chunk without any delay.
- if statusResumeIncomplete(resp) {
+ rx.Media.Next()
continue
}
-
- return prepareReturn(resp, err)
+ return resp, nil
}
}
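From a caller's perspective, the per-chunk timeout that transferChunk now enforces is configured through a media option. A hedged sketch follows; the bucket and object names are placeholders, and it assumes the googleapi.ChunkTransferTimeout media option is what populates the rx.ChunkTransferTimeout field this refactor acts on.

```go
package main

import (
	"context"
	"log"
	"strings"
	"time"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	svc, err := storage.NewService(ctx) // assumes ambient credentials
	if err != nil {
		log.Fatal(err)
	}

	obj := &storage.Object{Name: "example.txt"} // placeholder object name
	body := strings.NewReader("hello, resumable world")

	// Give each chunk at most 30s; per uploadChunkWithRetries above, a chunk
	// that times out is retried within the (default or configured) per-chunk
	// retry deadline.
	_, err = svc.Objects.Insert("my-bucket", obj). // placeholder bucket
		Media(body,
			googleapi.ChunkSize(8*1024*1024),
			googleapi.ChunkTransferTimeout(30*time.Second), // assumed option name
		).Do()
	if err != nil {
		log.Fatal(err)
	}
}
```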
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 425ec574d65..6646bbcbfc0 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.239.0"
+const Version = "0.252.0"
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 905cf577d1e..22ab414c46b 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -253,7 +253,7 @@
"location": "northamerica-south1"
}
],
- "etag": "\"36383730363437323837383838393538333732\"",
+ "etag": "\"3131343633323936333034313936343439353533\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -1083,6 +1083,11 @@
"location": "query",
"type": "string"
},
+ "returnPartialSuccess": {
+ "description": "If true, return a list of bucket resource names for buckets that are in unreachable locations.",
+ "location": "query",
+ "type": "boolean"
+ },
"softDeleted": {
"description": "If true, only soft-deleted bucket versions will be returned. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).",
"location": "query",
@@ -3306,6 +3311,11 @@
"location": "query",
"type": "string"
},
+ "filter": {
+ "description": "Filter the returned objects. Currently only supported for the contexts field. If delimiter is set, the returned prefixes are exempt from this filter.",
+ "location": "query",
+ "type": "string"
+ },
"includeFoldersAsPrefixes": {
"description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.",
"location": "query",
@@ -4539,7 +4549,7 @@
}
}
},
- "revision": "20250605",
+ "revision": "20250925",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
"AdvanceRelocateBucketOperationRequest": {
@@ -4753,9 +4763,78 @@
"encryption": {
"description": "Encryption configuration for a bucket.",
"properties": {
+ "customerManagedEncryptionEnforcementConfig": {
+ "description": "If set, the new objects created in this bucket must comply with this enforcement config. Changing this has no effect on existing objects; it applies to new objects only. If omitted, the new objects are allowed to be encrypted with Customer Managed Encryption type by default.",
+ "properties": {
+ "effectiveTime": {
+ "description": "Server-determined value that indicates the time from which configuration was enforced and effective. This value is in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "restrictionMode": {
+ "description": "Restriction mode for Customer-Managed Encryption Keys. Defaults to NotRestricted.",
+ "enum": [
+ "NotRestricted",
+ "FullyRestricted"
+ ],
+ "enumDescriptions": [
+ "Creation of new objects with Customer-Managed Encryption is not restricted.",
+ "Creation of new objects with Customer-Managed Encryption is fully restricted."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "customerSuppliedEncryptionEnforcementConfig": {
+ "description": "If set, the new objects created in this bucket must comply with this enforcement config. Changing this has no effect on existing objects; it applies to new objects only. If omitted, the new objects are allowed to be encrypted with Customer Supplied Encryption type by default.",
+ "properties": {
+ "effectiveTime": {
+ "description": "Server-determined value that indicates the time from which configuration was enforced and effective. This value is in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "restrictionMode": {
+ "description": "Restriction mode for Customer-Supplied Encryption Keys. Defaults to NotRestricted.",
+ "enum": [
+ "NotRestricted",
+ "FullyRestricted"
+ ],
+ "enumDescriptions": [
+ "Creation of new objects with Customer-Supplied Encryption is not restricted.",
+ "Creation of new objects with Customer-Supplied Encryption is fully restricted."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"defaultKmsKeyName": {
"description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.",
"type": "string"
+ },
+ "googleManagedEncryptionEnforcementConfig": {
+ "description": "If set, the new objects created in this bucket must comply with this enforcement config. Changing this has no effect on existing objects; it applies to new objects only. If omitted, the new objects are allowed to be encrypted with Google Managed Encryption type by default.",
+ "properties": {
+ "effectiveTime": {
+ "description": "Server-determined value that indicates the time from which configuration was enforced and effective. This value is in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "restrictionMode": {
+ "description": "Restriction mode for Google-Managed Encryption Keys. Defaults to NotRestricted.",
+ "enum": [
+ "NotRestricted",
+ "FullyRestricted"
+ ],
+ "enumDescriptions": [
+ "Creation of new objects with Google Managed Encryption is not restricted.",
+ "Creation of new objects with Google Managed Encryption is fully restricted."
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
}
},
"type": "object"
@@ -5311,6 +5390,13 @@
"nextPageToken": {
"description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
"type": "string"
+ },
+ "unreachable": {
+ "description": "The list of bucket resource names that could not be reached during the listing operation.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
}
},
"type": "object"
@@ -5327,6 +5413,16 @@
"description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.",
"type": "boolean"
},
+ "createdAfterTime": {
+ "description": "Restores only the objects that were created after this time.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "createdBeforeTime": {
+ "description": "Restores only the objects that were created before this time.",
+ "format": "date-time",
+ "type": "string"
+ },
"matchGlobs": {
"description": "Restores only the objects matching any of the specified glob(s). If this parameter is not specified, all objects will be restored within the specified time range.",
"items": {
@@ -5932,6 +6028,20 @@
"description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.",
"type": "string"
},
+ "contexts": {
+ "description": "User-defined or system-defined object contexts. Each object context is a key-payload pair, where the key provides the identification and the payload holds the associated value and additional metadata.",
+ "properties": {
+ "custom": {
+ "additionalProperties": {
+ "$ref": "ObjectCustomContextPayload",
+ "description": "A single user-defined object context."
+ },
+ "description": "User-defined object contexts.",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
"crc32c": {
"description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).",
"type": "string"
@@ -6201,6 +6311,27 @@
},
"type": "object"
},
+ "ObjectCustomContextPayload": {
+ "description": "The payload of a single user-defined object context.",
+ "id": "ObjectCustomContextPayload",
+ "properties": {
+ "createTime": {
+ "description": "The time at which the object context was created in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "updateTime": {
+ "description": "The time at which the object context was last updated in RFC 3339 format.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "value": {
+ "description": "The value of the object context.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"Objects": {
"description": "A list of objects.",
"id": "Objects",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index bb4f8173442..6d4af3902ef 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -709,18 +709,38 @@ func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) {
// BucketEncryption: Encryption configuration for a bucket.
type BucketEncryption struct {
+ // CustomerManagedEncryptionEnforcementConfig: If set, the new objects created
+ // in this bucket must comply with this enforcement config. Changing this has
+ // no effect on existing objects; it applies to new objects only. If omitted,
+ // the new objects are allowed to be encrypted with Customer Managed Encryption
+ // type by default.
+ CustomerManagedEncryptionEnforcementConfig *BucketEncryptionCustomerManagedEncryptionEnforcementConfig `json:"customerManagedEncryptionEnforcementConfig,omitempty"`
+ // CustomerSuppliedEncryptionEnforcementConfig: If set, the new objects created
+ // in this bucket must comply with this enforcement config. Changing this has
+ // no effect on existing objects; it applies to new objects only. If omitted,
+ // the new objects are allowed to be encrypted with Customer Supplied
+ // Encryption type by default.
+ CustomerSuppliedEncryptionEnforcementConfig *BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig `json:"customerSuppliedEncryptionEnforcementConfig,omitempty"`
// DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt objects
// inserted into this bucket, if no encryption method is specified.
DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"`
- // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
+ // GoogleManagedEncryptionEnforcementConfig: If set, the new objects created in
+ // this bucket must comply with this enforcement config. Changing this has no
+ // effect on existing objects; it applies to new objects only. If omitted, the
+ // new objects are allowed to be encrypted with Google Managed Encryption type
+ // by default.
+ GoogleManagedEncryptionEnforcementConfig *BucketEncryptionGoogleManagedEncryptionEnforcementConfig `json:"googleManagedEncryptionEnforcementConfig,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "CustomerManagedEncryptionEnforcementConfig") to unconditionally include in
+ // API requests. By default, fields with empty or default values are omitted
+ // from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
// details.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
+ // NullFields is a list of field names (e.g.
+ // "CustomerManagedEncryptionEnforcementConfig") to include in API requests
+ // with the JSON null value. By default, fields with empty values are omitted
+ // from API requests. See
// https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
NullFields []string `json:"-"`
}
@@ -730,6 +750,114 @@ func (s BucketEncryption) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// BucketEncryptionCustomerManagedEncryptionEnforcementConfig: If set, the new
+// objects created in this bucket must comply with this enforcement config.
+// Changing this has no effect on existing objects; it applies to new objects
+// only. If omitted, the new objects are allowed to be encrypted with Customer
+// Managed Encryption type by default.
+type BucketEncryptionCustomerManagedEncryptionEnforcementConfig struct {
+ // EffectiveTime: Server-determined value that indicates the time from which
+ // configuration was enforced and effective. This value is in RFC 3339 format.
+ EffectiveTime string `json:"effectiveTime,omitempty"`
+ // RestrictionMode: Restriction mode for Customer-Managed Encryption Keys.
+ // Defaults to NotRestricted.
+ //
+ // Possible values:
+ // "NotRestricted" - Creation of new objects with Customer-Managed Encryption
+ // is not restricted.
+ // "FullyRestricted" - Creation of new objects with Customer-Managed
+ // Encryption is fully restricted.
+ RestrictionMode string `json:"restrictionMode,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "EffectiveTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "EffectiveTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s BucketEncryptionCustomerManagedEncryptionEnforcementConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod BucketEncryptionCustomerManagedEncryptionEnforcementConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig: If set, the new
+// objects created in this bucket must comply with this enforcement config.
+// Changing this has no effect on existing objects; it applies to new objects
+// only. If omitted, the new objects are allowed to be encrypted with Customer
+// Supplied Encryption type by default.
+type BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig struct {
+ // EffectiveTime: Server-determined value that indicates the time from which
+ // configuration was enforced and effective. This value is in RFC 3339 format.
+ EffectiveTime string `json:"effectiveTime,omitempty"`
+ // RestrictionMode: Restriction mode for Customer-Supplied Encryption Keys.
+ // Defaults to NotRestricted.
+ //
+ // Possible values:
+ // "NotRestricted" - Creation of new objects with Customer-Supplied
+ // Encryption is not restricted.
+ // "FullyRestricted" - Creation of new objects with Customer-Supplied
+ // Encryption is fully restricted.
+ RestrictionMode string `json:"restrictionMode,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "EffectiveTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "EffectiveTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// BucketEncryptionGoogleManagedEncryptionEnforcementConfig: If set, the new
+// objects created in this bucket must comply with this enforcement config.
+// Changing this has no effect on existing objects; it applies to new objects
+// only. If omitted, the new objects are allowed to be encrypted with Google
+// Managed Encryption type by default.
+type BucketEncryptionGoogleManagedEncryptionEnforcementConfig struct {
+ // EffectiveTime: Server-determined value that indicates the time from which
+ // configuration was enforced and effective. This value is in RFC 3339 format.
+ EffectiveTime string `json:"effectiveTime,omitempty"`
+ // RestrictionMode: Restriction mode for Google-Managed Encryption Keys.
+ // Defaults to NotRestricted.
+ //
+ // Possible values:
+ // "NotRestricted" - Creation of new objects with Google Managed Encryption
+ // is not restricted.
+ // "FullyRestricted" - Creation of new objects with Google Managed Encryption
+ // is fully restricted.
+ RestrictionMode string `json:"restrictionMode,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "EffectiveTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "EffectiveTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s BucketEncryptionGoogleManagedEncryptionEnforcementConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod BucketEncryptionGoogleManagedEncryptionEnforcementConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// BucketHierarchicalNamespace: The bucket's hierarchical namespace
// configuration.
type BucketHierarchicalNamespace struct {
@@ -1483,6 +1611,9 @@ type Buckets struct {
// sets. Provide this value in a subsequent request to return the next page of
// results.
NextPageToken string `json:"nextPageToken,omitempty"`
+ // Unreachable: The list of bucket resource names that could not be reached
+ // during the listing operation.
+ Unreachable []string `json:"unreachable,omitempty"`
// ServerResponse contains the HTTP response code and headers from the server.
googleapi.ServerResponse `json:"-"`
@@ -1517,6 +1648,12 @@ type BulkRestoreObjectsRequest struct {
// CopySourceAcl: If true, copies the source object's ACL; otherwise, uses the
// bucket's default object ACL. The default is false.
CopySourceAcl bool `json:"copySourceAcl,omitempty"`
+ // CreatedAfterTime: Restores only the objects that were created after this
+ // time.
+ CreatedAfterTime string `json:"createdAfterTime,omitempty"`
+ // CreatedBeforeTime: Restores only the objects that were created before this
+ // time.
+ CreatedBeforeTime string `json:"createdBeforeTime,omitempty"`
// MatchGlobs: Restores only the objects matching any of the specified glob(s).
// If this parameter is not specified, all objects will be restored within the
// specified time range.
@@ -2212,6 +2349,10 @@ type Object struct {
// ContentType: Content-Type of the object data. If an object is stored without
// a Content-Type, it is served as application/octet-stream.
ContentType string `json:"contentType,omitempty"`
+ // Contexts: User-defined or system-defined object contexts. Each object
+ // context is a key-payload pair, where the key provides the identification and
+ // the payload holds the associated value and additional metadata.
+ Contexts *ObjectContexts `json:"contexts,omitempty"`
// Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using
// base64 in big-endian byte order. For more information about using the CRC32c
// checksum, see Data Validation and Change Detection
@@ -2340,6 +2481,30 @@ func (s Object) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// ObjectContexts: User-defined or system-defined object contexts. Each object
+// context is a key-payload pair, where the key provides the identification and
+// the payload holds the associated value and additional metadata.
+type ObjectContexts struct {
+ // Custom: User-defined object contexts.
+ Custom map[string]ObjectCustomContextPayload `json:"custom,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Custom") to unconditionally
+ // include in API requests. By default, fields with empty or default values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Custom") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s ObjectContexts) MarshalJSON() ([]byte, error) {
+ type NoMethod ObjectContexts
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if
// the object is encrypted by such a key.
type ObjectCustomerEncryption struct {
@@ -2531,6 +2696,35 @@ func (s ObjectAccessControls) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// ObjectCustomContextPayload: The payload of a single user-defined object
+// context.
+type ObjectCustomContextPayload struct {
+ // CreateTime: The time at which the object context was created in RFC 3339
+ // format.
+ CreateTime string `json:"createTime,omitempty"`
+ // UpdateTime: The time at which the object context was last updated in RFC
+ // 3339 format.
+ UpdateTime string `json:"updateTime,omitempty"`
+ // Value: The value of the object context.
+ Value string `json:"value,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "CreateTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "CreateTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s ObjectCustomContextPayload) MarshalJSON() ([]byte, error) {
+ type NoMethod ObjectCustomContextPayload
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// Objects: A list of objects.
type Objects struct {
// Items: The list of items.
@@ -5032,6 +5226,14 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
return c
}
+// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess": If
+// true, return a list of bucket resource names for buckets that are in
+// unreachable locations.
+func (c *BucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BucketsListCall {
+ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
+ return c
+}
+
// SoftDeleted sets the optional parameter "softDeleted": If true, only
// soft-deleted bucket versions will be returned. The default is false. For
// more information, see Soft Delete
@@ -10916,6 +11118,14 @@ func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
return c
}
+// Filter sets the optional parameter "filter": Filter the returned objects.
+// Currently only supported for the contexts field. If delimiter is set, the
+// returned prefixes are exempt from this filter.
+func (c *ObjectsListCall) Filter(filter string) *ObjectsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
// IncludeFoldersAsPrefixes sets the optional parameter
// "includeFoldersAsPrefixes": Only applicable if delimiter is set to '/'. If
// true, will also include folders and managed folders (besides objects) in the
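Taken together, the storage changes in this update surface three new knobs: partial-success bucket listing, object-context filtering, and encryption enforcement configs. A hedged usage sketch (project and bucket names are placeholders, and the contexts filter expression is illustrative only, not a documented syntax):

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // assumes ambient credentials
	if err != nil {
		log.Fatal(err)
	}

	// returnPartialSuccess: listing proceeds even when some locations are
	// unreachable, and the affected bucket names are reported separately.
	buckets, err := svc.Buckets.List("my-project").ReturnPartialSuccess(true).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range buckets.Unreachable {
		fmt.Println("unreachable:", name)
	}

	// filter: server-side filtering on the new object contexts field.
	objs, err := svc.Objects.List("my-bucket").
		Filter(`contexts.custom.env = "prod"`). // placeholder expression
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching objects:", len(objs.Items))

	// The new encryption enforcement configs are plain nested structs.
	_ = &storage.BucketEncryption{
		CustomerSuppliedEncryptionEnforcementConfig: &storage.BucketEncryptionCustomerSuppliedEncryptionEnforcementConfig{
			RestrictionMode: "FullyRestricted",
		},
	}
}
```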
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index de97deae035..cc09bdbc436 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -149,15 +149,10 @@ func logPanic(ctx context.Context, r interface{}) {
// should be packaged up into a testable and reusable object.
var ErrorHandlers = []ErrorHandler{
logError,
- func(_ context.Context, _ error, _ string, _ ...interface{}) {
- (&rudimentaryErrorBackoff{
- lastErrorTime: time.Now(),
- // 1ms was the number folks were able to stomach as a global rate limit.
- // If you need to log errors more than 1000 times a second you
- // should probably consider fixing your code instead. :)
- minPeriod: time.Millisecond,
- }).OnError()
- },
+ // 1ms was the number folks were able to stomach as a global rate limit.
+ // If you need to log errors more than 1000 times a second, you
+ // should probably consider fixing your code instead. :)
+ backoffError(1 * time.Millisecond),
}
type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{})
@@ -226,6 +221,18 @@ func logError(ctx context.Context, err error, msg string, keysAndValues ...inter
logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs.
}
+// backoffError returns an ErrorHandler that blocks when called more often
+// than once per minPeriod.
+func backoffError(minPeriod time.Duration) ErrorHandler {
+ r := &rudimentaryErrorBackoff{
+ lastErrorTime: time.Now(),
+ minPeriod: minPeriod,
+ }
+
+ return func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
+ r.OnError()
+ }
+}
+
type rudimentaryErrorBackoff struct {
minPeriod time.Duration // immutable
// TODO(lavalamp): use the clock for testability. Need to move that
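A note on the refactor above: the old inline handler allocated a fresh rudimentaryErrorBackoff (with lastErrorTime set to now) on every invocation, so each call slept for roughly the full minPeriod; backoffError builds the state once and closes over it, so only calls arriving faster than minPeriod block. A self-contained sketch of that closure pattern, not the k8s implementation itself:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// rateLimitedHandler mirrors the backoffError idea: construct the rate-limiter
// state once, then return a closure that enforces a minimum period between calls.
func rateLimitedHandler(minPeriod time.Duration) func(ctx context.Context, err error, msg string) {
	var mu sync.Mutex
	last := time.Now()
	return func(ctx context.Context, err error, msg string) {
		mu.Lock()
		defer mu.Unlock()
		if d := minPeriod - time.Since(last); d > 0 {
			time.Sleep(d) // block callers that arrive too quickly
		}
		last = time.Now()
		fmt.Println(msg, err)
	}
}

func main() {
	h := rateLimitedHandler(time.Millisecond)
	for i := 0; i < 3; i++ {
		h(context.Background(), fmt.Errorf("boom %d", i), "handled error")
	}
}
```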
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
new file mode 100644
index 00000000000..4d7a17c3afa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[byte]{}
+// s2 := New[byte]()
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+ return Byte(New[byte](items...))
+}
+
+// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet[T any](theMap map[byte]T) Byte {
+ return Byte(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) Byte {
+ return Byte(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Byte) Delete(items ...byte) Byte {
+ return Byte(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Byte) Clone() Byte {
+ return Byte(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Byte) Difference(s2 Byte) Byte {
+ return Byte(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Byte) SymmetricDifference(s2 Byte) Byte {
+ return Byte(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+ return Byte(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+ return Byte(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Byte) UnsortedList() []byte {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+ return len(s)
+}
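Since Byte above (and the Int/Int32/Int64 variants that follow) are deprecated shims over the generic Set, new code should use the generic API directly, as the deprecation notices suggest. A short sketch using only the constructors those notices name:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// The generic replacements the deprecation notice points to.
	s1 := sets.New[byte]('a', 'b', 'c')
	s2 := sets.New[byte]('b', 'c', 'd')

	fmt.Println(s1.Has('a'))                    // true
	fmt.Println(sets.List(s1.Intersection(s2))) // [98 99], i.e. 'b' and 'c'
	fmt.Println(s1.Union(s2).Len())             // 4
}
```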
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
similarity index 72%
rename from vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE
rename to vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
index 866d74a7ad7..194883390cf 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/NOTICE
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -1,4 +1,5 @@
-Copyright 2011-2016 Canonical Ltd.
+/*
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -11,3 +12,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+
+// Package sets provides a generic Set type along with older type-specific
+// sets. The generic Set will replace the type-specific ones over time; the
+// type-specific sets are deprecated.
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
new file mode 100644
index 00000000000..fbb1df06d92
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
new file mode 100644
index 00000000000..5876fc9deb9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[int]{}
+// s2 := New[int]()
+type Int map[int]Empty
+
+// NewInt creates an Int from a list of values.
+func NewInt(items ...int) Int {
+ return Int(New[int](items...))
+}
+
+// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet[T any](theMap map[int]T) Int {
+ return Int(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) Int {
+ return Int(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Int) Delete(items ...int) Int {
+ return Int(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int) Clone() Int {
+ return Int(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int) Difference(s2 Int) Int {
+ return Int(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int) SymmetricDifference(s2 Int) Int {
+ return Int(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+ return Int(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+ return Int(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int) UnsortedList() []int {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Int) PopAny() (int, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+ return len(s)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
new file mode 100644
index 00000000000..2c640c5d0f1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[int32]{}
+// s2 := New[int32]()
+type Int32 map[int32]Empty
+
+// NewInt32 creates an Int32 from a list of values.
+func NewInt32(items ...int32) Int32 {
+ return Int32(New[int32](items...))
+}
+
+// Int32KeySet creates an Int32 from the keys of a map[int32](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int32KeySet[T any](theMap map[int32]T) Int32 {
+ return Int32(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int32) Insert(items ...int32) Int32 {
+ return Int32(cast(s).Insert(items...))
+}
+
+// Delete removes the given items from the set.
+func (s Int32) Delete(items ...int32) Int32 {
+ return Int32(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int32) Has(item int32) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int32) HasAll(items ...int32) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int32) HasAny(items ...int32) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int32) Clone() Int32 {
+ return Int32(cast(s).Clone())
+}
+
+// Difference returns a set of elements that are in s1 but not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int32) Difference(s2 Int32) Int32 {
+ return Int32(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int32) SymmetricDifference(s2 Int32) Int32 {
+ return Int32(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int32) Union(s2 Int32) Int32 {
+ return Int32(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the items in both s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int32) Intersection(s2 Int32) Int32 {
+ return Int32(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int32) IsSuperset(s2 Int32) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means the same elements; order doesn't matter.)
+func (s1 Int32) Equal(s2 Int32) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int32 slice.
+func (s Int32) List() []int32 {
+ return List(cast(s))
+}
+
+// UnsortedList returns a slice with the contents in no particular order.
+func (s Int32) UnsortedList() []int32 {
+ return cast(s).UnsortedList()
+}
+
+// PopAny removes and returns an arbitrary element from the set.
+func (s Int32) PopAny() (int32, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int32) Len() int {
+ return len(s)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
new file mode 100644
index 00000000000..bf3eb3ffa25
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+//
+// Deprecated: use the generic Set instead. For example:
+//
+// s1 := Set[int64]{}
+// s2 := New[int64]()
+type Int64 map[int64]Empty
+
+// NewInt64 creates an Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+ return Int64(New[int64](items...))
+}
+
+// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet[T any](theMap map[int64]T) Int64 {
+ return Int64(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) Int64 {
+ return Int64(cast(s).Insert(items...))
+}
+
+// Delete removes the given items from the set.
+func (s Int64) Delete(items ...int64) Int64 {
+ return Int64(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int64) Clone() Int64 {
+ return Int64(cast(s).Clone())
+}
+
+// Difference returns a set of elements that are in s1 but not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int64) Difference(s2 Int64) Int64 {
+ return Int64(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int64) SymmetricDifference(s2 Int64) Int64 {
+ return Int64(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+ return Int64(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the items in both s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+ return Int64(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means the same elements; order doesn't matter.)
+func (s1 Int64) Equal(s2 Int64) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+ return List(cast(s))
+}
+
+// UnsortedList returns a slice with the contents in no particular order.
+func (s Int64) UnsortedList() []int64 {
+ return cast(s).UnsortedList()
+}
+
+// PopAny removes and returns an arbitrary element from the set.
+func (s Int64) PopAny() (int64, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+ return len(s)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
new file mode 100644
index 00000000000..cd961c8c593
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+ "cmp"
+ "sort"
+)
+
+// Set is a set of elements of the same type, implemented via map[comparable]struct{} for minimal memory consumption.
+type Set[T comparable] map[T]Empty
+
+// cast transforms the specified set into a generic Set[T].
+func cast[T comparable](s map[T]Empty) Set[T] { return s }
+
+// New creates a Set from a list of values.
+// NOTE: the type parameter must be explicitly instantiated if no items are given.
+func New[T comparable](items ...T) Set[T] {
+ ss := make(Set[T], len(items))
+ ss.Insert(items...)
+ return ss
+}
+
+// KeySet creates a Set from the keys of a map[comparable](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func KeySet[T comparable, V any](theMap map[T]V) Set[T] {
+ ret := make(Set[T], len(theMap))
+ for keyValue := range theMap {
+ ret.Insert(keyValue)
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Set[T]) Insert(items ...T) Set[T] {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+ return s
+}
+
+func Insert[T comparable](set Set[T], items ...T) Set[T] {
+ return set.Insert(items...)
+}
+
+// Delete removes the given items from the set.
+func (s Set[T]) Delete(items ...T) Set[T] {
+ for _, item := range items {
+ delete(s, item)
+ }
+ return s
+}
+
+// Clear empties the set.
+// It is preferable to replace the set with a newly constructed set,
+// but not all callers can do that (when there are other references to the map).
+func (s Set[T]) Clear() Set[T] {
+ clear(s)
+ return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Set[T]) Has(item T) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Set[T]) HasAll(items ...T) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Set[T]) HasAny(items ...T) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Set[T]) Clone() Set[T] {
+ result := make(Set[T], len(s))
+ for key := range s {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Difference returns a set of elements that are in s1 but not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Set[T]) Difference(s2 Set[T]) Set[T] {
+ result := New[T]()
+ for key := range s1 {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] {
+ return s1.Difference(s2).Union(s2.Difference(s1))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Set[T]) Union(s2 Set[T]) Set[T] {
+ result := s1.Clone()
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the items in both s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] {
+ var walk, other Set[T]
+ result := New[T]()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Set[T]) IsSuperset(s2 Set[T]) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means the same elements; order doesn't matter.)
+func (s1 Set[T]) Equal(s2 Set[T]) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfGeneric[T cmp.Ordered] []T
+
+func (g sortableSliceOfGeneric[T]) Len() int { return len(g) }
+func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) }
+func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
+
+// List returns the contents as a sorted T slice.
+//
+// This is a separate function and not a method because not all types supported
+// by Set are ordered and only those can be sorted.
+func List[T cmp.Ordered](s Set[T]) []T {
+ res := make(sortableSliceOfGeneric[T], 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return res
+}
+
+// UnsortedList returns a slice with the contents in no particular order.
+func (s Set[T]) UnsortedList() []T {
+ res := make([]T, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny removes and returns an arbitrary element from the set.
+func (s Set[T]) PopAny() (T, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue T
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Set[T]) Len() int {
+ return len(s)
+}
+
+func less[T cmp.Ordered](lhs, rhs T) bool {
+ return lhs < rhs
+}
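
Everything in the typed wrappers delegates to this generic Set[T] via cast. A short usage sketch (hypothetical values), illustrating the explicit instantiation noted on New and the reason List is a free function rather than a method:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// The type parameter must be explicit when no items are given,
	// since there is nothing to infer it from.
	pending := sets.New[string]()
	pending.Insert("a", "b")

	// KeySet builds a Set from the keys of any map.
	counts := map[string]int{"x": 1, "y": 2}
	keys := sets.KeySet(counts)

	// List is a free function: it needs T to satisfy cmp.Ordered,
	// which not every comparable element type does.
	fmt.Println(sets.List(keys))           // [x y]
	fmt.Println(pending.Union(keys).Len()) // 4
}
```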
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
new file mode 100644
index 00000000000..1dab6d13cc7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+//
+// Deprecated: use the generic Set instead. For example:
+//
+// s1 := Set[string]{}
+// s2 := New[string]()
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+ return String(New[string](items...))
+}
+
+// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet[T any](theMap map[string]T) String {
+ return String(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) String {
+ return String(cast(s).Insert(items...))
+}
+
+// Delete removes the given items from the set.
+func (s String) Delete(items ...string) String {
+ return String(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s String) Clone() String {
+ return String(cast(s).Clone())
+}
+
+// Difference returns a set of elements that are in s1 but not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 String) Difference(s2 String) String {
+ return String(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 String) SymmetricDifference(s2 String) String {
+ return String(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+ return String(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the items in both s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+ return String(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means the same elements; order doesn't matter.)
+func (s1 String) Equal(s2 String) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+ return List(cast(s))
+}
+
+// UnsortedList returns a slice with the contents in no particular order.
+func (s String) UnsortedList() []string {
+ return cast(s).UnsortedList()
+}
+
+// PopAny removes and returns an arbitrary element from the set.
+func (s String) PopAny() (string, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+ return len(s)
+}
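
Per the deprecation notices on Int, Int32, Int64, and String, callers are expected to migrate to the generic form. The change is mechanical, and because the wrappers share their underlying map type with Set[T], a plain conversion allows incremental migration (identifiers below are hypothetical):

```go
package main

import "k8s.io/apimachinery/pkg/util/sets"

func main() {
	// Before (deprecated):
	old := sets.NewString("a", "b")

	// After (generic):
	s := sets.New[string]("a", "b")

	// The underlying types are identical, so a conversion bridges
	// old and new call sites during migration.
	generic := sets.Set[string](old)
	_ = s.Equal(generic) // true: same membership
}
```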
diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go
index a76d830ede6..812301baf0f 100644
--- a/vendor/k8s.io/client-go/util/workqueue/doc.go
+++ b/vendor/k8s.io/client-go/util/workqueue/doc.go
@@ -22,5 +22,6 @@ limitations under the License.
// will only be processed once.
// - Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
+// In this case it will be processed again.
// - Shutdown notifications.
package workqueue
diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go
index 3cec1768a04..78b072dabe1 100644
--- a/vendor/k8s.io/client-go/util/workqueue/queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -20,6 +20,7 @@ import (
"sync"
"time"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
)
@@ -163,8 +164,8 @@ func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMet
t := &Typed[T]{
clock: c,
queue: queue,
- dirty: set[T]{},
- processing: set[T]{},
+ dirty: sets.Set[T]{},
+ processing: sets.Set[T]{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
@@ -192,13 +193,13 @@ type Typed[t comparable] struct {
queue Queue[t]
// dirty defines all of the items that need to be processed.
- dirty set[t]
+ dirty sets.Set[t]
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
- processing set[t]
+ processing sets.Set[t]
cond *sync.Cond
@@ -211,37 +212,19 @@ type Typed[t comparable] struct {
clock clock.WithTicker
}
-type empty struct{}
-type set[t comparable] map[t]empty
-
-func (s set[t]) has(item t) bool {
- _, exists := s[item]
- return exists
-}
-
-func (s set[t]) insert(item t) {
- s[item] = empty{}
-}
-
-func (s set[t]) delete(item t) {
- delete(s, item)
-}
-
-func (s set[t]) len() int {
- return len(s)
-}
-
-// Add marks item as needing processing.
+// Add marks item as needing processing. When the queue is shutdown new
+// items will silently be ignored and not queued or marked as dirty for
+// reprocessing.
func (q *Typed[T]) Add(item T) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
- if q.dirty.has(item) {
+ if q.dirty.Has(item) {
// the same item is added again before it is processed, call the Touch
// function if the queue cares about it (for e.g, reset its priority)
- if !q.processing.has(item) {
+ if !q.processing.Has(item) {
q.queue.Touch(item)
}
return
@@ -249,8 +232,8 @@ func (q *Typed[T]) Add(item T) {
q.metrics.add(item)
- q.dirty.insert(item)
- if q.processing.has(item) {
+ q.dirty.Insert(item)
+ if q.processing.Has(item) {
return
}
@@ -285,8 +268,8 @@ func (q *Typed[T]) Get() (item T, shutdown bool) {
q.metrics.get(item)
- q.processing.insert(item)
- q.dirty.delete(item)
+ q.processing.Insert(item)
+ q.dirty.Delete(item)
return item, false
}
@@ -300,17 +283,18 @@ func (q *Typed[T]) Done(item T) {
q.metrics.done(item)
- q.processing.delete(item)
- if q.dirty.has(item) {
+ q.processing.Delete(item)
+ if q.dirty.Has(item) {
q.queue.Push(item)
q.cond.Signal()
- } else if q.processing.len() == 0 {
+ } else if q.processing.Len() == 0 {
q.cond.Signal()
}
}
-// ShutDown will cause q to ignore all new items added to it and
-// immediately instruct the worker goroutines to exit.
+// ShutDown will cause q to ignore all new items added to it. Worker
+// goroutines will continue processing items in the queue until it is
+// empty and then receive the shutdown signal.
func (q *Typed[T]) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -320,15 +304,12 @@ func (q *Typed[T]) ShutDown() {
q.cond.Broadcast()
}
-// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
-// as the worker goroutines have "drained", i.e: finished processing and called
-// Done on all existing items in the queue; they will be instructed to exit and
-// ShutDownWithDrain will return. Hence: a strict requirement for using this is;
-// your workers must ensure that Done is called on all items in the queue once
-// the shut down has been initiated, if that is not the case: this will block
-// indefinitely. It is, however, safe to call ShutDown after having called
-// ShutDownWithDrain, as to force the queue shut down to terminate immediately
-// without waiting for the drainage.
+// ShutDownWithDrain is equivalent to ShutDown but waits until all items
+// in the queue have been processed.
+// ShutDown can be called after ShutDownWithDrain to force
+// ShutDownWithDrain to stop waiting.
+// Workers must call Done on an item after processing it, otherwise
+// ShutDownWithDrain will block indefinitely.
func (q *Typed[T]) ShutDownWithDrain() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -337,7 +318,7 @@ func (q *Typed[T]) ShutDownWithDrain() {
q.shuttingDown = true
q.cond.Broadcast()
- for q.processing.len() != 0 && q.drain {
+ for q.processing.Len() != 0 && q.drain {
q.cond.Wait()
}
}
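
The reworked comments pin down the queue's deduplication and shutdown semantics. A minimal sketch of that contract, assuming the NewTyped constructor from current client-go (not shown in this hunk):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTyped[string]()

	q.Add("build")
	q.Add("build") // still queued and not yet processing: deduplicated

	started := make(chan struct{})
	go func() {
		item, shutdown := q.Get() // moves the item into the processing set
		close(started)
		if shutdown {
			return
		}
		fmt.Println("processing", item)
		q.Done(item) // without this, ShutDownWithDrain blocks forever
	}()

	<-started
	// Blocks until Done has been called on the in-flight item;
	// plain ShutDown returns immediately without waiting.
	q.ShutDownWithDrain()
}
```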
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c46b2eda756..a0bdaa2bf02 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,8 +7,8 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
-# cloud.google.com/go/auth v0.16.2
-## explicit; go 1.23.0
+# cloud.google.com/go/auth v0.17.0
+## explicit; go 1.24.0
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
cloud.google.com/go/auth/credentials/internal/externalaccount
@@ -22,8 +22,11 @@ cloud.google.com/go/auth/internal
cloud.google.com/go/auth/internal/compute
cloud.google.com/go/auth/internal/credsfile
cloud.google.com/go/auth/internal/jwt
+cloud.google.com/go/auth/internal/retry
cloud.google.com/go/auth/internal/transport
cloud.google.com/go/auth/internal/transport/cert
+cloud.google.com/go/auth/internal/transport/headers
+cloud.google.com/go/auth/internal/trustboundary
# cloud.google.com/go/auth/oauth2adapt v0.2.8
## explicit; go 1.23.0
cloud.google.com/go/auth/oauth2adapt
@@ -46,7 +49,7 @@ cloud.google.com/go/storage/experimental
cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal/apiv2
cloud.google.com/go/storage/internal/apiv2/storagepb
-# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
+# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
## explicit; go 1.23.0
github.com/Azure/azure-sdk-for-go/sdk/azcore
github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource
@@ -68,11 +71,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
github.com/Azure/azure-sdk-for-go/sdk/azcore/to
github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing
-# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
+# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
## explicit; go 1.23.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity
github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal
-# github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1
+# github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2
## explicit; go 1.23.0
github.com/Azure/azure-sdk-for-go/sdk/internal/diag
github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo
@@ -96,7 +99,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service
-# github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2
+# github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0
## explicit; go 1.18
github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
@@ -165,8 +168,8 @@ github.com/armon/go-metrics
# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
## explicit; go 1.13
github.com/asaskevich/govalidator
-# github.com/aws/aws-sdk-go-v2 v1.39.2
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2 v1.39.6
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/aws
github.com/aws/aws-sdk-go-v2/aws/defaults
github.com/aws/aws-sdk-go-v2/aws/middleware
@@ -192,11 +195,11 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults
github.com/aws/aws-sdk-go-v2/internal/strings
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
github.com/aws/aws-sdk-go-v2/internal/timeconv
-# github.com/aws/aws-sdk-go-v2/config v1.31.12
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/config v1.31.17
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/config
-# github.com/aws/aws-sdk-go-v2/credentials v1.18.16
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/credentials v1.18.21
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/credentials
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds
@@ -204,18 +207,18 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
github.com/aws/aws-sdk-go-v2/credentials/processcreds
github.com/aws/aws-sdk-go-v2/credentials/ssocreds
github.com/aws/aws-sdk-go-v2/credentials/stscreds
-# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/feature/ec2/imds
github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
-# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/internal/configsources
-# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
-# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/internal/ini
# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1
## explicit; go 1.22
@@ -223,37 +226,37 @@ github.com/aws/aws-sdk-go-v2/service/dynamodb
github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations
github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/dynamodb/types
-# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6
## explicit; go 1.22
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery
-# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
# github.com/aws/aws-sdk-go-v2/service/sns v1.38.5
## explicit; go 1.22
github.com/aws/aws-sdk-go-v2/service/sns
github.com/aws/aws-sdk-go-v2/service/sns/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sns/types
-# github.com/aws/aws-sdk-go-v2/service/sso v1.29.6
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/service/sso v1.30.1
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/service/sso
github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sso/types
-# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/service/ssooidc
github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/ssooidc/types
-# github.com/aws/aws-sdk-go-v2/service/sts v1.38.6
-## explicit; go 1.22
+# github.com/aws/aws-sdk-go-v2/service/sts v1.39.1
+## explicit; go 1.23
github.com/aws/aws-sdk-go-v2/service/sts
github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sts/types
-# github.com/aws/smithy-go v1.23.0
-## explicit; go 1.22
+# github.com/aws/smithy-go v1.23.2
+## explicit; go 1.23
github.com/aws/smithy-go
github.com/aws/smithy-go/auth
github.com/aws/smithy-go/auth/bearer
@@ -299,7 +302,7 @@ github.com/caio/go-tdigest
# github.com/cenkalti/backoff/v4 v4.3.0
## explicit; go 1.18
github.com/cenkalti/backoff/v4
-# github.com/cenkalti/backoff/v5 v5.0.2
+# github.com/cenkalti/backoff/v5 v5.0.3
## explicit; go 1.23
github.com/cenkalti/backoff/v5
# github.com/cespare/xxhash/v2 v2.3.0
@@ -565,8 +568,8 @@ github.com/gogo/protobuf/types
# github.com/gogo/status v1.1.1
## explicit; go 1.12
github.com/gogo/status
-# github.com/golang-jwt/jwt/v5 v5.2.2
-## explicit; go 1.18
+# github.com/golang-jwt/jwt/v5 v5.3.0
+## explicit; go 1.21
github.com/golang-jwt/jwt/v5
# github.com/golang-migrate/migrate/v4 v4.18.1
## explicit; go 1.22.0
@@ -601,8 +604,8 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a
-## explicit; go 1.23.0
+# github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8
+## explicit; go 1.24.0
github.com/google/pprof/profile
# github.com/google/s2a-go v0.1.9
## explicit; go 1.20
@@ -634,7 +637,7 @@ github.com/google/uuid
## explicit; go 1.23.0
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.14.2
+# github.com/googleapis/gax-go/v2 v2.15.0
## explicit; go 1.23.0
github.com/googleapis/gax-go/v2
github.com/googleapis/gax-go/v2/apierror
@@ -648,7 +651,7 @@ github.com/googleapis/gax-go/v2/iterator
# github.com/gorilla/mux v1.8.1
## explicit; go 1.20
github.com/gorilla/mux
-# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
+# github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
## explicit; go 1.21
github.com/grafana/regexp
github.com/grafana/regexp/syntax
@@ -660,7 +663,7 @@ github.com/grpc-ecosystem/go-grpc-middleware
github.com/grpc-ecosystem/go-grpc-middleware/v2
github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors
github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2
## explicit; go 1.23.0
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
@@ -760,7 +763,7 @@ github.com/knadh/koanf/maps
# github.com/knadh/koanf/providers/confmap v1.0.0
## explicit; go 1.23.0
github.com/knadh/koanf/providers/confmap
-# github.com/knadh/koanf/v2 v2.2.1
+# github.com/knadh/koanf/v2 v2.3.0
## explicit; go 1.23.0
github.com/knadh/koanf/v2
# github.com/kylelemons/godebug v1.1.0
@@ -796,7 +799,7 @@ github.com/mdlayher/vsock
# github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a
## explicit; go 1.14
github.com/metalmatze/signal/server/signalhttp
-# github.com/miekg/dns v1.1.66
+# github.com/miekg/dns v1.1.68
## explicit; go 1.23.0
github.com/miekg/dns
# github.com/minio/crc64nvme v1.1.1
@@ -842,7 +845,7 @@ github.com/mitchellh/reflectwalk
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent
-# github.com/modern-go/reflect2 v1.0.2
+# github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee
## explicit; go 1.12
github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
@@ -863,14 +866,14 @@ github.com/oklog/ulid
# github.com/oklog/ulid/v2 v2.1.1
## explicit; go 1.15
github.com/oklog/ulid/v2
-# github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0
-## explicit; go 1.23.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0
+## explicit; go 1.24.0
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0
-## explicit; go 1.23.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0
+## explicit; go 1.24.0
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil
-# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0
-## explicit; go 1.23.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0
+## explicit; go 1.24.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/expo
@@ -1049,7 +1052,7 @@ github.com/prometheus/client_golang/exp/internal/github.com/efficientgo/core/bac
# github.com/prometheus/client_model v0.6.2
## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.67.1
+# github.com/prometheus/common v0.67.4
## explicit; go 1.24.0
github.com/prometheus/common/config
github.com/prometheus/common/expfmt
@@ -1058,10 +1061,10 @@ github.com/prometheus/common/model
github.com/prometheus/common/promslog
github.com/prometheus/common/route
github.com/prometheus/common/version
-# github.com/prometheus/exporter-toolkit v0.14.1
-## explicit; go 1.23.0
+# github.com/prometheus/exporter-toolkit v0.15.0
+## explicit; go 1.24.0
github.com/prometheus/exporter-toolkit/web
-# github.com/prometheus/otlptranslator v0.0.2 => github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
+# github.com/prometheus/otlptranslator v1.0.0 => github.com/prometheus/otlptranslator v1.0.0
## explicit; go 1.23.0
github.com/prometheus/otlptranslator
# github.com/prometheus/procfs v0.16.1
@@ -1069,8 +1072,8 @@ github.com/prometheus/otlptranslator
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.306.0
-## explicit; go 1.23.0
+# github.com/prometheus/prometheus v0.308.0
+## explicit; go 1.24.0
github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery
github.com/prometheus/prometheus/discovery/dns
@@ -1123,6 +1126,7 @@ github.com/prometheus/prometheus/util/gate
github.com/prometheus/prometheus/util/httputil
github.com/prometheus/prometheus/util/jsonutil
github.com/prometheus/prometheus/util/logging
+github.com/prometheus/prometheus/util/namevalidationutil
github.com/prometheus/prometheus/util/notifications
github.com/prometheus/prometheus/util/osutil
github.com/prometheus/prometheus/util/pool
@@ -1132,8 +1136,8 @@ github.com/prometheus/prometheus/util/teststorage
github.com/prometheus/prometheus/util/testutil
github.com/prometheus/prometheus/util/zeropool
github.com/prometheus/prometheus/web/api/v1
-# github.com/prometheus/sigv4 v0.2.1
-## explicit; go 1.23.0
+# github.com/prometheus/sigv4 v0.3.0
+## explicit; go 1.24.0
github.com/prometheus/sigv4
# github.com/puzpuzpuz/xsync/v3 v3.5.1
## explicit; go 1.18
@@ -1206,7 +1210,7 @@ github.com/thanos-io/objstore/providers/gcs
github.com/thanos-io/objstore/providers/s3
github.com/thanos-io/objstore/providers/swift
github.com/thanos-io/objstore/tracing/opentracing
-# github.com/thanos-io/promql-engine v0.0.0-20251224085502-3988aa4704b5
+# github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674
## explicit; go 1.24.0
github.com/thanos-io/promql-engine/api
github.com/thanos-io/promql-engine/compute
@@ -1231,7 +1235,7 @@ github.com/thanos-io/promql-engine/ringbuffer
github.com/thanos-io/promql-engine/storage
github.com/thanos-io/promql-engine/storage/prometheus
github.com/thanos-io/promql-engine/warnings
-# github.com/thanos-io/thanos v0.40.1-0.20260112164636-49dde505913b
+# github.com/thanos-io/thanos v0.40.1-0.20260112164636-49dde505913b => github.com/yeya24/thanos v0.2.2-0.20260203193035-ba37115033af
## explicit; go 1.25.0
github.com/thanos-io/thanos/pkg/api/query/querypb
github.com/thanos-io/thanos/pkg/block
@@ -1412,63 +1416,48 @@ go.opencensus.io/trace/tracestate
## explicit; go 1.24.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/collector/component v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/component v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/component
-# go.opentelemetry.io/collector/confmap v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/confmap v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/confmap
+go.opentelemetry.io/collector/confmap/internal
go.opentelemetry.io/collector/confmap/internal/mapstructure
go.opentelemetry.io/collector/confmap/internal/third_party/composehook
-# go.opentelemetry.io/collector/confmap/xconfmap v0.129.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/confmap/xconfmap v0.139.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/confmap/xconfmap
-# go.opentelemetry.io/collector/consumer v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/consumer v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/consumer
go.opentelemetry.io/collector/consumer/internal
-# go.opentelemetry.io/collector/featuregate v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/featuregate v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/featuregate
-# go.opentelemetry.io/collector/internal/telemetry v0.129.0
-## explicit; go 1.23.0
-go.opentelemetry.io/collector/internal/telemetry
-go.opentelemetry.io/collector/internal/telemetry/componentattribute
-# go.opentelemetry.io/collector/pdata v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/pdata v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/pdata/internal
-go.opentelemetry.io/collector/pdata/internal/data
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development
-go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1
go.opentelemetry.io/collector/pdata/internal/json
+go.opentelemetry.io/collector/pdata/internal/otelgrpc
go.opentelemetry.io/collector/pdata/internal/otlp
+go.opentelemetry.io/collector/pdata/internal/proto
go.opentelemetry.io/collector/pdata/pcommon
go.opentelemetry.io/collector/pdata/plog
go.opentelemetry.io/collector/pdata/pmetric
go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
go.opentelemetry.io/collector/pdata/ptrace
-# go.opentelemetry.io/collector/pipeline v0.129.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/pipeline v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/pipeline
go.opentelemetry.io/collector/pipeline/internal/globalsignal
-# go.opentelemetry.io/collector/processor v1.35.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/collector/processor v1.45.0
+## explicit; go 1.24.0
go.opentelemetry.io/collector/processor
go.opentelemetry.io/collector/processor/internal
# go.opentelemetry.io/collector/semconv v0.128.0
## explicit; go 1.23.0
go.opentelemetry.io/collector/semconv/v1.6.1
-# go.opentelemetry.io/contrib/bridges/otelzap v0.11.0
-## explicit; go 1.23.0
-go.opentelemetry.io/contrib/bridges/otelzap
# go.opentelemetry.io/contrib/detectors/gcp v1.38.0
## explicit; go 1.23.8
go.opentelemetry.io/contrib/detectors/gcp
@@ -1476,17 +1465,15 @@ go.opentelemetry.io/contrib/detectors/gcp
## explicit; go 1.23.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0
## explicit; go 1.23.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
## explicit; go 1.23.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
# go.opentelemetry.io/contrib/propagators/autoprop v0.61.0
## explicit; go 1.23.0
go.opentelemetry.io/contrib/propagators/autoprop
@@ -1513,34 +1500,27 @@ go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.17.0
-go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.21.0
go.opentelemetry.io/otel/semconv/v1.24.0
-go.opentelemetry.io/otel/semconv/v1.26.0
go.opentelemetry.io/otel/semconv/v1.30.0
go.opentelemetry.io/otel/semconv/v1.37.0
+go.opentelemetry.io/otel/semconv/v1.37.0/httpconv
go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
# go.opentelemetry.io/otel/bridge/opentracing v1.36.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/bridge/opentracing
go.opentelemetry.io/otel/bridge/opentracing/migration
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/log v0.12.2
-## explicit; go 1.23.0
-go.opentelemetry.io/otel/log
-go.opentelemetry.io/otel/log/embedded
-go.opentelemetry.io/otel/log/global
-go.opentelemetry.io/otel/log/internal/global
# go.opentelemetry.io/otel/metric v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/metric
@@ -1570,7 +1550,7 @@ go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.7.0
+# go.opentelemetry.io/proto/otlp v1.7.1
## explicit; go 1.23.0
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
@@ -1631,7 +1611,7 @@ golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
-# golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
+# golang.org/x/exp v0.0.0-20250808145144-a408d31f581a
## explicit; go 1.23.0
golang.org/x/exp/constraints
golang.org/x/exp/slices
@@ -1694,8 +1674,8 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.12.0
-## explicit; go 1.23.0
+# golang.org/x/time v0.13.0
+## explicit; go 1.24.0
golang.org/x/time/rate
# golang.org/x/tools v0.39.0
## explicit; go 1.24.0
@@ -1723,8 +1703,8 @@ golang.org/x/tools/internal/versions
gonum.org/v1/gonum/floats
gonum.org/v1/gonum/floats/scalar
gonum.org/v1/gonum/internal/asm/f64
-# google.golang.org/api v0.239.0
-## explicit; go 1.23.0
+# google.golang.org/api v0.252.0
+## explicit; go 1.24.0
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
google.golang.org/api/iamcredentials/v1
@@ -1740,7 +1720,7 @@ google.golang.org/api/storage/v1
google.golang.org/api/transport
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
-# google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2
+# google.golang.org/genproto v0.0.0-20250603155806-513f23925822
## explicit; go 1.23.0
google.golang.org/genproto/googleapis/type/calendarperiod
google.golang.org/genproto/googleapis/type/date
@@ -1962,10 +1942,11 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/apimachinery v0.33.1
+# k8s.io/apimachinery v0.34.1
## explicit; go 1.24.0
k8s.io/apimachinery/pkg/util/runtime
-# k8s.io/client-go v0.33.1
+k8s.io/apimachinery/pkg/util/sets
+# k8s.io/client-go v0.34.1
## explicit; go 1.24.0
k8s.io/client-go/tools/metrics
k8s.io/client-go/util/workqueue
@@ -1981,9 +1962,6 @@ k8s.io/klog/v2/internal/sloghandler
# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
## explicit; go 1.18
k8s.io/utils/clock
-# sigs.k8s.io/yaml v1.4.0
-## explicit; go 1.12
-sigs.k8s.io/yaml/goyaml.v3
# github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f
# git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
# github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
@@ -1994,4 +1972,5 @@ sigs.k8s.io/yaml/goyaml.v3
# github.com/google/gnostic => github.com/googleapis/gnostic v0.6.9
# gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497
# google.golang.org/grpc => google.golang.org/grpc v1.71.2
-# github.com/prometheus/otlptranslator => github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
+# github.com/prometheus/otlptranslator => github.com/prometheus/otlptranslator v1.0.0
+# github.com/thanos-io/thanos => github.com/yeya24/thanos v0.2.2-0.20260203193035-ba37115033af
diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
deleted file mode 100644
index 093d6d3edf3..00000000000
--- a/vendor/sigs.k8s.io/yaml/LICENSE
+++ /dev/null
@@ -1,306 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Sam Ghods
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# The forked go-yaml.v3 library under this project is covered by two
-different licenses (MIT and Apache):
-
-#### MIT License ####
-
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original MIT license, with the additional
-copyright staring in 2011 when the project was ported over:
-
- apic.go emitterc.go parserc.go readerc.go scannerc.go
- writerc.go yamlh.go yamlprivateh.go
-
-Copyright (c) 2006-2010 Kirill Simonov
-Copyright (c) 2006-2011 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-### Apache License ###
-
-All the remaining project files are covered by the Apache license:
-
-Copyright (c) 2011-2019 Canonical Ltd
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-# The forked go-yaml.v2 library under the project is covered by an
-Apache license:
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE
deleted file mode 100644
index 2683e4bb1f2..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/LICENSE
+++ /dev/null
@@ -1,50 +0,0 @@
-
-This project is covered by two different licenses: MIT and Apache.
-
-#### MIT License ####
-
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original MIT license, with the additional
-copyright starting in 2011 when the project was ported over:
-
- apic.go emitterc.go parserc.go readerc.go scannerc.go
- writerc.go yamlh.go yamlprivateh.go
-
-Copyright (c) 2006-2010 Kirill Simonov
-Copyright (c) 2006-2011 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-### Apache License ###
-
-All the remaining project files are covered by the Apache license:
-
-Copyright (c) 2011-2019 Canonical Ltd
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS
deleted file mode 100644
index 73be0a3a9bd..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS
+++ /dev/null
@@ -1,24 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- natasha41575
-- knverey
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md
deleted file mode 100644
index b1a6b2e9e27..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/README.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# go-yaml fork
-
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
-
-This fork is based on v3.0.1: https://github.com/go-yaml/yaml/releases/tag/v3.0.1.
-
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.2, but preserves some behavior
-from 1.1 for backwards compatibility.
-
-Specifically, as of v3 of the yaml package:
-
- - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
- decoded into a typed bool value. Otherwise they behave as a string. Booleans
- in YAML 1.2 are _true/false_ only.
- - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
- as specified in YAML 1.2, because most parsers still use the old format.
- Octals in the _0o777_ format are supported though, so new files work.
- - Does not support base-60 floats. These are gone from YAML 1.2, and were
- actually never supported by this package as it's clearly a poor choice.
-
-Beyond these scalar rules, the package supports anchors, tags, map
-merging, and the other core YAML 1.2 features. Multi-document
-unmarshalling is not yet implemented.
-
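-The compatibility rules above are easiest to see in action. The sketch
-below is a minimal illustration written against the public
-gopkg.in/yaml.v3 API (it is not part of this fork's code or tests, and
-the variable names are illustrative only):
-
-```Go
-package main
-
-import (
-	"fmt"
-
-	"gopkg.in/yaml.v3"
-)
-
-func main() {
-	// A YAML 1.1 boolean such as "on" decodes into a typed bool...
-	var b bool
-	if err := yaml.Unmarshal([]byte("on"), &b); err != nil {
-		panic(err)
-	}
-	fmt.Println(b) // true
-
-	// ...but resolves as a plain string when no type information is given.
-	var v interface{}
-	if err := yaml.Unmarshal([]byte("on"), &v); err != nil {
-		panic(err)
-	}
-	fmt.Printf("%T %q\n", v, v) // string "on"
-
-	// Octals decode in both the 1.1 (0777) and 1.2 (0o777) forms.
-	var n int
-	if err := yaml.Unmarshal([]byte("0o777"), &n); err != nil {
-		panic(err)
-	}
-	fmt.Println(n) // 511
-}
-```
-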
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v3*.
-
-To install it, run:
-
- go get gopkg.in/yaml.v3
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
- - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
-
-API stability
--------------
-
-The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the MIT and Apache 2.0 licenses.
-Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
- "fmt"
- "log"
-
- "gopkg.in/yaml.v3"
-)
-
-var data = `
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
- A string
- B struct {
- RenamedC int `yaml:"c"`
- D []int `yaml:",flow"`
- }
-}
-
-func main() {
- t := T{}
-
- err := yaml.Unmarshal([]byte(data), &t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t:\n%v\n\n", t)
-
- d, err := yaml.Marshal(&t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t dump:\n%s\n\n", string(d))
-
- m := make(map[interface{}]interface{})
-
- err = yaml.Unmarshal([]byte(data), &m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m:\n%v\n\n", m)
-
- d, err = yaml.Marshal(&m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
- c: 2
- d:
- - 3
- - 4
-```
-
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go
deleted file mode 100644
index ae7d049f182..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/apic.go
+++ /dev/null
@@ -1,747 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "io"
-)
-
-func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
- //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
-
- // Check if we can move the queue at the beginning of the buffer.
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
- if parser.tokens_head != len(parser.tokens) {
- copy(parser.tokens, parser.tokens[parser.tokens_head:])
- }
- parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
- parser.tokens_head = 0
- }
- parser.tokens = append(parser.tokens, *token)
- if pos < 0 {
- return
- }
- copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
- parser.tokens[parser.tokens_head+pos] = *token
-}
-
-// Create a new parser object.
-func yaml_parser_initialize(parser *yaml_parser_t) bool {
- *parser = yaml_parser_t{
- raw_buffer: make([]byte, 0, input_raw_buffer_size),
- buffer: make([]byte, 0, input_buffer_size),
- }
- return true
-}
-
-// Destroy a parser object.
-func yaml_parser_delete(parser *yaml_parser_t) {
- *parser = yaml_parser_t{}
-}
-
-// String read handler.
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- if parser.input_pos == len(parser.input) {
- return 0, io.EOF
- }
- n = copy(buffer, parser.input[parser.input_pos:])
- parser.input_pos += n
- return n, nil
-}
-
-// Reader read handler.
-func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- return parser.input_reader.Read(buffer)
-}
-
-// Set a string input.
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_string_read_handler
- parser.input = input
- parser.input_pos = 0
-}
-
-// Set a reader input.
-func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_reader_read_handler
- parser.input_reader = r
-}
-
-// Set the source encoding.
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
- if parser.encoding != yaml_ANY_ENCODING {
- panic("must set the encoding only once")
- }
- parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) {
- *emitter = yaml_emitter_t{
- buffer: make([]byte, output_buffer_size),
- raw_buffer: make([]byte, 0, output_raw_buffer_size),
- states: make([]yaml_emitter_state_t, 0, initial_stack_size),
- events: make([]yaml_event_t, 0, initial_queue_size),
- best_width: -1,
- }
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
- *emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
- return nil
-}
-
-// yaml_writer_write_handler uses emitter.output_writer to write the
-// emitted text.
-func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- _, err := emitter.output_writer.Write(buffer)
- return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_string_write_handler
- emitter.output_buffer = output_buffer
-}
-
-// Set a writer output.
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_writer_write_handler
- emitter.output_writer = w
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
- if emitter.encoding != yaml_ANY_ENCODING {
- panic("must set the output encoding only once")
- }
- emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
- emitter.canonical = canonical
-}
-
-// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
- if indent < 2 || indent > 9 {
- indent = 2
- }
- emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
- if width < 0 {
- width = -1
- }
- emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
- emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
- emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-// assert(token); // Non-NULL token object expected.
-//
-// switch (token.type)
-// {
-// case YAML_TAG_DIRECTIVE_TOKEN:
-// yaml_free(token.data.tag_directive.handle);
-// yaml_free(token.data.tag_directive.prefix);
-// break;
-//
-// case YAML_ALIAS_TOKEN:
-// yaml_free(token.data.alias.value);
-// break;
-//
-// case YAML_ANCHOR_TOKEN:
-// yaml_free(token.data.anchor.value);
-// break;
-//
-// case YAML_TAG_TOKEN:
-// yaml_free(token.data.tag.handle);
-// yaml_free(token.data.tag.suffix);
-// break;
-//
-// case YAML_SCALAR_TOKEN:
-// yaml_free(token.data.scalar.value);
-// break;
-//
-// default:
-// break;
-// }
-//
-// memset(token, 0, sizeof(yaml_token_t));
-//}
-//
-///*
-// * Check if a string is a valid UTF-8 sequence.
-// *
-// * Check 'reader.c' for more details on UTF-8 encoding.
-// */
-//
-//static int
-//yaml_check_utf8(yaml_char_t *start, size_t length)
-//{
-// yaml_char_t *end = start+length;
-// yaml_char_t *pointer = start;
-//
-// while (pointer < end) {
-// unsigned char octet;
-// unsigned int width;
-// unsigned int value;
-// size_t k;
-//
-// octet = pointer[0];
-// width = (octet & 0x80) == 0x00 ? 1 :
-// (octet & 0xE0) == 0xC0 ? 2 :
-// (octet & 0xF0) == 0xE0 ? 3 :
-// (octet & 0xF8) == 0xF0 ? 4 : 0;
-// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
-// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
-// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
-// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
-// if (!width) return 0;
-// if (pointer+width > end) return 0;
-// for (k = 1; k < width; k ++) {
-// octet = pointer[k];
-// if ((octet & 0xC0) != 0x80) return 0;
-// value = (value << 6) + (octet & 0x3F);
-// }
-// if (!((width == 1) ||
-// (width == 2 && value >= 0x80) ||
-// (width == 3 && value >= 0x800) ||
-// (width == 4 && value >= 0x10000))) return 0;
-//
-// pointer += width;
-// }
-//
-// return 1;
-//}
-//
-
-// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- encoding: encoding,
- }
-}
-
-// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) {
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- }
-}
-
-// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(
- event *yaml_event_t,
- version_directive *yaml_version_directive_t,
- tag_directives []yaml_tag_directive_t,
- implicit bool,
-) {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: implicit,
- }
-}
-
-// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- implicit: implicit,
- }
-}
-
-// Create ALIAS.
-func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
- *event = yaml_event_t{
- typ: yaml_ALIAS_EVENT,
- anchor: anchor,
- }
- return true
-}
-
-// Create SCALAR.
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- anchor: anchor,
- tag: tag,
- value: value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-START.
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-END.
-func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- }
- return true
-}
-
-// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
-}
-
-// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) {
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- }
-}
-
-// Destroy an event object.
-func yaml_event_delete(event *yaml_event_t) {
- *event = yaml_event_t{}
-}
-
-///*
-// * Create a document object.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_initialize(document *yaml_document_t,
-// version_directive *yaml_version_directive_t,
-// tag_directives_start *yaml_tag_directive_t,
-// tag_directives_end *yaml_tag_directive_t,
-// start_implicit int, end_implicit int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// struct {
-// start *yaml_node_t
-// end *yaml_node_t
-// top *yaml_node_t
-// } nodes = { NULL, NULL, NULL }
-// version_directive_copy *yaml_version_directive_t = NULL
-// struct {
-// start *yaml_tag_directive_t
-// end *yaml_tag_directive_t
-// top *yaml_tag_directive_t
-// } tag_directives_copy = { NULL, NULL, NULL }
-// value yaml_tag_directive_t = { NULL, NULL }
-// mark yaml_mark_t = { 0, 0, 0 }
-//
-// assert(document) // Non-NULL document object is expected.
-// assert((tag_directives_start && tag_directives_end) ||
-// (tag_directives_start == tag_directives_end))
-// // Valid tag directives are expected.
-//
-// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-// if (version_directive) {
-// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-// if (!version_directive_copy) goto error
-// version_directive_copy.major = version_directive.major
-// version_directive_copy.minor = version_directive.minor
-// }
-//
-// if (tag_directives_start != tag_directives_end) {
-// tag_directive *yaml_tag_directive_t
-// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-// goto error
-// for (tag_directive = tag_directives_start
-// tag_directive != tag_directives_end; tag_directive ++) {
-// assert(tag_directive.handle)
-// assert(tag_directive.prefix)
-// if (!yaml_check_utf8(tag_directive.handle,
-// strlen((char *)tag_directive.handle)))
-// goto error
-// if (!yaml_check_utf8(tag_directive.prefix,
-// strlen((char *)tag_directive.prefix)))
-// goto error
-// value.handle = yaml_strdup(tag_directive.handle)
-// value.prefix = yaml_strdup(tag_directive.prefix)
-// if (!value.handle || !value.prefix) goto error
-// if (!PUSH(&context, tag_directives_copy, value))
-// goto error
-// value.handle = NULL
-// value.prefix = NULL
-// }
-// }
-//
-// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-// tag_directives_copy.start, tag_directives_copy.top,
-// start_implicit, end_implicit, mark, mark)
-//
-// return 1
-//
-//error:
-// STACK_DEL(&context, nodes)
-// yaml_free(version_directive_copy)
-// while (!STACK_EMPTY(&context, tag_directives_copy)) {
-// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-// }
-// STACK_DEL(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-//
-// return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// tag_directive *yaml_tag_directive_t
-//
-// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// while (!STACK_EMPTY(&context, document.nodes)) {
-// node yaml_node_t = POP(&context, document.nodes)
-// yaml_free(node.tag)
-// switch (node.type) {
-// case YAML_SCALAR_NODE:
-// yaml_free(node.data.scalar.value)
-// break
-// case YAML_SEQUENCE_NODE:
-// STACK_DEL(&context, node.data.sequence.items)
-// break
-// case YAML_MAPPING_NODE:
-// STACK_DEL(&context, node.data.mapping.pairs)
-// break
-// default:
-// assert(0) // Should not happen.
-// }
-// }
-// STACK_DEL(&context, document.nodes)
-//
-// yaml_free(document.version_directive)
-// for (tag_directive = document.tag_directives.start
-// tag_directive != document.tag_directives.end
-// tag_directive++) {
-// yaml_free(tag_directive.handle)
-// yaml_free(tag_directive.prefix)
-// }
-// yaml_free(document.tag_directives.start)
-//
-// memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-// return document.nodes.start + index - 1
-// }
-// return NULL
-//}
-//
-///**
-// * Get the root object.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_root_node(document *yaml_document_t)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (document.nodes.top != document.nodes.start) {
-// return document.nodes.start
-// }
-// return NULL
-//}
-//
-///*
-// * Add a scalar node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_scalar(document *yaml_document_t,
-// tag *yaml_char_t, value *yaml_char_t, length int,
-// style yaml_scalar_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// value_copy *yaml_char_t = NULL
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-// assert(value) // Non-NULL value is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (length < 0) {
-// length = strlen((char *)value)
-// }
-//
-// if (!yaml_check_utf8(value, length)) goto error
-// value_copy = yaml_malloc(length+1)
-// if (!value_copy) goto error
-// memcpy(value_copy, value, length)
-// value_copy[length] = '\0'
-//
-// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// yaml_free(tag_copy)
-// yaml_free(value_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a sequence node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_sequence(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_sequence_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_item_t
-// end *yaml_node_item_t
-// top *yaml_node_item_t
-// } items = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
-//
-// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, items)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a mapping node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_mapping(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_mapping_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_pair_t
-// end *yaml_node_pair_t
-// top *yaml_node_pair_t
-// } pairs = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
-//
-// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, pairs)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Append an item to a sequence node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_sequence_item(document *yaml_document_t,
-// sequence int, item int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// assert(document) // Non-NULL document is required.
-// assert(sequence > 0
-// && document.nodes.start + sequence <= document.nodes.top)
-// // Valid sequence id is required.
-// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
-// // A sequence node is required.
-// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
-// // Valid item id is required.
-//
-// if (!PUSH(&context,
-// document.nodes.start[sequence-1].data.sequence.items, item))
-// return 0
-//
-// return 1
-//}
-//
-///*
-// * Append a pair of a key and a value to a mapping node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_mapping_pair(document *yaml_document_t,
-// mapping int, key int, value int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// pair yaml_node_pair_t
-//
-// assert(document) // Non-NULL document is required.
-// assert(mapping > 0
-// && document.nodes.start + mapping <= document.nodes.top)
-// // Valid mapping id is required.
-// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
-// // A mapping node is required.
-// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
-// // Valid key id is required.
-// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
-// // Valid value id is required.
-//
-// pair.key = key
-// pair.value = value
-//
-// if (!PUSH(&context,
-// document.nodes.start[mapping-1].data.mapping.pairs, pair))
-// return 0
-//
-// return 1
-//}
-//
-//
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go
deleted file mode 100644
index 0173b6982e8..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/decode.go
+++ /dev/null
@@ -1,1000 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
- "encoding"
- "encoding/base64"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "time"
-)
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
- parser yaml_parser_t
- event yaml_event_t
- doc *Node
- anchors map[string]*Node
- doneInit bool
- textless bool
-}
-
-func newParser(b []byte) *parser {
- p := parser{}
- if !yaml_parser_initialize(&p.parser) {
- panic("failed to initialize YAML emitter")
- }
- if len(b) == 0 {
- b = []byte{'\n'}
- }
- yaml_parser_set_input_string(&p.parser, b)
- return &p
-}
-
-func newParserFromReader(r io.Reader) *parser {
- p := parser{}
- if !yaml_parser_initialize(&p.parser) {
- panic("failed to initialize YAML emitter")
- }
- yaml_parser_set_input_reader(&p.parser, r)
- return &p
-}
-
-func (p *parser) init() {
- if p.doneInit {
- return
- }
- p.anchors = make(map[string]*Node)
- p.expect(yaml_STREAM_START_EVENT)
- p.doneInit = true
-}
-
-func (p *parser) destroy() {
- if p.event.typ != yaml_NO_EVENT {
- yaml_event_delete(&p.event)
- }
- yaml_parser_delete(&p.parser)
-}
-
-// expect consumes an event from the event stream and
-// checks that it's of the expected type.
-func (p *parser) expect(e yaml_event_type_t) {
- if p.event.typ == yaml_NO_EVENT {
- if !yaml_parser_parse(&p.parser, &p.event) {
- p.fail()
- }
- }
- if p.event.typ == yaml_STREAM_END_EVENT {
- failf("attempted to go past the end of stream; corrupted value?")
- }
- if p.event.typ != e {
- p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
- p.fail()
- }
- yaml_event_delete(&p.event)
- p.event.typ = yaml_NO_EVENT
-}
-
-// peek peeks at the next event in the event stream,
-// puts the results into p.event and returns the event type.
-func (p *parser) peek() yaml_event_type_t {
- if p.event.typ != yaml_NO_EVENT {
- return p.event.typ
- }
-	// It's a curious choice for the underlying API to generally return a
-	// positive result on success, but in this case to return true in an
-	// error scenario. This was the source of bugs in the past (issue #666).
- if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
- p.fail()
- }
- return p.event.typ
-}
-
-func (p *parser) fail() {
- var where string
- var line int
- if p.parser.context_mark.line != 0 {
- line = p.parser.context_mark.line
- // Scanner errors don't iterate line before returning error
- if p.parser.error == yaml_SCANNER_ERROR {
- line++
- }
- } else if p.parser.problem_mark.line != 0 {
- line = p.parser.problem_mark.line
- // Scanner errors don't iterate line before returning error
- if p.parser.error == yaml_SCANNER_ERROR {
- line++
- }
- }
- if line != 0 {
- where = "line " + strconv.Itoa(line) + ": "
- }
- var msg string
- if len(p.parser.problem) > 0 {
- msg = p.parser.problem
- } else {
- msg = "unknown problem parsing YAML content"
- }
- failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *Node, anchor []byte) {
- if anchor != nil {
- n.Anchor = string(anchor)
- p.anchors[n.Anchor] = n
- }
-}
-
-func (p *parser) parse() *Node {
- p.init()
- switch p.peek() {
- case yaml_SCALAR_EVENT:
- return p.scalar()
- case yaml_ALIAS_EVENT:
- return p.alias()
- case yaml_MAPPING_START_EVENT:
- return p.mapping()
- case yaml_SEQUENCE_START_EVENT:
- return p.sequence()
- case yaml_DOCUMENT_START_EVENT:
- return p.document()
- case yaml_STREAM_END_EVENT:
- // Happens when attempting to decode an empty buffer.
- return nil
- case yaml_TAIL_COMMENT_EVENT:
- panic("internal error: unexpected tail comment event (please report)")
- default:
- panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
- }
-}
-
-func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
- var style Style
- if tag != "" && tag != "!" {
- tag = shortTag(tag)
- style = TaggedStyle
- } else if defaultTag != "" {
- tag = defaultTag
- } else if kind == ScalarNode {
- tag, _ = resolve("", value)
- }
- n := &Node{
- Kind: kind,
- Tag: tag,
- Value: value,
- Style: style,
- }
- if !p.textless {
- n.Line = p.event.start_mark.line + 1
- n.Column = p.event.start_mark.column + 1
- n.HeadComment = string(p.event.head_comment)
- n.LineComment = string(p.event.line_comment)
- n.FootComment = string(p.event.foot_comment)
- }
- return n
-}
-
-func (p *parser) parseChild(parent *Node) *Node {
- child := p.parse()
- parent.Content = append(parent.Content, child)
- return child
-}
-
-func (p *parser) document() *Node {
- n := p.node(DocumentNode, "", "", "")
- p.doc = n
- p.expect(yaml_DOCUMENT_START_EVENT)
- p.parseChild(n)
- if p.peek() == yaml_DOCUMENT_END_EVENT {
- n.FootComment = string(p.event.foot_comment)
- }
- p.expect(yaml_DOCUMENT_END_EVENT)
- return n
-}
-
-func (p *parser) alias() *Node {
- n := p.node(AliasNode, "", "", string(p.event.anchor))
- n.Alias = p.anchors[n.Value]
- if n.Alias == nil {
- failf("unknown anchor '%s' referenced", n.Value)
- }
- p.expect(yaml_ALIAS_EVENT)
- return n
-}
-
-func (p *parser) scalar() *Node {
- var parsedStyle = p.event.scalar_style()
- var nodeStyle Style
- switch {
- case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
- nodeStyle = DoubleQuotedStyle
- case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
- nodeStyle = SingleQuotedStyle
- case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
- nodeStyle = LiteralStyle
- case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
- nodeStyle = FoldedStyle
- }
- var nodeValue = string(p.event.value)
- var nodeTag = string(p.event.tag)
- var defaultTag string
- if nodeStyle == 0 {
- if nodeValue == "<<" {
- defaultTag = mergeTag
- }
- } else {
- defaultTag = strTag
- }
- n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
- n.Style |= nodeStyle
- p.anchor(n, p.event.anchor)
- p.expect(yaml_SCALAR_EVENT)
- return n
-}
-
-func (p *parser) sequence() *Node {
- n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
- if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
- n.Style |= FlowStyle
- }
- p.anchor(n, p.event.anchor)
- p.expect(yaml_SEQUENCE_START_EVENT)
- for p.peek() != yaml_SEQUENCE_END_EVENT {
- p.parseChild(n)
- }
- n.LineComment = string(p.event.line_comment)
- n.FootComment = string(p.event.foot_comment)
- p.expect(yaml_SEQUENCE_END_EVENT)
- return n
-}
-
-func (p *parser) mapping() *Node {
- n := p.node(MappingNode, mapTag, string(p.event.tag), "")
- block := true
- if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
- block = false
- n.Style |= FlowStyle
- }
- p.anchor(n, p.event.anchor)
- p.expect(yaml_MAPPING_START_EVENT)
- for p.peek() != yaml_MAPPING_END_EVENT {
- k := p.parseChild(n)
- if block && k.FootComment != "" {
- // Must be a foot comment for the prior value when being dedented.
- if len(n.Content) > 2 {
- n.Content[len(n.Content)-3].FootComment = k.FootComment
- k.FootComment = ""
- }
- }
- v := p.parseChild(n)
- if k.FootComment == "" && v.FootComment != "" {
- k.FootComment = v.FootComment
- v.FootComment = ""
- }
- if p.peek() == yaml_TAIL_COMMENT_EVENT {
- if k.FootComment == "" {
- k.FootComment = string(p.event.foot_comment)
- }
- p.expect(yaml_TAIL_COMMENT_EVENT)
- }
- }
- n.LineComment = string(p.event.line_comment)
- n.FootComment = string(p.event.foot_comment)
- if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
- n.Content[len(n.Content)-2].FootComment = n.FootComment
- n.FootComment = ""
- }
- p.expect(yaml_MAPPING_END_EVENT)
- return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
- doc *Node
- aliases map[*Node]bool
- terrors []string
-
- stringMapType reflect.Type
- generalMapType reflect.Type
-
- knownFields bool
- uniqueKeys bool
- decodeCount int
- aliasCount int
- aliasDepth int
-
- mergedFields map[interface{}]bool
-}
-
-var (
- nodeType = reflect.TypeOf(Node{})
- durationType = reflect.TypeOf(time.Duration(0))
- stringMapType = reflect.TypeOf(map[string]interface{}{})
- generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
- ifaceType = generalMapType.Elem()
- timeType = reflect.TypeOf(time.Time{})
- ptrTimeType = reflect.TypeOf(&time.Time{})
-)
-
-func newDecoder() *decoder {
- d := &decoder{
- stringMapType: stringMapType,
- generalMapType: generalMapType,
- uniqueKeys: true,
- }
- d.aliases = make(map[*Node]bool)
- return d
-}
-
-func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
- if n.Tag != "" {
- tag = n.Tag
- }
- value := n.Value
- if tag != seqTag && tag != mapTag {
- if len(value) > 10 {
- value = " `" + value[:7] + "...`"
- } else {
- value = " `" + value + "`"
- }
- }
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
-}
-
-func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
- err := u.UnmarshalYAML(n)
- if e, ok := err.(*TypeError); ok {
- d.terrors = append(d.terrors, e.Errors...)
- return false
- }
- if err != nil {
- fail(err)
- }
- return true
-}
-
-func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
- terrlen := len(d.terrors)
- err := u.UnmarshalYAML(func(v interface{}) (err error) {
- defer handleErr(&err)
- d.unmarshal(n, reflect.ValueOf(v))
- if len(d.terrors) > terrlen {
- issues := d.terrors[terrlen:]
- d.terrors = d.terrors[:terrlen]
- return &TypeError{issues}
- }
- return nil
- })
- if e, ok := err.(*TypeError); ok {
- d.terrors = append(d.terrors, e.Errors...)
- return false
- }
- if err != nil {
- fail(err)
- }
- return true
-}
-
-// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
-// if a value is found to implement it.
-// It returns the initialized and dereferenced out value, whether
-// unmarshalling was already done by UnmarshalYAML, and if so whether
-// its types unmarshalled appropriately.
-//
-// If n holds a null value, prepare returns before doing anything.
-func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
- if n.ShortTag() == nullTag {
- return out, false, false
- }
- again := true
- for again {
- again = false
- if out.Kind() == reflect.Ptr {
- if out.IsNil() {
- out.Set(reflect.New(out.Type().Elem()))
- }
- out = out.Elem()
- again = true
- }
- if out.CanAddr() {
- outi := out.Addr().Interface()
- if u, ok := outi.(Unmarshaler); ok {
- good = d.callUnmarshaler(n, u)
- return out, true, good
- }
- if u, ok := outi.(obsoleteUnmarshaler); ok {
- good = d.callObsoleteUnmarshaler(n, u)
- return out, true, good
- }
- }
- }
- return out, false, false
-}
-
-func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
- if n.ShortTag() == nullTag {
- return reflect.Value{}
- }
- for _, num := range index {
- for {
- if v.Kind() == reflect.Ptr {
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- v = v.Elem()
- continue
- }
- break
- }
- v = v.Field(num)
- }
- return v
-}
-
-const (
- // 400,000 decode operations is ~500kb of dense object declarations, or
- // ~5kb of dense object declarations with 10000% alias expansion
- alias_ratio_range_low = 400000
-
- // 4,000,000 decode operations is ~5MB of dense object declarations, or
- // ~4.5MB of dense object declarations with 10% alias expansion
- alias_ratio_range_high = 4000000
-
- // alias_ratio_range is the range over which we scale allowed alias ratios
- alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
-)
-
-func allowedAliasRatio(decodeCount int) float64 {
- switch {
- case decodeCount <= alias_ratio_range_low:
- // allow 99% to come from alias expansion for small-to-medium documents
- return 0.99
- case decodeCount >= alias_ratio_range_high:
- // allow 10% to come from alias expansion for very large documents
- return 0.10
- default:
- // scale smoothly from 99% down to 10% over the range.
- // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
- // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
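-		// As a worked example (illustrative arithmetic, not upstream text):
-		// halfway through the range, decodeCount = 2,200,000 gives
-		// 0.99 - 0.89*(1,800,000/3,600,000) = 0.99 - 0.445 = 0.545,
-		// i.e. roughly 54% of decodes may still come from alias expansion.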
- return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
- }
-}
-
-func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
- d.decodeCount++
- if d.aliasDepth > 0 {
- d.aliasCount++
- }
- if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
- failf("document contains excessive aliasing")
- }
- if out.Type() == nodeType {
- out.Set(reflect.ValueOf(n).Elem())
- return true
- }
- switch n.Kind {
- case DocumentNode:
- return d.document(n, out)
- case AliasNode:
- return d.alias(n, out)
- }
- out, unmarshaled, good := d.prepare(n, out)
- if unmarshaled {
- return good
- }
- switch n.Kind {
- case ScalarNode:
- good = d.scalar(n, out)
- case MappingNode:
- good = d.mapping(n, out)
- case SequenceNode:
- good = d.sequence(n, out)
- case 0:
- if n.IsZero() {
- return d.null(out)
- }
- fallthrough
- default:
- failf("cannot decode node with unknown kind %d", n.Kind)
- }
- return good
-}
-
-func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
- if len(n.Content) == 1 {
- d.doc = n
- d.unmarshal(n.Content[0], out)
- return true
- }
- return false
-}
-
-func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
- if d.aliases[n] {
- // TODO this could actually be allowed in some circumstances.
- failf("anchor '%s' value contains itself", n.Value)
- }
- d.aliases[n] = true
- d.aliasDepth++
- good = d.unmarshal(n.Alias, out)
- d.aliasDepth--
- delete(d.aliases, n)
- return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
- for _, k := range out.MapKeys() {
- out.SetMapIndex(k, zeroValue)
- }
-}
-
-func (d *decoder) null(out reflect.Value) bool {
- if out.CanAddr() {
- switch out.Kind() {
- case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
- out.Set(reflect.Zero(out.Type()))
- return true
- }
- }
- return false
-}
-
-func (d *decoder) scalar(n *Node, out reflect.Value) bool {
- var tag string
- var resolved interface{}
- if n.indicatedString() {
- tag = strTag
- resolved = n.Value
- } else {
- tag, resolved = resolve(n.Tag, n.Value)
- if tag == binaryTag {
- data, err := base64.StdEncoding.DecodeString(resolved.(string))
- if err != nil {
- failf("!!binary value contains invalid base64 data")
- }
- resolved = string(data)
- }
- }
- if resolved == nil {
- return d.null(out)
- }
- if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
- // We've resolved to exactly the type we want, so use that.
- out.Set(resolvedv)
- return true
- }
- // Perhaps we can use the value as a TextUnmarshaler to
- // set its value.
- if out.CanAddr() {
- u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
- if ok {
- var text []byte
- if tag == binaryTag {
- text = []byte(resolved.(string))
- } else {
- // We let any value be unmarshaled into TextUnmarshaler.
- // That might be more lax than we'd like, but the
- // TextUnmarshaler itself should bowl out any dubious values.
- text = []byte(n.Value)
- }
- err := u.UnmarshalText(text)
- if err != nil {
- fail(err)
- }
- return true
- }
- }
- switch out.Kind() {
- case reflect.String:
- if tag == binaryTag {
- out.SetString(resolved.(string))
- return true
- }
- out.SetString(n.Value)
- return true
- case reflect.Interface:
- out.Set(reflect.ValueOf(resolved))
- return true
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- // This used to work in v2, but it's very unfriendly.
- isDuration := out.Type() == durationType
-
- switch resolved := resolved.(type) {
- case int:
- if !isDuration && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- return true
- }
- case int64:
- if !isDuration && !out.OverflowInt(resolved) {
- out.SetInt(resolved)
- return true
- }
- case uint64:
- if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- return true
- }
- case float64:
- if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- return true
- }
- case string:
- if out.Type() == durationType {
- d, err := time.ParseDuration(resolved)
- if err == nil {
- out.SetInt(int64(d))
- return true
- }
- }
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- switch resolved := resolved.(type) {
- case int:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- return true
- }
- case int64:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- return true
- }
- case uint64:
- if !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- return true
- }
- case float64:
- if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- return true
- }
- }
- case reflect.Bool:
- switch resolved := resolved.(type) {
- case bool:
- out.SetBool(resolved)
- return true
- case string:
- // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
- // It only works if explicitly attempting to unmarshal into a typed bool value.
- switch resolved {
- case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
- out.SetBool(true)
- return true
- case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
- out.SetBool(false)
- return true
- }
- }
- case reflect.Float32, reflect.Float64:
- switch resolved := resolved.(type) {
- case int:
- out.SetFloat(float64(resolved))
- return true
- case int64:
- out.SetFloat(float64(resolved))
- return true
- case uint64:
- out.SetFloat(float64(resolved))
- return true
- case float64:
- out.SetFloat(resolved)
- return true
- }
- case reflect.Struct:
- if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
- out.Set(resolvedv)
- return true
- }
- case reflect.Ptr:
- panic("yaml internal error: please report the issue")
- }
- d.terror(n, tag, out)
- return false
-}
-
-func settableValueOf(i interface{}) reflect.Value {
- v := reflect.ValueOf(i)
- sv := reflect.New(v.Type()).Elem()
- sv.Set(v)
- return sv
-}
-
-func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
- l := len(n.Content)
-
- var iface reflect.Value
- switch out.Kind() {
- case reflect.Slice:
- out.Set(reflect.MakeSlice(out.Type(), l, l))
- case reflect.Array:
- if l != out.Len() {
- failf("invalid array: want %d elements but got %d", out.Len(), l)
- }
- case reflect.Interface:
- // No type hints. Will have to use a generic sequence.
- iface = out
- out = settableValueOf(make([]interface{}, l))
- default:
- d.terror(n, seqTag, out)
- return false
- }
- et := out.Type().Elem()
-
- j := 0
- for i := 0; i < l; i++ {
- e := reflect.New(et).Elem()
- if ok := d.unmarshal(n.Content[i], e); ok {
- out.Index(j).Set(e)
- j++
- }
- }
- if out.Kind() != reflect.Array {
- out.Set(out.Slice(0, j))
- }
- if iface.IsValid() {
- iface.Set(out)
- }
- return true
-}
-
-func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
- l := len(n.Content)
- if d.uniqueKeys {
- nerrs := len(d.terrors)
- for i := 0; i < l; i += 2 {
- ni := n.Content[i]
- for j := i + 2; j < l; j += 2 {
- nj := n.Content[j]
- if ni.Kind == nj.Kind && ni.Value == nj.Value {
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
- }
- }
- }
- if len(d.terrors) > nerrs {
- return false
- }
- }
- switch out.Kind() {
- case reflect.Struct:
- return d.mappingStruct(n, out)
- case reflect.Map:
- // okay
- case reflect.Interface:
- iface := out
- if isStringMap(n) {
- out = reflect.MakeMap(d.stringMapType)
- } else {
- out = reflect.MakeMap(d.generalMapType)
- }
- iface.Set(out)
- default:
- d.terror(n, mapTag, out)
- return false
- }
-
- outt := out.Type()
- kt := outt.Key()
- et := outt.Elem()
-
- stringMapType := d.stringMapType
- generalMapType := d.generalMapType
- if outt.Elem() == ifaceType {
- if outt.Key().Kind() == reflect.String {
- d.stringMapType = outt
- } else if outt.Key() == ifaceType {
- d.generalMapType = outt
- }
- }
-
- mergedFields := d.mergedFields
- d.mergedFields = nil
-
- var mergeNode *Node
-
- mapIsNew := false
- if out.IsNil() {
- out.Set(reflect.MakeMap(outt))
- mapIsNew = true
- }
- for i := 0; i < l; i += 2 {
- if isMerge(n.Content[i]) {
- mergeNode = n.Content[i+1]
- continue
- }
- k := reflect.New(kt).Elem()
- if d.unmarshal(n.Content[i], k) {
- if mergedFields != nil {
- ki := k.Interface()
- if mergedFields[ki] {
- continue
- }
- mergedFields[ki] = true
- }
- kkind := k.Kind()
- if kkind == reflect.Interface {
- kkind = k.Elem().Kind()
- }
- if kkind == reflect.Map || kkind == reflect.Slice {
- failf("invalid map key: %#v", k.Interface())
- }
- e := reflect.New(et).Elem()
- if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
- out.SetMapIndex(k, e)
- }
- }
- }
-
- d.mergedFields = mergedFields
- if mergeNode != nil {
- d.merge(n, mergeNode, out)
- }
-
- d.stringMapType = stringMapType
- d.generalMapType = generalMapType
- return true
-}
-
-func isStringMap(n *Node) bool {
- if n.Kind != MappingNode {
- return false
- }
- l := len(n.Content)
- for i := 0; i < l; i += 2 {
- shortTag := n.Content[i].ShortTag()
- if shortTag != strTag && shortTag != mergeTag {
- return false
- }
- }
- return true
-}
-
-func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
- sinfo, err := getStructInfo(out.Type())
- if err != nil {
- panic(err)
- }
-
- var inlineMap reflect.Value
- var elemType reflect.Type
- if sinfo.InlineMap != -1 {
- inlineMap = out.Field(sinfo.InlineMap)
- elemType = inlineMap.Type().Elem()
- }
-
- for _, index := range sinfo.InlineUnmarshalers {
- field := d.fieldByIndex(n, out, index)
- d.prepare(n, field)
- }
-
- mergedFields := d.mergedFields
- d.mergedFields = nil
- var mergeNode *Node
- var doneFields []bool
- if d.uniqueKeys {
- doneFields = make([]bool, len(sinfo.FieldsList))
- }
- name := settableValueOf("")
- l := len(n.Content)
- for i := 0; i < l; i += 2 {
- ni := n.Content[i]
- if isMerge(ni) {
- mergeNode = n.Content[i+1]
- continue
- }
- if !d.unmarshal(ni, name) {
- continue
- }
- sname := name.String()
- if mergedFields != nil {
- if mergedFields[sname] {
- continue
- }
- mergedFields[sname] = true
- }
- if info, ok := sinfo.FieldsMap[sname]; ok {
- if d.uniqueKeys {
- if doneFields[info.Id] {
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
- continue
- }
- doneFields[info.Id] = true
- }
- var field reflect.Value
- if info.Inline == nil {
- field = out.Field(info.Num)
- } else {
- field = d.fieldByIndex(n, out, info.Inline)
- }
- d.unmarshal(n.Content[i+1], field)
- } else if sinfo.InlineMap != -1 {
- if inlineMap.IsNil() {
- inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
- }
- value := reflect.New(elemType).Elem()
- d.unmarshal(n.Content[i+1], value)
- inlineMap.SetMapIndex(name, value)
- } else if d.knownFields {
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
- }
- }
-
- d.mergedFields = mergedFields
- if mergeNode != nil {
- d.merge(n, mergeNode, out)
- }
- return true
-}
-
-func failWantMap() {
- failf("map merge requires map or sequence of maps as the value")
-}
-
-func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
- mergedFields := d.mergedFields
- if mergedFields == nil {
- d.mergedFields = make(map[interface{}]bool)
- for i := 0; i < len(parent.Content); i += 2 {
- k := reflect.New(ifaceType).Elem()
- if d.unmarshal(parent.Content[i], k) {
- d.mergedFields[k.Interface()] = true
- }
- }
- }
-
- switch merge.Kind {
- case MappingNode:
- d.unmarshal(merge, out)
- case AliasNode:
- if merge.Alias != nil && merge.Alias.Kind != MappingNode {
- failWantMap()
- }
- d.unmarshal(merge, out)
- case SequenceNode:
- for i := 0; i < len(merge.Content); i++ {
- ni := merge.Content[i]
- if ni.Kind == AliasNode {
- if ni.Alias != nil && ni.Alias.Kind != MappingNode {
- failWantMap()
- }
- } else if ni.Kind != MappingNode {
- failWantMap()
- }
- d.unmarshal(ni, out)
- }
- default:
- failWantMap()
- }
-
- d.mergedFields = mergedFields
-}
-
-func isMerge(n *Node) bool {
- return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
-}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go
deleted file mode 100644
index 6ea0ae8c105..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/emitterc.go
+++ /dev/null
@@ -1,2043 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "bytes"
- "fmt"
-)
-
-// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) {
- return yaml_emitter_flush(emitter)
- }
- return true
-}
-
-// Put a character to the output buffer.
-func put(emitter *yaml_emitter_t, value byte) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.buffer[emitter.buffer_pos] = value
- emitter.buffer_pos++
- emitter.column++
- return true
-}
-
-// Put a line break to the output buffer.
-func put_break(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- switch emitter.line_break {
- case yaml_CR_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\r'
- emitter.buffer_pos += 1
- case yaml_LN_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\n'
- emitter.buffer_pos += 1
- case yaml_CRLN_BREAK:
- emitter.buffer[emitter.buffer_pos+0] = '\r'
- emitter.buffer[emitter.buffer_pos+1] = '\n'
- emitter.buffer_pos += 2
- default:
- panic("unknown line break setting")
- }
- if emitter.column == 0 {
- emitter.space_above = true
- }
- emitter.column = 0
- emitter.line++
- // [Go] Do this here and below and drop from everywhere else (see commented lines).
- emitter.indention = true
- return true
-}
-
-// Copy a character from a string into buffer.
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- p := emitter.buffer_pos
- w := width(s[*i])
- switch w {
- case 4:
- emitter.buffer[p+3] = s[*i+3]
- fallthrough
- case 3:
- emitter.buffer[p+2] = s[*i+2]
- fallthrough
- case 2:
- emitter.buffer[p+1] = s[*i+1]
- fallthrough
- case 1:
- emitter.buffer[p+0] = s[*i+0]
- default:
- panic("unknown character width")
- }
- emitter.column++
- emitter.buffer_pos += w
- *i += w
- return true
-}
-
-// Write a whole string into buffer.
-func write_all(emitter *yaml_emitter_t, s []byte) bool {
- for i := 0; i < len(s); {
- if !write(emitter, s, &i) {
- return false
- }
- }
- return true
-}
-
-// Copy a line break character from a string into buffer.
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if s[*i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- *i++
- } else {
- if !write(emitter, s, i) {
- return false
- }
- if emitter.column == 0 {
- emitter.space_above = true
- }
- emitter.column = 0
- emitter.line++
- // [Go] Do this here and above and drop from everywhere else (see commented lines).
- emitter.indention = true
- }
- return true
-}
-
-// Set an emitter error and return false.
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_EMITTER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Emit an event.
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.events = append(emitter.events, *event)
- for !yaml_emitter_need_more_events(emitter) {
- event := &emitter.events[emitter.events_head]
- if !yaml_emitter_analyze_event(emitter, event) {
- return false
- }
- if !yaml_emitter_state_machine(emitter, event) {
- return false
- }
- yaml_event_delete(event)
- emitter.events_head++
- }
- return true
-}
-
-// Check if we need to accumulate more events before emitting.
-//
-// We accumulate extra
-// - 1 event for DOCUMENT-START
-// - 2 events for SEQUENCE-START
-// - 3 events for MAPPING-START
-//
-func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
- if emitter.events_head == len(emitter.events) {
- return true
- }
- var accumulate int
- switch emitter.events[emitter.events_head].typ {
- case yaml_DOCUMENT_START_EVENT:
- accumulate = 1
- break
- case yaml_SEQUENCE_START_EVENT:
- accumulate = 2
- break
- case yaml_MAPPING_START_EVENT:
- accumulate = 3
- break
- default:
- return false
- }
- if len(emitter.events)-emitter.events_head > accumulate {
- return false
- }
- var level int
- for i := emitter.events_head; i < len(emitter.events); i++ {
- switch emitter.events[i].typ {
- case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
- level++
- case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
- level--
- }
- if level == 0 {
- return false
- }
- }
- return true
-}
-
-// Append a directive to the directives stack.
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
- for i := 0; i < len(emitter.tag_directives); i++ {
- if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
- }
- }
-
- // [Go] Do we actually need to copy this given garbage collection
- // and the lack of deallocating destructors?
- tag_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(tag_copy.handle, value.handle)
- copy(tag_copy.prefix, value.prefix)
- emitter.tag_directives = append(emitter.tag_directives, tag_copy)
- return true
-}
-
-// Increase the indentation level.
-func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
- emitter.indents = append(emitter.indents, emitter.indent)
- if emitter.indent < 0 {
- if flow {
- emitter.indent = emitter.best_indent
- } else {
- emitter.indent = 0
- }
- } else if !indentless {
- // [Go] This was changed so that indentations are more regular.
- if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
- // The first indent inside a sequence will just skip the "- " indicator.
- emitter.indent += 2
- } else {
- // Everything else aligns to the chosen indentation.
- emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
- if compact_seq {
- // compact_seq is almost always false; it is set only when we are increasing
- // the indent for sequence nodes. In that case we subtract 2 from the indent
- // to account for the "- " indicator at the beginning of each sequence entry.
- emitter.indent = emitter.indent - 2
- }
- }
- }
- return true
-}
-
-// State dispatcher.
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- switch emitter.state {
- default:
- case yaml_EMIT_STREAM_START_STATE:
- return yaml_emitter_emit_stream_start(emitter, event)
-
- case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, true)
-
- case yaml_EMIT_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, false)
-
- case yaml_EMIT_DOCUMENT_CONTENT_STATE:
- return yaml_emitter_emit_document_content(emitter, event)
-
- case yaml_EMIT_DOCUMENT_END_STATE:
- return yaml_emitter_emit_document_end(emitter, event)
-
- case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
-
- case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
-
- case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
-
- case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
-
- case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
-
- case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
-
- case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
-
- case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, true)
-
- case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, false)
-
- case yaml_EMIT_END_STATE:
- return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
- }
- panic("invalid emitter state")
-}
-
-// Expect STREAM-START.
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_STREAM_START_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
- }
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = event.encoding
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = yaml_UTF8_ENCODING
- }
- }
- if emitter.best_indent < 2 || emitter.best_indent > 9 {
- emitter.best_indent = 2
- }
- if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
- emitter.best_width = 80
- }
- if emitter.best_width < 0 {
- emitter.best_width = 1<<31 - 1
- }
- if emitter.line_break == yaml_ANY_BREAK {
- emitter.line_break = yaml_LN_BREAK
- }
-
- emitter.indent = -1
- emitter.line = 0
- emitter.column = 0
- emitter.whitespace = true
- emitter.indention = true
- emitter.space_above = true
- emitter.foot_indent = -1
-
- if emitter.encoding != yaml_UTF8_ENCODING {
- if !yaml_emitter_write_bom(emitter) {
- return false
- }
- }
- emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
- return true
-}
-
-// Expect DOCUMENT-START or STREAM-END.
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-
- if event.typ == yaml_DOCUMENT_START_EVENT {
-
- if event.version_directive != nil {
- if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
- return false
- }
- }
-
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
- return false
- }
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
- return false
- }
- }
-
- for i := 0; i < len(default_tag_directives); i++ {
- tag_directive := &default_tag_directives[i]
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
- return false
- }
- }
-
- implicit := event.implicit
- if !first || emitter.canonical {
- implicit = false
- }
-
- if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if event.version_directive != nil {
- implicit = false
- if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if len(event.tag_directives) > 0 {
- implicit = false
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- if yaml_emitter_check_empty_document(emitter) {
- implicit = false
- }
- if !implicit {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
- return false
- }
- if emitter.canonical || true {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- if len(emitter.head_comment) > 0 {
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- if !put_break(emitter) {
- return false
- }
- }
-
- emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
- return true
- }
-
- if event.typ == yaml_STREAM_END_EVENT {
- if emitter.open_ended {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_END_STATE
- return true
- }
-
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
-}
-
-// Expect the root node.
-func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
-
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
- return false
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- return true
-}
-
-// Expect DOCUMENT-END.
-func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_DOCUMENT_END_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
- }
- // [Go] Force document foot separation.
- emitter.foot_indent = 0
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- emitter.foot_indent = -1
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !event.implicit {
- // [Go] Allocate the slice elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_DOCUMENT_START_STATE
- emitter.tag_directives = emitter.tag_directives[:0]
- return true
-}
-
-// Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_SEQUENCE_END_EVENT {
- if emitter.canonical && !first && !trail {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.column == 0 || emitter.canonical && !first {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
- return false
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
-
- return true
- }
-
- if !first && !trail {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
-
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- if emitter.column == 0 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
- } else {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
- }
- if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
- return false
- }
- if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- return true
-}
-
-// Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_MAPPING_END_EVENT {
- if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
- return false
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
-
- if !first && !trail {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
-
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
-
- if emitter.column == 0 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a flow value node.
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
- return false
- }
- }
- if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
- } else {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
- }
- if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
- return false
- }
- if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- return true
-}
-
-// Expect a block item node.
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- // emitter.mapping_context tells us if we are currently in a mapping context.
- // emitter.column tells us which column of the yaml output we are in; column 0 holds the first character of the line.
- // emitter.indention tells us if the last character written was an indentation character.
- // emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
- // So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
- // the last character was not an indentation character, and we consider '- ' part of the indentation
- // for sequence elements.
- seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
- emitter.compact_sequence_indent
- if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
- return false
- }
- }
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
- if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
- return false
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- return true
-}
-
-// Expect a block key node.
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, false) {
- return false
- }
- }
- if !yaml_emitter_process_head_comment(emitter) {
- return false
- }
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if len(emitter.line_comment) > 0 {
- // [Go] A line comment was provided for the key. That's unusual as the
- // scanner associates line comments with the value. Either way,
- // save the line comment and render it appropriately later.
- emitter.key_line_comment = emitter.line_comment
- emitter.line_comment = nil
- }
- if yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block value node.
-func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
- return false
- }
- }
- if len(emitter.key_line_comment) > 0 {
- // [Go] Line comments are generally associated with the value, but when there's
- // no value on the same line as a mapping key they end up attached to the
- // key itself.
- if event.typ == yaml_SCALAR_EVENT {
- if len(emitter.line_comment) == 0 {
- // A scalar is coming and it has no line comments by itself yet,
- // so just let it handle the line comment as usual. If it has a
- // line comment, we can't have both so the one from the key is lost.
- emitter.line_comment = emitter.key_line_comment
- emitter.key_line_comment = nil
- }
- } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
- // An indented block follows, so write the comment right now.
- emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
- if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
- return false
- }
- if !yaml_emitter_process_line_comment(emitter) {
- return false
- }
- if !yaml_emitter_process_foot_comment(emitter) {
- return false
- }
- return true
-}
-
-func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
-}
-
-// Expect a node.
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
- root bool, sequence bool, mapping bool, simple_key bool) bool {
-
- emitter.root_context = root
- emitter.sequence_context = sequence
- emitter.mapping_context = mapping
- emitter.simple_key_context = simple_key
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- return yaml_emitter_emit_alias(emitter, event)
- case yaml_SCALAR_EVENT:
- return yaml_emitter_emit_scalar(emitter, event)
- case yaml_SEQUENCE_START_EVENT:
- return yaml_emitter_emit_sequence_start(emitter, event)
- case yaml_MAPPING_START_EVENT:
- return yaml_emitter_emit_mapping_start(emitter, event)
- default:
- return yaml_emitter_set_emitter_error(emitter,
- fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
- }
-}
-
-// Expect ALIAS.
-func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SCALAR.
-func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_select_scalar_style(emitter, event) {
- return false
- }
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- if !yaml_emitter_process_scalar(emitter) {
- return false
- }
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
- yaml_emitter_check_empty_sequence(emitter) {
- emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
- }
- return true
-}
-
-// Expect MAPPING-START.
-func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
- yaml_emitter_check_empty_mapping(emitter) {
- emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
- }
- return true
-}
-
-// Check if the document content is an empty scalar.
-func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
- return false // [Go] Huh?
-}
-
-// Check if the next events represent an empty sequence.
-func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
-}
-
-// Check if the next events represent an empty mapping.
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
-}
-
-// Check if the next node can be expressed as a simple key.
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
- length := 0
- switch emitter.events[emitter.events_head].typ {
- case yaml_ALIAS_EVENT:
- length += len(emitter.anchor_data.anchor)
- case yaml_SCALAR_EVENT:
- if emitter.scalar_data.multiline {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix) +
- len(emitter.scalar_data.value)
- case yaml_SEQUENCE_START_EVENT:
- if !yaml_emitter_check_empty_sequence(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- case yaml_MAPPING_START_EVENT:
- if !yaml_emitter_check_empty_mapping(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- default:
- return false
- }
- return length <= 128
-}
-
-// Determine an acceptable scalar style.
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
- if no_tag && !event.implicit && !event.quoted_implicit {
- return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
- }
-
- style := event.scalar_style()
- if style == yaml_ANY_SCALAR_STYLE {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- if emitter.canonical {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- if emitter.simple_key_context && emitter.scalar_data.multiline {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
-
- if style == yaml_PLAIN_SCALAR_STYLE {
- if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
- emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if no_tag && !event.implicit {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
- if !emitter.scalar_data.single_quoted_allowed {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
- if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
-
- if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
- emitter.tag_data.handle = []byte{'!'}
- }
- emitter.scalar_data.style = style
- return true
-}
-
-// Write an anchor.
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
- if emitter.anchor_data.anchor == nil {
- return true
- }
- c := []byte{'&'}
- if emitter.anchor_data.alias {
- c[0] = '*'
- }
- if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
- return false
- }
- return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
- if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
- return true
- }
- if len(emitter.tag_data.handle) > 0 {
- if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
- return false
- }
- if len(emitter.tag_data.suffix) > 0 {
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- }
- } else {
- // [Go] Allocate these slices elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
- return false
- }
- }
- return true
-}
-
-// Write a scalar.
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
- switch emitter.scalar_data.style {
- case yaml_PLAIN_SCALAR_STYLE:
- return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_SINGLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_LITERAL_SCALAR_STYLE:
- return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
-
- case yaml_FOLDED_SCALAR_STYLE:
- return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
- }
- panic("unknown scalar style")
-}
-
-// Write a head comment.
-func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
- if len(emitter.tail_comment) > 0 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
- return false
- }
- emitter.tail_comment = emitter.tail_comment[:0]
- emitter.foot_indent = emitter.indent
- if emitter.foot_indent < 0 {
- emitter.foot_indent = 0
- }
- }
-
- if len(emitter.head_comment) == 0 {
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
- return false
- }
- emitter.head_comment = emitter.head_comment[:0]
- return true
-}
-
-// Write a line comment.
-func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
- if len(emitter.line_comment) == 0 {
- // The next 3 lines are needed to resolve an issue with leading newlines
- // See https://github.com/go-yaml/yaml/issues/755
- // When linebreak is set to true, put_break will be called and will add
- // the needed newline.
- if linebreak && !put_break(emitter) {
- return false
- }
- return true
- }
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
- return false
- }
- emitter.line_comment = emitter.line_comment[:0]
- return true
-}
-
-// Write a foot comment.
-func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
- if len(emitter.foot_comment) == 0 {
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
- return false
- }
- emitter.foot_comment = emitter.foot_comment[:0]
- emitter.foot_indent = emitter.indent
- if emitter.foot_indent < 0 {
- emitter.foot_indent = 0
- }
- return true
-}
-
-// Check if a %YAML directive is valid.
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
- if version_directive.major != 1 || version_directive.minor != 1 {
- return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
- }
- return true
-}
-
-// Check if a %TAG directive is valid.
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
- handle := tag_directive.handle
- prefix := tag_directive.prefix
- if len(handle) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
- }
- if handle[0] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
- }
- if handle[len(handle)-1] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
- }
- for i := 1; i < len(handle)-1; i += width(handle[i]) {
- if !is_alpha(handle, i) {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
- }
- }
- if len(prefix) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
- }
- return true
-}
-
-// Check if an anchor is valid.
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
- if len(anchor) == 0 {
- problem := "anchor value must not be empty"
- if alias {
- problem = "alias value must not be empty"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- for i := 0; i < len(anchor); i += width(anchor[i]) {
- if !is_alpha(anchor, i) {
- problem := "anchor value must contain alphanumerical characters only"
- if alias {
- problem = "alias value must contain alphanumerical characters only"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- }
- emitter.anchor_data.anchor = anchor
- emitter.anchor_data.alias = alias
- return true
-}
-
-// Check if a tag is valid.
-func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
- if len(tag) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
- }
- for i := 0; i < len(emitter.tag_directives); i++ {
- tag_directive := &emitter.tag_directives[i]
- if bytes.HasPrefix(tag, tag_directive.prefix) {
- emitter.tag_data.handle = tag_directive.handle
- emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
- return true
- }
- }
- emitter.tag_data.suffix = tag
- return true
-}
-
-// Check if a scalar is valid.
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
- var (
- block_indicators = false
- flow_indicators = false
- line_breaks = false
- special_characters = false
- tab_characters = false
-
- leading_space = false
- leading_break = false
- trailing_space = false
- trailing_break = false
- break_space = false
- space_break = false
-
- preceded_by_whitespace = false
- followed_by_whitespace = false
- previous_space = false
- previous_break = false
- )
-
- emitter.scalar_data.value = value
-
- if len(value) == 0 {
- emitter.scalar_data.multiline = false
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = false
- return true
- }
-
- if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
- block_indicators = true
- flow_indicators = true
- }
-
- preceded_by_whitespace = true
- for i, w := 0, 0; i < len(value); i += w {
- w = width(value[i])
- followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
-
- if i == 0 {
- switch value[i] {
- case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
- flow_indicators = true
- block_indicators = true
- case '?', ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '-':
- if followed_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- } else {
- switch value[i] {
- case ',', '?', '[', ']', '{', '}':
- flow_indicators = true
- case ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '#':
- if preceded_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- }
-
- if value[i] == '\t' {
- tab_characters = true
- } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
- special_characters = true
- }
- if is_space(value, i) {
- if i == 0 {
- leading_space = true
- }
- if i+width(value[i]) == len(value) {
- trailing_space = true
- }
- if previous_break {
- break_space = true
- }
- previous_space = true
- previous_break = false
- } else if is_break(value, i) {
- line_breaks = true
- if i == 0 {
- leading_break = true
- }
- if i+width(value[i]) == len(value) {
- trailing_break = true
- }
- if previous_space {
- space_break = true
- }
- previous_space = false
- previous_break = true
- } else {
- previous_space = false
- previous_break = false
- }
-
- // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
- preceded_by_whitespace = is_blankz(value, i)
- }
-
- emitter.scalar_data.multiline = line_breaks
- emitter.scalar_data.flow_plain_allowed = true
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = true
-
- if leading_space || leading_break || trailing_space || trailing_break {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if trailing_space {
- emitter.scalar_data.block_allowed = false
- }
- if break_space {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- }
- if space_break || tab_characters || special_characters {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- }
- if space_break || special_characters {
- emitter.scalar_data.block_allowed = false
- }
- if line_breaks {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if flow_indicators {
- emitter.scalar_data.flow_plain_allowed = false
- }
- if block_indicators {
- emitter.scalar_data.block_plain_allowed = false
- }
- return true
-}
-
-// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- emitter.anchor_data.anchor = nil
- emitter.tag_data.handle = nil
- emitter.tag_data.suffix = nil
- emitter.scalar_data.value = nil
-
- if len(event.head_comment) > 0 {
- emitter.head_comment = event.head_comment
- }
- if len(event.line_comment) > 0 {
- emitter.line_comment = event.line_comment
- }
- if len(event.foot_comment) > 0 {
- emitter.foot_comment = event.foot_comment
- }
- if len(event.tail_comment) > 0 {
- emitter.tail_comment = event.tail_comment
- }
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
- return false
- }
-
- case yaml_SCALAR_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- if !yaml_emitter_analyze_scalar(emitter, event.value) {
- return false
- }
-
- case yaml_SEQUENCE_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
-
- case yaml_MAPPING_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- }
- return true
-}
-
-// Write the BOM character.
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
- if !flush(emitter) {
- return false
- }
- pos := emitter.buffer_pos
- emitter.buffer[pos+0] = '\xEF'
- emitter.buffer[pos+1] = '\xBB'
- emitter.buffer[pos+2] = '\xBF'
- emitter.buffer_pos += 3
- return true
-}
-
-func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
- indent := emitter.indent
- if indent < 0 {
- indent = 0
- }
- if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
- if !put_break(emitter) {
- return false
- }
- }
- if emitter.foot_indent == indent {
- if !put_break(emitter) {
- return false
- }
- }
- for emitter.column < indent {
- if !put(emitter, ' ') {
- return false
- }
- }
- emitter.whitespace = true
- //emitter.indention = true
- emitter.space_above = false
- emitter.foot_indent = -1
- return true
-}
-
-func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, indicator) {
- return false
- }
- emitter.whitespace = is_whitespace
- emitter.indention = (emitter.indention && is_indention)
- emitter.open_ended = false
- return true
-}
-
-func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- for i := 0; i < len(value); {
- var must_write bool
- switch value[i] {
- case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
- must_write = true
- default:
- must_write = is_alpha(value, i)
- }
- if must_write {
- if !write(emitter, value, &i) {
- return false
- }
- } else {
- w := width(value[i])
- for k := 0; k < w; k++ {
- octet := value[i]
- i++
- if !put(emitter, '%') {
- return false
- }
-
- c := octet >> 4
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
-
- c = octet & 0x0f
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
- }
- }
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- if len(value) > 0 && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- //emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
-
- if len(value) > 0 {
- emitter.whitespace = false
- }
- emitter.indention = false
- if emitter.root_context {
- emitter.open_ended = true
- }
-
- return true
-}
-
-func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
- return false
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- //emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if value[i] == '\'' {
- if !put(emitter, '\'') {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- spaces := false
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
- return false
- }
-
- for i := 0; i < len(value); {
- if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
- is_bom(value, i) || is_break(value, i) ||
- value[i] == '"' || value[i] == '\\' {
-
- octet := value[i]
-
- var w int
- var v rune
- switch {
- case octet&0x80 == 0x00:
- w, v = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, v = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, v = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, v = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = value[i+k]
- v = (v << 6) + (rune(octet) & 0x3F)
- }
- i += w
-
- if !put(emitter, '\\') {
- return false
- }
-
- var ok bool
- switch v {
- case 0x00:
- ok = put(emitter, '0')
- case 0x07:
- ok = put(emitter, 'a')
- case 0x08:
- ok = put(emitter, 'b')
- case 0x09:
- ok = put(emitter, 't')
- case 0x0A:
- ok = put(emitter, 'n')
- case 0x0b:
- ok = put(emitter, 'v')
- case 0x0c:
- ok = put(emitter, 'f')
- case 0x0d:
- ok = put(emitter, 'r')
- case 0x1b:
- ok = put(emitter, 'e')
- case 0x22:
- ok = put(emitter, '"')
- case 0x5c:
- ok = put(emitter, '\\')
- case 0x85:
- ok = put(emitter, 'N')
- case 0xA0:
- ok = put(emitter, '_')
- case 0x2028:
- ok = put(emitter, 'L')
- case 0x2029:
- ok = put(emitter, 'P')
- default:
- if v <= 0xFF {
- ok = put(emitter, 'x')
- w = 2
- } else if v <= 0xFFFF {
- ok = put(emitter, 'u')
- w = 4
- } else {
- ok = put(emitter, 'U')
- w = 8
- }
- for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
- digit := byte((v >> uint(k)) & 0x0F)
- if digit < 10 {
- ok = put(emitter, digit+'0')
- } else {
- ok = put(emitter, digit+'A'-10)
- }
- }
- }
- if !ok {
- return false
- }
- spaces = false
- } else if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if is_space(value, i+1) {
- if !put(emitter, '\\') {
- return false
- }
- }
- i += width(value[i])
- } else if !write(emitter, value, &i) {
- return false
- }
- spaces = true
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- spaces = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
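
The default branch above emits numeric escapes for any character without a named short form. A minimal standalone sketch of that fallback (hypothetical helper, not part of this package): code points up to 0xFF get \xXX, up to 0xFFFF get \uXXXX, and anything larger gets \UXXXXXXXX.

    package main

    import "fmt"

    // escapeRune mirrors the fallback branch of the switch above.
    func escapeRune(v rune) string {
        switch {
        case v <= 0xFF:
            return fmt.Sprintf(`\x%02X`, v)
        case v <= 0xFFFF:
            return fmt.Sprintf(`\u%04X`, v)
        default:
            return fmt.Sprintf(`\U%08X`, v)
        }
    }

    func main() {
        fmt.Println(escapeRune(0x7F), escapeRune(0x2603), escapeRune(0x1F600)) // \x7F \u2603 \U0001F600
    }
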
-
-func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
- if is_space(value, 0) || is_break(value, 0) {
- indent_hint := []byte{'0' + byte(emitter.best_indent)}
- if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
- return false
- }
- }
-
- emitter.open_ended = false
-
- var chomp_hint [1]byte
- if len(value) == 0 {
- chomp_hint[0] = '-'
- } else {
- i := len(value) - 1
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if !is_break(value, i) {
- chomp_hint[0] = '-'
- } else if i == 0 {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- } else {
- i--
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if is_break(value, i) {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- }
- }
- }
- if chomp_hint[0] != 0 {
- if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
- return false
- }
- }
- return true
-}
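
Read alongside the code above: the chomping hint is '-' when the scalar does not end in a line break, '+' when it is a single break or ends in more than one break, and empty otherwise. A standalone sketch under the simplifying assumption that '\n' is the only break character (the real code also handles '\r' and Unicode breaks):

    package main

    import (
        "fmt"
        "strings"
    )

    // chompHint sketches the chomping decision in
    // yaml_emitter_write_block_scalar_hints.
    func chompHint(s string) string {
        if !strings.HasSuffix(s, "\n") {
            return "-" // strip: there is no final break to preserve
        }
        if s == "\n" || strings.HasSuffix(s, "\n\n") {
            return "+" // keep: trailing breaks beyond the first must survive
        }
        return "" // clip (default): exactly one trailing break
    }

    func main() {
        fmt.Printf("%q %q %q\n", chompHint("a"), chompHint("a\n"), chompHint("a\n\n")) // "-" "" "+"
    }
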
-
-func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
- if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
- return false
- }
- //emitter.indention = true
- emitter.whitespace = true
- breaks := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !write_break(emitter, value, &i) {
- return false
- }
- //emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- breaks = false
- }
- }
-
- return true
-}
-
-func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
- if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
- return false
- }
-
- //emitter.indention = true
- emitter.whitespace = true
-
- breaks := true
- leading_spaces := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !breaks && !leading_spaces && value[i] == '\n' {
- k := 0
- for is_break(value, k) {
- k += width(value[k])
- }
- if !is_blankz(value, k) {
- if !put_break(emitter) {
- return false
- }
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- //emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- leading_spaces = is_blank(value, i)
- }
- if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- emitter.indention = false
- breaks = false
- }
- }
- return true
-}
-
-func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
- breaks := false
- pound := false
- for i := 0; i < len(comment); {
- if is_break(comment, i) {
- if !write_break(emitter, comment, &i) {
- return false
- }
- //emitter.indention = true
- breaks = true
- pound = false
- } else {
- if breaks && !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !pound {
- if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
- return false
- }
- pound = true
- }
- if !write(emitter, comment, &i) {
- return false
- }
- emitter.indention = false
- breaks = false
- }
- }
- if !breaks && !put_break(emitter) {
- return false
- }
-
- emitter.whitespace = true
- //emitter.indention = true
- return true
-}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go
deleted file mode 100644
index de9e72a3e63..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/encode.go
+++ /dev/null
@@ -1,577 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
- "encoding"
- "fmt"
- "io"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-type encoder struct {
- emitter yaml_emitter_t
- event yaml_event_t
- out []byte
- flow bool
- indent int
- doneInit bool
-}
-
-func newEncoder() *encoder {
- e := &encoder{}
- yaml_emitter_initialize(&e.emitter)
- yaml_emitter_set_output_string(&e.emitter, &e.out)
- yaml_emitter_set_unicode(&e.emitter, true)
- return e
-}
-
-func newEncoderWithWriter(w io.Writer) *encoder {
- e := &encoder{}
- yaml_emitter_initialize(&e.emitter)
- yaml_emitter_set_output_writer(&e.emitter, w)
- yaml_emitter_set_unicode(&e.emitter, true)
- return e
-}
-
-func (e *encoder) init() {
- if e.doneInit {
- return
- }
- if e.indent == 0 {
- e.indent = 4
- }
- e.emitter.best_indent = e.indent
- yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
- e.emit()
- e.doneInit = true
-}
-
-func (e *encoder) finish() {
- e.emitter.open_ended = false
- yaml_stream_end_event_initialize(&e.event)
- e.emit()
-}
-
-func (e *encoder) destroy() {
- yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
- // This will internally delete the e.event value.
- e.must(yaml_emitter_emit(&e.emitter, &e.event))
-}
-
-func (e *encoder) must(ok bool) {
- if !ok {
- msg := e.emitter.problem
- if msg == "" {
- msg = "unknown problem generating YAML content"
- }
- failf("%s", msg)
- }
-}
-
-func (e *encoder) marshalDoc(tag string, in reflect.Value) {
- e.init()
- var node *Node
- if in.IsValid() {
- node, _ = in.Interface().(*Node)
- }
- if node != nil && node.Kind == DocumentNode {
- e.nodev(in)
- } else {
- yaml_document_start_event_initialize(&e.event, nil, nil, true)
- e.emit()
- e.marshal(tag, in)
- yaml_document_end_event_initialize(&e.event, true)
- e.emit()
- }
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
- tag = shortTag(tag)
- if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
- e.nilv()
- return
- }
- iface := in.Interface()
- switch value := iface.(type) {
- case *Node:
- e.nodev(in)
- return
- case Node:
- if !in.CanAddr() {
- var n = reflect.New(in.Type()).Elem()
- n.Set(in)
- in = n
- }
- e.nodev(in.Addr())
- return
- case time.Time:
- e.timev(tag, in)
- return
- case *time.Time:
- e.timev(tag, in.Elem())
- return
- case time.Duration:
- e.stringv(tag, reflect.ValueOf(value.String()))
- return
- case Marshaler:
- v, err := value.MarshalYAML()
- if err != nil {
- fail(err)
- }
- if v == nil {
- e.nilv()
- return
- }
- e.marshal(tag, reflect.ValueOf(v))
- return
- case encoding.TextMarshaler:
- text, err := value.MarshalText()
- if err != nil {
- fail(err)
- }
- in = reflect.ValueOf(string(text))
- case nil:
- e.nilv()
- return
- }
- switch in.Kind() {
- case reflect.Interface:
- e.marshal(tag, in.Elem())
- case reflect.Map:
- e.mapv(tag, in)
- case reflect.Ptr:
- e.marshal(tag, in.Elem())
- case reflect.Struct:
- e.structv(tag, in)
- case reflect.Slice, reflect.Array:
- e.slicev(tag, in)
- case reflect.String:
- e.stringv(tag, in)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- e.intv(tag, in)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- e.uintv(tag, in)
- case reflect.Float32, reflect.Float64:
- e.floatv(tag, in)
- case reflect.Bool:
- e.boolv(tag, in)
- default:
- panic("cannot marshal type: " + in.Type().String())
- }
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- keys := keyList(in.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- e.marshal("", k)
- e.marshal("", in.MapIndex(k))
- }
- })
-}
-
-func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
- for _, num := range index {
- for {
- if v.Kind() == reflect.Ptr {
- if v.IsNil() {
- return reflect.Value{}
- }
- v = v.Elem()
- continue
- }
- break
- }
- v = v.Field(num)
- }
- return v
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
- sinfo, err := getStructInfo(in.Type())
- if err != nil {
- panic(err)
- }
- e.mappingv(tag, func() {
- for _, info := range sinfo.FieldsList {
- var value reflect.Value
- if info.Inline == nil {
- value = in.Field(info.Num)
- } else {
- value = e.fieldByIndex(in, info.Inline)
- if !value.IsValid() {
- continue
- }
- }
- if info.OmitEmpty && isZero(value) {
- continue
- }
- e.marshal("", reflect.ValueOf(info.Key))
- e.flow = info.Flow
- e.marshal("", value)
- }
- if sinfo.InlineMap >= 0 {
- m := in.Field(sinfo.InlineMap)
- if m.Len() > 0 {
- e.flow = false
- keys := keyList(m.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- if _, found := sinfo.FieldsMap[k.String()]; found {
- panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
- }
- e.marshal("", k)
- e.flow = false
- e.marshal("", m.MapIndex(k))
- }
- }
- }
- })
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
- implicit := tag == ""
- style := yaml_BLOCK_MAPPING_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_MAPPING_STYLE
- }
- yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
- e.emit()
- f()
- yaml_mapping_end_event_initialize(&e.event)
- e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
- implicit := tag == ""
- style := yaml_BLOCK_SEQUENCE_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_SEQUENCE_STYLE
- }
- e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- n := in.Len()
- for i := 0; i < n; i++ {
- e.marshal("", in.Index(i))
- }
- e.must(yaml_sequence_end_event_initialize(&e.event))
- e.emit()
-}
-
-// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but such values are still marshalled quoted
-// for the time being, for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
- // Fast path.
- if s == "" {
- return false
- }
- c := s[0]
- if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
- return false
- }
- // Do the full match.
- return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
-// isOldBool returns whether s is bool notation as defined in YAML 1.1.
-//
-// We continue to force strings that YAML 1.1 would interpret as booleans to be
-// rendered as quoted strings so that the marshalled output remains valid for YAML 1.1
-// parsing.
-func isOldBool(s string) (result bool) {
- switch s {
- case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
- "n", "N", "no", "No", "NO", "off", "Off", "OFF":
- return true
- default:
- return false
- }
-}
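
The practical effect, shown through the upstream gopkg.in/yaml.v3 API that this vendored fork tracks: a string that YAML 1.1 would read as a boolean comes out double-quoted, so older parsers cannot misread it.

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        out, err := yaml.Marshal(map[string]string{"enabled": "yes"})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out)) // enabled: "yes"
    }
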
-
-func (e *encoder) stringv(tag string, in reflect.Value) {
- var style yaml_scalar_style_t
- s := in.String()
- canUsePlain := true
- switch {
- case !utf8.ValidString(s):
- if tag == binaryTag {
- failf("explicitly tagged !!binary data must be base64-encoded")
- }
- if tag != "" {
- failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
- }
- // It can't be encoded directly as YAML so use a binary tag
- // and encode it as base64.
- tag = binaryTag
- s = encodeBase64(s)
- case tag == "":
- // Check to see if it would resolve to a specific
- // tag when encoded unquoted. If it doesn't,
- // there's no need to quote it.
- rtag, _ := resolve("", s)
- canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
- }
- // Note: it's possible for user code to emit invalid YAML
- // if it explicitly specifies a tag and a string containing
- // text that's incompatible with that tag.
- switch {
- case strings.Contains(s, "\n"):
- if e.flow {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- } else {
- style = yaml_LITERAL_SCALAR_STYLE
- }
- case canUsePlain:
- style = yaml_PLAIN_SCALAR_STYLE
- default:
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
- var s string
- if in.Bool() {
- s = "true"
- } else {
- s = "false"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
- s := strconv.FormatInt(in.Int(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
- s := strconv.FormatUint(in.Uint(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) timev(tag string, in reflect.Value) {
- t := in.Interface().(time.Time)
- s := t.Format(time.RFC3339Nano)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
- // Issue #352: When formatting, use the precision of the underlying value
- precision := 64
- if in.Kind() == reflect.Float32 {
- precision = 32
- }
-
- s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
- switch s {
- case "+Inf":
- s = ".inf"
- case "-Inf":
- s = "-.inf"
- case "NaN":
- s = ".nan"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
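
strconv formats the IEEE special values as "+Inf", "-Inf", and "NaN", none of which is valid YAML; the switch above rewrites them to the YAML spellings. Observable through the upstream gopkg.in/yaml.v3 API:

    package main

    import (
        "fmt"
        "math"

        "gopkg.in/yaml.v3"
    )

    func main() {
        out, err := yaml.Marshal(map[string]float64{"nan": math.NaN(), "up": math.Inf(1)})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // nan: .nan
        // up: .inf
    }
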
-
-func (e *encoder) nilv() {
- e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
- // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
- implicit := tag == ""
- if !implicit {
- tag = longTag(tag)
- }
- e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
- e.event.head_comment = head
- e.event.line_comment = line
- e.event.foot_comment = foot
- e.event.tail_comment = tail
- e.emit()
-}
-
-func (e *encoder) nodev(in reflect.Value) {
- e.node(in.Interface().(*Node), "")
-}
-
-func (e *encoder) node(node *Node, tail string) {
- // Zero nodes behave as nil.
- if node.Kind == 0 && node.IsZero() {
- e.nilv()
- return
- }
-
- // If the tag was not explicitly requested, and dropping it won't change the
- // implicit tag of the value, don't include it in the presentation.
- var tag = node.Tag
- var stag = shortTag(tag)
- var forceQuoting bool
- if tag != "" && node.Style&TaggedStyle == 0 {
- if node.Kind == ScalarNode {
- if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
- tag = ""
- } else {
- rtag, _ := resolve("", node.Value)
- if rtag == stag {
- tag = ""
- } else if stag == strTag {
- tag = ""
- forceQuoting = true
- }
- }
- } else {
- var rtag string
- switch node.Kind {
- case MappingNode:
- rtag = mapTag
- case SequenceNode:
- rtag = seqTag
- }
- if rtag == stag {
- tag = ""
- }
- }
- }
-
- switch node.Kind {
- case DocumentNode:
- yaml_document_start_event_initialize(&e.event, nil, nil, true)
- e.event.head_comment = []byte(node.HeadComment)
- e.emit()
- for _, node := range node.Content {
- e.node(node, "")
- }
- yaml_document_end_event_initialize(&e.event, true)
- e.event.foot_comment = []byte(node.FootComment)
- e.emit()
-
- case SequenceNode:
- style := yaml_BLOCK_SEQUENCE_STYLE
- if node.Style&FlowStyle != 0 {
- style = yaml_FLOW_SEQUENCE_STYLE
- }
- e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
- e.event.head_comment = []byte(node.HeadComment)
- e.emit()
- for _, node := range node.Content {
- e.node(node, "")
- }
- e.must(yaml_sequence_end_event_initialize(&e.event))
- e.event.line_comment = []byte(node.LineComment)
- e.event.foot_comment = []byte(node.FootComment)
- e.emit()
-
- case MappingNode:
- style := yaml_BLOCK_MAPPING_STYLE
- if node.Style&FlowStyle != 0 {
- style = yaml_FLOW_MAPPING_STYLE
- }
- yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
- e.event.tail_comment = []byte(tail)
- e.event.head_comment = []byte(node.HeadComment)
- e.emit()
-
- // The tail logic below moves the foot comment of prior keys to the following key,
- // since the value for each key may be a nested structure and the foot needs to be
- // processed only after the entirety of the value is streamed. The last tail is processed
- // with the mapping end event.
- var tail string
- for i := 0; i+1 < len(node.Content); i += 2 {
- k := node.Content[i]
- foot := k.FootComment
- if foot != "" {
- kopy := *k
- kopy.FootComment = ""
- k = &kopy
- }
- e.node(k, tail)
- tail = foot
-
- v := node.Content[i+1]
- e.node(v, "")
- }
-
- yaml_mapping_end_event_initialize(&e.event)
- e.event.tail_comment = []byte(tail)
- e.event.line_comment = []byte(node.LineComment)
- e.event.foot_comment = []byte(node.FootComment)
- e.emit()
-
- case AliasNode:
- yaml_alias_event_initialize(&e.event, []byte(node.Value))
- e.event.head_comment = []byte(node.HeadComment)
- e.event.line_comment = []byte(node.LineComment)
- e.event.foot_comment = []byte(node.FootComment)
- e.emit()
-
- case ScalarNode:
- value := node.Value
- if !utf8.ValidString(value) {
- if stag == binaryTag {
- failf("explicitly tagged !!binary data must be base64-encoded")
- }
- if stag != "" {
- failf("cannot marshal invalid UTF-8 data as %s", stag)
- }
- // It can't be encoded directly as YAML so use a binary tag
- // and encode it as base64.
- tag = binaryTag
- value = encodeBase64(value)
- }
-
- style := yaml_PLAIN_SCALAR_STYLE
- switch {
- case node.Style&DoubleQuotedStyle != 0:
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- case node.Style&SingleQuotedStyle != 0:
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- case node.Style&LiteralStyle != 0:
- style = yaml_LITERAL_SCALAR_STYLE
- case node.Style&FoldedStyle != 0:
- style = yaml_FOLDED_SCALAR_STYLE
- case strings.Contains(value, "\n"):
- style = yaml_LITERAL_SCALAR_STYLE
- case forceQuoting:
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
-
- e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
- default:
- failf("cannot encode node with unknown kind %d", node.Kind)
- }
-}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go
deleted file mode 100644
index 268558a0d63..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/parserc.go
+++ /dev/null
@@ -1,1258 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "bytes"
-)
-
-// The parser implements the following grammar:
-//
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// implicit_document ::= block_node DOCUMENT-END*
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// | properties (block_content | indentless_block_sequence)?
-// | block_content
-// | indentless_block_sequence
-// block_node ::= ALIAS
-// | properties block_content?
-// | block_content
-// flow_node ::= ALIAS
-// | properties flow_content?
-// | flow_content
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// block_content ::= block_collection | flow_collection | SCALAR
-// flow_content ::= flow_collection | SCALAR
-// block_collection ::= block_sequence | block_mapping
-// flow_collection ::= flow_sequence | flow_mapping
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// block_mapping ::= BLOCK-MAPPING_START
-// ((KEY block_node_or_indentless_sequence?)?
-// (VALUE block_node_or_indentless_sequence?)?)*
-// BLOCK-END
-// flow_sequence ::= FLOW-SEQUENCE-START
-// (flow_sequence_entry FLOW-ENTRY)*
-// flow_sequence_entry?
-// FLOW-SEQUENCE-END
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// flow_mapping ::= FLOW-MAPPING-START
-// (flow_mapping_entry FLOW-ENTRY)*
-// flow_mapping_entry?
-// FLOW-MAPPING-END
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-// Peek the next token in the token queue.
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
- if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
- token := &parser.tokens[parser.tokens_head]
- yaml_parser_unfold_comments(parser, token)
- return token
- }
- return nil
-}
-
-// yaml_parser_unfold_comments walks through the comments queue and joins all
-// comments behind the position of the provided token into the respective
-// top-level comment slices in the parser.
-func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
- for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
- comment := &parser.comments[parser.comments_head]
- if len(comment.head) > 0 {
- if token.typ == yaml_BLOCK_END_TOKEN {
- // No heads on ends, so keep comment.head for a follow-up token.
- break
- }
- if len(parser.head_comment) > 0 {
- parser.head_comment = append(parser.head_comment, '\n')
- }
- parser.head_comment = append(parser.head_comment, comment.head...)
- }
- if len(comment.foot) > 0 {
- if len(parser.foot_comment) > 0 {
- parser.foot_comment = append(parser.foot_comment, '\n')
- }
- parser.foot_comment = append(parser.foot_comment, comment.foot...)
- }
- if len(comment.line) > 0 {
- if len(parser.line_comment) > 0 {
- parser.line_comment = append(parser.line_comment, '\n')
- }
- parser.line_comment = append(parser.line_comment, comment.line...)
- }
- *comment = yaml_comment_t{}
- parser.comments_head++
- }
-}
-
-// Remove the next token from the queue (must be called after peek_token).
-func skip_token(parser *yaml_parser_t) {
- parser.token_available = false
- parser.tokens_parsed++
- parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
- parser.tokens_head++
-}
-
-// Get the next event.
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
- // Erase the event object.
- *event = yaml_event_t{}
-
- // No events after the end of the stream or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
- return true
- }
-
- // Generate the next event.
- return yaml_parser_state_machine(parser, event)
-}
-
-// Set parser error.
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-// State dispatcher.
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
- //trace("yaml_parser_state_machine", "state:", parser.state.String())
-
- switch parser.state {
- case yaml_PARSE_STREAM_START_STATE:
- return yaml_parser_parse_stream_start(parser, event)
-
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, true)
-
- case yaml_PARSE_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, false)
-
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return yaml_parser_parse_document_content(parser, event)
-
- case yaml_PARSE_DOCUMENT_END_STATE:
- return yaml_parser_parse_document_end(parser, event)
-
- case yaml_PARSE_BLOCK_NODE_STATE:
- return yaml_parser_parse_node(parser, event, true, false)
-
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return yaml_parser_parse_node(parser, event, true, true)
-
- case yaml_PARSE_FLOW_NODE_STATE:
- return yaml_parser_parse_node(parser, event, false, false)
-
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, true)
-
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, false)
-
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_indentless_sequence_entry(parser, event)
-
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, true)
-
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, false)
-
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return yaml_parser_parse_block_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, true)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, false)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
-
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, true)
-
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, true)
-
- default:
- panic("invalid parser state")
- }
-}
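
For intuition about what this state machine yields: a one-key document walks through stream start, implicit document start, block mapping, two scalars, and the matching end states. The resulting shape is visible through the public node API of upstream gopkg.in/yaml.v3, which this fork tracks:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var doc yaml.Node
        if err := yaml.Unmarshal([]byte("a: 1\n"), &doc); err != nil {
            panic(err)
        }
        // DocumentNode -> MappingNode -> [ScalarNode "a", ScalarNode "1"],
        // mirroring the DOCUMENT-START / MAPPING-START / SCALAR events.
        m := doc.Content[0]
        fmt.Println(doc.Kind == yaml.DocumentNode, m.Kind == yaml.MappingNode, len(m.Content)) // true true 2
    }
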
-
-// Parse the production:
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_STREAM_START_TOKEN {
- return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
- }
- parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- encoding: token.encoding,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- // Parse extra document end indicators.
- if !implicit {
- for token.typ == yaml_DOCUMENT_END_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
- token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
- token.typ != yaml_DOCUMENT_START_TOKEN &&
- token.typ != yaml_STREAM_END_TOKEN {
- // Parse an implicit document.
- if !yaml_parser_process_directives(parser, nil, nil) {
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
- var head_comment []byte
- if len(parser.head_comment) > 0 {
- // [Go] Scan the header comment backwards, and if an empty line is found, break
- // the header so the part before the last empty line goes into the
- // document header, while the bottom of it goes into a follow-up event.
- for i := len(parser.head_comment) - 1; i > 0; i-- {
- if parser.head_comment[i] == '\n' {
- if i == len(parser.head_comment)-1 {
- head_comment = parser.head_comment[:i]
- parser.head_comment = parser.head_comment[i+1:]
- break
- } else if parser.head_comment[i-1] == '\n' {
- head_comment = parser.head_comment[:i-1]
- parser.head_comment = parser.head_comment[i+1:]
- break
- }
- }
- }
- }
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
-
- head_comment: head_comment,
- }
-
- } else if token.typ != yaml_STREAM_END_TOKEN {
- // Parse an explicit document.
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
- start_mark := token.start_mark
- if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
- return false
- }
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_DOCUMENT_START_TOKEN {
- yaml_parser_set_parser_error(parser,
- "did not find expected ", token.start_mark)
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
- end_mark := token.end_mark
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: false,
- }
- skip_token(parser)
-
- } else {
- // Parse the stream end.
- parser.state = yaml_PARSE_END_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- }
-
- return true
-}
-
-// Parse the productions:
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// ***********
-//
-func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
- token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
- token.typ == yaml_DOCUMENT_START_TOKEN ||
- token.typ == yaml_DOCUMENT_END_TOKEN ||
- token.typ == yaml_STREAM_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- return yaml_parser_process_empty_scalar(parser, event,
- token.start_mark)
- }
- return yaml_parser_parse_node(parser, event, true, false)
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *************
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//
-func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- implicit := true
- if token.typ == yaml_DOCUMENT_END_TOKEN {
- end_mark = token.end_mark
- skip_token(parser)
- implicit = false
- }
-
- parser.tag_directives = parser.tag_directives[:0]
-
- parser.state = yaml_PARSE_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- implicit: implicit,
- }
- yaml_parser_set_event_comments(parser, event)
- if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
- event.foot_comment = event.head_comment
- event.head_comment = nil
- }
- return true
-}
-
-func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
- event.head_comment = parser.head_comment
- event.line_comment = parser.line_comment
- event.foot_comment = parser.foot_comment
- parser.head_comment = nil
- parser.line_comment = nil
- parser.foot_comment = nil
- parser.tail_comment = nil
- parser.stem_comment = nil
-}
-
-// Parse the productions:
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// *****
-// | properties (block_content | indentless_block_sequence)?
-// ********** *
-// | block_content | indentless_block_sequence
-// *
-// block_node ::= ALIAS
-// *****
-// | properties block_content?
-// ********** *
-// | block_content
-// *
-// flow_node ::= ALIAS
-// *****
-// | properties flow_content?
-// ********** *
-// | flow_content
-// *
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// *************************
-// block_content ::= block_collection | flow_collection | SCALAR
-// ******
-// flow_content ::= flow_collection | SCALAR
-// ******
-func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
- //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_ALIAS_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- *event = yaml_event_t{
- typ: yaml_ALIAS_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- anchor: token.value,
- }
- yaml_parser_set_event_comments(parser, event)
- skip_token(parser)
- return true
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- var tag_token bool
- var tag_handle, tag_suffix, anchor []byte
- var tag_mark yaml_mark_t
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- start_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- } else if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- start_mark = token.start_mark
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- var tag []byte
- if tag_token {
- if len(tag_handle) == 0 {
- tag = tag_suffix
- tag_suffix = nil
- } else {
- for i := range parser.tag_directives {
- if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
- tag = append([]byte(nil), parser.tag_directives[i].prefix...)
- tag = append(tag, tag_suffix...)
- break
- }
- }
- if len(tag) == 0 {
- yaml_parser_set_parser_error_context(parser,
- "while parsing a node", start_mark,
- "found undefined tag handle", tag_mark)
- return false
- }
- }
- }
-
- implicit := len(tag) == 0
- if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_SCALAR_TOKEN {
- var plain_implicit, quoted_implicit bool
- end_mark = token.end_mark
- if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
- plain_implicit = true
- } else if len(tag) == 0 {
- quoted_implicit = true
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- value: token.value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(token.style),
- }
- yaml_parser_set_event_comments(parser, event)
- skip_token(parser)
- return true
- }
- if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
- // [Go] Some of the events below can be merged as they differ only on style.
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
- }
- yaml_parser_set_event_comments(parser, event)
- return true
- }
- if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- yaml_parser_set_event_comments(parser, event)
- return true
- }
- if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- if parser.stem_comment != nil {
- event.head_comment = parser.stem_comment
- parser.stem_comment = nil
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
- }
- if parser.stem_comment != nil {
- event.head_comment = parser.stem_comment
- parser.stem_comment = nil
- }
- return true
- }
- if len(anchor) > 0 || len(tag) > 0 {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- quoted_implicit: false,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
- }
-
- context := "while parsing a flow node"
- if block {
- context = "while parsing a block node"
- }
- yaml_parser_set_parser_error_context(parser, context, start_mark,
- "did not find expected node content", token.start_mark)
- return false
-}
-
-// Parse the productions:
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// ******************** *********** * *********
-//
-func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- prior_head_len := len(parser.head_comment)
- skip_token(parser)
- yaml_parser_split_stem_comment(parser, prior_head_len)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- } else {
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- }
- if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block collection", context_mark,
- "did not find expected '-' indicator", token.start_mark)
-}
-
-// Parse the productions:
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// *********** *
-func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- prior_head_len := len(parser.head_comment)
- skip_token(parser)
- yaml_parser_split_stem_comment(parser, prior_head_len)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
- token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- }
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
- }
- return true
-}
-
-// Split stem comment from head comment.
-//
-// When a sequence or map is found under a sequence entry, the former head comment
-// is assigned to the underlying sequence or map as a whole, not to the individual
-// sequence or map entry as would be expected otherwise. To handle this case the
-// previous head comment is moved aside as the stem comment.
-func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
- if stem_len == 0 {
- return
- }
-
- token := peek_token(parser)
- if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
- return
- }
-
- parser.stem_comment = parser.head_comment[:stem_len]
- if len(parser.head_comment) == stem_len {
- parser.head_comment = nil
- } else {
- // Copy suffix to prevent very strange bugs if someone ever appends
- // further bytes to the prefix in the stem_comment slice above.
- parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
- }
-}
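
A sketch of the situation this handles, as I read the code (illustrative YAML in a Go string; the exact comment attachment described is my reading, worth verifying against the decoder):

    package main

    // stemExample is illustrative only. Per the doc comment above, the head
    // comment gathered before the "-" entry is moved aside as the stem
    // comment and re-attached to the nested sequence as a whole, not to "a".
    const stemExample = `
    # becomes the stem comment of the nested sequence
    - - a
      - b
    `

    func main() {}
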
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-// *******************
-// ((KEY block_node_or_indentless_sequence?)?
-// *** *
-// (VALUE block_node_or_indentless_sequence?)?)*
-//
-// BLOCK-END
-// *********
-//
-func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- // [Go] A tail comment was left from the prior mapping value processed. Emit an event
- // as it needs to be processed with that value and not the following key.
- if len(parser.tail_comment) > 0 {
- *event = yaml_event_t{
- typ: yaml_TAIL_COMMENT_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- foot_comment: parser.tail_comment,
- }
- parser.tail_comment = nil
- return true
- }
-
- if token.typ == yaml_KEY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- } else {
- parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- } else if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- yaml_parser_set_event_comments(parser, event)
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block mapping", context_mark,
- "did not find expected key", token.start_mark)
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-//
-// ((KEY block_node_or_indentless_sequence?)?
-//
-// (VALUE block_node_or_indentless_sequence?)?)*
-// ***** *
-// BLOCK-END
-//
-//
-func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence ::= FLOW-SEQUENCE-START
-// *******************
-// (flow_sequence_entry FLOW-ENTRY)*
-// * **********
-// flow_sequence_entry?
-// *
-// FLOW-SEQUENCE-END
-// *****************
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow sequence", context_mark,
- "did not find expected ',' or ']'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- implicit: true,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- skip_token(parser)
- return true
- } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- yaml_parser_set_event_comments(parser, event)
-
- skip_token(parser)
- return true
-}
-
-//
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- mark := token.end_mark
- skip_token(parser)
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// ***** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// flow_mapping ::= FLOW-MAPPING-START
-// ******************
-// (flow_mapping_entry FLOW-ENTRY)*
-// * **********
-// flow_mapping_entry?
-// ******************
-// FLOW-MAPPING-END
-// ****************
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * *** *
-//
-func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow mapping", context_mark,
- "did not find expected ',' or '}'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- } else {
- parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- yaml_parser_set_event_comments(parser, event)
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * ***** *
-//
-func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if empty {
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Generate an empty scalar event.
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: mark,
- end_mark: mark,
- value: nil, // Empty
- implicit: true,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
-}
-
-var default_tag_directives = []yaml_tag_directive_t{
- {[]byte("!"), []byte("!")},
- {[]byte("!!"), []byte("tag:yaml.org,2002:")},
-}
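
These defaults are what make the familiar "!!" shorthand work. A hypothetical standalone sketch of the expansion that yaml_parser_parse_node performs against this table:

    package main

    import "fmt"

    // expandTag resolves a tag handle against the default directives above:
    // "!!" maps to the YAML core-schema prefix, so "!!str" expands to
    // "tag:yaml.org,2002:str".
    func expandTag(handle, suffix string) string {
        directives := map[string]string{
            "!":  "!",
            "!!": "tag:yaml.org,2002:",
        }
        return directives[handle] + suffix
    }

    func main() {
        fmt.Println(expandTag("!!", "str")) // tag:yaml.org,2002:str
    }
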
-
-// Parse directives.
-func yaml_parser_process_directives(parser *yaml_parser_t,
- version_directive_ref **yaml_version_directive_t,
- tag_directives_ref *[]yaml_tag_directive_t) bool {
-
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
- if version_directive != nil {
- yaml_parser_set_parser_error(parser,
- "found duplicate %YAML directive", token.start_mark)
- return false
- }
- if token.major != 1 || token.minor != 1 {
- yaml_parser_set_parser_error(parser,
- "found incompatible YAML document", token.start_mark)
- return false
- }
- version_directive = &yaml_version_directive_t{
- major: token.major,
- minor: token.minor,
- }
- } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- value := yaml_tag_directive_t{
- handle: token.value,
- prefix: token.prefix,
- }
- if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
- return false
- }
- tag_directives = append(tag_directives, value)
- }
-
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
-
- for i := range default_tag_directives {
- if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
- return false
- }
- }
-
- if version_directive_ref != nil {
- *version_directive_ref = version_directive
- }
- if tag_directives_ref != nil {
- *tag_directives_ref = tag_directives
- }
- return true
-}
-
-// Append a tag directive to the directives stack.
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
- for i := range parser.tag_directives {
- if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
- }
- }
-
- // [Go] I suspect the copy is unnecessary. This was likely done
- // because there was no way to track ownership of the data.
- value_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(value_copy.handle, value.handle)
- copy(value_copy.prefix, value.prefix)
- parser.tag_directives = append(parser.tag_directives, value_copy)
- return true
-}
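// A minimal, standalone sketch of the handle expansion that the tag
// directives above enable. The handle table mirrors default_tag_directives
// ("!" -> "!" and "!!" -> "tag:yaml.org,2002:"); expandTag is an
// illustrative helper and a simplification (the real scanner splits the
// handle and suffix itself), not part of the vendored parser.
package main

import (
	"fmt"
	"strings"
)

func expandTag(handles map[string]string, tag string) string {
	// Try the longer "!!" handle first so it is not shadowed by "!".
	if strings.HasPrefix(tag, "!!") {
		return handles["!!"] + tag[2:]
	}
	if strings.HasPrefix(tag, "!") {
		return handles["!"] + tag[1:]
	}
	return tag
}

func main() {
	handles := map[string]string{
		"!":  "!",
		"!!": "tag:yaml.org,2002:",
	}
	fmt.Println(expandTag(handles, "!!str"))  // tag:yaml.org,2002:str
	fmt.Println(expandTag(handles, "!local")) // !local (unresolved local tag)
}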
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go
deleted file mode 100644
index b98c3321ed0..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-// yaml_emitter_increase_indent preserves the original signature and delegates to
-// yaml_emitter_increase_indent_compact without compact-sequence indentation
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
- return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
-}
-
-// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
-func (e *Encoder) CompactSeqIndent() {
- e.encoder.emitter.compact_sequence_indent = true
-}
-
-// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
-func (e *Encoder) DefaultSeqIndent() {
- e.encoder.emitter.compact_sequence_indent = false
-}
-
-// yaml_emitter_process_line_comment preserves the original signature and delegates to
-// yaml_emitter_process_line_comment_linebreak passing false for linebreak
-func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
- return yaml_emitter_process_line_comment_linebreak(emitter, false)
-}
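// The two Encoder methods above only toggle compact_sequence_indent; the
// visible effect is whether "- " counts as part of a sequence item's
// indentation. A usage sketch, assuming this fork keeps the
// NewEncoder/Encode/Close API of gopkg.in/yaml.v3:
package main

import (
	"os"

	yaml "sigs.k8s.io/yaml/goyaml.v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	enc.CompactSeqIndent() // sequences render as "key:\n- a\n- b"
	_ = enc.Encode(map[string][]string{"key": {"a", "b"}})
	_ = enc.Close()
}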
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go
deleted file mode 100644
index b7de0a89c46..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/readerc.go
+++ /dev/null
@@ -1,434 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "io"
-)
-
-// Set the reader error and return 0.
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
- parser.error = yaml_READER_ERROR
- parser.problem = problem
- parser.problem_offset = offset
- parser.problem_value = value
- return false
-}
-
-// Byte order marks.
-const (
- bom_UTF8 = "\xef\xbb\xbf"
- bom_UTF16LE = "\xff\xfe"
- bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
- // Ensure that we have enough bytes in the raw buffer.
- for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
- if !yaml_parser_update_raw_buffer(parser) {
- return false
- }
- }
-
- // Determine the encoding.
- buf := parser.raw_buffer
- pos := parser.raw_buffer_pos
- avail := len(buf) - pos
- if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
- parser.encoding = yaml_UTF16LE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
- parser.encoding = yaml_UTF16BE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
- parser.encoding = yaml_UTF8_ENCODING
- parser.raw_buffer_pos += 3
- parser.offset += 3
- } else {
- parser.encoding = yaml_UTF8_ENCODING
- }
- return true
-}
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
- size_read := 0
-
- // Return if the raw buffer is full.
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
- return true
- }
-
- // Return on EOF.
- if parser.eof {
- return true
- }
-
- // Move the remaining bytes in the raw buffer to the beginning.
- if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
- copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
- }
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
- parser.raw_buffer_pos = 0
-
- // Call the read handler to fill the buffer.
- size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
- if err == io.EOF {
- parser.eof = true
- } else if err != nil {
- return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
- }
- return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less than the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
- if parser.read_handler == nil {
- panic("read handler must be set")
- }
-
- // [Go] This function was changed to guarantee the requested length size at EOF.
- // The fact we need to do this is pretty awful, but the description above implies
- // that this should be the case, and there are tests that rely on it.
-
- // If the EOF flag is set and the raw buffer is empty, do nothing.
- if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
- // [Go] ACTUALLY! Read the documentation of this function above.
- // This is just broken. To return true, we need to have the
- // given length in the buffer. Not doing that means every single
- // check that calls this function to make sure the buffer has a
- // given length is (in Go) panicking, or (in C) accessing invalid memory.
- //return true
- }
-
- // Return if the buffer contains enough characters.
- if parser.unread >= length {
- return true
- }
-
- // Determine the input encoding if it is not known yet.
- if parser.encoding == yaml_ANY_ENCODING {
- if !yaml_parser_determine_encoding(parser) {
- return false
- }
- }
-
- // Move the unread characters to the beginning of the buffer.
- buffer_len := len(parser.buffer)
- if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
- copy(parser.buffer, parser.buffer[parser.buffer_pos:])
- buffer_len -= parser.buffer_pos
- parser.buffer_pos = 0
- } else if parser.buffer_pos == buffer_len {
- buffer_len = 0
- parser.buffer_pos = 0
- }
-
- // Open the whole buffer for writing, and cut it before returning.
- parser.buffer = parser.buffer[:cap(parser.buffer)]
-
- // Fill the buffer until it has enough characters.
- first := true
- for parser.unread < length {
-
- // Fill the raw buffer if necessary.
- if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
- if !yaml_parser_update_raw_buffer(parser) {
- parser.buffer = parser.buffer[:buffer_len]
- return false
- }
- }
- first = false
-
- // Decode the raw buffer.
- inner:
- for parser.raw_buffer_pos != len(parser.raw_buffer) {
- var value rune
- var width int
-
- raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
-
- // Decode the next character.
- switch parser.encoding {
- case yaml_UTF8_ENCODING:
- // Decode a UTF-8 character. Check RFC 3629
- // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
- //
- // The following table (taken from the RFC) is used for
- // decoding.
- //
- // Char. number range | UTF-8 octet sequence
- // (hexadecimal) | (binary)
- // --------------------+------------------------------------
- // 0000 0000-0000 007F | 0xxxxxxx
- // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
- // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
- // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- //
- // Additionally, the characters in the range 0xD800-0xDFFF
- // are prohibited as they are reserved for use with UTF-16
- // surrogate pairs.
-
- // Determine the length of the UTF-8 sequence.
- octet := parser.raw_buffer[parser.raw_buffer_pos]
- switch {
- case octet&0x80 == 0x00:
- width = 1
- case octet&0xE0 == 0xC0:
- width = 2
- case octet&0xF0 == 0xE0:
- width = 3
- case octet&0xF8 == 0xF0:
- width = 4
- default:
- // The leading octet is invalid.
- return yaml_parser_set_reader_error(parser,
- "invalid leading UTF-8 octet",
- parser.offset, int(octet))
- }
-
- // Check if the raw buffer contains an incomplete character.
- if width > raw_unread {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-8 octet sequence",
- parser.offset, -1)
- }
- break inner
- }
-
- // Decode the leading octet.
- switch {
- case octet&0x80 == 0x00:
- value = rune(octet & 0x7F)
- case octet&0xE0 == 0xC0:
- value = rune(octet & 0x1F)
- case octet&0xF0 == 0xE0:
- value = rune(octet & 0x0F)
- case octet&0xF8 == 0xF0:
- value = rune(octet & 0x07)
- default:
- value = 0
- }
-
- // Check and decode the trailing octets.
- for k := 1; k < width; k++ {
- octet = parser.raw_buffer[parser.raw_buffer_pos+k]
-
- // Check if the octet is valid.
- if (octet & 0xC0) != 0x80 {
- return yaml_parser_set_reader_error(parser,
- "invalid trailing UTF-8 octet",
- parser.offset+k, int(octet))
- }
-
- // Decode the octet.
- value = (value << 6) + rune(octet&0x3F)
- }
-
- // Check the length of the sequence against the value.
- switch {
- case width == 1:
- case width == 2 && value >= 0x80:
- case width == 3 && value >= 0x800:
- case width == 4 && value >= 0x10000:
- default:
- return yaml_parser_set_reader_error(parser,
- "invalid length of a UTF-8 sequence",
- parser.offset, -1)
- }
-
- // Check the range of the value.
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
- return yaml_parser_set_reader_error(parser,
- "invalid Unicode character",
- parser.offset, int(value))
- }
-
- case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
- var low, high int
- if parser.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- low, high = 1, 0
- }
-
- // The UTF-16 encoding is not as simple as one might
- // naively think. Check RFC 2781
- // (http://www.ietf.org/rfc/rfc2781.txt).
- //
- // Normally, two subsequent bytes describe a Unicode
- // character. However a special technique (called a
- // surrogate pair) is used for specifying character
- // values larger than 0xFFFF.
- //
- // A surrogate pair consists of two pseudo-characters:
- // high surrogate area (0xD800-0xDBFF)
- // low surrogate area (0xDC00-0xDFFF)
- //
- // The following formulas are used for decoding
- // and encoding characters using surrogate pairs:
- //
- // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
- // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
- // W1 = 110110yyyyyyyyyy
- // W2 = 110111xxxxxxxxxx
- //
- // where U is the character value, W1 is the high surrogate
- // area, W2 is the low surrogate area.
-
- // Check for incomplete UTF-16 character.
- if raw_unread < 2 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 character",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the character.
- value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
-
- // Check for unexpected low surrogate area.
- if value&0xFC00 == 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "unexpected low surrogate area",
- parser.offset, int(value))
- }
-
- // Check for a high surrogate area.
- if value&0xFC00 == 0xD800 {
- width = 4
-
- // Check for incomplete surrogate pair.
- if raw_unread < 4 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 surrogate pair",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the next character.
- value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
-
- // Check for a low surrogate area.
- if value2&0xFC00 != 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "expected low surrogate area",
- parser.offset+2, int(value2))
- }
-
- // Generate the value of the surrogate pair.
- value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
- } else {
- width = 2
- }
-
- default:
- panic("impossible")
- }
-
- // Check if the character is in the allowed range:
- // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
- // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
- // | [#x10000-#x10FFFF] (32 bit)
- switch {
- case value == 0x09:
- case value == 0x0A:
- case value == 0x0D:
- case value >= 0x20 && value <= 0x7E:
- case value == 0x85:
- case value >= 0xA0 && value <= 0xD7FF:
- case value >= 0xE000 && value <= 0xFFFD:
- case value >= 0x10000 && value <= 0x10FFFF:
- default:
- return yaml_parser_set_reader_error(parser,
- "control characters are not allowed",
- parser.offset, int(value))
- }
-
- // Move the raw pointers.
- parser.raw_buffer_pos += width
- parser.offset += width
-
- // Finally put the character into the buffer.
- if value <= 0x7F {
- // 0000 0000-0000 007F . 0xxxxxxx
- parser.buffer[buffer_len+0] = byte(value)
- buffer_len += 1
- } else if value <= 0x7FF {
- // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
- parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
- buffer_len += 2
- } else if value <= 0xFFFF {
- // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
- buffer_len += 3
- } else {
- // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
- buffer_len += 4
- }
-
- parser.unread++
- }
-
- // On EOF, put NUL into the buffer and return.
- if parser.eof {
- parser.buffer[buffer_len] = 0
- buffer_len++
- parser.unread++
- break
- }
- }
- // [Go] Read the documentation of this function above. To return true,
- // we need to have the given length in the buffer. Not doing that means
- // every single check that calls this function to make sure the buffer
- // has a given length is (in Go) panicking, or (in C) accessing invalid memory.
- // This happens here due to the EOF above breaking early.
- for buffer_len < length {
- parser.buffer[buffer_len] = 0
- buffer_len++
- }
- parser.buffer = parser.buffer[:buffer_len]
- return true
-}
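// The UTF-16 surrogate-pair arithmetic from yaml_parser_update_buffer
// above, pulled out into a standalone sketch so the W1/W2 formulas in the
// comments can be checked in isolation; decodeSurrogatePair is an
// illustrative helper, not part of the vendored reader.
package main

import "fmt"

// decodeSurrogatePair combines a high surrogate (0xD800-0xDBFF) and a low
// surrogate (0xDC00-0xDFFF) into one code point:
// value = 0x10000 + ((high & 0x3FF) << 10) + (low & 0x3FF)
func decodeSurrogatePair(high, low rune) (rune, bool) {
	if high&0xFC00 != 0xD800 || low&0xFC00 != 0xDC00 {
		return 0, false
	}
	return 0x10000 + ((high & 0x3FF) << 10) + (low & 0x3FF), true
}

func main() {
	// U+1F600 is encoded in UTF-16 as the surrogate pair D83D DE00.
	r, ok := decodeSurrogatePair(0xD83D, 0xDE00)
	fmt.Printf("%U %v\n", r, ok) // U+1F600 true
}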
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go
deleted file mode 100644
index 64ae888057a..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/resolve.go
+++ /dev/null
@@ -1,326 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
- "encoding/base64"
- "math"
- "regexp"
- "strconv"
- "strings"
- "time"
-)
-
-type resolveMapItem struct {
- value interface{}
- tag string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
- t := resolveTable
- t[int('+')] = 'S' // Sign
- t[int('-')] = 'S'
- for _, c := range "0123456789" {
- t[int(c)] = 'D' // Digit
- }
- for _, c := range "yYnNtTfFoO~" {
- t[int(c)] = 'M' // In map
- }
- t[int('.')] = '.' // Float (potentially in map)
-
- var resolveMapList = []struct {
- v interface{}
- tag string
- l []string
- }{
- {true, boolTag, []string{"true", "True", "TRUE"}},
- {false, boolTag, []string{"false", "False", "FALSE"}},
- {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
- {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
- {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
- {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
- {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
- {"<<", mergeTag, []string{"<<"}},
- }
-
- m := resolveMap
- for _, item := range resolveMapList {
- for _, s := range item.l {
- m[s] = resolveMapItem{item.v, item.tag}
- }
- }
-}
-
-const (
- nullTag = "!!null"
- boolTag = "!!bool"
- strTag = "!!str"
- intTag = "!!int"
- floatTag = "!!float"
- timestampTag = "!!timestamp"
- seqTag = "!!seq"
- mapTag = "!!map"
- binaryTag = "!!binary"
- mergeTag = "!!merge"
-)
-
-var longTags = make(map[string]string)
-var shortTags = make(map[string]string)
-
-func init() {
- for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
- ltag := longTag(stag)
- longTags[stag] = ltag
- shortTags[ltag] = stag
- }
-}
-
-const longTagPrefix = "tag:yaml.org,2002:"
-
-func shortTag(tag string) string {
- if strings.HasPrefix(tag, longTagPrefix) {
- if stag, ok := shortTags[tag]; ok {
- return stag
- }
- return "!!" + tag[len(longTagPrefix):]
- }
- return tag
-}
-
-func longTag(tag string) string {
- if strings.HasPrefix(tag, "!!") {
- if ltag, ok := longTags[tag]; ok {
- return ltag
- }
- return longTagPrefix + tag[2:]
- }
- return tag
-}
-
-func resolvableTag(tag string) bool {
- switch tag {
- case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
- return true
- }
- return false
-}
-
-var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
-
-func resolve(tag string, in string) (rtag string, out interface{}) {
- tag = shortTag(tag)
- if !resolvableTag(tag) {
- return tag, in
- }
-
- defer func() {
- switch tag {
- case "", rtag, strTag, binaryTag:
- return
- case floatTag:
- if rtag == intTag {
- switch v := out.(type) {
- case int64:
- rtag = floatTag
- out = float64(v)
- return
- case int:
- rtag = floatTag
- out = float64(v)
- return
- }
- }
- }
- failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
- }()
-
- // Any data is accepted as a !!str or !!binary.
- // Otherwise, the prefix is enough of a hint about what it might be.
- hint := byte('N')
- if in != "" {
- hint = resolveTable[in[0]]
- }
- if hint != 0 && tag != strTag && tag != binaryTag {
- // Handle things we can lookup in a map.
- if item, ok := resolveMap[in]; ok {
- return item.tag, item.value
- }
-
- // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
- // are purposefully unsupported here. They're still quoted on
- // the way out for compatibility with other parsers, though.
-
- switch hint {
- case 'M':
- // We've already checked the map above.
-
- case '.':
- // Not in the map, so maybe a normal float.
- floatv, err := strconv.ParseFloat(in, 64)
- if err == nil {
- return floatTag, floatv
- }
-
- case 'D', 'S':
- // Int, float, or timestamp.
- // Only try values as a timestamp if the value is unquoted or there's an explicit
- // !!timestamp tag.
- if tag == "" || tag == timestampTag {
- t, ok := parseTimestamp(in)
- if ok {
- return timestampTag, t
- }
- }
-
- plain := strings.Replace(in, "_", "", -1)
- intv, err := strconv.ParseInt(plain, 0, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return intTag, int(intv)
- } else {
- return intTag, intv
- }
- }
- uintv, err := strconv.ParseUint(plain, 0, 64)
- if err == nil {
- return intTag, uintv
- }
- if yamlStyleFloat.MatchString(plain) {
- floatv, err := strconv.ParseFloat(plain, 64)
- if err == nil {
- return floatTag, floatv
- }
- }
- if strings.HasPrefix(plain, "0b") {
- intv, err := strconv.ParseInt(plain[2:], 2, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return intTag, int(intv)
- } else {
- return intTag, intv
- }
- }
- uintv, err := strconv.ParseUint(plain[2:], 2, 64)
- if err == nil {
- return intTag, uintv
- }
- } else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
- if err == nil {
- if true || intv == int64(int(intv)) {
- return intTag, int(intv)
- } else {
- return intTag, intv
- }
- }
- }
- // Octals as introduced in version 1.2 of the spec.
- // Octals from the 1.1 spec, spelled as 0777, are still
- // decoded by default in v3 as well for compatibility.
- // May be dropped in v4 depending on how usage evolves.
- if strings.HasPrefix(plain, "0o") {
- intv, err := strconv.ParseInt(plain[2:], 8, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return intTag, int(intv)
- } else {
- return intTag, intv
- }
- }
- uintv, err := strconv.ParseUint(plain[2:], 8, 64)
- if err == nil {
- return intTag, uintv
- }
- } else if strings.HasPrefix(plain, "-0o") {
- intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
- if err == nil {
- if true || intv == int64(int(intv)) {
- return intTag, int(intv)
- } else {
- return intTag, intv
- }
- }
- }
- default:
- panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
- }
- }
- return strTag, in
-}
-
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
- const lineLen = 70
- encLen := base64.StdEncoding.EncodedLen(len(s))
- lines := encLen/lineLen + 1
- buf := make([]byte, encLen*2+lines)
- in := buf[0:encLen]
- out := buf[encLen:]
- base64.StdEncoding.Encode(in, []byte(s))
- k := 0
- for i := 0; i < len(in); i += lineLen {
- j := i + lineLen
- if j > len(in) {
- j = len(in)
- }
- k += copy(out[k:], in[i:j])
- if lines > 1 {
- out[k] = '\n'
- k++
- }
- }
- return string(out[:k])
-}
-
-// This is a subset of the formats allowed by the regular expression
-// defined at http://yaml.org/type/timestamp.html.
-var allowedTimestampFormats = []string{
- "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
- "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
- "2006-1-2 15:4:5.999999999", // space separated with no time zone
- "2006-1-2", // date only
- // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
- // from the set of examples.
-}
-
-// parseTimestamp parses s as a timestamp string and
-// returns the timestamp and reports whether it succeeded.
-// Timestamp formats are defined at http://yaml.org/type/timestamp.html
-func parseTimestamp(s string) (time.Time, bool) {
- // TODO write code to check all the formats supported by
- // http://yaml.org/type/timestamp.html instead of using time.Parse.
-
- // Quick check: all date formats start with YYYY-.
- i := 0
- for ; i < len(s); i++ {
- if c := s[i]; c < '0' || c > '9' {
- break
- }
- }
- if i != 4 || i == len(s) || s[i] != '-' {
- return time.Time{}, false
- }
- for _, format := range allowedTimestampFormats {
- if t, err := time.Parse(format, s); err == nil {
- return t, true
- }
- }
- return time.Time{}, false
-}
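// parseTimestamp above is a thin wrapper over time.Parse with a short
// allowlist of layouts. A standalone sketch of the same idea, using the
// layouts from allowedTimestampFormats but omitting the quick "YYYY-"
// prefix check:
package main

import (
	"fmt"
	"time"
)

var layouts = []string{
	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields
	"2006-1-2t15:4:5.999999999Z07:00", // same, with a lower-case "t"
	"2006-1-2 15:4:5.999999999",       // space separated, no time zone
	"2006-1-2",                        // date only
}

func parseYAMLTimestamp(s string) (time.Time, bool) {
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	t, ok := parseYAMLTimestamp("2001-12-15T02:59:43.1Z")
	fmt.Println(t, ok) // 2001-12-15 02:59:43.1 +0000 UTC true
	_, ok = parseYAMLTimestamp("not-a-date")
	fmt.Println(ok) // false
}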
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go
deleted file mode 100644
index ca0070108f4..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/scannerc.go
+++ /dev/null
@@ -1,3038 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "bytes"
- "fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descent parser (or
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two issues of Scanning that might be called "clever"; the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-// STREAM-START(encoding) # The stream start.
-// STREAM-END # The stream end.
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
-// DOCUMENT-START # '---'
-// DOCUMENT-END # '...'
-// BLOCK-SEQUENCE-START # Indentation increase denoting a block
-// BLOCK-MAPPING-START # sequence or a block mapping.
-// BLOCK-END # Indentation decrease.
-// FLOW-SEQUENCE-START # '['
-// FLOW-SEQUENCE-END # ']'
-// FLOW-MAPPING-START # '{'
-// FLOW-MAPPING-END # '}'
-// BLOCK-ENTRY # '-'
-// FLOW-ENTRY # ','
-// KEY # '?' or nothing (simple keys).
-// VALUE # ':'
-// ALIAS(anchor) # '*anchor'
-// ANCHOR(anchor) # '&anchor'
-// TAG(handle,suffix) # '!handle!suffix'
-// SCALAR(value,style) # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-// STREAM-START(encoding)
-// STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-// VERSION-DIRECTIVE(major,minor)
-// TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-// %YAML 1.1
-// %TAG ! !foo
-// %TAG !yaml! tag:yaml.org,2002:
-// ---
-//
-// The corresponding sequence of tokens:
-//
-// STREAM-START(utf-8)
-// VERSION-DIRECTIVE(1,1)
-// TAG-DIRECTIVE("!","!foo")
-// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-// DOCUMENT-START
-// STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-// DOCUMENT-START
-// DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-// 'a scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// STREAM-END
-//
-// 2. An explicit document:
-//
-// ---
-// 'a scalar'
-// ...
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-END
-// STREAM-END
-//
-// 3. Several documents in a stream:
-//
-// 'a scalar'
-// ---
-// 'another scalar'
-// ---
-// 'yet another scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("another scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("yet another scalar",single-quoted)
-// STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tag, and scalars:
-//
-// ALIAS(anchor)
-// ANCHOR(anchor)
-// TAG(handle,suffix)
-// SCALAR(value,style)
-//
-// The following series of examples illustrate the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-// &A [ *A ]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// ANCHOR("A")
-// FLOW-SEQUENCE-START
-// ALIAS("A")
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A tagged scalar:
-//
-// !!float "3.14" # A good approximation.
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// TAG("!!","float")
-// SCALAR("3.14",double-quoted)
-// STREAM-END
-//
-// 3. Various scalar styles:
-//
-// --- # Implicit empty plain scalars do not produce tokens.
-// --- a plain scalar
-// --- 'a single-quoted scalar'
-// --- "a double-quoted scalar"
-// --- |-
-// a literal scalar
-// --- >-
-// a folded
-// scalar
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// DOCUMENT-START
-// SCALAR("a plain scalar",plain)
-// DOCUMENT-START
-// SCALAR("a single-quoted scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("a double-quoted scalar",double-quoted)
-// DOCUMENT-START
-// SCALAR("a literal scalar",literal)
-// DOCUMENT-START
-// SCALAR("a folded scalar",folded)
-// STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-// FLOW-SEQUENCE-START
-// FLOW-SEQUENCE-END
-// FLOW-MAPPING-START
-// FLOW-MAPPING-END
-// FLOW-ENTRY
-// KEY
-// VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-// [item 1, item 2, item 3]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-SEQUENCE-START
-// SCALAR("item 1",plain)
-// FLOW-ENTRY
-// SCALAR("item 2",plain)
-// FLOW-ENTRY
-// SCALAR("item 3",plain)
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A flow mapping:
-//
-// {
-// a simple key: a value, # Note that the KEY token is produced.
-// ? a complex key: another value,
-// }
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// FLOW-ENTRY
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// FLOW-ENTRY
-// FLOW-MAPPING-END
-// STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-// BLOCK-SEQUENCE-START
-// BLOCK-MAPPING-START
-// BLOCK-END
-// BLOCK-ENTRY
-// KEY
-// VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-// - item 1
-// - item 2
-// -
-// - item 3.1
-// - item 3.2
-// -
-// key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 3.1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 3.2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Block mappings:
-//
-// a simple key: a value # The KEY token is produced here.
-// ? a complex key
-// : another value
-// a mapping:
-// key 1: value 1
-// key 2: value 2
-// a sequence:
-// - item 1
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// KEY
-// SCALAR("a mapping",plain)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-// - - item 1
-// - item 2
-// - key 1: value 1
-// key 2: value 2
-// - ? complex key
-// : complex value
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("complex key")
-// VALUE
-// SCALAR("complex value")
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Collections in a mapping:
-//
-// ? a sequence
-// : - item 1
-// - item 2
-// ? a mapping
-// : key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a mapping",plain)
-// VALUE
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-// key:
-// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key",plain)
-// VALUE
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-//
-
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
- // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
- return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
- if !is_blank(parser.buffer, parser.buffer_pos) {
- parser.newlines = 0
- }
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-func skip_line(parser *yaml_parser_t) {
- if is_crlf(parser.buffer, parser.buffer_pos) {
- parser.mark.index += 2
- parser.mark.column = 0
- parser.mark.line++
- parser.unread -= 2
- parser.buffer_pos += 2
- parser.newlines++
- } else if is_break(parser.buffer, parser.buffer_pos) {
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
- parser.newlines++
- }
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
- if !is_blank(parser.buffer, parser.buffer_pos) {
- parser.newlines = 0
- }
- w := width(parser.buffer[parser.buffer_pos])
- if w == 0 {
- panic("invalid character sequence")
- }
- if len(s) == 0 {
- s = make([]byte, 0, 32)
- }
- if w == 1 && len(s)+w <= cap(s) {
- s = s[:len(s)+1]
- s[len(s)-1] = parser.buffer[parser.buffer_pos]
- parser.buffer_pos++
- } else {
- s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
- parser.buffer_pos += w
- }
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- return s
-}
-
-// Copy a line break character to a string buffer and advance pointers.
-func read_line(parser *yaml_parser_t, s []byte) []byte {
- buf := parser.buffer
- pos := parser.buffer_pos
- switch {
- case buf[pos] == '\r' && buf[pos+1] == '\n':
- // CR LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- parser.mark.index++
- parser.unread--
- case buf[pos] == '\r' || buf[pos] == '\n':
- // CR|LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 1
- case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
- // NEL . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
- // LS|PS . LS|PS
- s = append(s, buf[parser.buffer_pos:pos+3]...)
- parser.buffer_pos += 3
- default:
- return s
- }
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- parser.newlines++
- return s
-}
-
-// Get the next token.
-func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Erase the token object.
- *token = yaml_token_t{} // [Go] Is this necessary?
-
- // No tokens after STREAM-END or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
- return true
- }
-
- // Ensure that the tokens queue contains enough tokens.
- if !parser.token_available {
- if !yaml_parser_fetch_more_tokens(parser) {
- return false
- }
- }
-
- // Fetch the next token from the queue.
- *token = parser.tokens[parser.tokens_head]
- parser.tokens_head++
- parser.tokens_parsed++
- parser.token_available = false
-
- if token.typ == yaml_STREAM_END_TOKEN {
- parser.stream_end_produced = true
- }
- return true
-}
-
-// Set the scanner error and return false.
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
- parser.error = yaml_SCANNER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = parser.mark
- return false
-}
-
-func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
- context := "while parsing a tag"
- if directive {
- context = "while parsing a %TAG directive"
- }
- return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
-}
-
-func trace(args ...interface{}) func() {
- pargs := append([]interface{}{"+++"}, args...)
- fmt.Println(pargs...)
- pargs = append([]interface{}{"---"}, args...)
- return func() { fmt.Println(pargs...) }
-}
-
-// Ensure that the tokens queue contains at least one token which can be
-// returned to the Parser.
-func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
- // While we need more tokens to fetch, do it.
- for {
- // [Go] The comment parsing logic requires a lookahead of two tokens
- // so that foot comments may be parsed in time to associate them
- // with the tokens that are parsed before them, and also for line
- // comments to be transformed into head comments in some edge cases.
- if parser.tokens_head < len(parser.tokens)-2 {
- // If a potential simple key is at the head position, we need to fetch
- // the next token to disambiguate it.
- head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
- if !ok {
- break
- } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
- return false
- } else if !valid {
- break
- }
- }
- // Fetch the next token.
- if !yaml_parser_fetch_next_token(parser) {
- return false
- }
- }
-
- parser.token_available = true
- return true
-}
-
-// The dispatcher for token fetchers.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
- // Ensure that the buffer is initialized.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check if we just started scanning. Fetch STREAM-START then.
- if !parser.stream_start_produced {
- return yaml_parser_fetch_stream_start(parser)
- }
-
- scan_mark := parser.mark
-
- // Eat whitespaces and comments until we reach the next token.
- if !yaml_parser_scan_to_next_token(parser) {
- return false
- }
-
- // [Go] While unrolling indents, transform the head comments of prior
- // indentation levels observed after scan_start into foot comments at
- // the respective indexes.
-
- // Check the indentation level against the current column.
- if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
- return false
- }
-
- // Ensure that the buffer contains at least 4 characters. 4 is the length
- // of the longest indicators ('--- ' and '... ').
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- // Is it the end of the stream?
- if is_z(parser.buffer, parser.buffer_pos) {
- return yaml_parser_fetch_stream_end(parser)
- }
-
- // Is it a directive?
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
- return yaml_parser_fetch_directive(parser)
- }
-
- buf := parser.buffer
- pos := parser.buffer_pos
-
- // Is it the document start indicator?
- if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
- }
-
- // Is it the document end indicator?
- if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
- }
-
- comment_mark := parser.mark
- if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
- // Associate any following comments with the prior token.
- comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
- }
- defer func() {
- if !ok {
- return
- }
- if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
- // Sequence indicators alone have no line comments; such a comment
- // becomes a head comment for whatever follows.
- return
- }
- if !yaml_parser_scan_line_comment(parser, comment_mark) {
- ok = false
- return
- }
- }()
-
- // Is it the flow sequence start indicator?
- if buf[pos] == '[' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
- }
-
- // Is it the flow mapping start indicator?
- if parser.buffer[parser.buffer_pos] == '{' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
- }
-
- // Is it the flow sequence end indicator?
- if parser.buffer[parser.buffer_pos] == ']' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_SEQUENCE_END_TOKEN)
- }
-
- // Is it the flow mapping end indicator?
- if parser.buffer[parser.buffer_pos] == '}' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_MAPPING_END_TOKEN)
- }
-
- // Is it the flow entry indicator?
- if parser.buffer[parser.buffer_pos] == ',' {
- return yaml_parser_fetch_flow_entry(parser)
- }
-
- // Is it the block entry indicator?
- if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
- return yaml_parser_fetch_block_entry(parser)
- }
-
- // Is it the key indicator?
- if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_key(parser)
- }
-
- // Is it the value indicator?
- if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_value(parser)
- }
-
- // Is it an alias?
- if parser.buffer[parser.buffer_pos] == '*' {
- return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
- }
-
- // Is it an anchor?
- if parser.buffer[parser.buffer_pos] == '&' {
- return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
- }
-
- // Is it a tag?
- if parser.buffer[parser.buffer_pos] == '!' {
- return yaml_parser_fetch_tag(parser)
- }
-
- // Is it a literal scalar?
- if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, true)
- }
-
- // Is it a folded scalar?
- if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, false)
- }
-
- // Is it a single-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '\'' {
- return yaml_parser_fetch_flow_scalar(parser, true)
- }
-
- // Is it a double-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '"' {
- return yaml_parser_fetch_flow_scalar(parser, false)
- }
-
- // Is it a plain scalar?
- //
- // A plain scalar may start with any non-blank character except
- //
- // '-', '?', ':', ',', '[', ']', '{', '}',
- // '#', '&', '*', '!', '|', '>', '\'', '\"',
- // '%', '@', '`'.
- //
- // In the block context (and, for the '-' indicator, in the flow context
- // too), it may also start with the characters
- //
- // '-', '?', ':'
- //
- // if it is followed by a non-space character.
- //
- // The last rule is more restrictive than the specification requires.
- // [Go] TODO Make this logic more reasonable.
- //switch parser.buffer[parser.buffer_pos] {
- //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
- //}
- if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
- parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
- parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
- (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level == 0 &&
- (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
- !is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_plain_scalar(parser)
- }
-
- // If we don't determine the token type so far, it is an error.
- return yaml_parser_set_scanner_error(parser,
- "while scanning for the next token", parser.mark,
- "found character that cannot start any token")
-}
-
-func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
- if !simple_key.possible {
- return false, true
- }
-
- // The 1.2 specification says:
- //
- // "If the ? indicator is omitted, parsing needs to see past the
- // implicit key to recognize it as such. To limit the amount of
- // lookahead required, the “:” indicator must appear at most 1024
- // Unicode characters beyond the start of the key. In addition, the key
- // is restricted to a single line."
- //
- if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
- // Check if the potential simple key to be removed is required.
- if simple_key.required {
- return false, yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", simple_key.mark,
- "could not find expected ':'")
- }
- simple_key.possible = false
- return false, true
- }
- return true, true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
- // A simple key is required at the current position if the scanner is in
- // the block context and the current column coincides with the indentation
- // level.
-
- required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
- //
- // If the current position may start a simple key, save it.
- //
- if parser.simple_key_allowed {
- simple_key := yaml_simple_key_t{
- possible: true,
- required: required,
- token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
- mark: parser.mark,
- }
-
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
- parser.simple_keys[len(parser.simple_keys)-1] = simple_key
- parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
- }
- return true
-}
-
-// Remove a potential simple key at the current flow level.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
- i := len(parser.simple_keys) - 1
- if parser.simple_keys[i].possible {
- // If the key is required, it is an error.
- if parser.simple_keys[i].required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", parser.simple_keys[i].mark,
- "could not find expected ':'")
- }
- // Remove the key from the stack.
- parser.simple_keys[i].possible = false
- delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
- }
- return true
-}
-
-// max_flow_level limits the flow_level
-const max_flow_level = 10000
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
- // Reset the simple key on the next level.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
- possible: false,
- required: false,
- token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
- mark: parser.mark,
- })
-
- // Increase the flow level.
- parser.flow_level++
- if parser.flow_level > max_flow_level {
- return yaml_parser_set_scanner_error(parser,
- "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
- fmt.Sprintf("exceeded max depth of %d", max_flow_level))
- }
- return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
- if parser.flow_level > 0 {
- parser.flow_level--
- last := len(parser.simple_keys) - 1
- delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
- parser.simple_keys = parser.simple_keys[:last]
- }
- return true
-}
-
-// max_indents limits the indents stack size
-const max_indents = 10000
-
-// Push the current indentation level to the stack and set the new level
-// if the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- if parser.indent < column {
- // Push the current indentation level to the stack and set the new
- // indentation level.
- parser.indents = append(parser.indents, parser.indent)
- parser.indent = column
- if len(parser.indents) > max_indents {
- return yaml_parser_set_scanner_error(parser,
- "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
- fmt.Sprintf("exceeded max depth of %d", max_indents))
- }
-
- // Create a token and insert it into the queue.
- token := yaml_token_t{
- typ: typ,
- start_mark: mark,
- end_mark: mark,
- }
- if number > -1 {
- number -= parser.tokens_parsed
- }
- yaml_insert_token(parser, number, &token)
- }
- return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column. For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- block_mark := scan_mark
- block_mark.index--
-
- // Loop through the indentation levels in the stack.
- for parser.indent > column {
-
- // [Go] Reposition the end token before potential following
- // foot comments of parent blocks. For that, search
- // backwards for recent comments that were at the same
- // indent as the block that is ending now.
- stop_index := block_mark.index
- for i := len(parser.comments) - 1; i >= 0; i-- {
- comment := &parser.comments[i]
-
- if comment.end_mark.index < stop_index {
- // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
- // If requested indent column is < 0, then the document is over and everything else
- // is a foot anyway.
- break
- }
- if comment.start_mark.column == parser.indent+1 {
- // This is a good match. But maybe there's a former comment
- // at that same indent level, so keep searching.
- block_mark = comment.start_mark
- }
-
- // While the end of the former comment matches with
- // the start of the following one, we know there's
- // nothing in between and scanning is still safe.
- stop_index = comment.scan_mark.index
- }
-
- // Create a token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_END_TOKEN,
- start_mark: block_mark,
- end_mark: block_mark,
- }
- yaml_insert_token(parser, -1, &token)
-
- // Pop the indentation level.
- parser.indent = parser.indents[len(parser.indents)-1]
- parser.indents = parser.indents[:len(parser.indents)-1]
- }
- return true
-}
-
-// Initialize the scanner and produce the STREAM-START token.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
- // Set the initial indentation.
- parser.indent = -1
-
- // Initialize the simple key stack.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- parser.simple_keys_by_tok = make(map[int]int)
-
- // A simple key is allowed at the beginning of the stream.
- parser.simple_key_allowed = true
-
- // We have started.
- parser.stream_start_produced = true
-
- // Create the STREAM-START token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_START_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- encoding: parser.encoding,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
- // Force new line.
- if parser.mark.column != 0 {
- parser.mark.column = 0
- parser.mark.line++
- }
-
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the STREAM-END token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
- token := yaml_token_t{}
- if !yaml_parser_scan_directive(parser, &token) {
- return false
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Consume the token.
- start_mark := parser.mark
-
- skip(parser)
- skip(parser)
- skip(parser)
-
- end_mark := parser.mark
-
- // Create the DOCUMENT-START or DOCUMENT-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-
- // The indicators '[' and '{' may start a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // Increase the flow level.
- if !yaml_parser_increase_flow_level(parser) {
- return false
- }
-
- // A simple key may follow the indicators '[' and '{'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset any potential simple key on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Decrease the flow level.
- if !yaml_parser_decrease_flow_level(parser) {
- return false
- }
-
- // No simple keys after the indicators ']' and '}'.
- parser.simple_key_allowed = false
-
- // Consume the token.
-
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-ENTRY token.
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after ','.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_FLOW_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the BLOCK-ENTRY token.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
- // Check if the scanner is in the block context.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new entry.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "block sequence entries are not allowed in this context")
- }
- // Add the BLOCK-SEQUENCE-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
- return false
- }
- } else {
- // It is an error for the '-' indicator to occur in the flow context,
- // but we let the Parser detect and report about it because the Parser
- // is able to point to the context.
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '-'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the BLOCK-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
- // In the block context, additional checks are required.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new key (not necessarily simple).
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping keys are not allowed in this context")
- }
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '?' in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the KEY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
- simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
- // Have we found a simple key?
- if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
- return false
-
- } else if valid {
-
- // Create the KEY token and insert it into the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: simple_key.mark,
- end_mark: simple_key.mark,
- }
- yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
- // In the block context, we may need to add the BLOCK-MAPPING-START token.
- if !yaml_parser_roll_indent(parser, simple_key.mark.column,
- simple_key.token_number,
- yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
- return false
- }
-
- // Remove the simple key.
- simple_key.possible = false
- delete(parser.simple_keys_by_tok, simple_key.token_number)
-
- // A simple key cannot follow another simple key.
- parser.simple_key_allowed = false
-
- } else {
- // The ':' indicator follows a complex key.
-
- // In the block context, extra checks are required.
- if parser.flow_level == 0 {
-
- // Check if we are allowed to start a complex value.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping values are not allowed in this context")
- }
-
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Simple keys after ':' are allowed in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
- }
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the VALUE token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_VALUE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the ALIAS or ANCHOR token.
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // An anchor or an alias could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow an anchor or an alias.
- parser.simple_key_allowed = false
-
- // Create the ALIAS or ANCHOR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_anchor(parser, &token, typ) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the TAG token.
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
- // A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a tag.
- parser.simple_key_allowed = false
-
- // Create the TAG token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_tag(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
-func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
- // Remove any potential simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // A simple key may follow a block scalar.
- parser.simple_key_allowed = true
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_block_scalar(parser, &token, literal) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
-func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
- // A quoted scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a flow scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_flow_scalar(parser, &token, single) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,plain) token.
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
- // A plain scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a plain scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_plain_scalar(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Eat whitespaces and comments until the next token is found.
-func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
-
- scan_mark := parser.mark
-
- // Loop until the next token is found.
- for {
- // Allow the BOM mark to start a line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
- skip(parser)
- }
-
- // Eat whitespaces.
- // Tabs are allowed:
- // - in the flow context
- // - in the block context, but not at the beginning of the line or
- // after '-', '?', or ':' (complex value).
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if we just had a line comment under a sequence entry that
- // looks more like a header to the following content. Similar to this:
- //
- // - # The comment
- // - Some data
- //
- // If so, transform the line comment to a head comment and reposition.
- if len(parser.comments) > 0 && len(parser.tokens) > 1 {
- tokenA := parser.tokens[len(parser.tokens)-2]
- tokenB := parser.tokens[len(parser.tokens)-1]
- comment := &parser.comments[len(parser.comments)-1]
- if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
- // If it was in the prior line, reposition so it becomes a
- // header of the follow up token. Otherwise, keep it in place
- // so it becomes a header of the former.
- comment.head = comment.line
- comment.line = nil
- if comment.start_mark.line == parser.mark.line-1 {
- comment.token_mark = parser.mark
- }
- }
- }
-
- // Eat a comment until a line break.
- if parser.buffer[parser.buffer_pos] == '#' {
- if !yaml_parser_scan_comments(parser, scan_mark) {
- return false
- }
- }
-
- // If it is a line break, eat it.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
-
- // In the block context, a new line may start a simple key.
- if parser.flow_level == 0 {
- parser.simple_key_allowed = true
- }
- } else {
- break // We have found a token.
- }
- }
-
- return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Eat '%'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the directive name.
- var name []byte
- if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
- return false
- }
-
- // Is it a YAML directive?
- if bytes.Equal(name, []byte("YAML")) {
- // Scan the VERSION directive value.
- var major, minor int8
- if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
- return false
- }
- end_mark := parser.mark
-
- // Create a VERSION-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_VERSION_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- major: major,
- minor: minor,
- }
-
- // Is it a TAG directive?
- } else if bytes.Equal(name, []byte("TAG")) {
- // Scan the TAG directive value.
- var handle, prefix []byte
- if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
- return false
- }
- end_mark := parser.mark
-
- // Create a TAG-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_TAG_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- prefix: prefix,
- }
-
- // Unknown directive.
- } else {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unknown directive name")
- return false
- }
-
- // Eat the rest of the line including any comments.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- if parser.buffer[parser.buffer_pos] == '#' {
- // [Go] Discard this inline comment for the time being.
- //if !yaml_parser_scan_line_comment(parser, start_mark) {
- // return false
- //}
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
- // Consume the directive name.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- var s []byte
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the name is empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "could not find expected directive name")
- return false
- }
-
- // Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unexpected non-alphabetical character")
- return false
- }
- *name = s
- return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the major version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
- return false
- }
-
- // Eat '.'.
- if parser.buffer[parser.buffer_pos] != '.' {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected digit or '.' character")
- }
-
- skip(parser)
-
- // Consume the minor version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
- return false
- }
- return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^
-// %YAML 1.1 # a comment \n
-// ^
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
- // Repeat while the next character is a digit.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var value, length int8
- for is_digit(parser.buffer, parser.buffer_pos) {
- // Check if the number is too long.
- length++
- if length > max_number_length {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "found extremely long version number")
- }
- value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the number was present.
- if length == 0 {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected version number")
- }
- *number = value
- return true
-}
-
-// Scan the value of a TAG-DIRECTIVE token.
-//
-// Scope:
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
- var handle_value, prefix_value []byte
-
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a handle.
- if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
- return false
- }
-
- // Expect a whitespace.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blank(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace")
- return false
- }
-
- // Eat whitespaces.
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a prefix.
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
- return false
- }
-
- // Expect a whitespace or line break.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- *handle = handle_value
- *prefix = prefix_value
- return true
-}
-
-func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
- var s []byte
-
- // Eat the indicator character.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the value.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- end_mark := parser.mark
-
- /*
- * Check if length of the anchor is greater than 0 and it is followed by
- * a whitespace character or one of the indicators:
- *
- * '?', ':', ',', ']', '}', '%', '@', '`'.
- */
-
- if len(s) == 0 ||
- !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
- parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '`') {
- context := "while scanning an alias"
- if typ == yaml_ANCHOR_TOKEN {
- context = "while scanning an anchor"
- }
- yaml_parser_set_scanner_error(parser, context, start_mark,
- "did not find expected alphabetic or numeric character")
- return false
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- }
-
- return true
-}
-
-/*
- * Scan a TAG token.
- */
-
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
- var handle, suffix []byte
-
- start_mark := parser.mark
-
- // Check if the tag is in the canonical form.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- if parser.buffer[parser.buffer_pos+1] == '<' {
- // Keep the handle as ''
-
- // Eat '!<'
- skip(parser)
- skip(parser)
-
- // Consume the tag value.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
-
- // Check for '>' and eat it.
- if parser.buffer[parser.buffer_pos] != '>' {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find the expected '>'")
- return false
- }
-
- skip(parser)
- } else {
- // The tag has either the '!suffix' or the '!handle!suffix' form.
-
- // First, try to scan a handle.
- if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
- return false
- }
-
- // Check if it is, indeed, a handle.
- if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
- // Scan the suffix now.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
- } else {
- // It wasn't a handle after all. Scan the rest of the tag.
- if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
- return false
- }
-
- // Set the handle to '!'.
- handle = []byte{'!'}
-
- // A special case: the '!' tag. Set the handle to '' and the
- // suffix to '!'.
- if len(suffix) == 0 {
- handle, suffix = suffix, handle
- }
- }
- }
-
- // Check the character which ends the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_TAG_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- suffix: suffix,
- }
- return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
- // Check the initial '!' character.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] != '!' {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
-
- var s []byte
-
- // Copy the '!' character.
- s = read(parser, s)
-
- // Copy all subsequent alphabetical and numerical characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the trailing character is '!' and copy it.
- if parser.buffer[parser.buffer_pos] == '!' {
- s = read(parser, s)
- } else {
- // It's either the '!' tag or not really a tag handle. If it's a %TAG
- // directive, it's an error. If it's a tag token, it must be part of the URI.
- if directive && string(s) != "!" {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
- }
-
- *handle = s
- return true
-}
-
-// Scan a tag.
-func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
- //size_t length = head ? strlen((char *)head) : 0
- var s []byte
- hasTag := len(head) > 0
-
- // Copy the head if needed.
- //
- // Note that we don't copy the leading '!' character.
- if len(head) > 1 {
- s = append(s, head[1:]...)
- }
-
- // Scan the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // The set of characters that may appear in URI is as follows:
- //
- // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
- // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
- // '%'.
- // [Go] TODO Convert this into more reasonable logic.
- for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
- parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
- parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
- parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
- parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
- parser.buffer[parser.buffer_pos] == '%' {
- // Check if it is a URI-escape sequence.
- if parser.buffer[parser.buffer_pos] == '%' {
- if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
- return false
- }
- } else {
- s = read(parser, s)
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- hasTag = true
- }
-
- if !hasTag {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected tag URI")
- return false
- }
- *uri = s
- return true
-}
-
-// Decode a URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
- // Decode the required number of characters.
- w := 1024
- for w > 0 {
- // Check for a URI-escaped octet.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
-
- if !(parser.buffer[parser.buffer_pos] == '%' &&
- is_hex(parser.buffer, parser.buffer_pos+1) &&
- is_hex(parser.buffer, parser.buffer_pos+2)) {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find URI escaped octet")
- }
-
- // Get the octet.
- octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
- // If it is the leading octet, determine the length of the UTF-8 sequence.
- if w == 1024 {
- w = width(octet)
- if w == 0 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect leading UTF-8 octet")
- }
- } else {
- // Check if the trailing octet is correct.
- if octet&0xC0 != 0x80 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect trailing UTF-8 octet")
- }
- }
-
- // Copy the octet and move the pointers.
- *s = append(*s, octet)
- skip(parser)
- skip(parser)
- skip(parser)
- w--
- }
- return true
-}
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
- // Eat the indicator '|' or '>'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the additional block scalar indicators.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check for a chomping indicator.
- var chomping, increment int
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- // Set the chomping method and eat the indicator.
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
-
- // Check for an indentation indicator.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_digit(parser.buffer, parser.buffer_pos) {
- // Check that the indentation is greater than 0.
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an indentation indicator equal to 0")
- return false
- }
-
- // Get the indentation level and eat the indicator.
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
- }
-
- } else if is_digit(parser.buffer, parser.buffer_pos) {
- // Do the same as above, but in the opposite order.
-
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an indentation indicator equal to 0")
- return false
- }
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
-
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
- }
- }
-
- // Eat whitespaces and comments to the end of the line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.buffer[parser.buffer_pos] == '#' {
- if !yaml_parser_scan_line_comment(parser, start_mark) {
- return false
- }
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- end_mark := parser.mark
-
- // Set the indentation level if it was specified.
- var indent int
- if increment > 0 {
- if parser.indent >= 0 {
- indent = parser.indent + increment
- } else {
- indent = increment
- }
- }
-
- // Scan the leading line breaks and determine the indentation level if needed.
- var s, leading_break, trailing_breaks []byte
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
-
- // Scan the block scalar content.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var leading_blank, trailing_blank bool
- for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
- // We are at the beginning of a non-empty line.
-
- // Is it a trailing whitespace?
- trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Check if we need to fold the leading line break.
- if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
- // Do we need to join the lines by space?
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- }
- } else {
- s = append(s, leading_break...)
- }
- leading_break = leading_break[:0]
-
- // Append the remaining line breaks.
- s = append(s, trailing_breaks...)
- trailing_breaks = trailing_breaks[:0]
-
- // Is it a leading whitespace?
- leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Consume the current line.
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- leading_break = read_line(parser, leading_break)
-
- // Eat the following indentation spaces and line breaks.
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
- }
-
- // Chomp the tail.
- if chomping != -1 {
- s = append(s, leading_break...)
- }
- if chomping == 1 {
- s = append(s, trailing_breaks...)
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_LITERAL_SCALAR_STYLE,
- }
- if !literal {
- token.style = yaml_FOLDED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
- *end_mark = parser.mark
-
- // Eat the indentation spaces and line breaks.
- max_indent := 0
- for {
- // Eat the indentation spaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.mark.column > max_indent {
- max_indent = parser.mark.column
- }
-
- // Check for a tab character messing the indentation.
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
- return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found a tab character where an indentation space is expected")
- }
-
- // Have we found a non-empty line?
- if !is_break(parser.buffer, parser.buffer_pos) {
- break
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- // [Go] Should really be returning breaks instead.
- *breaks = read_line(parser, *breaks)
- *end_mark = parser.mark
- }
-
- // Determine the indentation level if needed.
- if *indent == 0 {
- *indent = max_indent
- if *indent < parser.indent+1 {
- *indent = parser.indent + 1
- }
- if *indent < 1 {
- *indent = 1
- }
- }
- return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
- // Eat the left quote.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the content of the quoted scalar.
- var s, leading_break, trailing_breaks, whitespaces []byte
- for {
- // Check that there are no document indicators at the beginning of the line.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected document indicator")
- return false
- }
-
- // Check for EOF.
- if is_z(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected end of stream")
- return false
- }
-
- // Consume non-blank characters.
- leading_blanks := false
- for !is_blankz(parser.buffer, parser.buffer_pos) {
- if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
- // It is an escaped single quote.
- s = append(s, '\'')
- skip(parser)
- skip(parser)
-
- } else if single && parser.buffer[parser.buffer_pos] == '\'' {
- // It is a right single quote.
- break
- } else if !single && parser.buffer[parser.buffer_pos] == '"' {
- // It is a right double quote.
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
- // It is an escaped line break.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
- skip(parser)
- skip_line(parser)
- leading_blanks = true
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
- // It is an escape sequence.
- code_length := 0
-
- // Check the escape character.
- switch parser.buffer[parser.buffer_pos+1] {
- case '0':
- s = append(s, 0)
- case 'a':
- s = append(s, '\x07')
- case 'b':
- s = append(s, '\x08')
- case 't', '\t':
- s = append(s, '\x09')
- case 'n':
- s = append(s, '\x0A')
- case 'v':
- s = append(s, '\x0B')
- case 'f':
- s = append(s, '\x0C')
- case 'r':
- s = append(s, '\x0D')
- case 'e':
- s = append(s, '\x1B')
- case ' ':
- s = append(s, '\x20')
- case '"':
- s = append(s, '"')
- case '\'':
- s = append(s, '\'')
- case '\\':
- s = append(s, '\\')
- case 'N': // NEL (#x85)
- s = append(s, '\xC2')
- s = append(s, '\x85')
- case '_': // #xA0
- s = append(s, '\xC2')
- s = append(s, '\xA0')
- case 'L': // LS (#x2028)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA8')
- case 'P': // PS (#x2029)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA9')
- case 'x':
- code_length = 2
- case 'u':
- code_length = 4
- case 'U':
- code_length = 8
- default:
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found unknown escape character")
- return false
- }
-
- skip(parser)
- skip(parser)
-
- // Consume an arbitrary escape code.
- if code_length > 0 {
- var value int
-
- // Scan the character value.
- if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
- return false
- }
- for k := 0; k < code_length; k++ {
- if !is_hex(parser.buffer, parser.buffer_pos+k) {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "did not find expected hexdecimal number")
- return false
- }
- value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
- }
-
- // Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found invalid Unicode character escape code")
- return false
- }
- if value <= 0x7F {
- s = append(s, byte(value))
- } else if value <= 0x7FF {
- s = append(s, byte(0xC0+(value>>6)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else if value <= 0xFFFF {
- s = append(s, byte(0xE0+(value>>12)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else {
- s = append(s, byte(0xF0+(value>>18)))
- s = append(s, byte(0x80+((value>>12)&0x3F)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- }
-
- // Advance the pointer.
- for k := 0; k < code_length; k++ {
- skip(parser)
- }
- }
- } else {
- // It is a non-escaped non-blank character.
- s = read(parser, s)
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check if we are at the end of the scalar.
- if single {
- if parser.buffer[parser.buffer_pos] == '\'' {
- break
- }
- } else {
- if parser.buffer[parser.buffer_pos] == '"' {
- break
- }
- }
-
- // Consume blank characters.
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is the first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Join the whitespaces or fold line breaks.
- if leading_blanks {
- // Do we need to fold line breaks?
- if len(leading_break) > 0 && leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Eat the right quote.
- skip(parser)
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
- }
- if !single {
- token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan a plain scalar.
-func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
-
- var s, leading_break, trailing_breaks, whitespaces []byte
- var leading_blanks bool
- var indent = parser.indent + 1
-
- start_mark := parser.mark
- end_mark := parser.mark
-
- // Consume the content of the plain scalar.
- for {
- // Check for a document indicator.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- break
- }
-
- // Check for a comment.
- if parser.buffer[parser.buffer_pos] == '#' {
- break
- }
-
- // Consume non-blank characters.
- for !is_blankz(parser.buffer, parser.buffer_pos) {
-
- // Check for indicators that may end a plain scalar.
- if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level > 0 &&
- (parser.buffer[parser.buffer_pos] == ',' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}')) {
- break
- }
-
- // Check if we need to join whitespaces and breaks.
- if leading_blanks || len(whitespaces) > 0 {
- if leading_blanks {
- // Do we need to fold line breaks?
- if leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- leading_blanks = false
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Copy the character.
- s = read(parser, s)
-
- end_mark = parser.mark
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Is it the end?
- if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
- break
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
-
- // Check for tab characters that abuse indentation.
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found a tab character that violates indentation")
- return false
- }
-
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is the first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check indentation level.
- if parser.flow_level == 0 && parser.mark.column < indent {
- break
- }
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_PLAIN_SCALAR_STYLE,
- }
-
- // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks {
- parser.simple_key_allowed = true
- }
- return true
-}
-
-func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
- if parser.newlines > 0 {
- return true
- }
-
- var start_mark yaml_mark_t
- var text []byte
-
- for peek := 0; peek < 512; peek++ {
- if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
- break
- }
- if is_blank(parser.buffer, parser.buffer_pos+peek) {
- continue
- }
- if parser.buffer[parser.buffer_pos+peek] == '#' {
- seen := parser.mark.index+peek
- for {
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_breakz(parser.buffer, parser.buffer_pos) {
- if parser.mark.index >= seen {
- break
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- } else if parser.mark.index >= seen {
- if len(text) == 0 {
- start_mark = parser.mark
- }
- text = read(parser, text)
- } else {
- skip(parser)
- }
- }
- }
- break
- }
- if len(text) > 0 {
- parser.comments = append(parser.comments, yaml_comment_t{
- token_mark: token_mark,
- start_mark: start_mark,
- line: text,
- })
- }
- return true
-}
-
-func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
- token := parser.tokens[len(parser.tokens)-1]
-
- if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
- token = parser.tokens[len(parser.tokens)-2]
- }
-
- var token_mark = token.start_mark
- var start_mark yaml_mark_t
- var next_indent = parser.indent
- if next_indent < 0 {
- next_indent = 0
- }
-
- var recent_empty = false
- var first_empty = parser.newlines <= 1
-
- var line = parser.mark.line
- var column = parser.mark.column
-
- var text []byte
-
- // The foot line is the place where a comment must start to
- // still be considered a foot of the prior content.
- // If there's some content in the currently parsed line, then
- // the foot is the line below it.
- var foot_line = -1
- if scan_mark.line > 0 {
- foot_line = parser.mark.line-parser.newlines+1
- if parser.newlines == 0 && parser.mark.column > 1 {
- foot_line++
- }
- }
-
- var peek = 0
- for ; peek < 512; peek++ {
- if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
- break
- }
- column++
- if is_blank(parser.buffer, parser.buffer_pos+peek) {
- continue
- }
- c := parser.buffer[parser.buffer_pos+peek]
- var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
- if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
- // Got line break or terminator.
- if close_flow || !recent_empty {
- if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
- // This is the first empty line and there were no empty lines before,
- // so this initial part of the comment is a foot of the prior token
- // instead of being a head for the following one. Split it up.
- // Alternatively, this might also be the last comment inside a flow
- // scope, so it must be a footer.
- if len(text) > 0 {
- if start_mark.column-1 < next_indent {
- // If dedented it's unrelated to the prior token.
- token_mark = start_mark
- }
- parser.comments = append(parser.comments, yaml_comment_t{
- scan_mark: scan_mark,
- token_mark: token_mark,
- start_mark: start_mark,
- end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
- foot: text,
- })
- scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
- token_mark = scan_mark
- text = nil
- }
- } else {
- if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
- text = append(text, '\n')
- }
- }
- }
- if !is_break(parser.buffer, parser.buffer_pos+peek) {
- break
- }
- first_empty = false
- recent_empty = true
- column = 0
- line++
- continue
- }
-
- if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
- // The comment at the different indentation is a foot of the
- // preceding data rather than a head of the upcoming one.
- parser.comments = append(parser.comments, yaml_comment_t{
- scan_mark: scan_mark,
- token_mark: token_mark,
- start_mark: start_mark,
- end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
- foot: text,
- })
- scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
- token_mark = scan_mark
- text = nil
- }
-
- if parser.buffer[parser.buffer_pos+peek] != '#' {
- break
- }
-
- if len(text) == 0 {
- start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
- } else {
- text = append(text, '\n')
- }
-
- recent_empty = false
-
- // Consume until after the consumed comment line.
- seen := parser.mark.index+peek
- for {
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_breakz(parser.buffer, parser.buffer_pos) {
- if parser.mark.index >= seen {
- break
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- } else if parser.mark.index >= seen {
- text = read(parser, text)
- } else {
- skip(parser)
- }
- }
-
- peek = 0
- column = 0
- line = parser.mark.line
- next_indent = parser.indent
- if next_indent < 0 {
- next_indent = 0
- }
- }
-
- if len(text) > 0 {
- parser.comments = append(parser.comments, yaml_comment_t{
- scan_mark: scan_mark,
- token_mark: start_mark,
- start_mark: start_mark,
- end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
- head: text,
- })
- }
- return true
-}
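
Note on the escape handling removed above: yaml_parser_scan_flow_scalar decodes \x, \u and \U escapes into a code point and then emits UTF-8 by hand rather than through the standard library. A minimal standalone sketch of that encoding step, using the same bit arithmetic as the deleted code (encodeRune is an illustrative name, not part of the vendored API):

package main

import "fmt"

// encodeRune mirrors the manual UTF-8 emission in the deleted
// yaml_parser_scan_flow_scalar: a decoded code point becomes one to
// four bytes. The scanner rejects surrogates and values above
// 0x10FFFF before reaching this step, so those cases are not handled.
func encodeRune(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	fmt.Printf("% X\n", encodeRune(0x2028)) // E2 80 A8
}

For 0x2028 this yields E2 80 A8, the same bytes the scanner hard-codes for the 'L' (line separator) escape.
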
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go
deleted file mode 100644
index 9210ece7e97..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/sorter.go
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
- "reflect"
- "unicode"
-)
-
-type keyList []reflect.Value
-
-func (l keyList) Len() int { return len(l) }
-func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l keyList) Less(i, j int) bool {
- a := l[i]
- b := l[j]
- ak := a.Kind()
- bk := b.Kind()
- for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
- a = a.Elem()
- ak = a.Kind()
- }
- for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
- b = b.Elem()
- bk = b.Kind()
- }
- af, aok := keyFloat(a)
- bf, bok := keyFloat(b)
- if aok && bok {
- if af != bf {
- return af < bf
- }
- if ak != bk {
- return ak < bk
- }
- return numLess(a, b)
- }
- if ak != reflect.String || bk != reflect.String {
- return ak < bk
- }
- ar, br := []rune(a.String()), []rune(b.String())
- digits := false
- for i := 0; i < len(ar) && i < len(br); i++ {
- if ar[i] == br[i] {
- digits = unicode.IsDigit(ar[i])
- continue
- }
- al := unicode.IsLetter(ar[i])
- bl := unicode.IsLetter(br[i])
- if al && bl {
- return ar[i] < br[i]
- }
- if al || bl {
- if digits {
- return al
- } else {
- return bl
- }
- }
- var ai, bi int
- var an, bn int64
- if ar[i] == '0' || br[i] == '0' {
- for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
- if ar[j] != '0' {
- an = 1
- bn = 1
- break
- }
- }
- }
- for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
- an = an*10 + int64(ar[ai]-'0')
- }
- for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
- bn = bn*10 + int64(br[bi]-'0')
- }
- if an != bn {
- return an < bn
- }
- if ai != bi {
- return ai < bi
- }
- return ar[i] < br[i]
- }
- return len(ar) < len(br)
-}
-
-// keyFloat returns a float value for v if it is a number/bool
-// and whether it is a number/bool or not.
-func keyFloat(v reflect.Value) (f float64, ok bool) {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.Int()), true
- case reflect.Float32, reflect.Float64:
- return v.Float(), true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return float64(v.Uint()), true
- case reflect.Bool:
- if v.Bool() {
- return 1, true
- }
- return 0, true
- }
- return 0, false
-}
-
-// numLess returns whether a < b.
-// a and b must necessarily have the same kind.
-func numLess(a, b reflect.Value) bool {
- switch a.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return a.Int() < b.Int()
- case reflect.Float32, reflect.Float64:
- return a.Float() < b.Float()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return a.Uint() < b.Uint()
- case reflect.Bool:
- return !a.Bool() && b.Bool()
- }
- panic("not a number")
-}
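
The sorter deleted above is what gives goyaml.v3 its numeric-aware ordering of mapping keys: digit runs compare by value, so "item2" sorts before "item10" rather than after it lexically. A minimal sketch of the observable behavior, written against upstream gopkg.in/yaml.v3 (which this vendored copy mirrors, since the vendored path itself is being removed):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

func main() {
    // keyList.Less compares digit runs numerically, so "item10"
    // sorts after "item2" instead of before it byte-wise.
    out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2, "item1": 1})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // Expected:
    // item1: 1
    // item2: 2
    // item10: 10
}
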
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go
deleted file mode 100644
index b8a116bf9a2..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/writerc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_WRITER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
- if emitter.write_handler == nil {
- panic("write handler not set")
- }
-
- // Check if the buffer is empty.
- if emitter.buffer_pos == 0 {
- return true
- }
-
- if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- return true
-}
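
The writer half of the emitter is plain buffered output with an explicit flush: nothing reaches the destination until yaml_emitter_flush hands the working buffer to the write handler, and the buffer is only reset after a successful write. A stand-alone sketch of the same pattern (flushingWriter and its fields are hypothetical names; the real emitter tracks buffer_pos into a fixed-size buffer):

package main

import (
    "bytes"
    "fmt"
)

// flushingWriter mirrors yaml_emitter_flush: bytes accumulate in buf and
// are handed to the write handler only when flush is called.
type flushingWriter struct {
    handler func([]byte) error
    buf     []byte
}

func (w *flushingWriter) flush() error {
    if w.handler == nil {
        panic("write handler not set")
    }
    if len(w.buf) == 0 {
        return nil // nothing buffered, matching the buffer_pos == 0 fast path
    }
    if err := w.handler(w.buf); err != nil {
        return fmt.Errorf("write error: %w", err)
    }
    w.buf = w.buf[:0] // reset only on success
    return nil
}

func main() {
    var sink bytes.Buffer
    w := &flushingWriter{handler: func(p []byte) error {
        _, err := sink.Write(p)
        return err
    }}
    w.buf = append(w.buf, "a: 1\n"...)
    if err := w.flush(); err != nil {
        panic(err)
    }
    fmt.Print(sink.String()) // a: 1
}
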
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go
deleted file mode 100644
index 8cec6da48d3..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/yaml.go
+++ /dev/null
@@ -1,698 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-// https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
- "errors"
- "fmt"
- "io"
- "reflect"
- "strings"
- "sync"
- "unicode/utf8"
-)
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document.
-type Unmarshaler interface {
- UnmarshalYAML(value *Node) error
-}
-
-type obsoleteUnmarshaler interface {
- UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
- MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
- return unmarshal(in, out, false)
-}
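
A runnable version of the doc comment's example, including the partial-decode behavior it describes: a type mismatch does not abort decoding, and the error comes back as a *yaml.TypeError once the document has been consumed. Sketch against upstream gopkg.in/yaml.v3:

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

type T struct {
    F int `yaml:"a,omitempty"`
    B int
}

func main() {
    var t T
    if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", t) // {F:1 B:2}

    // A mismatched field does not abort decoding: b is still set, and
    // err is a *yaml.TypeError describing the miss on a.
    var u T
    err := yaml.Unmarshal([]byte("a: oops\nb: 2"), &u)
    fmt.Println(u.B, err != nil) // 2 true
}
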
-
-// A Decoder reads and decodes YAML values from an input stream.
-type Decoder struct {
- parser *parser
- knownFields bool
-}
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// The decoder introduces its own buffering and may read
-// data from r beyond the YAML values requested.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{
- parser: newParserFromReader(r),
- }
-}
-
-// KnownFields ensures that the keys in decoded mappings
-// exist as fields in the struct being decoded into.
-func (dec *Decoder) KnownFields(enable bool) {
- dec.knownFields = enable
-}
-
-// Decode reads the next YAML-encoded value from its input
-// and stores it in the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about the
-// conversion of YAML into a Go value.
-func (dec *Decoder) Decode(v interface{}) (err error) {
- d := newDecoder()
- d.knownFields = dec.knownFields
- defer handleErr(&err)
- node := dec.parser.parse()
- if node == nil {
- return io.EOF
- }
- out := reflect.ValueOf(v)
- if out.Kind() == reflect.Ptr && !out.IsNil() {
- out = out.Elem()
- }
- d.unmarshal(node, out)
- if len(d.terrors) > 0 {
- return &TypeError{d.terrors}
- }
- return nil
-}
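
Decode returning io.EOF is what makes multi-document streams straightforward to drain, and KnownFields(true) turns unknown mapping keys into decode errors. A sketch:

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"

    yaml "gopkg.in/yaml.v3"
)

func main() {
    dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
    dec.KnownFields(true) // a stray "b: 3" key would now be a decode error
    for {
        var doc struct {
            A int `yaml:"a"`
        }
        err := dec.Decode(&doc)
        if errors.Is(err, io.EOF) {
            break // stream drained
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(doc.A) // 1, then 2
    }
}
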
-
-// Decode decodes the node and stores its data into the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about the
-// conversion of YAML into a Go value.
-func (n *Node) Decode(v interface{}) (err error) {
- d := newDecoder()
- defer handleErr(&err)
- out := reflect.ValueOf(v)
- if out.Kind() == reflect.Ptr && !out.IsNil() {
- out = out.Elem()
- }
- d.unmarshal(n, out)
- if len(d.terrors) > 0 {
- return &TypeError{d.terrors}
- }
- return nil
-}
-
-func unmarshal(in []byte, out interface{}, strict bool) (err error) {
- defer handleErr(&err)
- d := newDecoder()
- p := newParser(in)
- defer p.destroy()
- node := p.parse()
- if node != nil {
- v := reflect.ValueOf(out)
- if v.Kind() == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
- d.unmarshal(node, v)
- }
- if len(d.terrors) > 0 {
- return &TypeError{d.terrors}
- }
- return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-// `(...) yaml:"[][,[,]]" (...)`
-//
-// The following flags are currently supported:
-//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Zero valued structs will be omitted if all their public
-// fields are zero, unless they implement an IsZero
-// method (see the IsZeroer interface type), in which
-// case the field will be excluded if IsZero returns true.
-//
-// flow Marshal using a flow style (useful for structs,
-// sequences and maps).
-//
-// inline Inline the field, which must be a struct or a map,
-// causing all of its fields or keys to be processed as if
-// they were part of the outer struct. For maps, keys must
-// not conflict with the yaml keys of other struct fields.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
- defer handleErr(&err)
- e := newEncoder()
- defer e.destroy()
- e.marshalDoc("", reflect.ValueOf(in))
- e.finish()
- out = e.out
- return
-}
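
The three tag flags documented above compose per field; a short sketch showing omitempty, flow, and the "-" key together (Doc and Point are illustrative types):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

type Point struct {
    X int `yaml:"x"`
    Y int `yaml:"y"`
}

type Doc struct {
    Name  string `yaml:"name,omitempty"` // dropped when empty
    Point Point  `yaml:"point,flow"`     // rendered as {x: 1, y: 2}
    Skip  string `yaml:"-"`              // never serialized
}

func main() {
    out, err := yaml.Marshal(Doc{Point: Point{X: 1, Y: 2}, Skip: "dropped"})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // Expected: point: {x: 1, y: 2}
}
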
-
-// An Encoder writes YAML values to an output stream.
-type Encoder struct {
- encoder *encoder
-}
-
-// NewEncoder returns a new encoder that writes to w.
-// The Encoder should be closed after use to flush all data
-// to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- encoder: newEncoderWithWriter(w),
- }
-}
-
-// Encode writes the YAML encoding of v to the stream.
-// If multiple items are encoded to the stream, the
-// second and subsequent documents will be preceded
-// by a "---" document separator, but the first will not.
-//
-// See the documentation for Marshal for details about the conversion of Go
-// values to YAML.
-func (e *Encoder) Encode(v interface{}) (err error) {
- defer handleErr(&err)
- e.encoder.marshalDoc("", reflect.ValueOf(v))
- return nil
-}
-
-// Encode encodes value v and stores its representation in n.
-//
-// See the documentation for Marshal for details about the
-// conversion of Go values into YAML.
-func (n *Node) Encode(v interface{}) (err error) {
- defer handleErr(&err)
- e := newEncoder()
- defer e.destroy()
- e.marshalDoc("", reflect.ValueOf(v))
- e.finish()
- p := newParser(e.out)
- p.textless = true
- defer p.destroy()
- doc := p.parse()
- *n = *doc.Content[0]
- return nil
-}
-
-// SetIndent changes the indentation used when encoding.
-func (e *Encoder) SetIndent(spaces int) {
- if spaces < 0 {
- panic("yaml: cannot indent to a negative number of spaces")
- }
- e.encoder.indent = spaces
-}
-
-// Close closes the encoder by writing any remaining data.
-// It does not write a stream terminating string "...".
-func (e *Encoder) Close() (err error) {
- defer handleErr(&err)
- e.encoder.finish()
- return nil
-}
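
Encode, SetIndent, and Close in combination: every document after the first is preceded by a "---" separator, and Close flushes the stream without writing a "..." terminator. A sketch (output shown as expected with a two-space indent; the default is four):

package main

import (
    "os"

    yaml "gopkg.in/yaml.v3"
)

func main() {
    enc := yaml.NewEncoder(os.Stdout)
    enc.SetIndent(2)
    defer enc.Close()

    for _, v := range []map[string][]int{{"a": {1, 2}}, {"b": {3}}} {
        if err := enc.Encode(v); err != nil {
            panic(err)
        }
    }
    // Expected:
    // a:
    //   - 1
    //   - 2
    // ---
    // b:
    //   - 3
}
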
-
-func handleErr(err *error) {
- if v := recover(); v != nil {
- if e, ok := v.(yamlError); ok {
- *err = e.err
- } else {
- panic(v)
- }
- }
-}
-
-type yamlError struct {
- err error
-}
-
-func fail(err error) {
- panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
- panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
- Errors []string
-}
-
-func (e *TypeError) Error() string {
- return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
-}
-
-type Kind uint32
-
-const (
- DocumentNode Kind = 1 << iota
- SequenceNode
- MappingNode
- ScalarNode
- AliasNode
-)
-
-type Style uint32
-
-const (
- TaggedStyle Style = 1 << iota
- DoubleQuotedStyle
- SingleQuotedStyle
- LiteralStyle
- FoldedStyle
- FlowStyle
-)
-
-// Node represents an element in the YAML document hierarchy. While documents
-// are typically encoded and decoded into higher level types, such as structs
-// and maps, Node is an intermediate representation that allows detailed
-// control over the content being decoded or encoded.
-//
-// It's worth noting that although Node offers access into details such as
-// line numbers, columns, and comments, the content when re-encoded will not
-// have its original textual representation preserved. An effort is made to
-// render the data pleasantly, and to preserve comments near the data they
-// describe, though.
-//
-// Values that make use of the Node type interact with the yaml package in the
-// same way any other type would do, by encoding and decoding yaml data
-// directly or indirectly into them.
-//
-// For example:
-//
-// var person struct {
-// Name string
-// Address yaml.Node
-// }
-// err := yaml.Unmarshal(data, &person)
-//
-// Or by itself:
-//
-// var person Node
-// err := yaml.Unmarshal(data, &person)
-//
-type Node struct {
- // Kind defines whether the node is a document, a mapping, a sequence,
- // a scalar value, or an alias to another node. The specific data type of
- // scalar nodes may be obtained via the ShortTag and LongTag methods.
- Kind Kind
-
- // Style allows customizing the appearance of the node in the tree.
- Style Style
-
- // Tag holds the YAML tag defining the data type for the value.
- // When decoding, this field will always be set to the resolved tag,
- // even when it wasn't explicitly provided in the YAML content.
- // When encoding, if this field is unset the value type will be
- // implied from the node properties, and if it is set, it will only
- // be serialized into the representation if TaggedStyle is used or
- // the implicit tag diverges from the provided one.
- Tag string
-
- // Value holds the unescaped and unquoted representation of the value.
- Value string
-
- // Anchor holds the anchor name for this node, which allows aliases to point to it.
- Anchor string
-
- // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
- Alias *Node
-
- // Content holds contained nodes for documents, mappings, and sequences.
- Content []*Node
-
- // HeadComment holds any comments in the lines preceding the node and
- // not separated by an empty line.
- HeadComment string
-
- // LineComment holds any comments at the end of the line where the node is in.
- LineComment string
-
- // FootComment holds any comments following the node and before empty lines.
- FootComment string
-
- // Line and Column hold the node position in the decoded YAML text.
- // These fields are not respected when encoding the node.
- Line int
- Column int
-}
-
-// IsZero returns whether the node has all of its fields unset.
-func (n *Node) IsZero() bool {
- return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
- n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
-}
-
-// LongTag returns the long form of the tag that indicates the data type for
-// the node. If the Tag field isn't explicitly defined, one will be computed
-// based on the node properties.
-func (n *Node) LongTag() string {
- return longTag(n.ShortTag())
-}
-
-// ShortTag returns the short form of the YAML tag that indicates data type for
-// the node. If the Tag field isn't explicitly defined, one will be computed
-// based on the node properties.
-func (n *Node) ShortTag() string {
- if n.indicatedString() {
- return strTag
- }
- if n.Tag == "" || n.Tag == "!" {
- switch n.Kind {
- case MappingNode:
- return mapTag
- case SequenceNode:
- return seqTag
- case AliasNode:
- if n.Alias != nil {
- return n.Alias.ShortTag()
- }
- case ScalarNode:
- tag, _ := resolve("", n.Value)
- return tag
- case 0:
- // Special case to make the zero value convenient.
- if n.IsZero() {
- return nullTag
- }
- }
- return ""
- }
- return shortTag(n.Tag)
-}
-
-func (n *Node) indicatedString() bool {
- return n.Kind == ScalarNode &&
- (shortTag(n.Tag) == strTag ||
- (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
-}
-
-// SetString is a convenience function that sets the node to a string value
-// and defines its style in a pleasant way depending on its content.
-func (n *Node) SetString(s string) {
- n.Kind = ScalarNode
- if utf8.ValidString(s) {
- n.Value = s
- n.Tag = strTag
- } else {
- n.Value = encodeBase64(s)
- n.Tag = binaryTag
- }
- if strings.Contains(n.Value, "\n") {
- n.Style = LiteralStyle
- }
-}
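
Decoding into a yaml.Node keeps the positions and comments that higher-level targets discard: the value you get back is the DocumentNode, and its Content[0] is the top-level mapping. A sketch (comment attachment follows the HeadComment/LineComment rules above; expected output noted inline):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

func main() {
    src := "hosts:\n  - alpha # primary\n# fallback pool\nbackup:\n  - beta\n"
    var n yaml.Node
    if err := yaml.Unmarshal([]byte(src), &n); err != nil {
        panic(err)
    }
    mapping := n.Content[0] // the DocumentNode wraps the top-level mapping

    // Content alternates key, value, key, value for mappings.
    alpha := mapping.Content[1].Content[0]
    fmt.Println(alpha.Value, alpha.Line, alpha.LineComment) // alpha 2 # primary

    backupKey := mapping.Content[2]
    fmt.Println(backupKey.Value, backupKey.HeadComment) // backup # fallback pool
}
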
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
- FieldsMap map[string]fieldInfo
- FieldsList []fieldInfo
-
- // InlineMap is the number of the field in the struct that
- // contains an ,inline map, or -1 if there's none.
- InlineMap int
-
- // InlineUnmarshalers holds indexes to inlined fields that
- // contain unmarshaler values.
- InlineUnmarshalers [][]int
-}
-
-type fieldInfo struct {
- Key string
- Num int
- OmitEmpty bool
- Flow bool
- // Id holds the unique field identifier, so we can cheaply
- // check for field duplicates without maintaining an extra map.
- Id int
-
- // Inline holds the field index if the field is part of an inlined struct.
- Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-var unmarshalerType reflect.Type
-
-func init() {
- var v Unmarshaler
- unmarshalerType = reflect.ValueOf(&v).Elem().Type()
-}
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
- fieldMapMutex.RLock()
- sinfo, found := structMap[st]
- fieldMapMutex.RUnlock()
- if found {
- return sinfo, nil
- }
-
- n := st.NumField()
- fieldsMap := make(map[string]fieldInfo)
- fieldsList := make([]fieldInfo, 0, n)
- inlineMap := -1
- inlineUnmarshalers := [][]int(nil)
- for i := 0; i != n; i++ {
- field := st.Field(i)
- if field.PkgPath != "" && !field.Anonymous {
- continue // Private field
- }
-
- info := fieldInfo{Num: i}
-
- tag := field.Tag.Get("yaml")
- if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
- tag = string(field.Tag)
- }
- if tag == "-" {
- continue
- }
-
- inline := false
- fields := strings.Split(tag, ",")
- if len(fields) > 1 {
- for _, flag := range fields[1:] {
- switch flag {
- case "omitempty":
- info.OmitEmpty = true
- case "flow":
- info.Flow = true
- case "inline":
- inline = true
- default:
- return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
- }
- }
- tag = fields[0]
- }
-
- if inline {
- switch field.Type.Kind() {
- case reflect.Map:
- if inlineMap >= 0 {
- return nil, errors.New("multiple ,inline maps in struct " + st.String())
- }
- if field.Type.Key() != reflect.TypeOf("") {
- return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
- }
- inlineMap = info.Num
- case reflect.Struct, reflect.Ptr:
- ftype := field.Type
- for ftype.Kind() == reflect.Ptr {
- ftype = ftype.Elem()
- }
- if ftype.Kind() != reflect.Struct {
- return nil, errors.New("option ,inline may only be used on a struct or map field")
- }
- if reflect.PtrTo(ftype).Implements(unmarshalerType) {
- inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
- } else {
- sinfo, err := getStructInfo(ftype)
- if err != nil {
- return nil, err
- }
- for _, index := range sinfo.InlineUnmarshalers {
- inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
- }
- for _, finfo := range sinfo.FieldsList {
- if _, found := fieldsMap[finfo.Key]; found {
- msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
- if finfo.Inline == nil {
- finfo.Inline = []int{i, finfo.Num}
- } else {
- finfo.Inline = append([]int{i}, finfo.Inline...)
- }
- finfo.Id = len(fieldsList)
- fieldsMap[finfo.Key] = finfo
- fieldsList = append(fieldsList, finfo)
- }
- }
- default:
- return nil, errors.New("option ,inline may only be used on a struct or map field")
- }
- continue
- }
-
- if tag != "" {
- info.Key = tag
- } else {
- info.Key = strings.ToLower(field.Name)
- }
-
- if _, found = fieldsMap[info.Key]; found {
- msg := "duplicated key '" + info.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
-
- info.Id = len(fieldsList)
- fieldsList = append(fieldsList, info)
- fieldsMap[info.Key] = info
- }
-
- sinfo = &structInfo{
- FieldsMap: fieldsMap,
- FieldsList: fieldsList,
- InlineMap: inlineMap,
- InlineUnmarshalers: inlineUnmarshalers,
- }
-
- fieldMapMutex.Lock()
- structMap[st] = sinfo
- fieldMapMutex.Unlock()
- return sinfo, nil
-}
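
getStructInfo is where the ,inline flag is resolved: an inlined struct has its fields promoted into the outer mapping, and a single string-keyed ,inline map catches every key with no matching field. A sketch of the resulting behavior (Resource and Base are illustrative types):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

type Base struct {
    Kind string `yaml:"kind"`
}

type Resource struct {
    Base  `yaml:",inline"`                   // fields promoted into the outer mapping
    Extra map[string]string `yaml:",inline"` // catch-all for keys with no field
}

func main() {
    var r Resource
    if err := yaml.Unmarshal([]byte("kind: Pod\nowner: team-a\n"), &r); err != nil {
        panic(err)
    }
    fmt.Println(r.Kind, r.Extra["owner"]) // Pod team-a
}
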
-
-// IsZeroer is used to check whether an object is zero to
-// determine whether it should be omitted when marshaling
-// with the omitempty flag. One notable implementation
-// is time.Time.
-type IsZeroer interface {
- IsZero() bool
-}
-
-func isZero(v reflect.Value) bool {
- kind := v.Kind()
- if z, ok := v.Interface().(IsZeroer); ok {
- if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
- return true
- }
- return z.IsZero()
- }
- switch kind {
- case reflect.String:
- return len(v.String()) == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflect.Slice:
- return v.Len() == 0
- case reflect.Map:
- return v.Len() == 0
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Struct:
- vt := v.Type()
- for i := v.NumField() - 1; i >= 0; i-- {
- if vt.Field(i).PkgPath != "" {
- continue // Private field
- }
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
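
isZero consults a type's own IsZero before falling back to the structural check, so omitempty can drop values that the field-by-field test would keep. A sketch (Window is an illustrative type):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v3"
)

// Window is "zero" whenever it covers nothing, even if Start is set,
// which the plain field-by-field check would treat as non-zero.
type Window struct {
    Start int `yaml:"start"`
    Len   int `yaml:"len"`
}

func (w Window) IsZero() bool { return w.Len == 0 }

type Config struct {
    Name   string `yaml:"name"`
    Window Window `yaml:"window,omitempty"`
}

func main() {
    out, err := yaml.Marshal(Config{Name: "c", Window: Window{Start: 5}})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out)) // name: c  (window omitted because IsZero reports true)
}
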
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go
deleted file mode 100644
index 40c74de4978..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlh.go
+++ /dev/null
@@ -1,809 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
- "fmt"
- "io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
- major int8 // The major version number.
- minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
- handle []byte // The tag handle.
- prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
- // Let the parser choose the encoding.
- yaml_ANY_ENCODING yaml_encoding_t = iota
-
- yaml_UTF8_ENCODING // The default UTF-8 encoding.
- yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
- // Let the parser choose the break type.
- yaml_ANY_BREAK yaml_break_t = iota
-
- yaml_CR_BREAK // Use CR for line breaks (Mac style).
- yaml_LN_BREAK // Use LN for line breaks (Unix style).
- yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
- // No error is produced.
- yaml_NO_ERROR yaml_error_type_t = iota
-
- yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
- yaml_READER_ERROR // Cannot read or decode the input stream.
- yaml_SCANNER_ERROR // Cannot scan the input stream.
- yaml_PARSER_ERROR // Cannot parse the input stream.
- yaml_COMPOSER_ERROR // Cannot compose a YAML document.
- yaml_WRITER_ERROR // Cannot write to the output stream.
- yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
- index int // The position index.
- line int // The position line.
- column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
-
- yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
- yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
- yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
- yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
- yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
- yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
- yaml_BLOCK_MAPPING_STYLE // The block mapping style.
- yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
- // An empty token.
- yaml_NO_TOKEN yaml_token_type_t = iota
-
- yaml_STREAM_START_TOKEN // A STREAM-START token.
- yaml_STREAM_END_TOKEN // A STREAM-END token.
-
- yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
- yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
- yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
- yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
- yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
- yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
- yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
- yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
- yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
- yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
- yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
- yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
- yaml_KEY_TOKEN // A KEY token.
- yaml_VALUE_TOKEN // A VALUE token.
-
- yaml_ALIAS_TOKEN // An ALIAS token.
- yaml_ANCHOR_TOKEN // An ANCHOR token.
- yaml_TAG_TOKEN // A TAG token.
- yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
- switch tt {
- case yaml_NO_TOKEN:
- return "yaml_NO_TOKEN"
- case yaml_STREAM_START_TOKEN:
- return "yaml_STREAM_START_TOKEN"
- case yaml_STREAM_END_TOKEN:
- return "yaml_STREAM_END_TOKEN"
- case yaml_VERSION_DIRECTIVE_TOKEN:
- return "yaml_VERSION_DIRECTIVE_TOKEN"
- case yaml_TAG_DIRECTIVE_TOKEN:
- return "yaml_TAG_DIRECTIVE_TOKEN"
- case yaml_DOCUMENT_START_TOKEN:
- return "yaml_DOCUMENT_START_TOKEN"
- case yaml_DOCUMENT_END_TOKEN:
- return "yaml_DOCUMENT_END_TOKEN"
- case yaml_BLOCK_SEQUENCE_START_TOKEN:
- return "yaml_BLOCK_SEQUENCE_START_TOKEN"
- case yaml_BLOCK_MAPPING_START_TOKEN:
- return "yaml_BLOCK_MAPPING_START_TOKEN"
- case yaml_BLOCK_END_TOKEN:
- return "yaml_BLOCK_END_TOKEN"
- case yaml_FLOW_SEQUENCE_START_TOKEN:
- return "yaml_FLOW_SEQUENCE_START_TOKEN"
- case yaml_FLOW_SEQUENCE_END_TOKEN:
- return "yaml_FLOW_SEQUENCE_END_TOKEN"
- case yaml_FLOW_MAPPING_START_TOKEN:
- return "yaml_FLOW_MAPPING_START_TOKEN"
- case yaml_FLOW_MAPPING_END_TOKEN:
- return "yaml_FLOW_MAPPING_END_TOKEN"
- case yaml_BLOCK_ENTRY_TOKEN:
- return "yaml_BLOCK_ENTRY_TOKEN"
- case yaml_FLOW_ENTRY_TOKEN:
- return "yaml_FLOW_ENTRY_TOKEN"
- case yaml_KEY_TOKEN:
- return "yaml_KEY_TOKEN"
- case yaml_VALUE_TOKEN:
- return "yaml_VALUE_TOKEN"
- case yaml_ALIAS_TOKEN:
- return "yaml_ALIAS_TOKEN"
- case yaml_ANCHOR_TOKEN:
- return "yaml_ANCHOR_TOKEN"
- case yaml_TAG_TOKEN:
- return "yaml_TAG_TOKEN"
- case yaml_SCALAR_TOKEN:
- return "yaml_SCALAR_TOKEN"
- }
- return ""
-}
-
-// The token structure.
-type yaml_token_t struct {
- // The token type.
- typ yaml_token_type_t
-
- // The start/end of the token.
- start_mark, end_mark yaml_mark_t
-
- // The stream encoding (for yaml_STREAM_START_TOKEN).
- encoding yaml_encoding_t
-
- // The alias/anchor/scalar value or tag/tag directive handle
- // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
- value []byte
-
- // The tag suffix (for yaml_TAG_TOKEN).
- suffix []byte
-
- // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
- prefix []byte
-
- // The scalar style (for yaml_SCALAR_TOKEN).
- style yaml_scalar_style_t
-
- // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
- major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
- // An empty event.
- yaml_NO_EVENT yaml_event_type_t = iota
-
- yaml_STREAM_START_EVENT // A STREAM-START event.
- yaml_STREAM_END_EVENT // A STREAM-END event.
- yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
- yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
- yaml_ALIAS_EVENT // An ALIAS event.
- yaml_SCALAR_EVENT // A SCALAR event.
- yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
- yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
- yaml_MAPPING_START_EVENT // A MAPPING-START event.
- yaml_MAPPING_END_EVENT // A MAPPING-END event.
- yaml_TAIL_COMMENT_EVENT
-)
-
-var eventStrings = []string{
- yaml_NO_EVENT: "none",
- yaml_STREAM_START_EVENT: "stream start",
- yaml_STREAM_END_EVENT: "stream end",
- yaml_DOCUMENT_START_EVENT: "document start",
- yaml_DOCUMENT_END_EVENT: "document end",
- yaml_ALIAS_EVENT: "alias",
- yaml_SCALAR_EVENT: "scalar",
- yaml_SEQUENCE_START_EVENT: "sequence start",
- yaml_SEQUENCE_END_EVENT: "sequence end",
- yaml_MAPPING_START_EVENT: "mapping start",
- yaml_MAPPING_END_EVENT: "mapping end",
- yaml_TAIL_COMMENT_EVENT: "tail comment",
-}
-
-func (e yaml_event_type_t) String() string {
- if e < 0 || int(e) >= len(eventStrings) {
- return fmt.Sprintf("unknown event %d", e)
- }
- return eventStrings[e]
-}
-
-// The event structure.
-type yaml_event_t struct {
-
- // The event type.
- typ yaml_event_type_t
-
- // The start and end of the event.
- start_mark, end_mark yaml_mark_t
-
- // The document encoding (for yaml_STREAM_START_EVENT).
- encoding yaml_encoding_t
-
- // The version directive (for yaml_DOCUMENT_START_EVENT).
- version_directive *yaml_version_directive_t
-
- // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
- tag_directives []yaml_tag_directive_t
-
- // The comments
- head_comment []byte
- line_comment []byte
- foot_comment []byte
- tail_comment []byte
-
- // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
- anchor []byte
-
- // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- tag []byte
-
- // The scalar value (for yaml_SCALAR_EVENT).
- value []byte
-
- // Is the document start/end indicator implicit, or the tag optional?
- // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
- implicit bool
-
- // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
- quoted_implicit bool
-
- // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
- yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
- yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
- yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
- yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
- yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
- yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
- yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
- yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
-
- // Not in original libyaml.
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
- yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
-
- yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
- yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
- // An empty node.
- yaml_NO_NODE yaml_node_type_t = iota
-
- yaml_SCALAR_NODE // A scalar node.
- yaml_SEQUENCE_NODE // A sequence node.
- yaml_MAPPING_NODE // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
- key int // The key of the element.
- value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
- typ yaml_node_type_t // The node type.
- tag []byte // The node tag.
-
- // The node data.
-
- // The scalar parameters (for yaml_SCALAR_NODE).
- scalar struct {
- value []byte // The scalar value.
- length int // The length of the scalar value.
- style yaml_scalar_style_t // The scalar style.
- }
-
- // The sequence parameters (for YAML_SEQUENCE_NODE).
- sequence struct {
- items_data []yaml_node_item_t // The stack of sequence items.
- style yaml_sequence_style_t // The sequence style.
- }
-
- // The mapping parameters (for yaml_MAPPING_NODE).
- mapping struct {
- pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
- pairs_start *yaml_node_pair_t // The beginning of the stack.
- pairs_end *yaml_node_pair_t // The end of the stack.
- pairs_top *yaml_node_pair_t // The top of the stack.
- style yaml_mapping_style_t // The mapping style.
- }
-
- start_mark yaml_mark_t // The beginning of the node.
- end_mark yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write not more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out] data A pointer to an application data specified by
-// yaml_parser_set_input().
-// [out] buffer The buffer to write the data from the source.
-// [in] size The size of the buffer.
-// [out] size_read The actual number of bytes read from the source.
-//
-// On success, the handler should return 1. If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occurred.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_reader io.Reader // File input data.
- input []byte // String input data.
- input_pos int
-
- eof bool // EOF flag
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- newlines int // The number of line breaks since last non-break/non-blank character
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Comments
-
- head_comment []byte // The current head comments
- line_comment []byte // The current line comments
- foot_comment []byte // The current foot comments
- tail_comment []byte // Foot comment that happens at the end of a block.
- stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
-
- comments []yaml_comment_t // The folded comments for all parsed tokens
- comments_head int
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing.
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
- simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-type yaml_comment_t struct {
-
- scan_mark yaml_mark_t // Position where scanning for comments started
- token_mark yaml_mark_t // Position after which tokens will be associated with this comment
- start_mark yaml_mark_t // Position of '#' comment mark
- end_mark yaml_mark_t // Position where comment terminated
-
- head []byte
- line []byte
- foot []byte
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output. The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out] data A pointer to an application data specified by
-// yaml_emitter_set_output().
-// @param[in] buffer The buffer with bytes to be written.
-// @param[in] size The size of the buffer.
-//
-// @returns On success, the handler should return @c 1. If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_writer io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // If the output is in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // If the last character was a whitespace?
- indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
- open_ended bool // If an explicit document end is required?
-
- space_above bool // Is there an empty line above?
- foot_indent int // The indent used to write the foot comment above, or -1 if none.
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Comments
- head_comment []byte
- line_comment []byte
- foot_comment []byte
- tail_comment []byte
-
- key_line_comment []byte
-
- // Dumper stuff
-
- opened bool // If the stream was already opened?
- closed bool // If the stream was already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // If the node has been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go
deleted file mode 100644
index e88f9c54aec..00000000000
--- a/vendor/sigs.k8s.io/yaml/goyaml.v3/yamlprivateh.go
+++ /dev/null
@@ -1,198 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
- return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
- return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
- return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
- //return is_space(b, i) || is_tab(b, i)
- return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
- return (b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
- return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
- //return is_break(b, i) || is_z(b, i)
- return (
- // is_break:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- // is_z:
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
- //return is_space(b, i) || is_breakz(b, i)
- return (
- // is_space:
- b[i] == ' ' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
- //return is_blank(b, i) || is_breakz(b, i)
- return (
- // is_blank:
- b[i] == ' ' || b[i] == '\t' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
- // Don't replace these by a switch without first
- // confirming that it is being inlined.
- if b&0x80 == 0x00 {
- return 1
- }
- if b&0xE0 == 0xC0 {
- return 2
- }
- if b&0xF0 == 0xE0 {
- return 3
- }
- if b&0xF8 == 0xF0 {
- return 4
- }
- return 0
-
-}
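
width classifies a UTF-8 leading byte by its high bits to get the encoded rune length. A stand-alone mirror of the same table (utf8Width is an illustrative name; a switch is fine here because the inlining concern in the original comment does not apply to a sketch):

package main

import "fmt"

// utf8Width mirrors the deleted width() helper: 0 means a continuation
// or invalid leading byte.
func utf8Width(b byte) int {
    switch {
    case b&0x80 == 0x00:
        return 1 // 0xxxxxxx: ASCII
    case b&0xE0 == 0xC0:
        return 2 // 110xxxxx
    case b&0xF0 == 0xE0:
        return 3 // 1110xxxx
    case b&0xF8 == 0xF0:
        return 4 // 11110xxx
    }
    return 0
}

func main() {
    for _, s := range []string{"a", "é", "€", "🙂"} {
        fmt.Printf("%q starts a %d-byte rune\n", s, utf8Width(s[0]))
    }
}
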