From f4a9d78e7f266046ee44ca997191825afac69eb5 Mon Sep 17 00:00:00 2001 From: Tyler Yahn Date: Wed, 29 Mar 2023 11:24:25 -0700 Subject: [PATCH] Update Histogram Extrema and Sum to be generic (#3870) * Update Histogram Extrema and Sum to be generic * Update metric SDK * Update exporters * Add changes to changelog --- CHANGELOG.md | 1 + .../internal/transform/metricdata.go | 8 +-- .../internal/transform/metricdata_test.go | 12 ++--- exporters/prometheus/exporter.go | 2 +- sdk/metric/internal/aggregator_test.go | 15 ++++-- sdk/metric/internal/histogram.go | 29 +++++------ sdk/metric/internal/histogram_test.go | 49 +++++++++++-------- sdk/metric/internal/lastvalue_test.go | 4 +- sdk/metric/internal/sum_test.go | 30 ++++++------ sdk/metric/meter_test.go | 15 +++--- sdk/metric/metricdata/data.go | 16 +++--- .../metricdata/metricdatatest/assertion.go | 11 +++-- .../metricdatatest/assertion_test.go | 33 +++++++------ .../metricdata/metricdatatest/comparisons.go | 4 +- 14 files changed, 123 insertions(+), 106 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 81cb801b078..cc30053118d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Changed +- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) - Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata.go b/exporters/otlp/otlpmetric/internal/transform/metricdata.go index 208e6087831..2f98115b83d 100644 --- a/exporters/otlp/otlpmetric/internal/transform/metricdata.go +++ b/exporters/otlp/otlpmetric/internal/transform/metricdata.go @@ -175,7 +175,7 @@ func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histog func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { - sum := dPt.Sum + sum := float64(dPt.Sum) hdp := &mpb.HistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()), @@ -186,10 +186,12 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint ExplicitBounds: dPt.Bounds, } if v, ok := dPt.Min.Value(); ok { - hdp.Min = &v + vF64 := float64(v) + hdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { - hdp.Max = &v + vF64 := float64(v) + hdp.Max = &vF64 } out = append(out, hdp) } diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go index 72693335cf4..d9a8ddf6a7c 100644 --- a/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go +++ b/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go @@ -59,9 +59,9 @@ var ( Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, - Min: metricdata.NewExtrema(minA), - Max: metricdata.NewExtrema(maxA), - Sum: sumA, + Min: metricdata.NewExtrema(int64(minA)), + Max: metricdata.NewExtrema(int64(maxA)), + Sum: int64(sumA), }, { Attributes: bob, StartTime: start, @@ -69,9 +69,9 @@ var ( Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, - Min: metricdata.NewExtrema(minB), - Max: metricdata.NewExtrema(maxB), - Sum: sumB, + Min: 
metricdata.NewExtrema(int64(minB)), + Max: metricdata.NewExtrema(int64(maxB)), + Sum: int64(sumB), }} otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{{ Attributes: alice, diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go index 21b5c41de62..357be10038c 100644 --- a/exporters/prometheus/exporter.go +++ b/exporters/prometheus/exporter.go @@ -193,7 +193,7 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra cumulativeCount += dp.BucketCounts[i] buckets[bound] = cumulativeCount } - m, err := prometheus.NewConstHistogram(desc, dp.Count, dp.Sum, buckets, values...) + m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...) if err != nil { otel.Handle(err) continue diff --git a/sdk/metric/internal/aggregator_test.go b/sdk/metric/internal/aggregator_test.go index a544a18ca21..03b9a91c3ea 100644 --- a/sdk/metric/internal/aggregator_test.go +++ b/sdk/metric/internal/aggregator_test.go @@ -38,9 +38,6 @@ var ( bob = attribute.NewSet(attribute.String("user", "bob"), attribute.Bool("admin", false)) carol = attribute.NewSet(attribute.String("user", "carol"), attribute.Bool("admin", false)) - monoIncr = setMap{alice: 1, bob: 10, carol: 2} - nonMonoIncr = setMap{alice: 1, bob: -1, carol: 2} - // Sat Jan 01 2000 00:00:00 GMT+0000. staticTime = time.Unix(946684800, 0) staticNowFunc = func() time.Time { return staticTime } @@ -52,8 +49,16 @@ var ( } ) +func monoIncr[N int64 | float64]() setMap[N] { + return setMap[N]{alice: 1, bob: 10, carol: 2} +} + +func nonMonoIncr[N int64 | float64]() setMap[N] { + return setMap[N]{alice: 1, bob: -1, carol: 2} +} + // setMap maps attribute sets to a number. -type setMap map[attribute.Set]int +type setMap[N int64 | float64] map[attribute.Set]N // expectFunc is a function that returns an Aggregation of expected values for // a cycle that contains m measurements (total across all goroutines). Each @@ -79,7 +84,7 @@ type aggregatorTester[N int64 | float64] struct { CycleN int } -func (at *aggregatorTester[N]) Run(a Aggregator[N], incr setMap, eFunc expectFunc) func(*testing.T) { +func (at *aggregatorTester[N]) Run(a Aggregator[N], incr setMap[N], eFunc expectFunc) func(*testing.T) { m := at.MeasurementN * at.GoroutineN return func(t *testing.T) { t.Run("Comparable", func(t *testing.T) { diff --git a/sdk/metric/internal/histogram.go b/sdk/metric/internal/histogram.go index bb4372d64cb..7ad454e96c4 100644 --- a/sdk/metric/internal/histogram.go +++ b/sdk/metric/internal/histogram.go @@ -24,19 +24,19 @@ import ( "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -type buckets struct { +type buckets[N int64 | float64] struct { counts []uint64 count uint64 - sum float64 - min, max float64 + sum N + min, max N } // newBuckets returns buckets with n bins. 
-func newBuckets(n int) *buckets { - return &buckets{counts: make([]uint64, n)} +func newBuckets[N int64 | float64](n int) *buckets[N] { + return &buckets[N]{counts: make([]uint64, n)} } -func (b *buckets) bin(idx int, value float64) { +func (b *buckets[N]) bin(idx int, value N) { b.counts[idx]++ b.count++ b.sum += value @@ -52,7 +52,7 @@ func (b *buckets) bin(idx int, value float64) { type histValues[N int64 | float64] struct { bounds []float64 - values map[attribute.Set]*buckets + values map[attribute.Set]*buckets[N] valuesMu sync.Mutex } @@ -66,24 +66,19 @@ func newHistValues[N int64 | float64](bounds []float64) *histValues[N] { sort.Float64s(b) return &histValues[N]{ bounds: b, - values: make(map[attribute.Set]*buckets), + values: make(map[attribute.Set]*buckets[N]), } } // Aggregate records the measurement value, scoped by attr, and aggregates it // into a histogram. func (s *histValues[N]) Aggregate(value N, attr attribute.Set) { - // Accept all types to satisfy the Aggregator interface. However, since - // the Aggregation produced by this Aggregator is only float64, convert - // here to only use this type. - v := float64(value) - // This search will return an index in the range [0, len(s.bounds)], where // it will return len(s.bounds) if value is greater than the last element // of s.bounds. This aligns with the buckets in that the length of buckets // is len(s.bounds)+1, with the last bucket representing: // (s.bounds[len(s.bounds)-1], +∞). - idx := sort.SearchFloat64s(s.bounds, v) + idx := sort.SearchFloat64s(s.bounds, float64(value)) s.valuesMu.Lock() defer s.valuesMu.Unlock() @@ -97,12 +92,12 @@ func (s *histValues[N]) Aggregate(value N, attr attribute.Set) { // Then, // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) - b = newBuckets(len(s.bounds) + 1) + b = newBuckets[N](len(s.bounds) + 1) // Ensure min and max are recorded values (not zero), for new buckets. 
- b.min, b.max = v, v + b.min, b.max = value, value s.values[attr] = b } - b.bin(idx, v) + b.bin(idx, value) } // NewDeltaHistogram returns an Aggregator that summarizes a set of diff --git a/sdk/metric/internal/histogram_test.go b/sdk/metric/internal/histogram_test.go index e030ce5f2cb..20bd19a4167 100644 --- a/sdk/metric/internal/histogram_test.go +++ b/sdk/metric/internal/histogram_test.go @@ -48,32 +48,32 @@ func testHistogram[N int64 | float64](t *testing.T) { CycleN: defaultCycles, } - incr := monoIncr + incr := monoIncr[N]() eFunc := deltaHistExpecter[N](incr) t.Run("Delta", tester.Run(NewDeltaHistogram[N](histConf), incr, eFunc)) eFunc = cumuHistExpecter[N](incr) t.Run("Cumulative", tester.Run(NewCumulativeHistogram[N](histConf), incr, eFunc)) } -func deltaHistExpecter[N int64 | float64](incr setMap) expectFunc { +func deltaHistExpecter[N int64 | float64](incr setMap[N]) expectFunc { h := metricdata.Histogram[N]{Temporality: metricdata.DeltaTemporality} return func(m int) metricdata.Aggregation { h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPoint[N](a, float64(v), uint64(m))) + h.DataPoints = append(h.DataPoints, hPoint[N](a, v, uint64(m))) } return h } } -func cumuHistExpecter[N int64 | float64](incr setMap) expectFunc { +func cumuHistExpecter[N int64 | float64](incr setMap[N]) expectFunc { var cycle int h := metricdata.Histogram[N]{Temporality: metricdata.CumulativeTemporality} return func(m int) metricdata.Aggregation { cycle++ h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPoint[N](a, float64(v), uint64(cycle*m))) + h.DataPoints = append(h.DataPoints, hPoint[N](a, v, uint64(cycle*m))) } return h } @@ -81,8 +81,8 @@ func cumuHistExpecter[N int64 | float64](incr setMap) expectFunc { // hPoint returns an HistogramDataPoint that started and ended now with multi // number of measurements values v. It includes a min and max (set to v). 
-func hPoint[N int64 | float64](a attribute.Set, v float64, multi uint64) metricdata.HistogramDataPoint[N] { - idx := sort.SearchFloat64s(bounds, v) +func hPoint[N int64 | float64](a attribute.Set, v N, multi uint64) metricdata.HistogramDataPoint[N] { + idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi return metricdata.HistogramDataPoint[N]{ @@ -94,25 +94,32 @@ func hPoint[N int64 | float64](a attribute.Set, v float64, multi uint64) metricd BucketCounts: counts, Min: metricdata.NewExtrema(v), Max: metricdata.NewExtrema(v), - Sum: v * float64(multi), + Sum: v * N(multi), } } func TestBucketsBin(t *testing.T) { - b := newBuckets(3) - assertB := func(counts []uint64, count uint64, sum, min, max float64) { - assert.Equal(t, counts, b.counts) - assert.Equal(t, count, b.count) - assert.Equal(t, sum, b.sum) - assert.Equal(t, min, b.min) - assert.Equal(t, max, b.max) - } + t.Run("Int64", testBucketsBin[int64]()) + t.Run("Float64", testBucketsBin[float64]()) +} + +func testBucketsBin[N int64 | float64]() func(t *testing.T) { + return func(t *testing.T) { + b := newBuckets[N](3) + assertB := func(counts []uint64, count uint64, sum, min, max N) { + assert.Equal(t, counts, b.counts) + assert.Equal(t, count, b.count) + assert.Equal(t, sum, b.sum) + assert.Equal(t, min, b.min) + assert.Equal(t, max, b.max) + } - assertB([]uint64{0, 0, 0}, 0, 0, 0, 0) - b.bin(1, 2) - assertB([]uint64{0, 1, 0}, 1, 2, 0, 2) - b.bin(0, -1) - assertB([]uint64{1, 1, 0}, 2, 1, -1, 2) + assertB([]uint64{0, 0, 0}, 0, 0, 0, 0) + b.bin(1, 2) + assertB([]uint64{0, 1, 0}, 1, 2, 0, 2) + b.bin(0, -1) + assertB([]uint64{1, 1, 0}, 2, 1, -1, 2) + } } func testHistImmutableBounds[N int64 | float64](newA func(aggregation.ExplicitBucketHistogram) Aggregator[N], getBounds func(Aggregator[N]) []float64) func(t *testing.T) { diff --git a/sdk/metric/internal/lastvalue_test.go b/sdk/metric/internal/lastvalue_test.go index 4d78373c328..6b17198ae98 100644 --- a/sdk/metric/internal/lastvalue_test.go +++ b/sdk/metric/internal/lastvalue_test.go @@ -37,7 +37,7 @@ func testLastValue[N int64 | float64]() func(*testing.T) { CycleN: defaultCycles, } - eFunc := func(increments setMap) expectFunc { + eFunc := func(increments setMap[N]) expectFunc { data := make([]metricdata.DataPoint[N], 0, len(increments)) for a, v := range increments { point := metricdata.DataPoint[N]{Attributes: a, Time: now(), Value: N(v)} @@ -46,7 +46,7 @@ func testLastValue[N int64 | float64]() func(*testing.T) { gauge := metricdata.Gauge[N]{DataPoints: data} return func(int) metricdata.Aggregation { return gauge } } - incr := monoIncr + incr := monoIncr[N]() return tester.Run(NewLastValue[N](), incr, eFunc(incr)) } diff --git a/sdk/metric/internal/sum_test.go b/sdk/metric/internal/sum_test.go index cde79aaa92b..e40089566ed 100644 --- a/sdk/metric/internal/sum_test.go +++ b/sdk/metric/internal/sum_test.go @@ -39,71 +39,71 @@ func testSum[N int64 | float64](t *testing.T) { } t.Run("Delta", func(t *testing.T) { - incr, mono := monoIncr, true + incr, mono := monoIncr[N](), true eFunc := deltaExpecter[N](incr, mono) t.Run("Monotonic", tester.Run(NewDeltaSum[N](mono), incr, eFunc)) - incr, mono = nonMonoIncr, false + incr, mono = nonMonoIncr[N](), false eFunc = deltaExpecter[N](incr, mono) t.Run("NonMonotonic", tester.Run(NewDeltaSum[N](mono), incr, eFunc)) }) t.Run("Cumulative", func(t *testing.T) { - incr, mono := monoIncr, true + incr, mono := monoIncr[N](), true eFunc := cumuExpecter[N](incr, mono) t.Run("Monotonic", 
tester.Run(NewCumulativeSum[N](mono), incr, eFunc)) - incr, mono = nonMonoIncr, false + incr, mono = nonMonoIncr[N](), false eFunc = cumuExpecter[N](incr, mono) t.Run("NonMonotonic", tester.Run(NewCumulativeSum[N](mono), incr, eFunc)) }) t.Run("PreComputedDelta", func(t *testing.T) { - incr, mono := monoIncr, true + incr, mono := monoIncr[N](), true eFunc := preDeltaExpecter[N](incr, mono) t.Run("Monotonic", tester.Run(NewPrecomputedDeltaSum[N](mono), incr, eFunc)) - incr, mono = nonMonoIncr, false + incr, mono = nonMonoIncr[N](), false eFunc = preDeltaExpecter[N](incr, mono) t.Run("NonMonotonic", tester.Run(NewPrecomputedDeltaSum[N](mono), incr, eFunc)) }) t.Run("PreComputedCumulative", func(t *testing.T) { - incr, mono := monoIncr, true + incr, mono := monoIncr[N](), true eFunc := preCumuExpecter[N](incr, mono) t.Run("Monotonic", tester.Run(NewPrecomputedCumulativeSum[N](mono), incr, eFunc)) - incr, mono = nonMonoIncr, false + incr, mono = nonMonoIncr[N](), false eFunc = preCumuExpecter[N](incr, mono) t.Run("NonMonotonic", tester.Run(NewPrecomputedCumulativeSum[N](mono), incr, eFunc)) }) } -func deltaExpecter[N int64 | float64](incr setMap, mono bool) expectFunc { +func deltaExpecter[N int64 | float64](incr setMap[N], mono bool) expectFunc { sum := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality, IsMonotonic: mono} return func(m int) metricdata.Aggregation { sum.DataPoints = make([]metricdata.DataPoint[N], 0, len(incr)) for a, v := range incr { - sum.DataPoints = append(sum.DataPoints, point(a, N(v*m))) + sum.DataPoints = append(sum.DataPoints, point(a, v*N(m))) } return sum } } -func cumuExpecter[N int64 | float64](incr setMap, mono bool) expectFunc { - var cycle int +func cumuExpecter[N int64 | float64](incr setMap[N], mono bool) expectFunc { + var cycle N sum := metricdata.Sum[N]{Temporality: metricdata.CumulativeTemporality, IsMonotonic: mono} return func(m int) metricdata.Aggregation { cycle++ sum.DataPoints = make([]metricdata.DataPoint[N], 0, len(incr)) for a, v := range incr { - sum.DataPoints = append(sum.DataPoints, point(a, N(v*cycle*m))) + sum.DataPoints = append(sum.DataPoints, point(a, v*cycle*N(m))) } return sum } } -func preDeltaExpecter[N int64 | float64](incr setMap, mono bool) expectFunc { +func preDeltaExpecter[N int64 | float64](incr setMap[N], mono bool) expectFunc { sum := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality, IsMonotonic: mono} last := make(map[attribute.Set]N) return func(int) metricdata.Aggregation { @@ -117,7 +117,7 @@ func preDeltaExpecter[N int64 | float64](incr setMap, mono bool) expectFunc { } } -func preCumuExpecter[N int64 | float64](incr setMap, mono bool) expectFunc { +func preCumuExpecter[N int64 | float64](incr setMap[N], mono bool) expectFunc { sum := metricdata.Sum[N]{Temporality: metricdata.CumulativeTemporality, IsMonotonic: mono} return func(int) metricdata.Aggregation { sum.DataPoints = make([]metricdata.DataPoint[N], 0, len(incr)) diff --git a/sdk/metric/meter_test.go b/sdk/metric/meter_test.go index fafa0c79741..ef8c579d534 100644 --- a/sdk/metric/meter_test.go +++ b/sdk/metric/meter_test.go @@ -168,7 +168,6 @@ func TestCallbackUnregisterConcurrency(t *testing.T) { // Instruments should produce correct ResourceMetrics. func TestMeterCreatesInstruments(t *testing.T) { - extrema := metricdata.NewExtrema(7.) 
attrs := []attribute.KeyValue{attribute.String("name", "alice")} testCases := []struct { name string @@ -390,9 +389,9 @@ func TestMeterCreatesInstruments(t *testing.T) { Count: 1, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - Min: extrema, - Max: extrema, - Sum: 7.0, + Min: metricdata.NewExtrema[int64](7), + Max: metricdata.NewExtrema[int64](7), + Sum: 7, }, }, }, @@ -454,8 +453,8 @@ func TestMeterCreatesInstruments(t *testing.T) { Count: 1, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - Min: extrema, - Max: extrema, + Min: metricdata.NewExtrema[float64](7.), + Max: metricdata.NewExtrema[float64](7.), Sum: 7.0, }, }, @@ -1213,8 +1212,8 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Count: 2, - Min: metricdata.NewExtrema(1.), - Max: metricdata.NewExtrema(2.), + Min: metricdata.NewExtrema[int64](1), + Max: metricdata.NewExtrema[int64](2), Sum: 3.0, }, }, diff --git a/sdk/metric/metricdata/data.go b/sdk/metric/metricdata/data.go index f474a2a617f..1e32f5eeb02 100644 --- a/sdk/metric/metricdata/data.go +++ b/sdk/metric/metricdata/data.go @@ -127,30 +127,30 @@ type HistogramDataPoint[N int64 | float64] struct { BucketCounts []uint64 // Min is the minimum value recorded. (optional) - Min Extrema + Min Extrema[N] // Max is the maximum value recorded. (optional) - Max Extrema + Max Extrema[N] // Sum is the sum of the values recorded. - Sum float64 + Sum N // Exemplars is the sampled Exemplars collected during the timeseries. Exemplars []Exemplar[N] `json:",omitempty"` } // Extrema is the minimum or maximum value of a dataset. -type Extrema struct { - value float64 +type Extrema[N int64 | float64] struct { + value N valid bool } // NewExtrema returns an Extrema set to v. -func NewExtrema(v float64) Extrema { - return Extrema{value: v, valid: true} +func NewExtrema[N int64 | float64](v N) Extrema[N] { + return Extrema[N]{value: v, valid: true} } // Value returns the Extrema value and true if the Extrema is defined. // Otherwise, if the Extrema is its zero-value, defined will be false. 
-func (e Extrema) Value() (v float64, defined bool) { +func (e Extrema[N]) Value() (v N, defined bool) { return e.value, e.valid } diff --git a/sdk/metric/metricdata/metricdatatest/assertion.go b/sdk/metric/metricdata/metricdatatest/assertion.go index ed58fdcba75..08bac5131fe 100644 --- a/sdk/metric/metricdata/metricdatatest/assertion.go +++ b/sdk/metric/metricdata/metricdatatest/assertion.go @@ -34,7 +34,8 @@ type Datatypes interface { metricdata.Histogram[int64] | metricdata.HistogramDataPoint[float64] | metricdata.HistogramDataPoint[int64] | - metricdata.Extrema | + metricdata.Extrema[int64] | + metricdata.Extrema[float64] | metricdata.Metrics | metricdata.ResourceMetrics | metricdata.ScopeMetrics | @@ -119,8 +120,10 @@ func AssertEqual[T Datatypes](t *testing.T, expected, actual T, opts ...Option) r = equalHistogramDataPoints(e, aIface.(metricdata.HistogramDataPoint[float64]), cfg) case metricdata.HistogramDataPoint[int64]: r = equalHistogramDataPoints(e, aIface.(metricdata.HistogramDataPoint[int64]), cfg) - case metricdata.Extrema: - r = equalExtrema(e, aIface.(metricdata.Extrema), cfg) + case metricdata.Extrema[int64]: + r = equalExtrema(e, aIface.(metricdata.Extrema[int64]), cfg) + case metricdata.Extrema[float64]: + r = equalExtrema(e, aIface.(metricdata.Extrema[float64]), cfg) case metricdata.Metrics: r = equalMetrics(e, aIface.(metricdata.Metrics), cfg) case metricdata.ResourceMetrics: @@ -183,7 +186,7 @@ func AssertHasAttributes[T Datatypes](t *testing.T, actual T, attrs ...attribute reasons = hasAttributesHistogramDataPoints(e, attrs...) case metricdata.HistogramDataPoint[float64]: reasons = hasAttributesHistogramDataPoints(e, attrs...) - case metricdata.Extrema: + case metricdata.Extrema[int64], metricdata.Extrema[float64]: // Nothing to check. case metricdata.Histogram[int64]: reasons = hasAttributesHistogram(e, attrs...) diff --git a/sdk/metric/metricdata/metricdatatest/assertion_test.go b/sdk/metric/metricdata/metricdatatest/assertion_test.go index 285dec82451..6ad40ebf1b2 100644 --- a/sdk/metric/metricdata/metricdatatest/assertion_test.go +++ b/sdk/metric/metricdata/metricdatatest/assertion_test.go @@ -129,9 +129,12 @@ var ( Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64C}, } - minA = metricdata.NewExtrema(-1.) - minB, maxB = metricdata.NewExtrema(3.), metricdata.NewExtrema(99.) - minC = metricdata.NewExtrema(-1.) + minFloat64A = metricdata.NewExtrema(-1.) + minInt64A = metricdata.NewExtrema[int64](-1) + minFloat64B, maxFloat64B = metricdata.NewExtrema(3.), metricdata.NewExtrema(99.) + minInt64B, maxInt64B = metricdata.NewExtrema[int64](3), metricdata.NewExtrema[int64](99) + minFloat64C = metricdata.NewExtrema(-1.) 
+ minInt64C = metricdata.NewExtrema[int64](-1) histogramDataPointInt64A = metricdata.HistogramDataPoint[int64]{ Attributes: attrA, @@ -140,7 +143,7 @@ var ( Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, - Min: minA, + Min: minInt64A, Sum: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } @@ -151,7 +154,7 @@ var ( Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, - Min: minA, + Min: minFloat64A, Sum: 2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } @@ -162,8 +165,8 @@ var ( Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, - Max: maxB, - Min: minB, + Max: maxInt64B, + Min: minInt64B, Sum: 3, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64B}, } @@ -174,8 +177,8 @@ var ( Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, - Max: maxB, - Min: minB, + Max: maxFloat64B, + Min: minFloat64B, Sum: 3, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64B}, } @@ -186,7 +189,7 @@ var ( Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, - Min: minC, + Min: minInt64C, Sum: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64C}, } @@ -197,7 +200,7 @@ var ( Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, - Min: minC, + Min: minFloat64C, Sum: 2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64C}, } @@ -371,7 +374,8 @@ func TestAssertEqual(t *testing.T) { t.Run("HistogramDataPointFloat64", testDatatype(histogramDataPointFloat64A, histogramDataPointFloat64B, equalHistogramDataPoints[float64])) t.Run("DataPointInt64", testDatatype(dataPointInt64A, dataPointInt64B, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatype(dataPointFloat64A, dataPointFloat64B, equalDataPoints[float64])) - t.Run("Extrema", testDatatype(minA, minB, equalExtrema)) + t.Run("ExtremaInt64", testDatatype(minInt64A, minInt64B, equalExtrema[int64])) + t.Run("ExtremaFloat64", testDatatype(minFloat64A, minFloat64B, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatype(exemplarInt64A, exemplarInt64B, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatype(exemplarFloat64A, exemplarFloat64B, equalExemplars[float64])) } @@ -390,7 +394,8 @@ func TestAssertEqualIgnoreTime(t *testing.T) { t.Run("HistogramDataPointFloat64", testDatatypeIgnoreTime(histogramDataPointFloat64A, histogramDataPointFloat64C, equalHistogramDataPoints[float64])) t.Run("DataPointInt64", testDatatypeIgnoreTime(dataPointInt64A, dataPointInt64C, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatypeIgnoreTime(dataPointFloat64A, dataPointFloat64C, equalDataPoints[float64])) - t.Run("Extrema", testDatatypeIgnoreTime(minA, minC, equalExtrema)) + t.Run("ExtremaInt64", testDatatypeIgnoreTime(minInt64A, minInt64C, equalExtrema[int64])) + t.Run("ExtremaFloat64", testDatatypeIgnoreTime(minFloat64A, minFloat64C, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatypeIgnoreTime(exemplarInt64A, exemplarInt64C, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatypeIgnoreTime(exemplarFloat64A, exemplarFloat64C, equalExemplars[float64])) } @@ -473,7 +478,7 @@ func TestAssertAggregationsEqual(t *testing.T) { } func TestAssertAttributes(t *testing.T) { - AssertHasAttributes(t, minA, attribute.Bool("A", true)) // No-op, always pass. + AssertHasAttributes(t, minFloat64A, attribute.Bool("A", true)) // No-op, always pass. 
AssertHasAttributes(t, exemplarInt64A, attribute.Bool("filter A", true)) AssertHasAttributes(t, exemplarFloat64A, attribute.Bool("filter A", true)) AssertHasAttributes(t, dataPointInt64A, attribute.Bool("A", true)) diff --git a/sdk/metric/metricdata/metricdatatest/comparisons.go b/sdk/metric/metricdata/metricdatatest/comparisons.go index 623dc3ae700..1187ac1986c 100644 --- a/sdk/metric/metricdata/metricdatatest/comparisons.go +++ b/sdk/metric/metricdata/metricdatatest/comparisons.go @@ -328,14 +328,14 @@ func equalSlices[T comparable](a, b []T) bool { return true } -func equalExtrema(a, b metricdata.Extrema, _ config) (reasons []string) { +func equalExtrema[N int64 | float64](a, b metricdata.Extrema[N], _ config) (reasons []string) { if !eqExtrema(a, b) { reasons = append(reasons, notEqualStr("Extrema", a, b)) } return reasons } -func eqExtrema(a, b metricdata.Extrema) bool { +func eqExtrema[N int64 | float64](a, b metricdata.Extrema[N]) bool { aV, aOk := a.Value() bV, bOk := b.Value()
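
For reference, a minimal sketch of how the generic API reads once this patch is applied, assuming the updated `go.opentelemetry.io/otel/sdk/metric/metricdata` and `go.opentelemetry.io/otel/attribute` packages shown in the diff above; the attribute set, timestamps, and measurement values (1, 2, 9) are invented for illustration and are not part of the patch:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	// Three hypothetical measurements (1, 2, 9) against bounds {0, 10}.
	// After this change Sum, Min, and Max share the data point's number
	// type N (int64 here) instead of always being float64.
	dp := metricdata.HistogramDataPoint[int64]{
		Attributes:   attribute.NewSet(attribute.String("user", "alice")),
		StartTime:    time.Now().Add(-time.Minute),
		Time:         time.Now(),
		Count:        3,
		Bounds:       []float64{0, 10},
		BucketCounts: []uint64{0, 3, 0}, // (-inf, 0], (0, 10], (10, +inf)
		Sum:          12,
		Min:          metricdata.NewExtrema[int64](1),
		Max:          metricdata.NewExtrema(int64(9)), // type argument may also be inferred
	}

	// Value still reports whether the optional extrema was recorded; the
	// zero value of Extrema[N] remains "undefined", as before the change.
	if min, ok := dp.Min.Value(); ok {
		fmt.Println("min:", min)
	}
	if _, ok := (metricdata.Extrema[int64]{}).Value(); !ok {
		fmt.Println("zero-value Extrema is undefined")
	}
}

Exporters that previously read these fields as float64 (see the otlpmetric and prometheus hunks above) now convert explicitly, e.g. float64(dp.Sum), which keeps the wire formats unchanged while letting int64 histograms avoid lossy storage in the SDK.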