/*
 * Copyright 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_AUDIO_UTILS_STATISTICS_H
#define ANDROID_AUDIO_UTILS_STATISTICS_H

#include <sys/cdefs.h>

#ifdef __cplusplus

#include "variadic_utils.h"

// variadic_utils already contains stl headers; in addition:
#include <deque> // for ReferenceStatistics implementation
#include <sstream>

namespace android {
namespace audio_utils {

/**
 * Compensated summation is used to accumulate a sequence of floating point
 * values, with "compensation" information to help preserve precision lost
 * due to catastrophic cancellation, e.g. (BIG) + (SMALL) - (BIG) = 0.
 *
 * We provide two forms of compensated summation:
 * the Kahan variant (which has better properties if the sum is generally
 * larger than the data added) and the Neumaier variant (which is better if
 * either the sum or the added value may be the larger of the two).
 *
 * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
 *
 * Alternative approaches include divide-and-conquer summation
 * which provides increased accuracy with log n stack depth (recursion).
 *
 * https://en.wikipedia.org/wiki/Pairwise_summation
 */

template <typename T>
struct KahanSum {
    T mSum{};
    T mCorrection{}; // negative low order bits of mSum.

    constexpr KahanSum<T>() = default;

    explicit constexpr KahanSum<T>(const T& value)
        : mSum{value}
    { }

    // takes T not KahanSum<T>
    friend constexpr KahanSum<T> operator+(KahanSum<T> lhs, const T& rhs) {
        const T y = rhs - lhs.mCorrection;
        const T t = lhs.mSum + y;

#ifdef __FAST_MATH__
#warning "fast math enabled, could optimize out KahanSum correction"
#endif

        lhs.mCorrection = (t - lhs.mSum) - y; // compiler, please do not optimize with /fp:fast
        lhs.mSum = t;
        return lhs;
    }

    constexpr KahanSum<T>& operator+=(const T& rhs) { // takes T not KahanSum<T>
        *this = *this + rhs;
        return *this;
    }

    constexpr operator T() const {
        return mSum;
    }

    constexpr void reset() {
        mSum = {};
        mCorrection = {};
    }
};
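
/*
 * Usage sketch (illustrative; not part of the original header): KahanSum
 * preserves small addends that a plain accumulator of the same width would
 * round away once the running sum is large.
 *
 *   KahanSum<float> ksum{1e8f};
 *   for (int i = 0; i < 1000; ++i) {
 *       ksum += 1.f;           // a naive float sum would stay at 1e8f
 *   }
 *   const float total = ksum;  // operator T() returns mSum, close to 1.00001e8f
 */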

// A more robust version of Kahan summation for input greater than sum.
// TODO: investigate variants that reincorporate mCorrection bits into mSum if possible.
template <typename T>
struct NeumaierSum {
    T mSum{};
    T mCorrection{}; // low order bits of mSum.

    constexpr NeumaierSum<T>() = default;

    explicit constexpr NeumaierSum<T>(const T& value)
        : mSum{value}
    { }

    friend constexpr NeumaierSum<T> operator+(NeumaierSum<T> lhs, const T& rhs) {
        const T t = lhs.mSum + rhs;

        if (const_abs(lhs.mSum) >= const_abs(rhs)) {
            lhs.mCorrection += (lhs.mSum - t) + rhs;
        } else {
            lhs.mCorrection += (rhs - t) + lhs.mSum;
        }
        lhs.mSum = t;
        return lhs;
    }

    constexpr NeumaierSum<T>& operator+=(const T& rhs) { // takes T not NeumaierSum<T>
        *this = *this + rhs;
        return *this;
    }

    static constexpr T const_abs(T x) {
        return x < T{} ? -x : x;
    }

    constexpr operator T() const {
        return mSum + mCorrection;
    }

    constexpr void reset() {
        mSum = {};
        mCorrection = {};
    }
};
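
/*
 * Illustrative sketch (not from the original header): Neumaier retains the
 * correction even when an individual addend exceeds the running sum, a case
 * where the plain Kahan form above can lose the small term.
 *
 *   NeumaierSum<float> nsum;
 *   nsum += 1e8f;
 *   nsum += 1.f;
 *   nsum += -1e8f;
 *   const float total = nsum;  // 1.f, since operator T() returns mSum + mCorrection
 */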

//-------------------------------------------------------------------
// Constants and limits

template <typename T, typename T2=void>  struct StatisticsConstants;

template <typename T>
struct StatisticsConstants<T, std::enable_if_t<std::is_arithmetic<T>::value>> {
    // value closest to negative infinity for type T
    static constexpr T negativeInfinity() {
        return std::numeric_limits<T>::has_infinity ?
                -std::numeric_limits<T>::infinity() : std::numeric_limits<T>::min();
    }

    static constexpr T mNegativeInfinity = negativeInfinity();

    // value closest to positive infinity for type T
    static constexpr T positiveInfinity() {
        return std::numeric_limits<T>::has_infinity ?
                std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
    }

    static constexpr T mPositiveInfinity = positiveInfinity();
};

// specialize for tuple and pair
template <typename T>
struct StatisticsConstants<T, std::enable_if_t<!std::is_arithmetic<T>::value>> {
private:
    template <std::size_t... I >
    static constexpr auto negativeInfinity(std::index_sequence<I...>) {
       return T{StatisticsConstants<
               typename std::tuple_element<I, T>::type>::mNegativeInfinity...};
    }
    template <std::size_t... I >
    static constexpr auto positiveInfinity(std::index_sequence<I...>) {
       return T{StatisticsConstants<
               typename std::tuple_element<I, T>::type>::mPositiveInfinity...};
    }
public:
    static constexpr auto negativeInfinity() {
       return negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    }
    static constexpr auto mNegativeInfinity =
        negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    static constexpr auto positiveInfinity() {
       return positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    }
    static constexpr auto mPositiveInfinity =
        positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
};
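
/*
 * Assumed usage, for illustration (not from the original header):
 *
 *   constexpr float lo = StatisticsConstants<float>::mNegativeInfinity; // -inf
 *   constexpr int hi = StatisticsConstants<int>::mPositiveInfinity;     // INT_MAX
 *
 *   // Tuple-like types get their limits element by element:
 *   constexpr auto pairLimit =
 *           StatisticsConstants<std::pair<float, int>>::mPositiveInfinity;
 *   // pairLimit == std::pair<float, int>{+inf, INT_MAX}
 */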

/**
 * Statistics provides a running weighted average, variance, and standard deviation of
 * a sample stream. It is more numerically stable for floating point computation than a
 * naive sum of values, sum of values squared.
 *
 * The weighting is like an IIR filter, with the most recent sample weighted as 1, and decaying
 * by alpha (between 0 and 1).  With alpha == 1. this is rectangular weighting, reducing to
 * Welford's algorithm.
 *
 * The IIR filter weighting emphasizes more recent samples, has low overhead updates,
 * constant storage, and constant computation (per update or variance read).
 *
 * This is a variant of the weighted mean and variance algorithms described here:
 * https://en.wikipedia.org/wiki/Moving_average
 * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
 * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
 *
 * weight = sum_{i=1}^n \alpha^{n-i}
 * mean = 1/weight * sum_{i=1}^n \alpha^{n-i} x_i
 * var = 1/weight * sum_{i=1}^n \alpha^{n-i} (x_i - mean)^2
 *
 * The Statistics class is safe to call from a SCHED_FIFO thread with the exception of
 * the toString() method, which uses std::stringstream to format data for printing.
 *
 * Long term data accumulation and constant alpha:
 * If the alpha weight is 1 (or not specified) then statistics objects with float
 * summation types (D, S) should NOT add more than the mantissa-bits elements
 * without reset to prevent variance increases due to weight precision underflow.
 * This is 1 << 23 elements for float and 1 << 52 elements for double.
 *
 * Setting alpha less than 1 avoids this underflow problem.
 * Alpha < 1 - (epsilon * 32), where epsilon is std::numeric_limits<D>::epsilon(),
 * is recommended for continuously running statistics (alpha <= 0.999996
 * for float summation precision).
 *
 * Alpha may also change on-the-fly, based on the reliability of
 * new information.  In that case, alpha may be set temporarily greater
 * than 1.
 *
 * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
 *
 * Statistics may also be collected on a variadic "vector" object instead of
 * scalars, where the variance may be computed as an inner product radial squared
 * distance from the mean; or as an outer product where the variance returned
 * is a covariance matrix.
 *
 * TODO:
 * 1) Alternative versions of Kahan/Neumaier sum that better preserve precision.
 * 2) Add binary math ops to corrected sum classes for better precision in lieu of long double.
 * 3) Add Cholesky decomposition to ensure positive definite covariance matrices if
 *    the input is a variadic object.
 */

/**
 * Mean may have catastrophic cancellation of positive and negative sample values,
 * so we use Kahan summation in the algorithms below (or substitute "D" if not needed).
 *
 * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
 */

template <
    typename T,               // input data type
    typename D = double,      // output mean data type
    typename S = KahanSum<D>, // compensated mean summation type, if any
    typename A = double,      // weight type
    typename D2 = double,     // output variance "D^2" type
    typename PRODUCT = std::multiplies<D> // how the output variance is computed
    >
class Statistics {
public:
    /** alpha is the weight (if alpha == 1. we use a rectangular window) */
    explicit constexpr Statistics(A alpha = A(1.))
        : mAlpha(alpha)
    { }

    template <size_t N>
    explicit constexpr Statistics(const T (&a)[N], A alpha = A(1.))
        : mAlpha(alpha)
    {
        for (const auto &data : a) {
            add(data);
        }
    }

    constexpr void setAlpha(A alpha) {
        mAlpha = alpha;
    }

    constexpr void add(const T &value) {
        // Note: fastest implementation uses fmin fminf but would not be constexpr

        mMax = audio_utils::max(mMax, value); // order important: reject NaN
        mMin = audio_utils::min(mMin, value); // order important: reject NaN
        ++mN;
        const D delta = value - mMean;
        /* if (mAlpha == 1.) we have Welford's algorithm
            ++mN;
            mMean += delta / mN;
            mM2 += delta * (value - mMean);

            Note delta * (value - mMean) should be non-negative.
        */
        mWeight = A(1.) + mAlpha * mWeight;
        mWeight2 = A(1.) + mAlpha * mAlpha * mWeight2;
        D meanDelta = delta / mWeight;
        mMean += meanDelta;
        mM2 = mAlpha * mM2 + PRODUCT()(delta, (value - mMean));

        /*
           Alternate variant related to:
           http://mathworld.wolfram.com/SampleVarianceComputation.html

           const double sweight = mAlpha * mWeight;
           mWeight = 1. + sweight;
           const double dmean = delta / mWeight;
           mMean += dmean;
           mM2 = mAlpha * mM2 + mWeight * sweight * dmean * dmean;

           The update is slightly different than Welford's algorithm
           showing a by-construction non-negative update to M2.
        */
    }

    constexpr int64_t getN() const {
        return mN;
    }

    constexpr void reset() {
        mMin = StatisticsConstants<T>::positiveInfinity();
        mMax = StatisticsConstants<T>::negativeInfinity();
        mN = 0;
        mWeight = {};
        mWeight2 = {};
        mMean = {};
        mM2 = {};
    }

    constexpr A getWeight() const {
        return mWeight;
    }

    constexpr D getMean() const {
        return mMean;
    }

    constexpr D2 getVariance() const {
        if (mN < 2) {
            // must have 2 samples for sample variance.
            return {};
        } else {
            return mM2 / getSampleWeight();
        }
    }

    constexpr D2 getPopVariance() const {
        if (mN < 1) {
            return {};
        } else {
            return mM2 / mWeight;
        }
    }

    // explicitly use sqrt_constexpr if you need a constexpr version
    D2 getStdDev() const {
        return android::audio_utils::sqrt(getVariance());
    }

    D2 getPopStdDev() const {
        return android::audio_utils::sqrt(getPopVariance());
    }

    constexpr T getMin() const {
        return mMin;
    }

    constexpr T getMax() const {
        return mMax;
    }

    std::string toString() const {
        const int64_t N = getN();
        if (N == 0) return "unavail";

        std::stringstream ss;
        ss << "ave=" << getMean();
        if (N > 1) {
            // we use the sample standard deviation (not entirely unbiased,
            // though the sample variance is unbiased).
            ss << " std=" << getStdDev();
        }
        ss << " min=" << getMin();
        ss << " max=" << getMax();
        return ss.str();
    }

private:
    A mAlpha;
    T mMin{StatisticsConstants<T>::positiveInfinity()};
    T mMax{StatisticsConstants<T>::negativeInfinity()};

    int64_t mN = 0;  // running count of samples.
    A mWeight{};     // sum of weights.
    A mWeight2{};    // sum of weights squared.
    S mMean{};       // running mean.
    D2 mM2{};        // running unnormalized variance.

    // Reliability correction for unbiasing variance, since mean is estimated
    // from same sample stream as variance.
    // if mAlpha == 1 this is mWeight - 1;
    //
    // TODO: consider exposing the correction factor.
    constexpr A getSampleWeight() const {
        // if mAlpha is constant then the mWeight2 member variable is not
        // needed, one can use instead:
        // return (mWeight - D(1.)) * D(2.) / (D(1.) + mAlpha);

        return mWeight - mWeight2 / mWeight;
    }
};
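
/*
 * Usage sketch (illustrative; values are made up, not from the original header):
 *
 *   Statistics<double> stats(0.999); // IIR weighting; the newest sample has weight 1
 *   for (double x : {1., 2., 3., 4.}) {
 *       stats.add(x);
 *   }
 *   const double mean = stats.getMean();
 *   const double sd = stats.getStdDev();     // reliability-corrected sample std dev
 *   const std::string s = stats.toString();  // not safe on a SCHED_FIFO thread
 */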

/**
 * ReferenceStatistics is a naive implementation of the weighted running variance,
 * which consumes more space and is slower than Statistics.  It is provided for
 * comparison and testing purposes.  Do not call from a SCHED_FIFO thread!
 *
 * Note: Common code not combined for implementation clarity.
 *       We don't invoke Kahan summation or other tricks.
 */
template <
    typename T, // input data type
    typename D = double // output mean/variance data type
    >
class ReferenceStatistics {
public:
    /** alpha is the weight (alpha == 1. is rectangular window) */
    explicit ReferenceStatistics(D alpha = D(1.))
        : mAlpha(alpha)
    { }

    constexpr void setAlpha(D alpha) {
        mAlpha = alpha;
    }

    // For independent testing, this intentionally has slightly different behavior
    // for min and max than Statistics with respect to NaN.
    constexpr void add(const T &value) {
        if (getN() == 0) {
            mMax = value;
            mMin = value;
        } else if (value > mMax) {
            mMax = value;
        } else if (value < mMin) {
            mMin = value;
        }

        mData.push_front(value);
        mAlphaList.push_front(mAlpha);
    }

    int64_t getN() const {
        return mData.size();
    }

    void reset() {
        mMin = {};
        mMax = {};
        mData.clear();
        mAlphaList.clear();
    }

    D getWeight() const {
        D weight{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            weight += alpha_i;
            alpha_i *= mAlphaList[i];
        }
        return weight;
    }

    D getWeight2() const {
        D weight2{};
        D alpha2_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            weight2 += alpha2_i;
            alpha2_i *= mAlphaList[i] * mAlphaList[i];
        }
        return weight2;
    }

    D getMean() const {
        D wsum{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            wsum += alpha_i * mData[i];
            alpha_i *= mAlphaList[i];
        }
        return wsum / getWeight();
    }

    // Should always return a positive value.
    D getVariance() const {
        return getUnweightedVariance() / (getWeight() - getWeight2() / getWeight());
    }

    // Should always return a positive value.
    D getPopVariance() const {
        return getUnweightedVariance() / getWeight();
    }

    D getStdDev() const {
        return sqrt(getVariance());
    }

    D getPopStdDev() const {
        return sqrt(getPopVariance());
    }

    T getMin() const {
        return mMin;
    }

    T getMax() const {
        return mMax;
    }

    std::string toString() const {
        const auto N = getN();
        if (N == 0) return "unavail";

        std::stringstream ss;
        ss << "ave=" << getMean();
        if (N > 1) {
            // we use the sample standard deviation (not entirely unbiased,
            // though the sample variance is unbiased).
            ss << " std=" << getStdDev();
        }
        ss << " min=" << getMin();
        ss << " max=" << getMax();
        return ss.str();
    }

private:
    T mMin{};
    T mMax{};

    D mAlpha;                 // current alpha value
    std::deque<T> mData;      // store all the data for exact summation, mData[0] most recent.
    std::deque<D> mAlphaList; // alpha value for the data added.

    D getUnweightedVariance() const {
        const D mean = getMean();
        D wsum{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            const D diff = mData[i] - mean;
            wsum += alpha_i * diff * diff;
            alpha_i *= mAlphaList[i];
        }
        return wsum;
    }
};

/**
 * Least squares fitting of a 2D straight line based on the covariance matrix.
 *
 * See formula from:
 * http://mathworld.wolfram.com/LeastSquaresFitting.html
 *
 * y = a + b*x
 *
 * returns a: y intercept
 *         b: slope
 *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
 *
 * For better numerical stability, it is suggested to use only the slope b,
 * since the least squares fit line passes through the mean:
 *
 * (y - mean_y) = b * (x - mean_x).
 */
template <typename T>
constexpr void computeYLineFromStatistics(
        T &a, T &b, T &r2,
        const T& mean_x,
        const T& mean_y,
        const T& var_x,
        const T& cov_xy,
        const T& var_y) {

    // Dimensionally r2 is unitless.  If there is no correlation
    // then r2 is clearly 0 as cov_xy == 0.  If x and y are identical up to a scale
    // and shift, then r2 is 1.
    r2 = cov_xy * cov_xy / (var_x * var_y);

    // The least squares solution to the overconstrained matrix equation requires
    // the pseudo-inverse. In 2D, the best-fit slope is the mean removed
    // (via covariance and variance) dy/dx derived from the joint expectation
    // (this is also dimensionally correct).
    b = cov_xy / var_x;

    // The best fit line goes through the mean, and can be used to find the y intercept.
    a = mean_y - b * mean_x;
}
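
/*
 * Worked example with assumed values (not from the original header):
 * with mean_x = 2, mean_y = 5, var_x = 4, cov_xy = 8, var_y = 17,
 *
 *   b  = cov_xy / var_x             = 2
 *   a  = mean_y - b * mean_x        = 1            // i.e. y = 1 + 2*x
 *   r2 = cov_xy^2 / (var_x * var_y) = 64 / 68 ~ 0.94
 */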

/**
 * LinearLeastSquaresFit<> class is derived from the Statistics<> class, with a 2 element array.
 * Arrays are preferred over tuples or pairs because copy assignment is constexpr and
 * arrays are trivially copyable.
 */
template <typename T>
class LinearLeastSquaresFit : public
    Statistics<std::array<T, 2>, // input
               std::array<T, 2>, // mean data output
               std::array<T, 2>, // compensated mean sum
               T,                // weight type
               std::array<T, 3>, // covariance_ut
               audio_utils::outerProduct_UT_array<std::array<T, 2>>>
{
public:
    constexpr explicit LinearLeastSquaresFit(const T &alpha = T(1.))
        : Statistics<std::array<T, 2>,
             std::array<T, 2>,
             std::array<T, 2>,
             T,
             std::array<T, 3>, // covariance_ut
             audio_utils::outerProduct_UT_array<std::array<T, 2>>>(alpha) { }

    /* Note: base class method: add(value)

    constexpr void add(const std::array<T, 2>& value);

       use:
          add({1., 2.});
       or
          add(to_array(myTuple));
    */

    /**
     * y = a + b*x
     *
     * returns a: y intercept
     *         b: y slope (dy / dx)
     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
     */
    constexpr void computeYLine(T &a, T &b, T &r2) const {
        computeYLineFromStatistics(a, b, r2,
                std::get<0>(this->getMean()), /* mean_x */
                std::get<1>(this->getMean()), /* mean_y */
                std::get<0>(this->getPopVariance()), /* var_x */
                std::get<1>(this->getPopVariance()), /* cov_xy */
                std::get<2>(this->getPopVariance())); /* var_y */
    }

    /**
     * x = a + b*y
     *
     * returns a: x intercept
     *         b: x slope (dx / dy)
     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
     */
    constexpr void computeXLine(T &a, T &b, T &r2) const {
        // reverse x and y for X line computation
        computeYLineFromStatistics(a, b, r2,
                std::get<1>(this->getMean()), /* mean_x */
                std::get<0>(this->getMean()), /* mean_y */
                std::get<2>(this->getPopVariance()), /* var_x */
                std::get<1>(this->getPopVariance()), /* cov_xy */
                std::get<0>(this->getPopVariance())); /* var_y */
    }

    /**
     * this returns the estimate of y from a given x
     */
    constexpr T getYFromX(const T &x) const {
        const T var_x = std::get<0>(this->getPopVariance());
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T b = cov_xy / var_x;  // dy / dx

        const T mean_x = std::get<0>(this->getMean());
        const T mean_y = std::get<1>(this->getMean());
        return /* y = */ b * (x - mean_x) + mean_y;
    }

    /**
     * this returns the estimate of x from a given y
     */
    constexpr T getXFromY(const T &y) const {
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T var_y = std::get<2>(this->getPopVariance());
        const T b = cov_xy / var_y;  // dx / dy

        const T mean_x = std::get<0>(this->getMean());
        const T mean_y = std::get<1>(this->getMean());
        return /* x = */ b * (y - mean_y) + mean_x;
    }

    constexpr T getR2() const {
        const T var_x = std::get<0>(this->getPopVariance());
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T var_y = std::get<2>(this->getPopVariance());
        return cov_xy * cov_xy / (var_x * var_y);
    }
};
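
/*
 * Usage sketch with made-up sample points (not from the original header):
 *
 *   LinearLeastSquaresFit<double> fit;
 *   fit.add({0., 1.});            // (x, y) samples taken from the line y = 1 + 2*x
 *   fit.add({1., 3.});
 *   fit.add({2., 5.});
 *   double a{}, b{}, r2{};
 *   fit.computeYLine(a, b, r2);   // a ~ 1, b ~ 2, r2 ~ 1 for this exact-fit data
 *   const double y = fit.getYFromX(3.); // ~ 7
 */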

/**
 * constexpr statistics functions of form:
 * algorithm(forward_iterator begin, forward_iterator end)
 *
 * These check that the input looks like an iterator, but don't
 * check if __is_forward_iterator<>.
 *
 * divide-and-conquer pairwise summation forms will require
 * __is_random_access_iterator<>.
 */

// returns the max of the elements, or negative infinity if there are no elements.
template <typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto max(T begin, T end) {
    using S = std::remove_cv_t<std::remove_reference_t<
            decltype(*begin)>>;
    S maxValue = StatisticsConstants<S>::mNegativeInfinity;
    for (auto it = begin; it != end; ++it) {
        maxValue = std::max(maxValue, *it);
    }
    return maxValue;
}

// returns the min of the elements, or positive infinity if there are no elements.
template <typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto min(T begin, T end) {
    using S = std::remove_cv_t<std::remove_reference_t<
            decltype(*begin)>>;
    S minValue = StatisticsConstants<S>::mPositiveInfinity;
    for (auto it = begin; it != end; ++it) {
        minValue = std::min(minValue, *it);
    }
    return minValue;
}

template <typename D = double, typename S = KahanSum<D>, typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto sum(T begin, T end) {
    S sum{};
    for (auto it = begin; it != end; ++it) {
        sum += D(*it);
    }
    return sum;
}

template <typename D = double, typename S = KahanSum<D>, typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto sumSqDiff(T begin, T end, D x = {}) {
    S sum{};
    for (auto it = begin; it != end; ++it) {
        const D diff = *it - x;
        sum += diff * diff;
    }
    return sum;
}

// Form: algorithm(array[]), where array size is known to the compiler.
template <typename T, size_t N>
constexpr T max(const T (&a)[N]) {
    return max(&a[0], &a[N]);
}

template <typename T, size_t N>
constexpr T min(const T (&a)[N]) {
    return min(&a[0], &a[N]);
}

template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
constexpr D sum(const T (&a)[N]) {
    return sum<D, S>(&a[0], &a[N]);
}

template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
constexpr D sumSqDiff(const T (&a)[N], D x = {}) {
    return sumSqDiff<D, S>(&a[0], &a[N], x);
}
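
/*
 * Usage sketch (illustrative; not part of the original header):
 *
 *   constexpr float data[] = {1.f, 2.f, 3.f, 4.f};
 *   constexpr float lo = min(data);             // 1.f
 *   constexpr float hi = max(data);             // 4.f
 *   constexpr double total = sum(data);         // 10., accumulated via KahanSum<double>
 *   constexpr double sq = sumSqDiff(data, 2.5); // 5., squared deviations from 2.5
 */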

// TODO: remove when std::isnan is constexpr
template <typename T>
constexpr T isnan(T x) {
    return __builtin_isnan(x);
}

// constexpr sqrt computed by the Babylonian (Newton's) method.
// Please use math libraries for non-constexpr cases.
// TODO: remove when there is some std::sqrt which is constexpr.
//
// https://en.wikipedia.org/wiki/Methods_of_computing_square_roots

// Watch out when using the unchecked version; prefer the checked version below.
template <typename T>
constexpr T sqrt_constexpr_unchecked(T x, T prev) {
    static_assert(std::is_floating_point<T>::value, "must be floating point type");
    const T next = T(0.5) * (prev + x / prev);
    return next == prev ? next : sqrt_constexpr_unchecked(x, next);
}

// checked sqrt
template <typename T>
constexpr T sqrt_constexpr(T x) {
    static_assert(std::is_floating_point<T>::value, "must be floating point type");
    if (x < T{}) { // negative values return nan
        return std::numeric_limits<T>::quiet_NaN();
    } else if (isnan(x)
            || x == std::numeric_limits<T>::infinity()
            || x == T{}) {
        return x;
    } else { // good to go.
        return sqrt_constexpr_unchecked(x, T(1.));
    }
}
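
/*
 * Usage sketch (illustrative; not part of the original header):
 *
 *   constexpr double root2 = sqrt_constexpr(2.); // ~1.4142135623730951 at compile time
 *   // sqrt_constexpr(0.) == 0., sqrt_constexpr(-1.) is NaN, infinities pass through.
 */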

} // namespace audio_utils
} // namespace android

#endif // __cplusplus

/** \cond */
__BEGIN_DECLS
/** \endcond */

/** Simple stats structure for low overhead statistics gathering.
 * Designed to be accessed by C (with no functional getters).
 * Zero initialize {} to clear or reset.
 */
typedef struct {
    int64_t n;
    double min;
    double max;
    double last;
    double mean;
} simple_stats_t;

/** logs new value to the simple_stats_t */
static inline void simple_stats_log(simple_stats_t *stats, double value) {
    if (++stats->n == 1) {
        stats->min = stats->max = stats->last = stats->mean = value;
    } else {
        stats->last = value;
        if (value < stats->min) {
            stats->min = value;
        } else if (value > stats->max) {
            stats->max = value;
        }
        // Welford's algorithm for mean
        const double delta = value - stats->mean;
        stats->mean += delta / stats->n;
    }
}

/** dumps statistics to a string, returns the length of string excluding null termination. */
static inline size_t simple_stats_to_string(simple_stats_t *stats, char *buffer, size_t size) {
    if (size == 0) {
        return 0;
    } else if (stats->n == 0) {
        return snprintf(buffer, size, "none");
    } else {
        return snprintf(buffer, size, "(mean: %lf  min: %lf  max: %lf  last: %lf  n: %lld)",
                stats->mean, stats->min, stats->max, stats->last, (long long)stats->n);
    }
}
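
/*
 * Usage sketch (illustrative; not part of the original header):
 *
 *   simple_stats_t stats = {0};    // zero initialize to clear or reset
 *   simple_stats_log(&stats, 3.0);
 *   simple_stats_log(&stats, 5.0); // stats.n == 2, stats.mean == 4.0, stats.last == 5.0
 *
 *   char buffer[256];
 *   simple_stats_to_string(&stats, buffer, sizeof(buffer));
 */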

/** \cond */
__END_DECLS
/** \endcond */

#endif // !ANDROID_AUDIO_UTILS_STATISTICS_H