├── .gitignore
├── License
├── ReadMe.md
├── pom.xml
└── src
├── main
└── java
│ └── com
│ └── github
│ └── ruananswer
│ ├── anomaly
│ └── DetectAnoms.java
│ ├── statistics
│ ├── OnlineNormalStatistics.java
│ └── QuickMedians.java
│ └── stl
│ ├── STLDecomposition.java
│ ├── STLResult.java
│ └── STLUtility.java
└── test
├── java
└── com
│ └── github
│ └── ruananswer
│ ├── anomaly
│ └── AnomalyDetectionTest.java
│ ├── statistics
│ └── QuickMedianTest.java
│ ├── stl
│ ├── PlotTest.java
│ ├── STLTest.java
│ ├── STLUtilityTest.java
│ └── StlPlotter.java
│ └── testUtility
│ ├── StlFilenameFilter.java
│ └── TestCommon.java
└── resources
├── bumpInDoublePick anoms.csv
├── bumpInDoublePick stl.csv
├── bumpToEarly anoms.csv
├── bumpToEarly stl.csv
├── exponentialGrow anoms.csv
├── exponentialGrow stl.csv
├── flat anoms.csv
├── flat stl.csv
├── floor anoms.csv
├── floor stl.csv
├── gc anoms.csv
├── gc stl.csv
├── growSuddenly anoms.csv
├── growSuddenly stl.csv
├── justGrow anoms.csv
├── justGrow stl.csv
├── justGrowWithError anoms.csv
├── justGrowWithError stl.csv
├── linearGrow anoms.csv
├── linearGrow stl.csv
├── linearGrowWithError anoms.csv
├── linearGrowWithError stl.csv
├── moreNoise anoms.csv
├── moreNoise stl.csv
├── plateau anoms.csv
├── plateau stl.csv
├── removeNoise anoms.csv
├── removeNoise stl.csv
├── sample-timeseries.json
├── smallChange anoms.csv
├── smallChange stl.csv
├── smallChangeOnStrictModel anoms.csv
├── smallChangeOnStrictModel stl.csv
├── speark anoms.csv
├── speark stl.csv
├── stopSuddenly anoms.csv
├── stopSuddenly stl.csv
├── testLoess.csv
├── thoughput anoms.csv
├── thoughput stl.csv
├── timeSeries anoms.csv
├── timeSeries stl.csv
├── tmp anoms.csv
├── tmp stl.csv
├── twitter anoms.csv
└── twitter stl.csv
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | target/
3 | *.iml
4 | .idea/
5 |
--------------------------------------------------------------------------------
/License:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/ReadMe.md:
--------------------------------------------------------------------------------
1 | Twitter-anomalyDetection-java
2 | =============================
3 |
4 | A Java implementation of [twitter anomaly detection](https://github.com/twitter/AnomalyDetection).
5 | But optimized the time complexity from o(n^2) to o(nlogn).
6 |
7 | References
8 | =============================
9 | [1. Vallis, O., Hochenbaum, J. and Kejariwal, A., (2014) “A Novel Technique for Long-Term Anomaly Detection in the Cloud”,
10 | 6th USENIX Workshop on Hot Topics in Cloud Computing, Philadelphia, PA.]
11 | (https://www.usenix.org/system/files/conference/hotcloud14/hotcloud14-vallis.pdf)
12 | [2. Rosner, B., (May 1983), “Percentage Points for a Generalized ESD Many-Outlier Procedure”, Technometrics, 25(2), pp. 165-172.]
13 | [3. STL: A Seasonal-Trend Decomposition Procedure Based on Loess](http://www.wessa.net/download/stl.pdf)
14 |
15 | More
16 | ============================
17 | - STL-java reference (https://github.com/brandtg/stl-java), but we implements the stl in java as [stlplus](https://github.com/hafen/stlplus) described, faster and can handle NA values (some data used stl-java will throw some exception).
18 | - Twitter-anomalyDetection (https://github.com/twitter/AnomalyDetection), we optimize the algorithm from o(n^2) to o(nlogn)
19 | - STL Test and Anomaly Detection Test is from (https://anomaly.io/anomaly-detection-twitter-r/):
20 | - use R stl decompose as stl test benchmark.
21 | - use R twitter anomaly detection as anomaly detection test benchmark
22 | - this lib performs as well as twitter. And it can even find the anomalies those could not be detected by Twitter-anomalyDetection!
23 |
24 | For more information please read code, the ReadMe will update later!
25 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | com.github.ruananswer.stl
8 | stl-java
9 | 1.0-SNAPSHOT
10 |
11 |
12 |
13 |
14 |
15 | org.apache.maven.plugins
16 | maven-compiler-plugin
17 |
18 | 1.8
19 | 1.8
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 | org.apache.commons
31 | commons-math3
32 | 3.6
33 |
34 |
35 | com.fasterxml.jackson.core
36 | jackson-core
37 | 2.5.1
38 | test
39 |
40 |
41 | com.fasterxml.jackson.core
42 | jackson-databind
43 | 2.5.1
44 | test
45 |
46 |
47 | org.testng
48 | testng
49 | 6.8.7
50 | test
51 |
52 |
53 | org.jfree
54 | jfreechart
55 | 1.0.19
56 | test
57 |
58 |
59 | org.apache.commons
60 | commons-io
61 | 1.3.2
62 | test
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/src/main/java/com/github/ruananswer/anomaly/DetectAnoms.java:
--------------------------------------------------------------------------------
1 | package com.github.ruananswer.anomaly;
2 |
3 |
4 | import com.github.ruananswer.statistics.OnlineNormalStatistics;
5 | import com.github.ruananswer.statistics.QuickMedians;
6 | import com.github.ruananswer.stl.STLDecomposition;
7 | import com.github.ruananswer.stl.STLResult;
8 | import org.apache.commons.math3.distribution.TDistribution;
9 |
10 | import java.util.ArrayList;
11 | import java.util.Comparator;
12 |
13 | /**
14 | * Implementation of the underlying algorithm – referred to as Seasonal Hybrid ESD (S-H-ESD) builds upon the Generalized ESD test for detecting anomalies.
15 | * Created by on 16-4-6.
16 | */
17 | public class DetectAnoms {
18 | private final Config config;
19 |
20 | public DetectAnoms(Config config) {
21 | this.config = config;
22 | }
23 |
24 | /**
25 | * The main parameter of function anomalyDetection
26 | */
27 | public static class Config {
28 | /** Maximum number of anomalies that S-H-ESD will detect as a percentage of the data. */
29 | private double maxAnoms = 0.49;
30 | /** Defines the number of observations in a single period, and used during seasonal decomposition. */
31 | private int numObsPerPeriod = 1440;
32 | /** noms_threshold use the threshold to filter the anoms. such as if anoms_threshold = 1.05,
33 | #' then we will filter the anoms that exceed the exceptional critical value 100%-105% */
34 | private double anomsThreshold = 1.05;
35 | /** The level of statistical significance with which to accept or reject anomalies. */
36 | private double alpha = 0.05;
37 |
38 | public Config() {}
39 |
40 | public double getMaxAnoms() {
41 | return maxAnoms;
42 | }
43 |
44 | public int getNumObsPerPeriod() {
45 | return numObsPerPeriod;
46 | }
47 |
48 | public double getAnomsThreshold() {
49 | return anomsThreshold;
50 | }
51 |
52 | public double getAlpha() {
53 | return alpha;
54 | }
55 |
56 | public void setMaxAnoms(double maxAnoms) {
57 | this.maxAnoms = maxAnoms;
58 | }
59 |
60 | public void setNumObsPerPeriod(int numObsPerPeriod) {
61 | this.numObsPerPeriod = numObsPerPeriod;
62 | }
63 |
64 | public void setAnomsThreshold(double anomsThreshold) {
65 | this.anomsThreshold = anomsThreshold;
66 | }
67 |
68 | public void setAlpha(double alpha) {
69 | this.alpha = alpha;
70 | }
71 | }
72 |
73 | /**
74 | * The detected anomalies in a time series using S-H-ESD
75 | * A list containing the anomalies (anoms) and decomposition components (stl).
76 | */
77 | public class ANOMSResult {
78 | private final long[] anomsIndex;
79 | private final double[] anomsScore;
80 | private final double[] dataDecomp;
81 |
82 | private ANOMSResult(long[] anomsIdx, double[] anomsSc, double[] dataDe) {
83 | this.anomsIndex = anomsIdx;
84 | this.anomsScore = anomsSc;
85 | this.dataDecomp = dataDe;
86 | }
87 |
88 | public long[] getAnomsIndex() {
89 | return anomsIndex;
90 | }
91 |
92 | public double[] getAnomsScore() {
93 | return anomsScore;
94 | }
95 |
96 | public double[] getDataDecomp() {
97 | return dataDecomp;
98 | }
99 | }
100 |
101 | /** Args:
102 | #' series: Time series to perform anomaly detection on.
103 | #' max_Anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the data.
104 | #' alpha: The level of statistical significance with which to accept or reject anomalies.
105 | #' num_obs_per_period: Defines the number of observations in a single period, and used during seasonal decomposition.
106 | #' Returns:
107 | #' A list containing the anomalies (anoms) and decomposition components (stl).
108 | */
109 | private ANOMSResult detectAnoms(long[] timestamps, double[] series) {
110 | if (series == null || series.length < 1) {
111 | throw new IllegalArgumentException("must supply period length for time series decomposition");
112 | }
113 | int numberOfObservations = series.length;
114 | /**
115 | * use StlDec function
116 | * first interpolation to solve problem data to much
117 | */
118 | // -- Step 1: Decompose data. This returns a univarite remainder which will be used for anomaly detection. Optionally, we might NOT decompose.
119 | STLResult data = removeSeasonality(timestamps, series, config.getNumObsPerPeriod());
120 | double[] data_trend = data.getTrend();
121 | double[] data_seasonal = data.getSeasonal();
122 |
123 | // Mean mean = new Mean();
124 | // Variance variance = new Variance();
125 | // Median median = new Median();
126 |
127 | // Remove the seasonal component, and the median of the data to create the univariate remainder
128 | double[] dataForSHESD = new double[numberOfObservations];
129 | double[] dataDecomp = new double[numberOfObservations];
130 |
131 | QuickMedians quickMedian = new QuickMedians(series);
132 | double medianOfSeries = quickMedian.getMedian();//median.evaluate(series);
133 |
134 | // if the data of has no seasonality, directed use the raw_data into function S-H-ESD !!!
135 | for (int i = 0; i < numberOfObservations; ++i) {
136 | dataForSHESD[i] = series[i] - data_seasonal[i] - medianOfSeries;
137 | dataDecomp[i] = data_trend[i] + data_seasonal[i];
138 | }
139 | // Maximum number of outliers that S-H-ESD can detect (e.g. 49% of data)
140 | int maxOutliers = (int)Math.round(numberOfObservations * config.getMaxAnoms());
141 | if (maxOutliers == 0)
142 | throw new IllegalArgumentException("You have " + numberOfObservations + " observations in a period, which is too few. Set a higher value");
143 |
144 | long[] anomsIdx = new long[maxOutliers];
145 | double[] anomsSc = new double[maxOutliers];
146 | int numAnoms = 0;
147 |
148 | OnlineNormalStatistics stat = new OnlineNormalStatistics(dataForSHESD);
149 | QuickMedians quickMedian1 = new QuickMedians(dataForSHESD);
150 | double dataMean = stat.getMean();//mean.evaluate(dataForSHESD);
151 | double dataMedian = quickMedian1.getMedian();//median.evaluate(dataForSHESD);
152 | // use mad replace the variance
153 | // double dataStd = Math.sqrt(stat.getPopulationVariance());//Math.sqrt(variance.evaluate(dataForSHESD));
154 | double[] tempDataForMad = new double[numberOfObservations];
155 | for (int i = 0; i < numberOfObservations; ++i) {
156 | tempDataForMad[i] = Math.abs(dataForSHESD[i] - dataMedian);
157 | }
158 | QuickMedians quickMedian2 = new QuickMedians(tempDataForMad);
159 | double dataStd = quickMedian2.getMedian();
160 |
161 | if (Math.abs(dataStd) <= 1e-10) {
162 | //return null;
163 | throw new IllegalArgumentException("The variance of the series data is zero");
164 | }
165 |
166 | double[] ares = new double[numberOfObservations];
167 | for (int i = 0; i < numberOfObservations; ++i) {
168 | ares[i] = Math.abs(dataForSHESD[i] - dataMedian);
169 | ares[i] /= dataStd;
170 | }
171 |
172 | // here use std for the following iterative calculate datastd
173 | dataStd = Math.sqrt(stat.getPopulationVariance());
174 |
175 | int[] aresOrder = getOrder(ares);
176 | int medianIndex = numberOfObservations / 2;
177 | int left = 0, right = numberOfObservations - 1;
178 | int currentLen = numberOfObservations, tempMaxIdx = 0;
179 | double R = 0.0, p = 0.0;
180 | for (int outlierIdx = 1; outlierIdx <= maxOutliers; ++outlierIdx) {
181 | p = 1.0 - config.getAlpha() / (2 * (numberOfObservations - outlierIdx + 1));
182 | TDistribution tDistribution = new TDistribution(numberOfObservations - outlierIdx - 1);
183 | double t = tDistribution.inverseCumulativeProbability(p);
184 | double lambdaCritical = t * (numberOfObservations - outlierIdx) / Math.sqrt((numberOfObservations
185 | - outlierIdx - 1 + t * t) * (numberOfObservations - outlierIdx + 1));
186 | if (left >= right) break;
187 | if (currentLen < 1) break;
188 |
189 | // remove the largest
190 | if (Math.abs(dataForSHESD[aresOrder[left]] - dataMedian) > Math.abs(dataForSHESD[aresOrder[right]] - dataMedian)) {
191 | tempMaxIdx = aresOrder[left];
192 | ++left;
193 | ++medianIndex;
194 | } else {
195 | tempMaxIdx = aresOrder[right];
196 | --right;
197 | --medianIndex;
198 | }
199 | // get the R
200 | R = Math.abs((dataForSHESD[tempMaxIdx] - dataMedian) / dataStd);
201 | // recalculate the dataMean and dataStd
202 | dataStd = Math.sqrt(((currentLen - 1) * (dataStd * dataStd + dataMean * dataMean) - dataForSHESD[tempMaxIdx] * dataForSHESD[tempMaxIdx] -
203 | ((currentLen - 1) * dataMean - dataForSHESD[tempMaxIdx]) * ((currentLen - 1) * dataMean - dataForSHESD[tempMaxIdx]) /
204 | (currentLen - 2)) / (currentLen - 2));
205 | dataMean = (dataMean * currentLen - dataForSHESD[tempMaxIdx]) / (currentLen - 1);
206 | dataMedian = dataForSHESD[aresOrder[medianIndex]];
207 | --currentLen;
208 |
209 | // record the index
210 | anomsIdx[outlierIdx - 1] = tempMaxIdx;
211 | anomsSc[outlierIdx - 1] = R;
212 | if (R < lambdaCritical * config.getAnomsThreshold() || Double.isNaN(dataStd) || Math.abs(dataStd) <= 1e-10) {
213 | break;
214 | }
215 | numAnoms = outlierIdx;
216 | }
217 | if (numAnoms > 0) {
218 | ArrayList map = new ArrayList();
219 | for (int i = 0; i < numAnoms; ++i) {
220 | map.add(new Pair((int)anomsIdx[i], anomsSc[i]));
221 | }
222 | map.sort(new PairKeyComparator());
223 | long[] idx = new long[numAnoms];
224 | double[] anoms = new double[numAnoms];
225 | for (int i = 0; i < numAnoms; ++i) {
226 | idx[i] = map.get(i).key;
227 | anoms[i] = map.get(i).value;
228 | }
229 | return new ANOMSResult(idx, anoms, dataDecomp);
230 | }
231 | else
232 | return null;
233 | }
234 |
235 | /**
236 | #' @name AnomalyDetectionTs
237 | #' @param timestamps & series Time series as a two column data frame where the first column consists of the
238 | #' timestamps and the second column consists of the observations.
239 | #' @param max_anoms Maximum number of anomalies that S-H-ESD will detect as a percentage of the
240 | #' data.
241 | #' @param numObsPerPeriod the numbers point in one period
242 | #' @param anoms_threshold use the threshold to filter the anoms. such as if anoms_threshold = 1.05,
243 | #' then we will filter the anoms that exceed the exceptional critical value 100%-105%
244 | #' @param alpha The level of statistical significance with which to accept or reject anomalies.
245 | */
246 | public ANOMSResult anomalyDetection(long[] timestamps, double[] series) {
247 | // Sanity check all input parameters
248 | if (timestamps == null || timestamps.length < 1 || series == null || series.length < 1 || timestamps.length != series.length)
249 | throw new IllegalArgumentException("The data is empty or has no equal length.");
250 | if (config.getMaxAnoms() > 0.49) {
251 | throw new IllegalArgumentException("max_anoms must be less than 50% of the data points.");
252 | } else if (config.getMaxAnoms() <= 0) {
253 | throw new IllegalArgumentException("max_anoms must be positive.");
254 | }
255 | /**
256 | * Main analysis: perform S-H-ESD
257 | */
258 | int numberOfObservations = series.length;
259 | if (config.getMaxAnoms() < 1.0 / numberOfObservations)
260 | config.setMaxAnoms(1.0 / numberOfObservations);
261 | removeMissingValuesByAveragingNeighbors(series);
262 |
263 | return detectAnoms(timestamps, series);
264 | }
265 |
266 | private STLResult removeSeasonality(long[] timestamps, double[] series, int seasonality) {
267 | STLDecomposition.Config config = new STLDecomposition.Config();
268 | config.setNumObsPerPeriod(seasonality);
269 | config.setNumberOfDataPoints(timestamps.length);
270 | // if robust
271 | config.setNumberOfInnerLoopPasses(1);
272 | config.setNumberOfRobustnessIterations(15);
273 |
274 | STLDecomposition stl = new STLDecomposition(config);
275 | STLResult res = stl.decompose(timestamps, series);
276 |
277 | return res;
278 | }
279 |
280 | public static void removeMissingValuesByAveragingNeighbors(double[] arr) {
281 | for (int i = 0; i < arr.length; i++) {
282 | if (Double.isNaN(arr[i])) {
283 | double sum = 0.0;
284 | int count = 0;
285 | if (i - 1 >= 0 && !Double.isNaN(arr[i - 1])) {
286 | sum += arr[i - 1];
287 | count++;
288 | }
289 | if (i + 1 < arr.length && !Double.isNaN(arr[i + 1])) {
290 | sum += arr[i + 1];
291 | count++;
292 | }
293 | if (count != 0)
294 | arr[i] = sum / count;
295 | else
296 | arr[i] = 0.0;
297 | }
298 | }
299 | }
300 |
301 | class Pair {
302 | int key;
303 | double value;
304 | public Pair(int k, double v) {
305 | key = k;
306 | value = v;
307 | }
308 | }
309 |
310 | class PairKeyComparator implements Comparator {
311 | public int compare(Pair a, Pair b) {
312 | if (a.key != b.key)
313 | return a.key - b.key;
314 | return (a.value - b.value) > 0.0 ? 1 : -1;
315 | }
316 | }
317 |
318 | class PairValueComparator implements Comparator {
319 | public int compare(Pair a, Pair b) {
320 | if (a.value != b.value) {
321 | return (a.value - b.value) > 0 ? -1 : 1;
322 | }
323 | return b.key - a.key;
324 | }
325 | }
326 |
327 | private int[] getOrder(double[] data) {
328 | if (data == null || data.length < 1)
329 | return null;
330 | int len = data.length;
331 | ArrayList map = new ArrayList();
332 | for (int i = 0; i < len; ++i) {
333 | map.add(new Pair(i, data[i]));
334 | }
335 | map.sort(new PairValueComparator());
336 | int[] returnOrder = new int[len];
337 | for (int i = 0; i < len; ++i) {
338 | returnOrder[i] = map.get(i).key;
339 | }
340 | return returnOrder;
341 | }
342 | }
343 |
--------------------------------------------------------------------------------
/src/main/java/com/github/ruananswer/statistics/OnlineNormalStatistics.java:
--------------------------------------------------------------------------------
1 | package com.github.ruananswer.statistics;
2 |
3 | /**
4 | * Simple, fast, online normal statistics object using Welford's algorithm.
5 | */
6 | public class OnlineNormalStatistics {
7 |
8 | private int _n = 0;
9 | private double _mean = 0;
10 | private double _sumSqDiff = 0;
11 |
12 | public OnlineNormalStatistics() {
13 | // do nothing
14 | }
15 |
16 | public OnlineNormalStatistics(double[] initialValues) {
17 | for (double d : initialValues) {
18 | addValue(d);
19 | }
20 | }
21 |
22 | public void addValue(double value) {
23 | if (Double.isNaN(value))
24 | return;
25 | double old_mean = _mean;
26 | _n++;
27 | _mean += (value - old_mean) / _n;
28 | _sumSqDiff += (value - _mean) * (value - old_mean);
29 | }
30 |
31 | public int getN() {
32 | return _n;
33 | }
34 |
35 | public double getMean() {
36 | return (_n > 0) ? _mean : Double.NaN;
37 | }
38 |
39 | public double getSumSqDev() {
40 | return (_n > 0) ? _sumSqDiff : Double.NaN;
41 | }
42 |
43 | public double getVariance() {
44 | return (_n > 1) ? _sumSqDiff / (_n - 1) : Double.NaN;
45 | }
46 |
47 | public double getPopulationVariance() {
48 | return (_n > 0) ? _sumSqDiff / _n : Double.NaN;
49 | }
50 |
51 | public double getStandardScore(double value) {
52 | return (value - _mean) / Math.sqrt(getVariance());
53 | }
54 |
55 | public void set_n(int _n) {
56 | this._n = _n;
57 | }
58 |
59 | public void set_mean(double _mean) {
60 | this._mean = _mean;
61 | }
62 |
63 | public void set_sumSqDiff(double _sumSqDiff) {
64 | this._sumSqDiff = _sumSqDiff;
65 | }
66 |
67 | public double getSumOfSq() {
68 | return _sumSqDiff + _n * _mean * _mean;
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/src/main/java/com/github/ruananswer/statistics/QuickMedians.java:
--------------------------------------------------------------------------------
1 | package com.github.ruananswer.statistics;
2 |
3 | import java.util.Arrays;
4 |
5 | /**
6 | * Created by on 17-2-28.
7 | */
public class QuickMedians {
    /** Working copy of the input; permuted in place by the selection passes. */
    private double[] data;
    /** Median, computed once in the constructor. */
    private double median = 0;
    /** Number of elements in {@link #data}. */
    private int size = 0;

    /**
     * Copies {@code initialValues} (the caller's array is never touched) and
     * computes its median with quickselect: the middle element for odd length,
     * the average of the two middle elements for even length, and
     * {@code Double.NaN} for an empty input.
     */
    public QuickMedians(double[] initialValues) {
        size = initialValues.length;
        data = Arrays.copyOf(initialValues, size);
        if (size == 0) {
            median = Double.NaN;
        } else if (size % 2 == 0) {
            median = (quickSelectK(size / 2 - 1) + quickSelectK(size / 2)) / 2.0;
        } else {
            median = quickSelectK(size / 2);
        }
    }

    /**
     * Lomuto partition of data[low..high] around the pivot data[high].
     * On return, every element left of the returned index is &lt;= the pivot
     * and the pivot sits at that index.
     *
     * @return the final position of the pivot
     */
    public int partition(int low, int high) {
        double pivot = data[high];
        int boundary = low; // first index of the "greater than pivot" region
        for (int scan = low; scan < high; ++scan) {
            if (data[scan] <= pivot) {
                exchange(boundary, scan);
                ++boundary;
            }
        }
        exchange(boundary, high);
        return boundary;
    }

    /** Swaps data[a] and data[b]. */
    private void exchange(int a, int b) {
        double held = data[a];
        data[a] = data[b];
        data[b] = held;
    }

    /**
     * Returns the (k+1)-th smallest element (0-based rank k) of the current
     * working array, partially sorting it as a side effect. Expected O(n).
     */
    public double quickSelectK(int k) {
        int lo = 0;
        int hi = size - 1;
        int rank = k; // rank is relative to lo
        while (lo < hi) {
            int pivotIdx = partition(lo, hi);
            int leftCount = pivotIdx - lo + 1;
            if (leftCount == rank + 1) {
                return data[pivotIdx];
            }
            if (leftCount < rank + 1) {
                // Target lies right of the pivot; drop the left part from the rank.
                rank -= leftCount;
                lo = pivotIdx + 1;
            } else {
                hi = pivotIdx - 1;
            }
        }
        return data[lo];
    }

    /** @return the precomputed median (NaN for an empty input). */
    public double getMedian() { return median; }
}
64 |
--------------------------------------------------------------------------------
/src/main/java/com/github/ruananswer/stl/STLDecomposition.java:
--------------------------------------------------------------------------------
1 | package com.github.ruananswer.stl;
2 |
3 | import java.util.ArrayList;
4 |
5 | /**
6 | * Implementation of STL: A Seasonal-Trend Decomposition Procedure based on Loess.
7 | *
8 | * Robert B. Cleveland et al., "STL: A Seasonal-Trend Decomposition Procedure based on Loess,"
9 | * in Journal of Official Statistics Vol. 6 No. 1, 1990, pp. 3-73
10 | *
11 | *
12 | * Hafen, R. P. "Local regression models: Advancements, applications, and new methods." (2010).
13 | *
 * Created on 16-4-14.
15 | */
16 | public class STLDecomposition {
17 | private final Config config;
18 |
/**
 * Builds a decomposer for the given configuration. The configuration is
 * validated first (fail fast) so an instance with an unusable periodicity
 * can never be constructed.
 *
 * @param config decomposition parameters; must pass {@code Config.check()}
 */
public STLDecomposition(Config config) {
    config.check();
    this.config = config;
}
23 |
/**
 * Configuration for one STL decomposition run. Field names mirror the
 * parameters of R's {@code stl()}/stlplus (s.window, t.window, l.window, ...).
 * A value of -1 means "not set"; {@code decompose()} fills in defaults for
 * unset windows and jumps. Only the seasonal periodicity is validated by
 * {@link #check()}.
 */
public static class Config {
    /** The number of observations in each cycle of the seasonal component, n_p */
    private int numObsPerPeriod = -1;
    /** s.window either the character string \code{"periodic"} or the span (in lags) of the loess window for seasonal extraction,
     * which should be odd. This has no default. */
    private int sWindow = -1;
    /** s.degree degree of locally-fitted polynomial in seasonal extraction. Should be 0, 1, or 2. */
    private int sDegree = 1;
    /** t.window the span (in lags) of the loess window for trend extraction, which should be odd.
     * If \code{NULL}, the default, \code{nextodd(ceiling((1.5*period) / (1-(1.5/s.window))))}, is taken.*/
    private int tWindow = -1;
    /** t.degree degree of locally-fitted polynomial in trend extraction. Should be 0, 1, or 2. */
    private int tDegree = 1;
    /** l.window the span (in lags) of the loess window of the low-pass filter used for each subseries.
     * Defaults to the smallest odd integer greater than or equal to \code{n.p}
     * which is recommended since it prevents competition between the trend and seasonal components.
     * If not an odd integer its given value is increased to the next odd one.*/
    private int lWindow = -1;
    /** l.degree degree of locally-fitted polynomial for the subseries low-pass filter. Should be 0, 1, or 2. */
    private int lDegree = 1;
    /** s.jump s.jump,t.jump,l.jump,fc.jump integers at least one to increase speed of the respective smoother.
     * Linear interpolation happens between every \code{*.jump}th value. */
    private int sJump = -1;
    /** t.jump */
    private int tJump = -1;
    /** l.jump */
    private int lJump = -1;
    /** critfreq the critical frequency to use for automatic calculation of smoothing windows for the trend and high-pass filter. */
    private double critFreq = 0.05;
    /** The number of passes through the inner loop, n_i */
    private int numberOfInnerLoopPasses = 2;
    /** The number of robustness iterations of the outer loop, n_o */
    private int numberOfRobustnessIterations = 1;
    /** sub.labels optional vector of length n.p that contains the labels of the subseries in their natural order (such as month name, day of week, etc.),
     * used for strip labels when plotting. All entries must be unique. */
    private int[] subLabels = null;
    /** the number of series to decompose */
    private int numberOfDataPoints = -1;

    // --- Plain accessors; naming follows the original (nonstandard) get/set-prefix-plus-field style. ---

    public int getNumObsPerPeriod() {
        return numObsPerPeriod;
    }

    public void setNumObsPerPeriod(int numObsPerPeriod) {
        this.numObsPerPeriod = numObsPerPeriod;
    }

    public int getsWindow() {
        return sWindow;
    }

    public void setsWindow(int sWindow) {
        this.sWindow = sWindow;
    }

    public int getsDegree() {
        return sDegree;
    }

    public void setsDegree(int sDegree) {
        this.sDegree = sDegree;
    }

    public int gettWindow() {
        return tWindow;
    }

    public void settWindow(int tWindow) {
        this.tWindow = tWindow;
    }

    public int gettDegree() {
        return tDegree;
    }

    public void settDegree(int tDegree) {
        this.tDegree = tDegree;
    }

    public int getlWindow() {
        return lWindow;
    }

    public void setlWindow(int lWindow) {
        this.lWindow = lWindow;
    }

    public int getlDegree() {
        return lDegree;
    }

    public void setlDegree(int lDegree) {
        this.lDegree = lDegree;
    }

    public int getsJump() {
        return sJump;
    }

    public void setsJump(int sJump) {
        this.sJump = sJump;
    }

    public int gettJump() {
        return tJump;
    }

    public void settJump(int tJump) {
        this.tJump = tJump;
    }

    public int getlJump() {
        return lJump;
    }

    public void setlJump(int lJump) {
        this.lJump = lJump;
    }

    public double getCritFreq() {
        return critFreq;
    }

    public void setCritFreq(double critFreq) {
        this.critFreq = critFreq;
    }

    public int getNumberOfInnerLoopPasses() {
        return numberOfInnerLoopPasses;
    }

    public void setNumberOfInnerLoopPasses(int numberOfInnerLoopPasses) {
        this.numberOfInnerLoopPasses = numberOfInnerLoopPasses;
    }

    public int getNumberOfRobustnessIterations() {
        return numberOfRobustnessIterations;
    }

    public void setNumberOfRobustnessIterations(int numberOfRobustnessIterations) {
        this.numberOfRobustnessIterations = numberOfRobustnessIterations;
    }

    public int[] getSubLabels() {
        return subLabels;
    }

    public void setSubLabels(int[] subLabels) {
        this.subLabels = subLabels;
    }

    public int getNumberOfDataPoints() {
        return numberOfDataPoints;
    }

    public void setNumberOfDataPoints(int numberOfDataPoints) {
        this.numberOfDataPoints = numberOfDataPoints;
    }

    public Config() {
    }

    /** Validates this configuration; currently only the seasonal periodicity is checked. */
    public void check() {
        checkPeriodicity(numObsPerPeriod, numberOfDataPoints);
    }

    /**
     * Ensures the periodicity has been set, is at least 4, and that the series
     * is strictly longer than two full periods.
     *
     * @throws IllegalArgumentException if any requirement is violated
     */
    private boolean checkPeriodicity(int numObsPerPeriod, int numberOfDataPoints) {
        if (numObsPerPeriod == -1)
            throw new IllegalArgumentException("Must specify periodicity of seasonal");
        if (numObsPerPeriod < 4) {
            throw new IllegalArgumentException("Periodicity (numObsPerPeriod) must be >= 4");
        }
        if (numberOfDataPoints <= 2 * numObsPerPeriod) {
            throw new IllegalArgumentException(
                "numberOfDataPoints(total length) must contain at least 2 * Periodicity (numObsPerPeriod) points");
        }
        return true;
    }
}
203 |
204 | /**
205 | * Decompose a time series into seasonal, trend and irregular components using \code{loess}, acronym STL.
206 | * A new implementation of STL. Allows for NA values, local quadratic smoothing, post-trend smoothing, and endpoint blending.
207 | * The usage is very similar to that of R's built-in \code{stl()}.
208 | * */
209 | public STLResult decompose(long[] times, double[] series) {
210 | if (times == null || series == null || times.length != series.length)
211 | throw new IllegalArgumentException("times must be same length as time series");
212 | int n = series.length;
213 | int numObsPerPeriod = config.getNumObsPerPeriod();
214 | double[] trend = new double[n];
215 | double[] seasonal = new double[n];
216 | double[] remainder = new double[n];
217 |
218 | for (int i = 0; i < n; i++) {
219 | trend[i] = 0.0;
220 | seasonal[i] = 0.0;
221 | remainder[i] = 0.0;
222 | }
223 |
224 | if (config.getlWindow() == -1)
225 | config.setlWindow(STLUtility.nextOdd(config.getNumObsPerPeriod()));
226 | else
227 | config.setlWindow(STLUtility.nextOdd(config.getlWindow()));
228 |
229 |
230 | if (config.getSubLabels() == null) {
231 | int[] idx = new int[n];
232 | for (int i = 0; i < n; ++i)
233 | idx[i] = i % numObsPerPeriod + 1 ;
234 | config.setSubLabels(idx);
235 | }
236 |
237 | config.setsWindow(10 * n + 1);
238 | config.setsDegree(0);
239 | config.setsJump((int)Math.ceil(config.getsWindow() / 10.0));
240 |
241 | if (config.gettWindow() == -1) {
242 | /** Or use t.window <- nextodd(ceiling(1.5 * n.p/(1 - 1.5 / s.window))) */
243 | config.settWindow(STLUtility.getTWindow(config.gettDegree(), config.getsDegree(), config.getsWindow(), numObsPerPeriod, config.getCritFreq()));
244 | }
245 |
246 | if (config.getsJump() == -1) config.setsJump((int)Math.ceil((double)config.getsWindow() / 10.0));
247 | if (config.gettJump() == -1) config.settJump((int)Math.ceil((double)config.gettWindow() / 10.0));
248 | if (config.getlJump() == -1) config.setlJump((int)Math.ceil((double)config.getlWindow() / 10.0));
249 |
250 | /** start and end indices for after adding in extra n.p before and after */
251 | int startIdx = numObsPerPeriod, endIdx = n - 1 + numObsPerPeriod;
252 |
253 | /** cycleSubIndices will keep track of what part of the
254 | # seasonal each observation belongs to */
255 | int[] cycleSubIndices = new int[n];
256 | double[] weight = new double[n];
257 | for (int i = 0; i < n; ++i) {
258 | cycleSubIndices[i] = i % numObsPerPeriod + 1;
259 | weight[i] = 1.0;
260 | }
261 | // subLabels !!
262 | int lenC = n + 2 * numObsPerPeriod;
263 | double[] C = new double[lenC];
264 | double[] D = new double[n];
265 | double[] detrend = new double[n];
266 | int tempSize = (int)Math.ceil((double)n / (double)numObsPerPeriod) / 2;
267 | ArrayList cycleSub = new ArrayList(tempSize), subWeights = new ArrayList(tempSize);
268 | int[] cs1 = new int[numObsPerPeriod], cs2 = new int[numObsPerPeriod];
269 | for (int i = 0; i < numObsPerPeriod; ++i) {
270 | cs1[i] = cycleSubIndices[i];
271 | cs2[i] = cycleSubIndices[n - numObsPerPeriod + i];
272 | }
273 |
274 | double[] ma3, L = new double[n];
275 | int ljump = config.getlJump(), tjump = config.gettJump();
276 | int lenLev = (int)Math.ceil((double)n / (double)ljump), lenTev = (int)Math.ceil((double)n / (double)tjump);
277 | int[] lEv = new int[lenLev], tEv = new int[lenTev];
278 | double weightMeanAns = 0.0;
279 |
280 | for (int oIter = 1; oIter <= config.getNumberOfRobustnessIterations(); ++oIter) {
281 | for (int iIter = 1; iIter <= config.getNumberOfInnerLoopPasses(); ++iIter) {
282 | /** Step 1: detrending */
283 | for (int i = 0; i < n; ++i)
284 | detrend[i] = series[i] - trend[i];
285 |
286 | /** Step 2: smoothing of cycle-subseries */
287 | for (int i = 0; i < numObsPerPeriod; ++i) {
288 | cycleSub.clear(); subWeights.clear();
289 | for (int j = i; j < n; j += numObsPerPeriod) {
290 | if (cycleSubIndices[j] == i + 1) {
291 | cycleSub.add(detrend[j]);
292 | subWeights.add(weight[j]);
293 | }
294 | }
295 | /**
296 | C[c(cs1, cycleSubIndices, cs2) == i] <- rep(weighted.mean(cycleSub,
297 | w = w[cycleSubIndices == i], na.rm = TRUE), cycleSub.length + 2)
298 | */
299 | weightMeanAns = weightMean(cycleSub, subWeights);
300 | for (int j = i; j < numObsPerPeriod; j += numObsPerPeriod)
301 | if (cs1[j] == i + 1)
302 | C[j] = weightMeanAns;
303 | for (int j = i; j < n; j += numObsPerPeriod)
304 | if (cycleSubIndices[j] == i + 1)
305 | C[j + numObsPerPeriod] = weightMeanAns;
306 | for (int j = 0; j < numObsPerPeriod; ++j)
307 | if (cs2[j] == i + 1)
308 | C[j + numObsPerPeriod + n] = weightMeanAns;
309 | }
310 |
311 | /** Step 3: Low-pass filtering of collection of all the cycle-subseries
312 | # moving averages*/
313 | ma3 = STLUtility.cMa(C, numObsPerPeriod);
314 |
315 | for (int i = 0, j = 0; i < lenLev; ++i, j += ljump)
316 | lEv[i] = j + 1;
317 | if (lEv[lenLev - 1] != n) {
318 | int[] tempLev = new int[lenLev + 1];
319 | System.arraycopy(lEv, 0, tempLev, 0, lenLev);
320 | tempLev[lenLev] = n;
321 | L = STLUtility.loessSTL(null, ma3, config.getlWindow(), config.getlDegree(), tempLev, weight, config.getlJump());
322 | } else {
323 | L = STLUtility.loessSTL(null, ma3, config.getlWindow(), config.getlDegree(), lEv, weight, config.getlJump());
324 | }
325 |
326 | /** Step 4: Detrend smoothed cycle-subseries */
327 | /** Step 5: Deseasonalize */
328 | for (int i = 0; i < n; ++i) {
329 | seasonal[i] = C[startIdx + i] - L[i];
330 | D[i] = series[i] - seasonal[i];
331 | }
332 |
333 | /** Step 6: Trend Smoothing */
334 | for (int i = 0, j = 0; i < lenTev; ++i, j += tjump)
335 | tEv[i] = j + 1;
336 | if (tEv[lenTev - 1] != n) {
337 | int[] tempTev = new int[lenTev + 1];
338 | System.arraycopy(tEv, 0, tempTev, 0, lenTev);
339 | tempTev[lenTev] = n;
340 | trend = STLUtility.loessSTL(null, D, config.gettWindow(), config.gettDegree(), tempTev, weight, config.gettJump());
341 | } else {
342 | trend = STLUtility.loessSTL(null, D, config.gettWindow(), config.gettDegree(), tEv, weight, config.gettJump());
343 | }
344 | }
345 |
346 | }
347 | // Calculate remainder
348 | for (int i = 0; i < n; i++) {
349 | remainder[i] = series[i] - trend[i] - seasonal[i];
350 | }
351 | return new STLResult(trend, seasonal, remainder);
352 | }
353 |
354 | private double weightMean(ArrayList x, ArrayList w) {
355 | double sum = 0.0, sumW = 0.0;
356 | int len = x.size();
357 | for (int i = 0; i < len; ++i) {
358 | if (!Double.isNaN(x.get(i))) {
359 | sum += (x.get(i) * w.get(i));
360 | sumW += w.get(i);
361 | }
362 | }
363 | return sum / sumW;
364 | }
365 | }
366 |
--------------------------------------------------------------------------------
/src/main/java/com/github/ruananswer/stl/STLResult.java:
--------------------------------------------------------------------------------
1 | package com.github.ruananswer.stl;
2 |
3 | /**
4 | * The STL decomposition of a time series.
5 | *