├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── .pylintrc
├── .readthedocs.yaml
├── CITATION.cff
├── LICENSE.txt
├── README.md
├── docs
│   ├── Makefile
│   ├── make.bat
│   └── source
│       ├── LICENSE.rst
│       ├── _static
│       │   └── logo_PyNumDiff.png
│       ├── code.rst
│       ├── conf.py
│       ├── contact.rst
│       ├── contributing.rst
│       ├── finite_difference.rst
│       ├── index.rst
│       ├── kalman_smooth.rst
│       ├── linear_model.rst
│       ├── optimize.rst
│       ├── optimize
│       │   ├── __finite_difference__.rst
│       │   ├── __kalman_smooth__.rst
│       │   ├── __linear_model__.rst
│       │   ├── __optimize__.rst
│       │   ├── __smooth_finite_difference__.rst
│       │   └── __total_variation_regularization__.rst
│       ├── smooth_finite_difference.rst
│       ├── total_variation_regularization.rst
│       ├── utils.rst
│       └── utils
│           ├── _pi_cruise_control.rst
│           ├── evaluate.rst
│           ├── simulate.rst
│           └── utility.rst
├── examples
│   ├── 1_basic_tutorial.ipynb
│   ├── 2a_optimizing_parameters_with_dxdt_known.ipynb
│   └── 2b_optimizing_parameters_with_dxdt_unknown.ipynb
├── linting.py
├── pynumdiff
│   ├── .gitignore
│   ├── __init__.py
│   ├── finite_difference
│   │   ├── __init__.py
│   │   └── _finite_difference.py
│   ├── kalman_smooth
│   │   ├── __init__.py
│   │   └── _kalman_smooth.py
│   ├── linear_model
│   │   ├── __init__.py
│   │   └── _linear_model.py
│   ├── optimize
│   │   ├── __init__.py
│   │   ├── __optimize__.py
│   │   ├── finite_difference
│   │   │   ├── __finite_difference__.py
│   │   │   └── __init__.py
│   │   ├── kalman_smooth
│   │   │   ├── __init__.py
│   │   │   └── __kalman_smooth__.py
│   │   ├── linear_model
│   │   │   ├── __init__.py
│   │   │   └── __linear_model__.py
│   │   ├── smooth_finite_difference
│   │   │   ├── __init__.py
│   │   │   └── __smooth_finite_difference__.py
│   │   └── total_variation_regularization
│   │       ├── __init__.py
│   │       └── __total_variation_regularization__.py
│   ├── smooth_finite_difference
│   │   ├── __init__.py
│   │   └── _smooth_finite_difference.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── test_finite_difference.py
│   │   ├── test_kalman_smooth.py
│   │   ├── test_linear_model.py
│   │   ├── test_optimize.py
│   │   ├── test_smooth_finite_difference.py
│   │   ├── test_total_variation_regularization.py
│   │   └── test_utils.py
│   ├── total_variation_regularization
│   │   ├── __chartrand_tvregdiff__.py
│   │   ├── __init__.py
│   │   └── _total_variation_regularization.py
│   └── utils
│       ├── __init__.py
│       ├── _pi_cruise_control.py
│       ├── evaluate.py
│       ├── old_pi_cruise_control.py
│       ├── simulate.py
│       └── utility.py
└── pyproject.toml
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches:
4 | - master
5 | pull_request:
6 |
7 | jobs:
8 | Linux:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v4
12 | - uses: actions/setup-python@v5
13 | with:
14 | python-version: '3.x'
15 |
16 | - name: tests
17 | run: |
18 | pip install -e .[advanced,dev]
19 | pytest pynumdiff
20 |
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | develop-eggs/
12 | dist/
13 | downloads/
14 | eggs/
15 | .eggs/
16 | lib/
17 | lib64/
18 | parts/
19 | sdist/
20 | var/
21 | wheels/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | MANIFEST
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *.cover
46 | .hypothesis/
47 | .pytest_cache/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 | db.sqlite3
57 |
58 | # Flask stuff:
59 | instance/
60 | .webassets-cache
61 |
62 | # Scrapy stuff:
63 | .scrapy
64 |
65 | # PyBuilder
66 | target/
67 |
68 | # Jupyter Notebook
69 | .ipynb_checkpoints
70 |
71 | # pyenv
72 | .python-version
73 |
74 | # celery beat schedule file
75 | celerybeat-schedule
76 |
77 | # SageMath parsed files
78 | *.sage.py
79 |
80 | # Environments
81 | .env
82 | .venv
83 | env/
84 | venv/
85 | ENV/
86 | env.bak/
87 | venv.bak/
88 |
89 | # Spyder project settings
90 | .spyderproject
91 | .spyproject
92 |
93 | # Rope project settings
94 | .ropeproject
95 |
96 | # mkdocs documentation
97 | /site
98 |
99 | # mypy
100 | .mypy_cache/
101 |
102 | # project-specific additions
103 | *.[oa]
104 | *~
105 | .idea
106 | .DS_Store
107 | *.nc
108 | *.csv
109 | *.mat
110 | .coverage
111 | __pycache__
112 | *.tar
113 | *.tar.gz
114 | *.zip
115 | *.egg-info
116 | dist
117 | *.whl
118 | .vscode
119 | docs/build
120 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 |
3 | # A comma-separated list of package or module names from where C extensions may
4 | # be loaded. Extensions are loading into the active Python interpreter and may
5 | # run arbitrary code.
6 | extension-pkg-allow-list=
7 |
8 | # A comma-separated list of package or module names from where C extensions may
9 | # be loaded. Extensions are loading into the active Python interpreter and may
10 | # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
11 | # for backward compatibility.)
12 | extension-pkg-whitelist=
13 |
14 | # Specify a score threshold to be exceeded before program exits with error.
15 | fail-under=10.0
16 |
17 | # Files or directories to be skipped. They should be base names, not paths.
18 | ignore=CVS
19 |
20 | # Files or directories matching the regex patterns are skipped. The regex
21 | # matches against base names, not paths.
22 | ignore-patterns=
23 |
24 | # Python code to execute, usually for sys.path manipulation such as
25 | # pygtk.require().
26 | #init-hook=
27 |
28 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
29 | # number of processors available to use.
30 | jobs=1
31 |
32 | # Control the amount of potential inferred values when inferring a single
33 | # object. This can help the performance when dealing with large functions or
34 | # complex, nested conditions.
35 | limit-inference-results=100
36 |
37 | # List of plugins (as comma separated values of python module names) to load,
38 | # usually to register additional checkers.
39 | load-plugins=
40 |
41 | # Pickle collected data for later comparisons.
42 | persistent=yes
43 |
44 | # When enabled, pylint would attempt to guess common misconfiguration and emit
45 | # user-friendly hints instead of false-positive error messages.
46 | suggestion-mode=yes
47 |
48 | # Allow loading of arbitrary C extensions. Extensions are imported into the
49 | # active Python interpreter and may run arbitrary code.
50 | unsafe-load-any-extension=no
51 |
52 |
53 | [MESSAGES CONTROL]
54 |
55 | # Only show warnings with the listed confidence levels. Leave empty to show
56 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
57 | confidence=
58 |
59 | # Disable the message, report, category or checker with the given id(s). You
60 | # can either give multiple identifiers separated by comma (,) or put this
61 | # option multiple times (only on the command line, not in the configuration
62 | # file where it should appear only once). You can also use "--disable=all" to
63 | # disable everything first and then reenable specific checks. For example, if
64 | # you want to run only the similarities checker, you can use "--disable=all
65 | # --enable=similarities". If you want to run only the classes checker, but have
66 | # no Warning level messages displayed, use "--disable=all --enable=classes
67 | # --disable=W".
68 | disable=dangerous-default-value,
69 | unused-argument,
70 | unused-variable,
71 | too-few-public-methods,
72 | line-too-long,
73 | too-many-arguments,
74 | too-many-locals,
75 | duplicate-code,
76 | invalid-name,
77 | invalid-unary-operand-type,
78 | print-statement,
79 | parameter-unpacking,
80 | unpacking-in-except,
81 | old-raise-syntax,
82 | backtick,
83 | long-suffix,
84 | old-ne-operator,
85 | old-octal-literal,
86 | import-star-module-level,
87 | non-ascii-bytes-literal,
88 | raw-checker-failed,
89 | bad-inline-option,
90 | locally-disabled,
91 | file-ignored,
92 | suppressed-message,
93 | useless-suppression,
94 | deprecated-pragma,
95 | use-symbolic-message-instead,
96 | apply-builtin,
97 | basestring-builtin,
98 | buffer-builtin,
99 | cmp-builtin,
100 | coerce-builtin,
101 | execfile-builtin,
102 | file-builtin,
103 | long-builtin,
104 | raw_input-builtin,
105 | reduce-builtin,
106 | standarderror-builtin,
107 | unicode-builtin,
108 | xrange-builtin,
109 | coerce-method,
110 | delslice-method,
111 | getslice-method,
112 | setslice-method,
113 | no-absolute-import,
114 | old-division,
115 | dict-iter-method,
116 | dict-view-method,
117 | next-method-called,
118 | metaclass-assignment,
119 | indexing-exception,
120 | raising-string,
121 | reload-builtin,
122 | oct-method,
123 | hex-method,
124 | nonzero-method,
125 | cmp-method,
126 | input-builtin,
127 | round-builtin,
128 | intern-builtin,
129 | unichr-builtin,
130 | map-builtin-not-iterating,
131 | zip-builtin-not-iterating,
132 | range-builtin-not-iterating,
133 | filter-builtin-not-iterating,
134 | using-cmp-argument,
135 | eq-without-hash,
136 | div-method,
137 | idiv-method,
138 | rdiv-method,
139 | exception-message-attribute,
140 | invalid-str-codec,
141 | sys-max-int,
142 | bad-python3-import,
143 | deprecated-string-function,
144 | deprecated-str-translate-call,
145 | deprecated-itertools-function,
146 | deprecated-types-field,
147 | next-method-defined,
148 | dict-items-not-iterating,
149 | dict-keys-not-iterating,
150 | dict-values-not-iterating,
151 | deprecated-operator-function,
152 | deprecated-urllib-function,
153 | xreadlines-attribute,
154 | deprecated-sys-function,
155 | exception-escape,
156 | comprehension-escape
157 |
158 | # Enable the message, report, category or checker with the given id(s). You can
159 | # either give multiple identifier separated by comma (,) or put this option
160 | # multiple time (only on the command line, not in the configuration file where
161 | # it should appear only once). See also the "--disable" option for examples.
162 | enable=c-extension-no-member
163 |
164 |
165 | [REPORTS]
166 |
167 | # Python expression which should return a score less than or equal to 10. You
168 | # have access to the variables 'error', 'warning', 'refactor', and 'convention'
169 | # which contain the number of messages in each category, as well as 'statement'
170 | # which is the total number of statements analyzed. This score is used by the
171 | # global evaluation report (RP0004).
172 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
173 |
174 | # Template used to display messages. This is a python new-style format string
175 | # used to format the message information. See doc for all details.
176 | #msg-template=
177 |
178 | # Set the output format. Available formats are text, parseable, colorized, json
179 | # and msvs (visual studio). You can also give a reporter class, e.g.
180 | # mypackage.mymodule.MyReporterClass.
181 | output-format=text
182 |
183 | # Tells whether to display a full report or only the messages.
184 | reports=no
185 |
186 | # Activate the evaluation score.
187 | score=yes
188 |
189 |
190 | [REFACTORING]
191 |
192 | # Maximum number of nested blocks for function / method body
193 | max-nested-blocks=5
194 |
195 | # Complete name of functions that never returns. When checking for
196 | # inconsistent-return-statements if a never returning function is called then
197 | # it will be considered as an explicit return statement and no message will be
198 | # printed.
199 | never-returning-functions=sys.exit,argparse.parse_error
200 |
201 |
202 | [LOGGING]
203 |
204 | # The type of string formatting that logging methods do. `old` means using %
205 | # formatting, `new` is for `{}` formatting.
206 | logging-format-style=old
207 |
208 | # Logging modules to check that the string format arguments are in logging
209 | # function parameter format.
210 | logging-modules=logging
211 |
212 |
213 | [SPELLING]
214 |
215 | # Limits count of emitted suggestions for spelling mistakes.
216 | max-spelling-suggestions=4
217 |
218 | # Spelling dictionary name. Available dictionaries: none. To make it work,
219 | # install the 'python-enchant' package.
220 | spelling-dict=
221 |
222 | # List of comma separated words that should be considered directives if they
223 | # appear and the beginning of a comment and should not be checked.
224 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
225 |
226 | # List of comma separated words that should not be checked.
227 | spelling-ignore-words=
228 |
229 | # A path to a file that contains the private dictionary; one word per line.
230 | spelling-private-dict-file=
231 |
232 | # Tells whether to store unknown words to the private dictionary (see the
233 | # --spelling-private-dict-file option) instead of raising a message.
234 | spelling-store-unknown-words=no
235 |
236 |
237 | [MISCELLANEOUS]
238 |
239 | # List of note tags to take in consideration, separated by a comma.
240 | notes=FIXME,
241 | XXX,
242 | TODO
243 |
244 | # Regular expression of note tags to take in consideration.
245 | #notes-rgx=
246 |
247 |
248 | [TYPECHECK]
249 |
250 | # List of decorators that produce context managers, such as
251 | # contextlib.contextmanager. Add to this list to register other decorators that
252 | # produce valid context managers.
253 | contextmanager-decorators=contextlib.contextmanager
254 |
255 | # List of members which are set dynamically and missed by pylint inference
256 | # system, and so shouldn't trigger E1101 when accessed. Python regular
257 | # expressions are accepted.
258 | generated-members=
259 |
260 | # Tells whether missing members accessed in mixin class should be ignored. A
261 | # mixin class is detected if its name ends with "mixin" (case insensitive).
262 | ignore-mixin-members=yes
263 |
264 | # Tells whether to warn about missing members when the owner of the attribute
265 | # is inferred to be None.
266 | ignore-none=yes
267 |
268 | # This flag controls whether pylint should warn about no-member and similar
269 | # checks whenever an opaque object is returned when inferring. The inference
270 | # can return multiple potential results while evaluating a Python object, but
271 | # some branches might not be evaluated, which results in partial inference. In
272 | # that case, it might be useful to still emit no-member and other checks for
273 | # the rest of the inferred objects.
274 | ignore-on-opaque-inference=yes
275 |
276 | # List of class names for which member attributes should not be checked (useful
277 | # for classes with dynamically set attributes). This supports the use of
278 | # qualified names.
279 | ignored-classes=optparse.Values,thread._local,_thread._local
280 |
281 | # List of module names for which member attributes should not be checked
282 | # (useful for modules/projects where namespaces are manipulated during runtime
283 | # and thus existing member attributes cannot be deduced by static analysis). It
284 | # supports qualified module names, as well as Unix pattern matching.
285 | ignored-modules=
286 |
287 | # Show a hint with possible names when a member name was not found. The aspect
288 | # of finding the hint is based on edit distance.
289 | missing-member-hint=yes
290 |
291 | # The minimum edit distance a name should have in order to be considered a
292 | # similar match for a missing member name.
293 | missing-member-hint-distance=1
294 |
295 | # The total number of similar names that should be taken in consideration when
296 | # showing a hint for a missing member.
297 | missing-member-max-choices=1
298 |
299 | # List of decorators that change the signature of a decorated function.
300 | signature-mutators=
301 |
302 |
303 | [VARIABLES]
304 |
305 | # List of additional names supposed to be defined in builtins. Remember that
306 | # you should avoid defining new builtins when possible.
307 | additional-builtins=
308 |
309 | # Tells whether unused global variables should be treated as a violation.
310 | allow-global-unused-variables=yes
311 |
312 | # List of names allowed to shadow builtins
313 | allowed-redefined-builtins=
314 |
315 | # List of strings which can identify a callback function by name. A callback
316 | # name must start or end with one of those strings.
317 | callbacks=cb_,
318 | _cb
319 |
320 | # A regular expression matching the name of dummy variables (i.e. expected to
321 | # not be used).
322 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
323 |
324 | # Argument names that match this expression will be ignored. Default to name
325 | # with leading underscore.
326 | ignored-argument-names=_.*|^ignored_|^unused_
327 |
328 | # Tells whether we should check for unused import in __init__ files.
329 | init-import=no
330 |
331 | # List of qualified module names which can have objects that can redefine
332 | # builtins.
333 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
334 |
335 |
336 | [FORMAT]
337 |
338 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
339 | expected-line-ending-format=
340 |
341 | # Regexp for a line that is allowed to be longer than the limit.
342 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$
343 |
344 | # Number of spaces of indent required inside a hanging or continued line.
345 | indent-after-paren=4
346 |
347 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
348 | # tab).
349 | indent-string=' '
350 |
351 | # Maximum number of characters on a single line.
352 | max-line-length=100
353 |
354 | # Maximum number of lines in a module.
355 | max-module-lines=1000
356 |
357 | # Allow the body of a class to be on the same line as the declaration if body
358 | # contains single statement.
359 | single-line-class-stmt=no
360 |
361 | # Allow the body of an if to be on the same line as the test if there is no
362 | # else.
363 | single-line-if-stmt=no
364 |
365 |
366 | [SIMILARITIES]
367 |
368 | # Ignore comments when computing similarities.
369 | ignore-comments=yes
370 |
371 | # Ignore docstrings when computing similarities.
372 | ignore-docstrings=yes
373 |
374 | # Ignore imports when computing similarities.
375 | ignore-imports=no
376 |
377 | # Minimum lines number of a similarity.
378 | min-similarity-lines=4
379 |
380 |
381 | [BASIC]
382 |
383 | # Naming style matching correct argument names.
384 | argument-naming-style=snake_case
385 |
386 | # Regular expression matching correct argument names. Overrides argument-
387 | # naming-style.
388 | #argument-rgx=
389 |
390 | # Naming style matching correct attribute names.
391 | attr-naming-style=snake_case
392 |
393 | # Regular expression matching correct attribute names. Overrides attr-naming-
394 | # style.
395 | #attr-rgx=
396 |
397 | # Bad variable names which should always be refused, separated by a comma.
398 | bad-names=foo,
399 | bar,
400 | baz,
401 | toto,
402 | tutu,
403 | tata
404 |
405 | # Bad variable names regexes, separated by a comma. If names match any regex,
406 | # they will always be refused
407 | bad-names-rgxs=
408 |
409 | # Naming style matching correct class attribute names.
410 | class-attribute-naming-style=any
411 |
412 | # Regular expression matching correct class attribute names. Overrides class-
413 | # attribute-naming-style.
414 | #class-attribute-rgx=
415 |
416 | # Naming style matching correct class constant names.
417 | class-const-naming-style=UPPER_CASE
418 |
419 | # Regular expression matching correct class constant names. Overrides class-
420 | # const-naming-style.
421 | #class-const-rgx=
422 |
423 | # Naming style matching correct class names.
424 | class-naming-style=PascalCase
425 |
426 | # Regular expression matching correct class names. Overrides class-naming-
427 | # style.
428 | #class-rgx=
429 |
430 | # Naming style matching correct constant names.
431 | const-naming-style=UPPER_CASE
432 |
433 | # Regular expression matching correct constant names. Overrides const-naming-
434 | # style.
435 | #const-rgx=
436 |
437 | # Minimum line length for functions/classes that require docstrings, shorter
438 | # ones are exempt.
439 | docstring-min-length=-1
440 |
441 | # Naming style matching correct function names.
442 | function-naming-style=snake_case
443 |
444 | # Regular expression matching correct function names. Overrides function-
445 | # naming-style.
446 | #function-rgx=
447 |
448 | # Good variable names which should always be accepted, separated by a comma.
449 | good-names=i,
450 | j,
451 | k,
452 | ex,
453 | Run,
454 | _
455 |
456 | # Good variable names regexes, separated by a comma. If names match any regex,
457 | # they will always be accepted
458 | good-names-rgxs=
459 |
460 | # Include a hint for the correct naming format with invalid-name.
461 | include-naming-hint=no
462 |
463 | # Naming style matching correct inline iteration names.
464 | inlinevar-naming-style=any
465 |
466 | # Regular expression matching correct inline iteration names. Overrides
467 | # inlinevar-naming-style.
468 | #inlinevar-rgx=
469 |
470 | # Naming style matching correct method names.
471 | method-naming-style=snake_case
472 |
473 | # Regular expression matching correct method names. Overrides method-naming-
474 | # style.
475 | #method-rgx=
476 |
477 | # Naming style matching correct module names.
478 | module-naming-style=snake_case
479 |
480 | # Regular expression matching correct module names. Overrides module-naming-
481 | # style.
482 | #module-rgx=
483 |
484 | # Colon-delimited sets of names that determine each other's naming style when
485 | # the name regexes allow several styles.
486 | name-group=
487 |
488 | # Regular expression which should only match function or class names that do
489 | # not require a docstring.
490 | no-docstring-rgx=^_
491 |
492 | # List of decorators that produce properties, such as abc.abstractproperty. Add
493 | # to this list to register other decorators that produce valid properties.
494 | # These decorators are taken in consideration only for invalid-name.
495 | property-classes=abc.abstractproperty
496 |
497 | # Naming style matching correct variable names.
498 | variable-naming-style=snake_case
499 |
500 | # Regular expression matching correct variable names. Overrides variable-
501 | # naming-style.
502 | #variable-rgx=
503 |
504 |
505 | [STRING]
506 |
507 | # This flag controls whether inconsistent-quotes generates a warning when the
508 | # character used as a quote delimiter is used inconsistently within a module.
509 | check-quote-consistency=no
510 |
511 | # This flag controls whether the implicit-str-concat should generate a warning
512 | # on implicit string concatenation in sequences defined over several lines.
513 | check-str-concat-over-line-jumps=no
514 |
515 |
516 | [IMPORTS]
517 |
518 | # List of modules that can be imported at any level, not just the top level
519 | # one.
520 | allow-any-import-level=
521 |
522 | # Allow wildcard imports from modules that define __all__.
523 | allow-wildcard-with-all=no
524 |
525 | # Analyse import fallback blocks. This can be used to support both Python 2 and
526 | # 3 compatible code, which means that the block might have code that exists
527 | # only in one or another interpreter, leading to false positives when analysed.
528 | analyse-fallback-blocks=no
529 |
530 | # Deprecated modules which should not be used, separated by a comma.
531 | deprecated-modules=optparse,tkinter.tix
532 |
533 | # Output a graph (.gv or any supported image format) of external dependencies
534 | # to the given file (report RP0402 must not be disabled).
535 | ext-import-graph=
536 |
537 | # Output a graph (.gv or any supported image format) of all (i.e. internal and
538 | # external) dependencies to the given file (report RP0402 must not be
539 | # disabled).
540 | import-graph=
541 |
542 | # Output a graph (.gv or any supported image format) of internal dependencies
543 | # to the given file (report RP0402 must not be disabled).
544 | int-import-graph=
545 |
546 | # Force import order to recognize a module as part of the standard
547 | # compatibility libraries.
548 | known-standard-library=
549 |
550 | # Force import order to recognize a module as part of a third party library.
551 | known-third-party=enchant
552 |
553 | # Couples of modules and preferred modules, separated by a comma.
554 | preferred-modules=
555 |
556 |
557 | [CLASSES]
558 |
559 | # Warn about protected attribute access inside special methods
560 | check-protected-access-in-special-methods=no
561 |
562 | # List of method names used to declare (i.e. assign) instance attributes.
563 | defining-attr-methods=__init__,
564 | __new__,
565 | setUp,
566 | __post_init__
567 |
568 | # List of member names, which should be excluded from the protected access
569 | # warning.
570 | exclude-protected=_asdict,
571 | _fields,
572 | _replace,
573 | _source,
574 | _make
575 |
576 | # List of valid names for the first argument in a class method.
577 | valid-classmethod-first-arg=cls
578 |
579 | # List of valid names for the first argument in a metaclass class method.
580 | valid-metaclass-classmethod-first-arg=cls
581 |
582 |
583 | [DESIGN]
584 |
585 | # Maximum number of arguments for function / method.
586 | max-args=5
587 |
588 | # Maximum number of attributes for a class (see R0902).
589 | max-attributes=7
590 |
591 | # Maximum number of boolean expressions in an if statement (see R0916).
592 | max-bool-expr=5
593 |
594 | # Maximum number of branch for function / method body.
595 | max-branches=12
596 |
597 | # Maximum number of locals for function / method body.
598 | max-locals=15
599 |
600 | # Maximum number of parents for a class (see R0901).
601 | max-parents=7
602 |
603 | # Maximum number of public methods for a class (see R0904).
604 | max-public-methods=20
605 |
606 | # Maximum number of return / yield for function / method body.
607 | max-returns=6
608 |
609 | # Maximum number of statements in function / method body.
610 | max-statements=50
611 |
612 | # Minimum number of public methods for a class (see R0903).
613 | min-public-methods=2
614 |
615 |
616 | [EXCEPTIONS]
617 |
618 | # Exceptions that will emit a warning when being caught. Defaults to
619 | # "BaseException, Exception".
620 | overgeneral-exceptions=BaseException,
621 | Exception
622 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
 1 | version: 2
 2 |
 3 | build:
 4 |   os: ubuntu-22.04
 5 |   tools:
 6 |     python: "3"
 7 |
 8 | sphinx:
 9 |   builder: html  # the default Sphinx HTML builder (same output as 'make html')
10 |   configuration: docs/source/conf.py
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.1.0
2 | message: "If you use this software, please at least cite the JOSS publication referenced below."
3 | authors:
4 | - family-names: van Breugel
5 | given-names: Floris
6 | - family-names: Liu
7 | given-names: Yuying
8 | - family-names: Brunton
 10 |   given-names: Bingni W.
10 | - family-names: Kutz
11 | given-names: J. Nathan
12 | title: "PyNumDiff: A Python package for numerical differentiation of noisy time-series data"
13 | version: 0.1.2.4
14 | date-released: 2022-03-21
15 | doi: 10.5281/zenodo.6374098
16 | references:
17 | - type: article
18 | title: "PyNumDiff: A Python package for numerical differentiation of noisy time-series data"
19 | authors:
20 | - family-names: van Breugel
21 | given-names: Floris
22 | - family-names: Liu
23 | given-names: Yuying
24 | - family-names: Brunton
 25 |     given-names: Bingni W.
26 | - family-names: Kutz
27 | given-names: J. Nathan
28 | journal: Journal of Open Source Software
29 | year: 2022
30 | volume: 7
31 | number: 71
32 | pages: 4078
33 | doi: 10.21105/joss.04078
34 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2021
4 | Floris van Breugel, The University of Nevada, Reno, Mechanical Engineering Department
5 | Yuying Liu, The University of Washington Applied Math Department
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy
8 | of this software and associated documentation files (the "Software"), to deal
9 | in the Software without restriction, including without limitation the rights
10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyNumDiff
2 |
3 | Python methods for numerical differentiation of noisy data, including multi-objective optimization routines for automated parameter selection.
4 |
22 | ## Table of contents
23 | - [PyNumDiff](#pynumdiff)
24 | - [Table of contents](#table-of-contents)
25 | - [Introduction](#introduction)
26 | - [Structure](#structure)
27 | - [Citation](#citation)
28 | - [PyNumDiff python package:](#pynumdiff-python-package)
29 | - [Optimization algorithm:](#optimization-algorithm)
30 | - [Getting Started](#getting-started)
31 | - [Prerequisite](#prerequisite)
32 | - [Installing](#installing)
33 | - [Usage](#usage)
34 | - [Basic usages](#basic-usages)
35 | - [Notebook examples](#notebook-examples)
36 | - [Important notes](#important-notes)
37 | - [Running the tests](#running-the-tests)
38 | - [License](#license)
39 |
40 | ## Introduction
41 |
 42 | PyNumDiff is a Python package that implements various methods for computing numerical derivatives of noisy data, which
 43 | can be a critical step in developing dynamic models or designing controllers. There are four different families of methods
 44 | implemented in this repository: smoothing followed by finite difference calculation, local approximation with linear
 45 | models, Kalman filtering based methods, and total variation regularization methods. Most of these methods involve
 46 | multiple parameters that must be tuned. We take a principled approach and propose a multi-objective optimization framework
 47 | for choosing parameters that minimize a loss function balancing the faithfulness and smoothness of the derivative estimate.
 48 | For more details, refer to [this paper](https://doi.org/10.1109/ACCESS.2020.3034077).
49 |
50 | ## Structure
51 |
52 | PyNumDiff/
53 | |- README.md
54 | |- pynumdiff/
55 | |- __init__.py
56 | |- __version__.py
57 | |- finite_difference/
58 | |- kalman_smooth/
59 | |- linear_model/
60 | |- smooth_finite_difference/
61 | |- total_variation_regularization/
62 | |- utils/
63 | |- optimize/
64 | |- __init__.py
65 | |- __optimize__.py
66 | |- finite_difference/
67 | |- kalman_smooth/
68 | |- linear_model/
69 | |- smooth_finite_difference/
70 | |- total_variation_regularization/
71 | |- tests/
72 | |- examples
73 | |- 1_basic_tutorial.ipynb
74 | |- 2a_optimizing_parameters_with_dxdt_known.ipynb
75 | |- 2b_optimizing_parameters_with_dxdt_unknown.ipynb
76 | |- docs/
77 | |- Makefile
78 | |- make.bat
79 | |- build/
80 | |- source/
81 | |- _static
82 | |- _summaries
83 | |- conf.py
84 | |- index.rst
85 | |- ...
86 | |- .gitignore
87 | |- LICENSE.txt
88 | |- pyproject.toml
89 |
90 | ## Citation
91 |
 92 | See the CITATION.cff file as well as the following references.
93 |
94 | #### PyNumDiff python package:
95 |
96 | @article{PyNumDiff2022,
97 | doi = {10.21105/joss.04078},
98 | url = {https://doi.org/10.21105/joss.04078},
99 | year = {2022},
100 | publisher = {The Open Journal},
101 | volume = {7},
102 | number = {71},
103 | pages = {4078},
104 | author = {Floris van Breugel and Yuying Liu and Bingni W. Brunton and J. Nathan Kutz},
105 | title = {PyNumDiff: A Python package for numerical differentiation of noisy time-series data},
106 | journal = {Journal of Open Source Software}
107 | }
108 |
109 |
110 | #### Optimization algorithm:
111 |
112 | @article{ParamOptimizationDerivatives2020,
113 | doi={10.1109/ACCESS.2020.3034077},
114 | author={F. {van Breugel} and J. {Nathan Kutz} and B. W. {Brunton}},
115 | journal={IEEE Access},
116 | title={Numerical differentiation of noisy data: A unifying multi-objective optimization framework},
117 | year={2020}
118 | }
119 |
120 | ## Getting Started
121 |
122 | ### Prerequisite
123 |
124 | PyNumDiff requires common packages like `numpy`, `scipy`, `matplotlib`, `pytest` (for unit tests), and `pylint`
125 | (for PEP8 style checks). For a full list, check the file [pyproject.toml](pyproject.toml).
126 |
127 | In addition, certain functions require additional packages, though these are not required for a successful install of PyNumDiff:
128 | * Total Variation Regularization methods: [`cvxpy`](http://www.cvxpy.org/install/index.html)
129 |
130 | When using `cvxpy`, the default solver is `MOSEK` (highly recommended), which requires a free academic
131 | license from the [MOSEK website](https://www.mosek.com/products/academic-licenses/). Otherwise, you can use one of the
132 | other solvers listed [here](https://www.cvxpy.org/tutorial/advanced/index.html).
133 |
134 | ### Installing
135 |
136 | The code is compatible with Python >= 3.5. It can be installed using pip or directly from the source code. Basic installation options include:
137 |
138 | * From PyPI using pip: `pip install pynumdiff`
139 | * From source using pip git+: `pip install git+https://github.com/florisvb/PyNumDiff`
140 | * From local source code: run `pip install .` from inside this directory.
141 |
142 | For additional solvers, run `pip install pynumdiff[advanced]`. This includes `cvxpy`,
143 | which can be tricky when compiling from source. If an error occurs in installing
144 | `cvxpy`, see [cvxpy install documentation](https://www.cvxpy.org/install/), install
145 | `cvxpy` according to those instructions, and try `pip install pynumdiff[advanced]`
146 | again.
147 |
148 | Note: If using the optional MOSEK solver for cvxpy, you will also need a free academic [MOSEK license](https://www.mosek.com/products/academic-licenses/).
149 |
150 |
151 | ## Usage
152 |
153 | **PyNumDiff** uses [Sphinx](http://www.sphinx-doc.org/en/stable/) for code documentation,
154 | so you can find more details about API usage [in the online docs](https://pynumdiff.readthedocs.io/en/latest/).
155 |
156 | ### Basic usages
157 |
158 | * Basic usage: you provide the parameters
159 | ```python
160 | x_hat, dxdt_hat = pynumdiff.sub_module.method(x, dt, params, options)
161 | ```
162 | * Intermediate usage: automated parameter selection through multi-objective optimization
163 | ```python
164 | params, val = pynumdiff.optimize.sub_module.method(x, dt, params=None,
165 |                                                    tvgamma=tvgamma, # hyperparameter
166 |                                                    dxdt_truth=None, # no ground truth data
167 |                                                    options={})
168 | print('Optimal parameters: ', params)
169 | x_hat, dxdt_hat = pynumdiff.sub_module.method(x, dt, params, options={'smooth': True})
170 | ```
171 | * Advanced usage: automated parameter selection through multi-objective optimization using a user-defined cutoff frequency
172 | ```python
173 | # cutoff_frequency: estimate by (a) counting the number of true peaks per second in the data or (b) looking at the power spectrum and choosing a cutoff
174 | log_gamma = -1.6*np.log(cutoff_frequency) - 0.71*np.log(dt) - 5.1  # see: https://ieeexplore.ieee.org/abstract/document/9241009
175 | tvgamma = np.exp(log_gamma)
176 |
177 | params, val = pynumdiff.optimize.sub_module.method(x, dt, params=None,
178 |                                                    tvgamma=tvgamma, # hyperparameter
179 |                                                    dxdt_truth=None, # no ground truth data
180 |                                                    options={})
181 | print('Optimal parameters: ', params)
182 | x_hat, dxdt_hat = pynumdiff.sub_module.method(x, dt, params, options={'smooth': True})
183 | ```
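
As a concrete end-to-end illustration of the basic workflow, here is a minimal sketch that differentiates a noisy sine wave. It assumes the `params`/`options` calling convention shown above and the `finite_difference.first_order` and `smooth_finite_difference.butterdiff` methods; the `butterdiff` values `params=[filter_order, cutoff]` are illustrative, not tuned:

```python
import numpy as np
import pynumdiff

# Synthetic noisy signal: a 1 Hz sine sampled at 100 Hz
dt = 0.01
t = np.arange(0, 4, dt)
x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)
dxdt_true = 2 * np.pi * np.cos(2 * np.pi * t)  # analytic derivative for comparison

# Naive first-order finite difference: faithful, but amplifies noise
x_hat, dxdt_hat = pynumdiff.finite_difference.first_order(x, dt)

# Butterworth smoothing followed by finite difference
x_hat_b, dxdt_hat_b = pynumdiff.smooth_finite_difference.butterdiff(x, dt, params=[3, 0.07])

# Smoothing should substantially reduce the error against the analytic derivative
print('RMSE, first_order:', np.sqrt(np.mean((dxdt_hat - dxdt_true)**2)))
print('RMSE, butterdiff :', np.sqrt(np.mean((dxdt_hat_b - dxdt_true)**2)))
```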
184 |
185 | ### Notebook examples
186 |
187 | We will frequently update simple examples for demo purposes; the currently existing ones are:
188 | * Differentiation with different methods: [1_basic_tutorial.ipynb](examples/1_basic_tutorial.ipynb)
189 | * Parameter optimization with known ground truth (for demonstration purposes only): [2a_optimizing_parameters_with_dxdt_known.ipynb](examples/2a_optimizing_parameters_with_dxdt_known.ipynb)
190 | * Parameter optimization with unknown ground truth: [2b_optimizing_parameters_with_dxdt_unknown.ipynb](examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb)
191 |
192 |
193 | ### Important notes
194 |
195 | * Larger values of `tvgamma` produce smoother derivatives
196 | * The value of `tvgamma` is largely universal across methods, making it easy to compare method results
197 | * The optimization is not fast. Run it on subsets of your data if you have a lot of data. It will also be much faster with faster differentiation methods, like savgoldiff and butterdiff, and probably too slow for sliding methods like sliding DMD and sliding LTI fit.
198 | * The following heuristic works well for choosing `tvgamma`, where `cutoff_frequency` is the highest frequency content of the signal in your data, and `dt` is the timestep: `tvgamma=np.exp(-1.6*np.log(cutoff_frequency)-0.71*np.log(dt)-5.1)`
199 |
200 |
201 | ### Running the tests
202 |
203 | We use GitHub Actions for continuous integration testing; see
204 | [.github/workflows/test.yml](.github/workflows/test.yml) for the current configuration.
205 |
206 | To run tests locally, type:
207 | ```bash
208 | > pytest pynumdiff
209 | ```
210 |
211 |
212 | ## License
213 |
214 | This project is released under the [MIT License](LICENSE.txt).
215 | It is 100% open source; feel free to use the code however you like.
216 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/LICENSE.rst:
--------------------------------------------------------------------------------
1 | License
2 | ==============
3 |
4 | The MIT License (MIT)
5 |
6 | Copyright (c) 2021
7 | Floris van Breugel, The University of Nevada, Reno, Mechanical Engineering Department
8 | Yuying Liu, The University of Washington Applied Math Department
9 | Pavel Komarov, Univerity of Washington Electrical and Computer Engineering
10 |
11 | Permission is hereby granted, free of charge, to any person obtaining a copy
12 | of this software and associated documentation files (the "Software"), to deal
13 | in the Software without restriction, including without limitation the rights
14 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 | copies of the Software, and to permit persons to whom the Software is
16 | furnished to do so, subject to the following conditions:
17 |
18 | The above copyright notice and this permission notice shall be included in all
19 | copies or substantial portions of the Software.
20 |
21 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 | SOFTWARE.
--------------------------------------------------------------------------------
/docs/source/_static/logo_PyNumDiff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/florisvb/PyNumDiff/b171ac5582732c703f24976e01651730a146f50c/docs/source/_static/logo_PyNumDiff.png
--------------------------------------------------------------------------------
/docs/source/code.rst:
--------------------------------------------------------------------------------
1 | API documentation
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | finite_difference
8 | kalman_smooth
9 | linear_model
10 | smooth_finite_difference
11 | total_variation_regularization
12 | optimize
13 | utils
14 |
15 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | sys.path.insert(0, os.path.abspath('.'))
17 | sys.path.insert(0, os.path.abspath('..'))
18 | sys.path.insert(0, os.path.abspath('../..'))
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 | project = 'PyNumDiff'
23 | copyright = '2025, Floris van Breugel, Yuying Liu, Pavel Komarov'
24 | author = 'Floris van Breugel, Yuying Liu, Pavel Komarov'
25 |
26 | # The full version, including alpha/beta/rc tags
27 | release = '0.1.2'
28 |
29 |
30 | # -- General configuration ---------------------------------------------------
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
35 | extensions = ['sphinx.ext.autodoc',
36 |               'sphinx.ext.autosummary']
37 |
38 | # Add any paths that contain templates here, relative to this directory.
39 | templates_path = ['_templates']
40 |
41 | # List of patterns, relative to source directory, that match files and
42 | # directories to ignore when looking for source files.
43 | # This pattern also affects html_static_path and html_extra_path.
44 | exclude_patterns = []
45 |
46 |
47 | # -- Options for HTML output -------------------------------------------------
48 |
49 | # The theme to use for HTML and HTML Help pages. See the documentation for
50 | # a list of builtin themes.
51 | #
52 | html_theme = 'sphinx_rtd_theme'
53 |
54 | # Add any paths that contain custom static files (such as style sheets) here,
55 | # relative to this directory. They are copied after the builtin static files,
56 | # so a file named "default.css" will overwrite the builtin "default.css".
57 | html_static_path = ['_static']
58 |
59 | master_doc = 'index'
--------------------------------------------------------------------------------
/docs/source/contact.rst:
--------------------------------------------------------------------------------
1 | Contact
2 | =======
3 |
4 | Feel free to contact the authors for any information.
5 |
6 | - Floris van Breugel: fvanbreugel@unr.edu
7 | - Yuying Liu: liuyuyingufo@gmail.com
--------------------------------------------------------------------------------
/docs/source/contributing.rst:
--------------------------------------------------------------------------------
1 | How to contribute
2 | ===================
3 |
4 | We'd love to accept your patches and contributions to this project. There are
5 | just a few small guidelines you need to follow.
6 |
7 | Submitting a patch:
8 |
9 | 1. It's generally best to start by opening a new issue describing the bug or feature you're intending to fix. Even if you think it's relatively minor, it's helpful to know what people are working on. Mention in the initial issue that you are planning to work on that bug or feature so that it can be assigned to you.
10 |
11 | 2. Follow the normal process of forking the project, and set up a new branch to work in. It's important that each group of changes be done in separate branches in order to ensure that a pull request only includes the commits related to that bug or feature.
12 |
13 | 3. To ensure properly formatted code, please make sure to use 4 spaces to indent the code. You should also run pylint over your code. It's not strictly necessary that your code be completely "lint-free", but this will help you find common style issues.
14 |
15 | 4. Any significant changes should almost always be accompanied by tests. The project already has good test coverage, so look at some of the existing tests if you're unsure how to go about it. We're using Coveralls, which is an invaluable tool for seeing which parts of your code aren't being exercised by your tests.
16 |
17 | 5. Do your best to have well-formed commit messages for each change. This provides consistency throughout the project, and ensures that commit messages are able to be formatted properly by various git tools.
18 |
19 | 6. Finally, push the commits to your fork and submit a pull request. Please, remember to rebase properly in order to maintain a clean, linear git history.
--------------------------------------------------------------------------------
/docs/source/finite_difference.rst:
--------------------------------------------------------------------------------
1 | finite_difference
2 | =================
3 |
4 | .. automodule:: pynumdiff.finite_difference
5 | :members:
6 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | PyNumDiff
2 | =========
3 |
4 | Python methods for numerical differentiation of noisy data, including
5 | multi-objective optimization routines for automated parameter selection.
6 |
7 | Table of contents
8 | -----------------
9 |
10 | - `Introduction <#introduction>`__
11 | - `Structure <#structure>`__
12 | - `Getting Started <#getting-started>`__
13 |
14 | - `Prerequisite <#prerequisite>`__
15 | - `Installing <#installing>`__
16 |
17 | - `Usage <#usage>`__
18 |
19 | - `Basic usages <#basic-usages>`__
20 | - `Notebook examples <#notebook-examples>`__
21 | - `Important notes <#important-notes>`__
22 | - `Running the tests <#running-the-tests>`__
23 |
24 | - `Citation <#citation>`__
25 | - `License <#license>`__
26 |
27 | Introduction
28 | ------------
29 |
30 | PyNumDiff is a Python package that implements various methods for
31 | computing numerical derivatives of noisy data, which can be a critical
32 | step in developing dynamic models or designing controllers. There are
33 | four different families of methods implemented in this repository:
34 | smoothing followed by finite difference calculation, local approximation
35 | with linear models, Kalman filtering based methods, and total variation
36 | regularization methods. Most of these methods involve multiple tuning
37 | parameters. We take a principled approach and propose a multi-objective
38 | optimization framework for choosing parameters that minimize a loss
39 | function balancing the faithfulness and smoothness of the derivative
40 | estimate. For more details, refer to `this
41 | paper <https://doi.org/10.1109/ACCESS.2020.3034077>`__.
42 |
43 | Structure
44 | ---------
45 |
46 | ::
47 |
48 | PyNumDiff/
49 | |- README.md
50 | |- pynumdiff/
51 | |- __init__.py
52 | |- __version__.py
53 | |- finite_difference/
54 | |- kalman_smooth/
55 | |- linear_model/
56 | |- smooth_finite_difference/
57 | |- total_variation_regularization/
58 | |- utils/
59 | |- optimize/
60 | |- __init__.py
61 | |- __optimize__.py
62 | |- finite_difference/
63 | |- kalman_smooth/
64 | |- linear_model/
65 | |- smooth_finite_difference/
66 | |- total_variation_regularization/
67 | |- tests/
68 | |- examples
69 | |- 1_basic_tutorial.ipynb
70 | |- 2a_optimizing_parameters_with_dxdt_known.ipynb
71 | |- 2b_optimizing_parameters_with_dxdt_unknown.ipynb
72 | |- docs/
73 | |- Makefile
74 | |- make.bat
75 | |- build/
76 | |- source/
77 | |- _static
78 | |- _summaries
79 | |- conf.py
80 | |- index.rst
81 | |- ...
82 | |- setup.py
83 | |- .gitignore
84 | |- LICENSE.txt
85 | |- pyproject.toml
86 |
87 | Getting Started
88 | ---------------
89 |
90 | Prerequisite
91 | ~~~~~~~~~~~~
92 |
93 | PyNumDiff requires common packages like ``numpy``, ``scipy``,
94 | ``matplotlib``, ``pytest`` (for unit tests), and ``pylint`` (for PEP8
95 | style checks). For a full list, check the file
96 | `pyproject.toml <https://github.com/florisvb/PyNumDiff/blob/master/pyproject.toml>`__.
97 |
98 | In addition, certain functions require additional packages, though
99 | these are not required for a successful install of PyNumDiff:
100 |
101 | - Total Variation Regularization methods: `cvxpy <http://www.cvxpy.org/install/index.html>`__
102 |
103 | When using ``cvxpy``, the default solver is ``MOSEK`` (highly
104 | recommended), which requires a free academic license from the
105 | `MOSEK website <https://www.mosek.com/products/academic-licenses/>`__.
106 | Otherwise, you can use one of the other solvers listed
107 | `here <https://www.cvxpy.org/tutorial/advanced/index.html>`__.
110 |
111 | Installing
112 | ~~~~~~~~~~
113 |
114 | The code is compatible with Python >= 3.5. It can be installed using pip
115 | or directly from the source code. Basic installation options include:
116 |
117 | - From PyPI using pip: ``pip install pynumdiff``
118 | - From source using pip git+:
119 |   ``pip install git+https://github.com/florisvb/PyNumDiff``
120 | - From local source code: run ``pip install .`` from inside this directory.
121 |
122 | For additional solvers, run ``pip install pynumdiff[advanced]``. This includes ``cvxpy``,
123 | which can be tricky when compiling from source. If an error occurs in installing
124 | ``cvxpy``, see the `cvxpy install documentation <https://www.cvxpy.org/install/>`__, install
125 | ``cvxpy`` according to those instructions, and try ``pip install pynumdiff[advanced]``
126 | again.
127 |
128 |
129 | Note: If using the optional MOSEK solver for cvxpy, you will also need a
130 | free academic
131 | `MOSEK license <https://www.mosek.com/products/academic-licenses/>`__.
132 |
133 | Usage
134 | -----
135 |
136 | Basic usages
137 | ~~~~~~~~~~~~
138 |
139 | - Basic usage: you provide the parameters
140 |
141 | .. code:: python
142 |
143 |     x_hat, dxdt_hat = pynumdiff.sub_module.method(x, dt, params, options)
144 |
145 | - Advanced usage: automated parameter selection through multi-objective
146 |   optimization
147 |
148 | .. code:: python
149 |
150 |     params, val = pynumdiff.optimize.sub_module.method(x, dt, params=None,
151 |                                                        tvgamma=tvgamma, # hyperparameter
152 |                                                        dxdt_truth=None, # no ground truth data
153 |                                                        options={})
154 |     print('Optimal parameters: ', params)
155 |     x_hat, dxdt_hat = pynumdiff.sub_module.method(x, dt, params, options={'smooth': True})
156 |
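As a concrete, hedged sketch of this optimization round trip (assuming the
``smooth_finite_difference.butterdiff`` method and the ``params``/``options``
calling convention shown above; names and defaults may vary between releases):

.. code:: python

    import numpy as np
    import pynumdiff
    import pynumdiff.optimize

    # Synthetic noisy signal: a 1 Hz sine sampled at 100 Hz
    dt = 0.01
    t = np.arange(0, 4, dt)
    x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)

    # tvgamma heuristic from the Important notes below, with cutoff_frequency = 1 Hz
    tvgamma = np.exp(-1.6 * np.log(1.0) - 0.71 * np.log(dt) - 5.1)

    # Let the optimizer choose butterdiff's parameters, then differentiate
    params, val = pynumdiff.optimize.smooth_finite_difference.butterdiff(
        x, dt, params=None, tvgamma=tvgamma, dxdt_truth=None, options={})
    x_hat, dxdt_hat = pynumdiff.smooth_finite_difference.butterdiff(x, dt, params)
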
157 | Notebook examples
158 | ~~~~~~~~~~~~~~~~~
159 |
160 | - Differentiation with different methods: `1_basic_tutorial.ipynb <https://github.com/florisvb/PyNumDiff/blob/master/examples/1_basic_tutorial.ipynb>`__
161 | - Parameter optimization with known ground truth (for demonstration purposes only): `2a_optimizing_parameters_with_dxdt_known.ipynb <https://github.com/florisvb/PyNumDiff/blob/master/examples/2a_optimizing_parameters_with_dxdt_known.ipynb>`__
162 | - Parameter optimization with unknown ground truth: `2b_optimizing_parameters_with_dxdt_unknown.ipynb <https://github.com/florisvb/PyNumDiff/blob/master/examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb>`__
163 |
164 |
165 | Important notes
166 | ~~~~~~~~~~~~~~~
167 |
168 | - Larger values of ``tvgamma`` produce smoother derivatives
169 | - The value of ``tvgamma`` is largely universal across methods, making
170 | it easy to compare method results
171 | - The optimization is not fast. Run it on subsets of your data if you
172 | have a lot of data. It will also be much faster with faster
173 | differentiation methods, like savgoldiff and butterdiff, and probably
174 | too slow for sliding methods like sliding DMD and sliding LTI fit.
175 | - The following heuristic works well for choosing ``tvgamma``, where
176 | ``cutoff_frequency`` is the highest frequency content of the signal
177 | in your data, and ``dt`` is the timestep:
178 | ``tvgamma=np.exp(-1.6*np.log(cutoff_frequency)-0.71*np.log(dt)-5.1)``
179 |
180 | Running the tests
181 | ~~~~~~~~~~~~~~~~~
182 |
183 | To run tests locally, type:
184 |
185 | .. code:: bash
186 |
187 | > pytest pynumdiff
188 |
189 | Citation
190 | --------
191 |
192 | ::
193 |
194 |     @ARTICLE{9241009, author={F. {van Breugel} and J. {Nathan Kutz} and B. W. {Brunton}},
195 |       journal={IEEE Access}, title={Numerical differentiation of noisy data: A unifying multi-objective optimization framework},
196 |       year={2020}, volume={}, number={}, pages={1-1}, doi={10.1109/ACCESS.2020.3034077}}
197 |
198 | Developer's Guide
199 | -----------------
200 |
201 | .. toctree::
202 | :maxdepth: 1
203 |
204 | code
205 | contact
206 | contributing
207 | LICENSE
--------------------------------------------------------------------------------
/docs/source/kalman_smooth.rst:
--------------------------------------------------------------------------------
1 | kalman_smooth
2 | =============
3 |
4 | .. automodule:: pynumdiff.kalman_smooth
5 | :members:
6 |
--------------------------------------------------------------------------------
/docs/source/linear_model.rst:
--------------------------------------------------------------------------------
1 | linear_model
2 | ============
3 |
4 | .. automodule:: pynumdiff.linear_model
5 | :members:
--------------------------------------------------------------------------------
/docs/source/optimize.rst:
--------------------------------------------------------------------------------
1 | optimize
2 | =========
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | optimize/__optimize__
8 | optimize/__finite_difference__
9 | optimize/__kalman_smooth__
10 | optimize/__linear_model__
11 | optimize/__smooth_finite_difference__
12 | optimize/__total_variation_regularization__
13 |
--------------------------------------------------------------------------------
/docs/source/optimize/__finite_difference__.rst:
--------------------------------------------------------------------------------
1 | __finite_difference__
2 | ======================
3 |
4 | .. currentmodule:: pynumdiff.optimize.finite_difference.__finite_difference__
5 |
6 | .. automodule:: pynumdiff.optimize.finite_difference.__finite_difference__
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/optimize/__kalman_smooth__.rst:
--------------------------------------------------------------------------------
1 | __kalman_smooth__
2 | ==================
3 |
4 | .. currentmodule:: pynumdiff.optimize.kalman_smooth.__kalman_smooth__
5 |
6 | .. automodule:: pynumdiff.optimize.kalman_smooth.__kalman_smooth__
7 | :members:
--------------------------------------------------------------------------------
/docs/source/optimize/__linear_model__.rst:
--------------------------------------------------------------------------------
1 | __linear_model__
2 | ================
3 |
4 | .. currentmodule:: pynumdiff.optimize.linear_model.__linear_model__
5 |
6 | .. automodule:: pynumdiff.optimize.linear_model.__linear_model__
7 | :members:
--------------------------------------------------------------------------------
/docs/source/optimize/__optimize__.rst:
--------------------------------------------------------------------------------
1 | __optimize__
2 | ==============
3 |
4 | .. currentmodule:: pynumdiff.optimize.__optimize__
5 |
6 | .. automodule:: pynumdiff.optimize.__optimize__
7 | :special-members: __optimize__
8 |
--------------------------------------------------------------------------------
/docs/source/optimize/__smooth_finite_difference__.rst:
--------------------------------------------------------------------------------
1 | __smooth_finite_difference__
2 | ============================
3 |
4 | .. currentmodule:: pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__
5 |
6 | .. automodule:: pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__
7 | :members:
--------------------------------------------------------------------------------
/docs/source/optimize/__total_variation_regularization__.rst:
--------------------------------------------------------------------------------
1 | __total_variation_regularization__
2 | ===================================
3 |
4 | .. currentmodule:: pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__
5 |
6 | .. automodule:: pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__
7 | :members:
--------------------------------------------------------------------------------
/docs/source/smooth_finite_difference.rst:
--------------------------------------------------------------------------------
1 | smooth_finite_difference
2 | ========================
3 |
4 | .. automodule:: pynumdiff.smooth_finite_difference
5 | :members:
--------------------------------------------------------------------------------
/docs/source/total_variation_regularization.rst:
--------------------------------------------------------------------------------
1 | total_variation_regularization
2 | ==============================
3 |
4 | .. automodule:: pynumdiff.total_variation_regularization
5 | :members:
--------------------------------------------------------------------------------
/docs/source/utils.rst:
--------------------------------------------------------------------------------
1 | utils
2 | =====
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | utils/_pi_cruise_control
8 | utils/evaluate
9 | utils/simulate
10 | utils/utility
--------------------------------------------------------------------------------
/docs/source/utils/_pi_cruise_control.rst:
--------------------------------------------------------------------------------
1 | _pi_cruise_control
2 | ==================
3 |
4 | .. currentmodule:: pynumdiff.utils._pi_cruise_control
5 |
6 | .. automodule:: pynumdiff.utils._pi_cruise_control
7 | :members:
--------------------------------------------------------------------------------
/docs/source/utils/evaluate.rst:
--------------------------------------------------------------------------------
1 | evaluate
2 | ========
3 |
4 | .. currentmodule:: pynumdiff.utils.evaluate
5 |
6 | .. automodule:: pynumdiff.utils.evaluate
7 | :members:
--------------------------------------------------------------------------------
/docs/source/utils/simulate.rst:
--------------------------------------------------------------------------------
1 | simulate
2 | ========
3 |
4 | .. currentmodule:: pynumdiff.utils.simulate
5 |
6 | .. automodule:: pynumdiff.utils.simulate
7 | :members:
--------------------------------------------------------------------------------
/docs/source/utils/utility.rst:
--------------------------------------------------------------------------------
1 | utility
2 | =======
3 |
4 | .. currentmodule:: pynumdiff.utils.utility
5 |
6 | .. automodule:: pynumdiff.utils.utility
7 | :members:
--------------------------------------------------------------------------------
/linting.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import re
3 | import sys
4 | import argparse
5 | from pylint import lint
6 |
7 | THRESHOLD = 8.5
8 |
9 | if len(sys.argv) < 2:
10 |     raise argparse.ArgumentError(None, "Module to evaluate needs to be the first argument")
11 |
12 | sys.argv[1] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[1])
13 | run = lint.Run([sys.argv[1]], do_exit=False)
14 | score = run.linter.stats['global_note']
15 |
16 | if score < THRESHOLD:
17 | print("Your code doesn't pass the PEP8 style score threshold: %f!" % THRESHOLD)
18 | sys.exit(1)
19 |
20 | print("Congratulations! Your code has passed the PEP8 style score threshold: %f!" % THRESHOLD)
--------------------------------------------------------------------------------
/pynumdiff/.gitignore:
--------------------------------------------------------------------------------
1 | _version.py
--------------------------------------------------------------------------------
/pynumdiff/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Import useful functions from all modules
3 | """
4 | from pynumdiff._version import __version__
5 | from pynumdiff.finite_difference import first_order, second_order
6 | from pynumdiff.smooth_finite_difference import mediandiff, meandiff, gaussiandiff, \
7 | friedrichsdiff, butterdiff, splinediff
8 | from pynumdiff.total_variation_regularization import *
9 | from pynumdiff.linear_model import *
10 | from pynumdiff.kalman_smooth import constant_velocity, constant_acceleration, constant_jerk, \
11 | known_dynamics
12 |
13 |
--------------------------------------------------------------------------------
/pynumdiff/finite_difference/__init__.py:
--------------------------------------------------------------------------------
1 | """This module implements some common finite difference schemes
2 | """
3 | from ._finite_difference import first_order, second_order
4 |
5 | __all__ = ['first_order', 'second_order'] # So these get treated as direct members of the module by sphinx
6 |
--------------------------------------------------------------------------------
/pynumdiff/finite_difference/_finite_difference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pynumdiff.utils import utility
3 | from warnings import warn
4 |
5 |
6 | def first_order(x, dt, params=None, options={}, num_iterations=None):
7 | """First-order centered difference method
8 |
9 | :param np.array[float] x: array of time series to differentiate
10 | :param float dt: time step size
11 | :param list[float] or float params: (**deprecated**, prefer :code:`num_iterations`)
12 | :param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary consisting of {'iterate': (bool)}
13 | :param int num_iterations: If performing iterated FD to smooth the estimates, give the number of iterations.
14 | If ungiven, FD will not be iterated.
15 |
16 | :return: tuple[np.array, np.array] of\n
17 | - **x_hat** -- estimated (smoothed) x
18 | - **dxdt_hat** -- estimated derivative of x
19 | """
20 |     if params is not None and 'iterate' in options:
21 | warn("""`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.""")
22 | if isinstance(params, list): params = params[0]
23 | return _iterate_first_order(x, dt, params)
24 | elif num_iterations:
25 | return _iterate_first_order(x, dt, num_iterations)
26 |
27 | dxdt_hat = np.diff(x) / dt # Calculate the finite difference
28 | dxdt_hat = np.hstack((dxdt_hat[0], dxdt_hat, dxdt_hat[-1])) # Pad the data
29 |     dxdt_hat = np.mean((dxdt_hat[0:-1], dxdt_hat[1:]), axis=0) # Average adjacent differences to center the estimate (linear interpolation)
30 |
31 | return x, dxdt_hat
32 |
33 |
34 | def second_order(x, dt):
35 | """Second-order centered difference method
36 |
37 | :param np.array[float] x: array of time series to differentiate
38 | :param float dt: time step size
39 |
40 | :return: tuple[np.array, np.array] of\n
41 | - **x_hat** -- estimated (smoothed) x
42 | - **dxdt_hat** -- estimated derivative of x
43 | """
44 | dxdt_hat = (x[2:] - x[0:-2]) / (2 * dt)
45 | first_dxdt_hat = (-3 * x[0] + 4 * x[1] - x[2]) / (2 * dt)
46 | last_dxdt_hat = (3 * x[-1] - 4 * x[-2] + x[-3]) / (2 * dt)
47 | dxdt_hat = np.hstack((first_dxdt_hat, dxdt_hat, last_dxdt_hat))
48 | return x, dxdt_hat
49 |
50 |
51 | def _x_hat_using_finite_difference(x, dt):
52 | """Find a smoothed estimate of the true function by taking FD and then integrating with trapezoids
53 | """
54 | x_hat, dxdt_hat = first_order(x, dt)
55 | x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
56 | x0 = utility.estimate_initial_condition(x, x_hat)
57 | return x_hat + x0
58 |
59 |
60 | def _iterate_first_order(x, dt, num_iterations):
61 | """Iterative first order centered finite difference.
62 |
63 | :param np.array[float] x: array of time series to differentiate
64 | :param float dt: time step size
65 | :param int num_iterations: number of iterations
66 |
67 | :return: tuple[np.array, np.array] of\n
68 | - **x_hat** -- estimated (smoothed) x
69 | - **dxdt_hat** -- estimated derivative of x
70 | """
71 | w = np.arange(len(x)) / (len(x) - 1) # set up weights, [0., ... 1.0]
72 |
73 | # forward backward passes
74 | for _ in range(num_iterations):
75 | xf = _x_hat_using_finite_difference(x, dt)
76 | xb = _x_hat_using_finite_difference(x[::-1], dt)
77 | x = xf * w + xb[::-1] * (1 - w)
78 |
79 | x_hat, dxdt_hat = first_order(x, dt)
80 |
81 | return x_hat, dxdt_hat
82 |
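83 | 
84 | # Illustrative usage sketch (the noisy sine wave and iteration count are
85 | # examples, not recommendations):
86 | #
87 | #     import numpy as np
88 | #     from pynumdiff.finite_difference import first_order
89 | #
90 | #     dt = 0.01
91 | #     t = np.arange(0, 4, dt)
92 | #     x = np.sin(2*np.pi*t) + 0.05*np.random.randn(len(t))
93 | #     _, dxdt_raw = first_order(x, dt)                        # plain first-order difference
94 | #     _, dxdt_smooth = first_order(x, dt, num_iterations=10)  # iterated, smoother estimate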
--------------------------------------------------------------------------------
/pynumdiff/kalman_smooth/__init__.py:
--------------------------------------------------------------------------------
1 | """This module implements Kalman filters
2 | """
3 | from ._kalman_smooth import constant_velocity, constant_acceleration, constant_jerk, known_dynamics, savgol_const_accel
4 |
5 | __all__ = ['constant_velocity', 'constant_acceleration', 'constant_jerk', 'known_dynamics', 'savgol_const_accel'] # So these get treated as direct members of the module by sphinx
6 |
--------------------------------------------------------------------------------
/pynumdiff/kalman_smooth/_kalman_smooth.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import numpy as np
3 |
4 | from pynumdiff.linear_model import savgoldiff
5 |
6 | ####################
7 | # Helper functions #
8 | ####################
9 |
10 |
11 | def __kalman_forward_update__(xhat_fm, P_fm, y, u, A, B, C, R, Q):
12 | """
13 | :param xhat_fm:
14 | :param P_fm:
15 | :param y:
16 | :param u:
17 | :param A:
18 | :param B:
19 | :param C:
20 | :param R:
21 | :param Q:
22 | :return:
23 | """
24 | I = np.array(np.eye(A.shape[0]))
25 | gammaW = np.array(np.eye(A.shape[0]))
26 |
27 | K_f = P_fm@C.T@np.linalg.pinv(C@P_fm@C.T + R)
28 |
29 | if y is not None and not np.isnan(y):
30 | xhat_fp = xhat_fm + K_f@(y - C@xhat_fm)
31 | P_fp = (I - K_f@C)@P_fm
32 | xhat_fm = A@xhat_fp + B@u
33 | P_fm = A@P_fp@A.T + gammaW@Q@gammaW.T
34 | else:
35 | xhat_fp = xhat_fm
36 | P_fp = (I - K_f@C)@P_fm
37 | xhat_fm = A@xhat_fp + B@u
38 | P_fm = A@P_fp@A.T + gammaW@Q@gammaW.T
39 |
40 | return xhat_fp, xhat_fm, P_fp, P_fm
41 |
42 |
43 | def __kalman_forward_filter__(xhat_fm, P_fm, y, u, A, B, C, R, Q):
44 | """
45 | :param xhat_fm:
46 | :param P_fm:
47 | :param y:
48 | :param u:
49 | :param A:
50 | :param B:
51 | :param C:
52 | :param R:
53 | :param Q:
54 | :return:
55 | """
56 | if u is None:
57 | u = np.array(np.zeros([B.shape[1], y.shape[1]]))
58 |
59 | xhat_fp = None
60 | P_fp = []
61 | P_fm = [P_fm]
62 |
63 | for i in range(y.shape[1]):
64 | _xhat_fp, _xhat_fm, _P_fp, _P_fm = __kalman_forward_update__(xhat_fm[:, [-1]], P_fm[-1], y[:, [i]], u[:, [i]],
65 | A, B, C, R, Q)
66 | if xhat_fp is None:
67 | xhat_fp = _xhat_fp
68 | else:
69 | xhat_fp = np.hstack((xhat_fp, _xhat_fp))
70 | xhat_fm = np.hstack((xhat_fm, _xhat_fm))
71 |
72 | P_fp.append(_P_fp)
73 | P_fm.append(_P_fm)
74 |
75 | return xhat_fp, xhat_fm, P_fp, P_fm
76 |
77 |
78 | def __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A):
79 | """
80 | :param xhat_fp:
81 | :param xhat_fm:
82 | :param P_fp:
83 | :param P_fm:
84 | :param A:
85 | :return:
86 | """
87 | N = xhat_fp.shape[1]
88 |
89 | xhat_smooth = copy.copy(xhat_fp)
90 | P_smooth = copy.copy(P_fp)
91 | for t in range(N-2, -1, -1):
92 | L = P_fp[t]@A.T@np.linalg.pinv(P_fm[t])
93 | xhat_smooth[:, [t]] = xhat_fp[:, [t]] + L@(xhat_smooth[:, [t+1]] - xhat_fm[:, [t+1]])
94 | P_smooth[t] = P_fp[t] - L@(P_smooth[t+1] - P_fm[t+1])
95 |
96 | return xhat_smooth, P_smooth
97 |
98 |
99 | #####################
100 | # Constant Velocity #
101 | #####################
102 |
103 |
104 | def __constant_velocity__(x, dt, params, options=None):
105 |     """
106 |     Run an RTS Kalman smoother with a constant velocity model to estimate the derivative.
107 | 
108 |     :param x: (np.array of floats, 1xN) time series to differentiate
109 |     :param dt: (float) time step size
110 |     :param params: (list) [r: (float) covariance of the x noise,
111 |                            q: (float) covariance of the constant velocity model]
112 |     :param options: (dict) {'backward': (bool)} run smoother backwards in time
113 |     :return: tuple of x_hat (estimated, smoothed x) and dxdt_hat (estimated derivative of x)
114 |     """
115 | if options is None:
116 | options = {'backward': False}
117 |
118 | r, q = params
119 |
120 | if len(x.shape) == 2:
121 | y = x
122 | else:
123 | y = np.reshape(x, [1, len(x)])
124 |
125 | A = np.array([[1, dt], [0, 1]])
126 | B = np.array([[0], [0]])
127 | C = np.array([[1, 0]])
128 | R = np.array([[r]])
129 | Q = np.array([[1e-16, 0], [0, q]])
130 | x0 = np.array([[x[0,0]], [0]])
131 | P0 = np.array(100*np.eye(2))
132 | u = None
133 |
134 |
135 |
136 | if options['backward']:
137 | A = np.linalg.pinv(A)
138 | y = y[:, ::-1]
139 |
140 | xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)
141 | xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)
142 |
143 | x_hat = np.ravel(xhat_smooth[0, :])
144 | dxdt_hat = np.ravel(xhat_smooth[1, :])
145 |
146 | if not options['backward']:
147 | return x_hat, dxdt_hat
148 |
149 | return x_hat[::-1], dxdt_hat[::-1]
150 |
151 |
152 | def constant_velocity(x, dt, params, options=None):
153 | """
154 | Run a forward-backward constant velocity RTS Kalman smoother to estimate the derivative.
155 |
156 | :param x: array of time series to differentiate
157 | :type x: np.array (float)
158 |
159 | :param dt: time step size
160 | :type dt: float
161 |
162 | :param params: a list of two elements:
163 |
164 | - r: covariance of the x noise
165 | - q: covariance of the constant velocity model
166 |
167 | :type params: list (float)
168 |
169 |
170 |     :param options: a dictionary indicating whether to run smoother forwards and backwards
171 |                     (usually achieves better estimate at end points)
172 |     :type options: dict {'forwardbackward': boolean}, optional
173 |
174 | :return: a tuple consisting of:
175 |
176 | - x_hat: estimated (smoothed) x
177 | - dxdt_hat: estimated derivative of x
178 |
179 |
180 | :rtype: tuple -> (np.array, np.array)
181 | """
182 | if len(x.shape) == 2:
183 | pass
184 | else:
185 | x = np.reshape(x, [1, len(x)])
186 |
187 | if options is None:
188 | options = {'forwardbackward': True}
189 |
190 | if options['forwardbackward']:
191 | x_hat_f, smooth_dxdt_hat_f = __constant_velocity__(x, dt, params, options={'backward': False})
192 | x_hat_b, smooth_dxdt_hat_b = __constant_velocity__(x, dt, params, options={'backward': True})
193 |
194 | w = np.arange(0, len(x_hat_f), 1)
195 | w = w/np.max(w)
196 |
197 | x_hat = x_hat_f*w + x_hat_b*(1-w)
198 | smooth_dxdt_hat = smooth_dxdt_hat_f*w + smooth_dxdt_hat_b*(1-w)
199 |
200 | smooth_dxdt_hat_corrected = np.mean((smooth_dxdt_hat, smooth_dxdt_hat_f), axis=0)
201 |
202 | return x_hat, smooth_dxdt_hat_corrected
203 |
204 | return __constant_velocity__(x, dt, params, options={'backward': False})
205 |
206 |
207 | #########################
208 | # Constant Acceleration #
209 | #########################
210 |
211 |
212 | def __constant_acceleration__(x, dt, params, options=None):
213 | """
214 | Run a forward-backward constant acceleration RTS Kalman smoother to estimate the derivative.
215 |
216 | :param x: array of time series to differentiate
217 | :type x: np.array (float)
218 |
219 | :param dt: time step size
220 | :type dt: float
221 |
222 |     :param params: a list of two elements:
223 | 
224 |         - r: covariance of the x noise
225 |         - q: covariance of the constant acceleration model
226 | 
227 |     :type params: list (float)
228 | 
229 | 
230 |     :param options: a dictionary indicating whether to run smoother backwards in time
231 |     :type options: dict {'backward': boolean}, optional
232 |
233 | :return: a tuple consisting of:
234 |
235 | - x_hat: estimated (smoothed) x
236 | - dxdt_hat: estimated derivative of x
237 |
238 | :rtype: tuple -> (np.array, np.array)
239 | """
240 |
241 | if options is None:
242 | options = {'backward': False}
243 |
244 | r, q = params
245 |
246 | if len(x.shape) == 2:
247 | y = x
248 | else:
249 | y = np.reshape(x, [1, len(x)])
250 |
251 | A = np.array([[1, dt, 0],
252 | [0, 1, dt],
253 | [0, 0, 1]])
254 | B = np.array([[0], [0], [0]])
255 | C = np.array([[1, 0, 0]])
256 | R = np.array([[r]])
257 | Q = np.array([[1e-16, 0, 0],
258 | [0, 1e-16, 0],
259 | [0, 0, q]])
260 | x0 = np.array([[x[0,0]], [0], [0]])
261 | P0 = np.array(10*np.eye(3))
262 | u = None
263 |
264 | if options['backward']:
265 | A = np.linalg.pinv(A)
266 | y = y[:, ::-1]
267 |
268 | xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)
269 | xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)
270 |
271 | x_hat = np.ravel(xhat_smooth[0, :])
272 | dxdt_hat = np.ravel(xhat_smooth[1, :])
273 |
274 | if not options['backward']:
275 | return x_hat, dxdt_hat
276 |
277 | return x_hat[::-1], dxdt_hat[::-1]
278 |
279 |
280 | def constant_acceleration(x, dt, params, options=None):
281 | """
282 | Run a forward-backward constant acceleration RTS Kalman smoother to estimate the derivative.
283 |
284 | :param x: array of time series to differentiate
285 | :type x: np.array (float)
286 |
287 | :param dt: time step size
288 | :type dt: float
289 |
290 |     :param params: a list of two elements:
291 | 
292 |         - r: covariance of the x noise
293 |         - q: covariance of the constant acceleration model
294 | 
295 |     :type params: list (float)
296 | 
297 | 
298 |     :param options: a dictionary indicating whether to run smoother forwards and backwards
299 |                     (usually achieves better estimate at end points)
300 |     :type options: dict {'forwardbackward': boolean}, optional
301 |
302 | :return: a tuple consisting of:
303 |
304 | - x_hat: estimated (smoothed) x
305 | - dxdt_hat: estimated derivative of x
306 |
307 |
308 | :rtype: tuple -> (np.array, np.array)
309 | """
310 | if len(x.shape) == 2:
311 | pass
312 | else:
313 | x = np.reshape(x, [1, len(x)])
314 |
315 | if options is None:
316 | options = {'forwardbackward': True}
317 |
318 | if options['forwardbackward']:
319 | x_hat_f, smooth_dxdt_hat_f = __constant_acceleration__(x, dt, params, options={'backward': False})
320 | x_hat_b, smooth_dxdt_hat_b = __constant_acceleration__(x, dt, params, options={'backward': True})
321 |
322 | w = np.arange(0, len(x_hat_f), 1)
323 | w = w/np.max(w)
324 |
325 | x_hat = x_hat_f*w + x_hat_b*(1-w)
326 | smooth_dxdt_hat = smooth_dxdt_hat_f*w + smooth_dxdt_hat_b*(1-w)
327 |
328 | smooth_dxdt_hat_corrected = np.mean((smooth_dxdt_hat, smooth_dxdt_hat_f), axis=0)
329 |
330 | return x_hat, smooth_dxdt_hat_corrected
331 |
332 | return __constant_acceleration__(x, dt, params, options={'backward': False})
333 |
334 |
335 | #################
336 | # Constant Jerk #
337 | #################
338 |
339 |
340 | def __constant_jerk__(x, dt, params, options=None):
341 | """
342 | Run a forward-backward constant jerk RTS Kalman smoother to estimate the derivative.
343 |
344 | :param x: array of time series to differentiate
345 | :type x: np.array (float)
346 |
347 | :param dt: time step size
348 | :type dt: float
349 |
350 |     :param params: a list of two elements:
351 | 
352 |         - r: covariance of the x noise
353 |         - q: covariance of the constant jerk model
354 | 
355 |     :type params: list (float)
356 | 
357 | 
358 |     :param options: a dictionary indicating whether to run smoother backwards in time
359 |     :type options: dict {'backward': boolean}, optional
360 |
361 | :return: a tuple consisting of:
362 |
363 | - x_hat: estimated (smoothed) x
364 | - dxdt_hat: estimated derivative of x
365 |
366 | :rtype: tuple -> (np.array, np.array)
367 | """
368 |
369 | if options is None:
370 | options = {'backward': False}
371 |
372 | r, q = params
373 |
374 | if len(x.shape) == 2:
375 | y = x
376 | else:
377 | y = np.reshape(x, [1, len(x)])
378 |
379 | A = np.array([[1, dt, 0, 0],
380 | [0, 1, dt, 0],
381 | [0, 0, 1, dt],
382 | [0, 0, 0, 1]])
383 | B = np.array([[0], [0], [0], [0]])
384 | C = np.array([[1, 0, 0, 0]])
385 | R = np.array([[r]])
386 | Q = np.array([[1e-16, 0, 0, 0],
387 | [0, 1e-16, 0, 0],
388 | [0, 0, 1e-16, 0],
389 | [0, 0, 0, q]])
390 | x0 = np.array([[x[0,0]], [0], [0], [0]])
391 | P0 = np.array(10*np.eye(4))
393 | u = None
394 |
395 | if options['backward']:
396 | A = np.linalg.pinv(A)
397 | y = y[:, ::-1]
398 |
399 | xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)
400 | xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)
401 |
402 | x_hat = np.ravel(xhat_smooth[0,:])
403 | dxdt_hat = np.ravel(xhat_smooth[1,:])
404 |
405 | if not options['backward']:
406 | return x_hat, dxdt_hat
407 |
408 | return x_hat[::-1], dxdt_hat[::-1]
409 |
410 |
411 | def constant_jerk(x, dt, params, options=None):
412 | """
413 | Run a forward-backward constant jerk RTS Kalman smoother to estimate the derivative.
414 |
415 | :param x: array of time series to differentiate
416 | :type x: np.array (float)
417 |
418 | :param dt: time step size
419 | :type dt: float
420 |
421 |     :param params: a list of two elements:
422 | 
423 |         - r: covariance of the x noise
424 |         - q: covariance of the constant jerk model
425 | 
426 |     :type params: list (float)
427 | 
428 | 
429 |     :param options: a dictionary indicating whether to run smoother forwards and backwards
430 |                     (usually achieves better estimate at end points)
431 |     :type options: dict {'forwardbackward': boolean}, optional
432 |
433 | :return: a tuple consisting of:
434 |
435 | - x_hat: estimated (smoothed) x
436 | - dxdt_hat: estimated derivative of x
437 |
438 |
439 | :rtype: tuple -> (np.array, np.array)
440 | """
441 | if len(x.shape) == 2:
442 | pass
443 | else:
444 | x = np.reshape(x, [1, len(x)])
445 |
446 | if options is None:
447 | options = {'forwardbackward': True}
448 |
449 | if options['forwardbackward']:
450 | x_hat_f, smooth_dxdt_hat_f = __constant_jerk__(x, dt, params, options={'backward': False})
451 | x_hat_b, smooth_dxdt_hat_b = __constant_jerk__(x, dt, params, options={'backward': True})
452 |
453 | w = np.arange(0, len(x_hat_f), 1)
454 | w = w/np.max(w)
455 |
456 | x_hat = x_hat_f*w + x_hat_b*(1-w)
457 | smooth_dxdt_hat = smooth_dxdt_hat_f*w + smooth_dxdt_hat_b*(1-w)
458 |
459 | smooth_dxdt_hat_corrected = np.mean((smooth_dxdt_hat, smooth_dxdt_hat_f), axis=0)
460 |
461 | return x_hat, smooth_dxdt_hat_corrected
462 |
463 | return __constant_jerk__(x, dt, params, options={'backward': False})
464 |
465 |
466 | def known_dynamics(x, params, u=None, options=None):
467 | """
468 | Run a forward RTS Kalman smoother given known dynamics to estimate the derivative.
469 |
470 | :param x: matrix of time series of (noisy) measurements
471 | :type x: np.array (float)
472 |
473 | :param params: a list of:
474 |         - x0: initial condition, matrix of Nx1, N = number of states
475 | - P0: initial covariance matrix of NxN
476 | - A: dynamics matrix, NxN
477 | - B: control input matrix, NxM, M = number of measurements
478 | - C: measurement dynamics, MxN
479 | - R: covariance matrix for the measurements, MxM
480 | - Q: covariance matrix for the model, NxN
481 | :type params: list (matrix)
482 |
483 | :param u: matrix of time series of control inputs
484 | :type u: np.array (float)
485 |
486 |     :param options: a dictionary indicating whether to run the backward smoothing pass
487 |     :type options: dict {'smooth': boolean}, optional
488 | 
489 |     :return: matrix:
490 |         - xhat_smooth: smoothed estimates of the full state x
491 | 
492 |     :rtype: np.array
493 | """
494 | if len(x.shape) == 2:
495 | y = x
496 | else:
497 | y = np.reshape(x, [1, len(x)])
498 |
499 | if options is None:
500 | options = {'smooth': True}
501 |
502 | x0, P0, A, B, C, R, Q = params
503 |
504 | xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)
505 | xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)
506 |
507 | if not options['smooth']:
508 | return xhat_fp
509 |
510 | return xhat_smooth
511 |
512 |
513 |
514 | ###################################################################################################
515 | # Constant Acceleration with Savitzky-Golay pre-estimate (not worth the parameter tuning trouble) #
516 | ###################################################################################################
517 |
518 |
519 | def __savgol_const_accel__(x, sg_dxdt_hat, dt, params, options=None):
520 | """
521 |     Run a forward-backward constant acceleration RTS Kalman smoother to estimate the derivative, where initial estimates of the velocity come from a Savitzky-Golay filter.
522 |
523 | :param x: array of time series to differentiate
524 | :type x: np.array (float)
525 |
526 | :param sg_dxdt_hat: initial velocity estimate
527 | :type sg_dxdt_hat: np.array (float)
528 |
529 | :param dt: time step size
530 | :type dt: float
531 |
532 |     :param params: a list of three elements:
533 | - r1: covariance of the x noise
534 | - r2: covariance of the vel noise
535 | - q: covariance of the constant velocity model
536 |
537 | :type params: list (float)
538 |
539 |
540 |     :param options: a dictionary indicating whether to run smoother backwards in time
541 |     :type options: dict {'backward': boolean}, optional
542 |
543 | :return: a tuple consisting of:
544 |
545 | - x_hat: estimated (smoothed) x
546 | - dxdt_hat: estimated derivative of x
547 |
548 | :rtype: tuple -> (np.array, np.array)
549 | """
550 |
551 | if options is None:
552 | options = {'backward': False}
553 |
554 | r1, r2, q = params
555 | A = np.array([[1, dt, 0],
556 | [0, 1, dt],
557 | [0, 0, 1]])
558 | B = np.array([[0], [0], [0]])
559 | C = np.array([[1, 0, 0],
560 | [0, 1, 0]])
561 | R = np.array([[r1, 0],
562 | [0, r2]])
563 | Q = np.array([[1e-16, 0, 0],
564 | [0, 1e-16, 0],
565 | [0, 0, q]])
566 | x0 = np.array([[x[0]], [sg_dxdt_hat[0]], [0]])
567 | P0 = np.array(10*np.eye(3))
568 | y = np.array(np.vstack((x, sg_dxdt_hat)))
569 | u = None
570 |
571 | if options['backward']:
572 | A = np.linalg.pinv(A)
573 | y = y[:, ::-1]
574 |
575 | xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)
576 | xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)
577 |
578 | x_hat = np.ravel(xhat_smooth[0, :])
579 | dxdt_hat = np.ravel(xhat_smooth[1, :])
580 |
581 | if not options['backward']:
582 | return x_hat, dxdt_hat
583 |
584 | return x_hat[::-1], dxdt_hat[::-1]
585 |
586 |
587 | def savgol_const_accel(x, dt, params, options=None):
588 | """
589 |     Run a forward-backward constant acceleration RTS Kalman smoother to estimate the derivative, where initial estimates of the velocity come from a Savitzky-Golay filter.
590 |
591 | :param x: array of time series to differentiate
592 | :type x: np.array (float)
593 |
594 | :param dt: time step size
595 | :type dt: float
596 |
597 | :param params: a list of six elements:
598 | - N: for savgoldiff, order of the polynomial
599 | - window_size: for savgoldiff, size of the sliding window, must be odd (if not, 1 is added)
600 |         - smoothing_win: for savgoldiff, size of the window used for gaussian smoothing; a good default is window_size, but use smaller values for high frequency data
601 | - r1: covariance of the x noise
602 | - r2: covariance of the vel noise
603 | - q: covariance of the constant velocity model
604 |
605 | :type params: list (float)
606 |
607 |
608 | :param options: a dictionary indicating whether to run smoother forwards and backwards
609 | (usually achieves better estimate at end points)
610 |     :type options: dict {'forwardbackward': boolean}, optional
611 |
612 | :return: a tuple consisting of:
613 |
614 | - x_hat: estimated (smoothed) x
615 | - dxdt_hat: estimated derivative of x
616 |
617 |
618 | :rtype: tuple -> (np.array, np.array)
619 | """
620 | if options is None:
621 | options = {'forwardbackward': True}
622 |
623 | N, window_size, smoothing_win, r1, r2, q = params
624 |
625 | _, sg_dxdt_hat = savgoldiff(x, dt, [N, window_size, smoothing_win])
626 |
627 | if options['forwardbackward']:
628 | x_hat_f, smooth_dxdt_hat_f = __savgol_const_accel__(x, sg_dxdt_hat, dt, [r1, r2, q],
629 | options={'backward': False})
630 | x_hat_b, smooth_dxdt_hat_b = __savgol_const_accel__(x, sg_dxdt_hat, dt, [r1, r2, q],
631 | options={'backward': True})
632 |
633 | w = np.arange(0, len(x_hat_f), 1)
634 | w = w/np.max(w)
635 |
636 | x_hat = x_hat_f*w + x_hat_b*(1-w)
637 | smooth_dxdt_hat = smooth_dxdt_hat_f*w + smooth_dxdt_hat_b*(1-w)
638 |
639 | smooth_dxdt_hat_corrected = np.mean((smooth_dxdt_hat, smooth_dxdt_hat_f), axis=0)
640 |
641 | return x_hat, smooth_dxdt_hat_corrected
642 |
643 |     return __savgol_const_accel__(x, sg_dxdt_hat, dt, [r1, r2, q], options={'backward': False})
644 |
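645 | # Illustrative usage sketch (the [r, q] values are assumptions, not tuned
646 | # recommendations):
647 | #
648 | #     import numpy as np
649 | #     from pynumdiff.kalman_smooth import constant_acceleration
650 | #
651 | #     dt = 0.01
652 | #     t = np.arange(0, 2, dt)
653 | #     x = np.sin(2*np.pi*t) + 0.1*np.random.randn(len(t))
654 | #     x_hat, dxdt_hat = constant_acceleration(x, dt, params=[1e-2, 1e2])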
--------------------------------------------------------------------------------
/pynumdiff/linear_model/__init__.py:
--------------------------------------------------------------------------------
1 | """This module implements interpolation-based differentiation schemes.
2 | """
3 | try:
4 | import cvxpy
5 | from ._linear_model import lineardiff
6 | except ImportError:
7 | from warnings import warn
8 | warn("""Limited Linear Model Support Detected! CVXPY is not installed.
9 | Install CVXPY to use lineardiff derivatives. You can still use other methods.""")
10 |
11 | from ._linear_model import savgoldiff, polydiff, spectraldiff
12 |
13 | __all__ = ['lineardiff', 'savgoldiff', 'polydiff', 'spectraldiff'] # So these get treated as direct members of the module by sphinx
14 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Import useful functions from the optimize module
3 | """
4 |
5 | import logging as _logging
6 | _logging.basicConfig(
7 | level=_logging.INFO,
8 | format="%(asctime)s [%(levelname)s] %(message)s",
9 | handlers=[
10 | _logging.FileHandler("debug.log"),
11 | _logging.StreamHandler()
12 | ]
13 | )
14 |
15 | try:
16 | import cvxpy
17 | __cvxpy_installed__ = True
18 | except ImportError:
19 |     _logging.info('Limited support for total variation regularization and linear model detected!\n\
20 |     ---> Some functions in pynumdiff.optimize.total_variation_regularization require CVXPY to be installed.\n\
21 |     ---> Some functions in pynumdiff.linear_model require CVXPY to be installed.\n\
22 |     You can still use pynumdiff.optimize for other functions.')
23 | __cvxpy_installed__ = False
24 |
25 | from pynumdiff.optimize import finite_difference
26 | from pynumdiff.optimize import smooth_finite_difference
27 | from pynumdiff.optimize import total_variation_regularization
28 | from pynumdiff.optimize import linear_model
29 | from pynumdiff.optimize import kalman_smooth
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/__optimize__.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 | Optimization functions
4 | """
5 | from multiprocessing import Pool
6 | import scipy.optimize
7 | import numpy as np
8 |
9 | from pynumdiff.utils import utility
10 | from pynumdiff.utils import evaluate
11 |
12 |
13 | def __correct_params__(params, params_types, params_low, params_high):
14 | """
15 | :param params:
16 | :param params_types:
17 | :param params_low:
18 | :param params_high:
19 | :return:
20 | """
21 | _params = []
22 | for p, param in enumerate(params):
23 | param = params_types[p](param)
24 | param = np.max([param, params_low[p]])
25 | param = np.min([param, params_high[p]])
26 | param = params_types[p](param)
27 | _params.append(param)
28 | return _params
29 |
30 |
31 | def __objective_function__(params, *args):
32 | """
33 | :param params:
34 | :param args:
35 | :return:
36 | """
37 | func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric = args[0]
38 |
39 | # keep param in bounds and correct type
40 | params = __correct_params__(params, params_types, params_low, params_high)
41 |
42 | # estimate x and dxdt
43 | x_hat, dxdt_hat = func(x, dt, params, options)
44 |
45 | # evaluate estimate
46 | # pylint: disable=no-else-return
47 | if dxdt_truth is not None: # then minimize ||dxdt_hat - dxdt_truth||2
48 | if metric == 'rmse':
49 | rms_rec_x, rms_x, rms_dxdt = evaluate.metrics(x, dt, x_hat, dxdt_hat, x_truth=None,
50 | dxdt_truth=dxdt_truth, padding=padding)
51 | # print('rms_rec_x: ', rms_rec_x, 'tv x hat: ', utility.total_variation(x_hat))
52 | return rms_dxdt
53 | elif metric == 'error_correlation':
54 | error_correlation = evaluate.error_correlation(dxdt_hat, dxdt_truth, padding=padding)
55 | return error_correlation
56 | else:
57 | raise ValueError('metric should either be rmse or error_correlation!')
58 | else: # then minimize [ || integral(dxdt_hat) - x||2 + gamma*TV(dxdt_hat) ]
59 | # print('Optimizing with [ || integral(dxdt_hat) - x||2 + gamma*TV(dxdt_hat) ]')
60 | rms_rec_x, rms_x, rms_dxdt = evaluate.metrics(x, dt, x_hat, dxdt_hat, x_truth=None,
61 | dxdt_truth=None, padding=padding)
62 |
63 | return rms_rec_x + tvgamma*utility.total_variation(dxdt_hat[padding:-padding])
64 |
65 |
66 | def __go__(input_args):
67 | """
68 | :param input_args:
69 | :return:
70 | """
71 | paramset, args, optimization_method, optimization_options, params_types, params_low, params_high = input_args
72 | try:
73 | result = scipy.optimize.minimize(__objective_function__, paramset, args=args, method=optimization_method,
74 | options=optimization_options)
75 | p = __correct_params__(result.x, params_types, params_low, params_high)
76 |     except Exception:  # treat any optimizer failure as a very large objective value
77 |         return __correct_params__(paramset, params_types, params_low, params_high), 1000000000
78 |
79 | return p, result.fun
80 |
81 |
82 | def __optimize__(params, args, optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}):
83 | """
84 | Find the optimal parameters for a given differentiation method. This function gets called by optimize.METHOD_FAMILY.METHOD.
85 | For example, optimize.smooth_finite_difference.butterdiff will call this function to determine the optimal parameters for butterdiff.
86 | This function is a wrapper that parallelizes the __go__ function, which is a wrapper for __objective_function__.
87 |
88 | :param params: Initial guess for params, list of guesses, or None
89 | :type params: list, list of lists, or None
90 |
91 | :param args: list of the following: function, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric
92 | - function : function to optimize parameters for, i.e. optimize.smooth_finite_difference.butterdiff
93 | - x : (np.array of floats, 1xN) time series to differentiate
94 | - dt : (float) time step
95 | - params_types : (list of types) types for each parameter, i.e. [int, int], or [int, float, float]
96 | - params_low : (list) list of lowest allowable values for each parameter
97 | - params_high : (list) list of highest allowable values for each parameter
98 | - options : (dict) options, see for example finite_difference.first_order
99 | - dxdt_truth : (like x) actual time series of the derivative of x, if known
100 | - tvgamma : (float) regularization value used to select for parameters that yield a smooth derivative. Larger value results in a smoother derivative
101 | - padding : (int) number of time steps to ignore at the beginning and end of the time series in the optimization. Larger value causes the optimization to emphasize the accuracy of dxdt in the middle of the time series
102 | - metric : (string) either 'rmse' or 'error_correlation', only applies if dxdt_truth is not None, see __objective_function__
103 |
104 | :type args: list -> (function reference, np.array, float, list, list, list, dict, np.array, float, int, string)
105 |
106 | :return: a tuple containing:
107 | - optimal parameters
108 | - optimal values of objective function
109 | :rtype: tuple -> (list, float)
110 | """
111 | func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric = args
112 | if padding == 'auto':
113 | padding = max(int(0.025*len(x)), 1)
114 | args[-2] = padding
115 |
116 | # use_cpus = int(0.6*multiprocessing.cpu_count())
117 |
118 | # minimize with multiple initial conditions
119 | if not isinstance(params[0], list):
120 | params = [[p] for p in params]
121 |
122 | all_input_values = []
123 | for paramset in params:
124 | input_args = [paramset, args, optimization_method, optimization_options,
125 | params_types, params_low, params_high]
126 | all_input_values.append(input_args)
127 |
128 | pool = Pool()
129 | result = pool.map(__go__, all_input_values)
130 | pool.close()
131 | pool.join()
132 |
133 | opt_params = []
134 | opt_vals = []
135 | for r in result:
136 | opt_params.append(r[0])
137 | opt_vals.append(r[1])
138 |
139 | opt_vals = np.array(opt_vals)
140 | opt_vals[np.where(np.isnan(opt_vals))] = 1000000000 # np.inf # avoid nans
141 | idx = np.argmin(opt_vals)
142 | opt_params = opt_params[idx]
143 |
144 | return list(opt_params), opt_vals[idx]
145 |
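146 | # Illustrative call-path sketch (x, dt, and the tvgamma value are assumed
147 | # inputs; the optimize wrappers build `args` and call __optimize__):
148 | #
149 | #     import pynumdiff
150 | #     from pynumdiff.optimize.smooth_finite_difference import butterdiff
151 | #
152 | #     params, val = butterdiff(x, dt, tvgamma=1e-2)  # no ground truth needed
153 | #     x_hat, dxdt_hat = pynumdiff.smooth_finite_difference.butterdiff(x, dt, params)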
--------------------------------------------------------------------------------
/pynumdiff/optimize/finite_difference/__finite_difference__.py:
--------------------------------------------------------------------------------
1 | """
2 | functions for optimizing finite difference
3 | """
4 | import pynumdiff.finite_difference
5 | from pynumdiff.optimize.__optimize__ import __optimize__
6 |
7 |
8 | def first_order(x, dt, params=None, options={'iterate': True}, dxdt_truth=None,
9 | tvgamma=1e-2, padding='auto', metric='rmse'):
10 | """
11 |
12 | Optimize the parameters for pynumdiff.finite_difference.first_order
13 | See pynumdiff.optimize.__optimize__ and pynumdiff.finite_difference.first_order for detailed documentation.
14 |
15 | """
16 | # initial condition
17 | if params is None:
18 |         if not options['iterate']:
19 |             return [], None  # nothing to optimize: plain first order FD has no parameters
20 |
21 | params = [[5], [10], [30], [50]]
22 |
23 | # param types and bounds
24 | params_types = [int, ]
25 | params_low = [1, ]
26 | params_high = [1e3, ]
27 |
28 | # function
29 | func = pynumdiff.finite_difference.first_order
30 |
31 | # optimize
32 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
33 | opt_params, opt_val = __optimize__(params, args)
34 |
35 | return opt_params, opt_val
36 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/finite_difference/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | import useful functions from __finite_difference__
3 | """
4 | from pynumdiff.optimize.finite_difference.__finite_difference__ import first_order
5 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/kalman_smooth/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | import useful functions from __kalman_smooth__
3 | """
4 | from pynumdiff.optimize.kalman_smooth.__kalman_smooth__ import constant_velocity
5 | from pynumdiff.optimize.kalman_smooth.__kalman_smooth__ import constant_acceleration
6 | from pynumdiff.optimize.kalman_smooth.__kalman_smooth__ import constant_jerk
7 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/kalman_smooth/__kalman_smooth__.py:
--------------------------------------------------------------------------------
1 | """
2 | functions for optimizing kalman smoothing approaches
3 | """
4 | import numpy as np
5 | import pynumdiff
6 | from pynumdiff.optimize.__optimize__ import __optimize__
7 | from pynumdiff.linear_model import polydiff
8 |
9 | ####################
10 | # Helper functions #
11 | ####################
12 |
13 |
14 | def __estimate_noise__(x, dt, window_size=50):
15 | """
16 | :param x:
17 | :param dt:
18 | :param window_size:
19 | :return:
20 | """
21 | x_hat, dxdt_hat = polydiff(x, dt, [5, window_size],
22 | options={'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'})
23 | noise_hat = x-x_hat
24 | noise_std_hat = np.std(noise_hat)
25 | return noise_std_hat**2
26 |
27 |
28 | def __optimize_kalman__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
29 | optimization_method, optimization_options, metric):
30 | """
31 | :param func:
32 | :param x:
33 | :param dt:
34 | :param params:
35 | :param options:
36 | :param dxdt_truth:
37 | :param tvgamma:
38 | :param padding:
39 | :param optimization_method:
40 | :param optimization_options:
41 | :param metric:
42 | :return:
43 | """
44 | # initial condition
45 |     # r = __estimate_noise__(x, dt)  # estimate noise using a 5th order sliding polynomial smoother
46 | if params is None:
47 | rs = [1e-8, 1e-4, 1e-1, 1e1, 1e4, 1e8]
48 | qs = [1e-8, 1e-4, 1e-1, 1e1, 1e4, 1e8]
49 | params = []
50 | for r in rs:
51 | for q in qs:
52 | params.append([r, q])
53 |
54 | # param types and bounds
55 | params_types = [float, float]
56 | params_low = [1e-10, 1e-10]
57 | params_high = [1e10, 1e10]
58 |
59 | # optimize
60 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
61 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
62 | optimization_options=optimization_options)
63 |
64 | return opt_params, opt_val
65 |
66 | ######################
67 | # Optimize functions #
68 | ######################
69 |
70 |
71 | def constant_velocity(x, dt, params=None, options={'forwardbackward': True}, dxdt_truth=None, tvgamma=1e-2,
72 | padding='auto', optimization_method='Nelder-Mead', optimization_options={'maxiter': 10},
73 | metric='rmse'):
74 | """
75 | Optimize the parameters for pynumdiff.kalman_smooth.constant_velocity
76 | See pynumdiff.optimize.__optimize__ and pynumdiff.kalman_smooth.constant_velocity for detailed documentation.
77 | """
78 |
79 | # optimize
80 | func = pynumdiff.kalman_smooth.constant_velocity
81 | opt_params, opt_val = __optimize_kalman__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
82 | optimization_method, optimization_options, metric)
83 |
84 | return opt_params, opt_val
85 |
86 |
87 | def constant_acceleration(x, dt, params=None, options={'forwardbackward': True}, dxdt_truth=None, tvgamma=1e-2,
88 | padding='auto', optimization_method='Nelder-Mead',
89 | optimization_options={'maxiter': 10}, metric='rmse'):
90 | """
91 | Optimize the parameters for pynumdiff.kalman_smooth.constant_acceleration
92 | See pynumdiff.optimize.__optimize__ and pynumdiff.kalman_smooth.constant_acceleration for detailed documentation.
93 | """
94 |
95 | # optimize
96 | func = pynumdiff.kalman_smooth.constant_acceleration
97 | opt_params, opt_val = __optimize_kalman__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
98 | optimization_method, optimization_options, metric)
99 |
100 | return opt_params, opt_val
101 |
102 |
103 | def constant_jerk(x, dt, params=None, options={'forwardbackward': True}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
104 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 10}, metric='rmse'):
105 | """
106 | Optimize the parameters for pynumdiff.kalman_smooth.constant_jerk
107 | See pynumdiff.optimize.__optimize__ and pynumdiff.kalman_smooth.constant_jerk for detailed documentation.
108 | """
109 | # optimize
110 | func = pynumdiff.kalman_smooth.constant_jerk
111 | opt_params, opt_val = __optimize_kalman__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
112 | optimization_method, optimization_options, metric)
113 |
114 | return opt_params, opt_val
115 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/linear_model/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | import useful functions from __linear_model__
3 | """
4 | from pynumdiff.optimize.linear_model.__linear_model__ import savgoldiff
5 | from pynumdiff.optimize.linear_model.__linear_model__ import spectraldiff
6 | from pynumdiff.optimize.linear_model.__linear_model__ import polydiff
7 | from pynumdiff.optimize.linear_model.__linear_model__ import chebydiff
8 | from pynumdiff.optimize.linear_model.__linear_model__ import lineardiff
9 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/linear_model/__linear_model__.py:
--------------------------------------------------------------------------------
1 | """
2 | functions for optimizing linear models
3 | """
4 | import pynumdiff.linear_model
5 | from pynumdiff.optimize.__optimize__ import __optimize__
6 |
7 |
8 | def spectraldiff(x, dt, params=None, options={'even_extension': True, 'pad_to_zero_dxdt': True}, dxdt_truth=None,
9 | tvgamma=1e-2, padding='auto', optimization_method='Nelder-Mead',
10 | optimization_options={'maxiter': 10}, metric='rmse'):
11 | """
12 | Optimize the parameters for pynumdiff.linear_model.spectraldiff
13 | See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.spectraldiff for detailed documentation.
14 | """
15 | # initial condition
16 | if params is None:
17 | params = [[1e-3], [5e-2], [1e-2], [5e-2], [1e-1]]
18 |
19 | # param types and bounds
20 | params_types = [float, ]
21 | params_low = [1e-5, ]
22 | params_high = [1-1e-5, ]
23 |
24 | # optimize
25 | func = pynumdiff.linear_model.spectraldiff
26 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
27 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
28 | optimization_options=optimization_options)
29 |
30 | return opt_params, opt_val
31 |
32 |
33 | def polydiff(x, dt, params=None, options={'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'},
34 | dxdt_truth=None, tvgamma=1e-2, padding='auto', optimization_method='Nelder-Mead',
35 | optimization_options={'maxiter': 10}, metric='rmse'):
36 | """
37 | Optimize the parameters for pynumdiff.linear_model.polydiff
38 | See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.polydiff for detailed documentation.
39 | """
40 | # initial condition
41 | if params is None:
42 | orders = [2, 3, 5, 7]
43 | if options['sliding']:
44 | window_sizes = [10, 30, 50, 90, 130]
45 | params = []
46 | for order in orders:
47 | for window_size in window_sizes:
48 | params.append([order, window_size])
49 | else:
50 | params = []
51 | for order in orders:
52 | params.append([order])
53 |
54 | # param types and bounds
55 | if options['sliding']:
56 | params_types = [int, int]
57 | params_low = [1, 10]
58 | params_high = [8, 1e3]
59 | else:
60 | params_types = [int, ]
61 | params_low = [1, ]
62 | params_high = [8, ]
63 |
64 | # optimize
65 | func = pynumdiff.linear_model.polydiff
66 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
67 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
68 | optimization_options=optimization_options)
69 |
70 | return opt_params, opt_val
71 |
72 |
73 | def savgoldiff(x, dt, params=None, options={}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
74 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 10}, metric='rmse'):
75 | """
76 | Optimize the parameters for pynumdiff.linear_model.savgoldiff
77 | See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.savgoldiff for detailed documentation.
78 | """
79 | # initial condition
80 | if params is None:
81 | orders = [2, 3, 5, 7, 9, 11, 13]
82 | window_sizes = [3, 10, 30, 50, 90, 130, 200, 300]
83 | smoothing_wins = [3, 10, 30, 50, 90, 130, 200, 300]
84 | params = []
85 | for order in orders:
86 | for window_size in window_sizes:
87 | for smoothing_win in smoothing_wins:
88 | params.append([order, window_size, smoothing_win])
89 |
90 | # param types and bounds
91 | params_types = [int, int, int]
92 | params_low = [1, 3, 3]
93 | params_high = [12, 1e3, 1e3]
94 |
95 | # optimize
96 | func = pynumdiff.linear_model.savgoldiff
97 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
98 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
99 | optimization_options=optimization_options)
100 |
101 | return opt_params, opt_val
102 |
103 |
104 | def chebydiff(x, dt, params=None, options={'sliding': True, 'step_size': 1, 'kernel_name': 'friedrichs'},
105 | dxdt_truth=None, tvgamma=1e-2, padding='auto', optimization_method='Nelder-Mead',
106 | optimization_options={'maxiter': 10}, metric='rmse'):
107 | """
108 | Optimize the parameters for pynumdiff.linear_model.chebydiff
109 | See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.chebydiff for detailed documentation.
110 | """
111 | # initial condition
112 | if params is None:
113 | orders = [3, 5, 7, 9]
114 | if options['sliding']:
115 | window_sizes = [10, 30, 50, 90, 130]
116 | params = []
117 | for order in orders:
118 | for window_size in window_sizes:
119 | params.append([order, window_size])
120 | else:
121 | params = []
122 | for order in orders:
123 | params.append([order])
124 |
125 | # param types and bounds
126 | if options['sliding']:
127 | params_types = [int, int]
128 | params_low = [1, 10]
129 | params_high = [10, 1e3]
130 | else:
131 | params_types = [int, ]
132 | params_low = [1, ]
133 | params_high = [10, ]
134 |
135 | # optimize
136 | func = pynumdiff.linear_model.chebydiff
137 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
138 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
139 | optimization_options=optimization_options)
140 |
141 | return opt_params, opt_val
142 |
143 |
144 | def lineardiff(x, dt, params=None, options={'sliding': True, 'step_size': 10, 'kernel_name': 'gaussian'},
145 | dxdt_truth=None, tvgamma=1e-2, padding='auto', optimization_method='Nelder-Mead',
146 | optimization_options={'maxiter': 10}, metric='rmse'):
147 | """
148 | Optimize the parameters for pynumdiff.linear_model.lineardiff
149 | See pynumdiff.optimize.__optimize__ and pynumdiff.linear_model.lineardiff for detailed documentation.
150 | """
151 | # initial condition
152 | if params is None:
153 | Ns = [3]
154 | gammas = [1e-1, 1, 10, 100]
155 | if options['sliding']:
156 | window_sizes = [10, 30, 50, 90, 130]
157 | params = []
158 | for N in Ns:
159 | for gamma in gammas:
160 | for window_size in window_sizes:
161 | params.append([N, gamma, window_size])
162 | else:
163 | params = []
164 | for N in Ns:
165 | for gamma in gammas:
166 | params.append([N, gamma])
167 |
168 | # param types and bounds
169 | if options['sliding']:
170 | params_types = [int, float, int]
171 | params_low = [3, 1e-3, 15]
172 | params_high = [3, 1e3, 1e3]
173 | else:
174 | params_types = [int, float, ]
175 | params_low = [3, 1e-3, ]
176 | params_high = [3, 1e3, ]
177 |
178 | # optimize
179 | func = pynumdiff.linear_model.lineardiff
180 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
181 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
182 | optimization_options=optimization_options)
183 |
184 | return opt_params, opt_val
185 |
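186 | # Illustrative sketch of optimizing against known ground truth (the synthetic
187 | # signal below is an example only):
188 | #
189 | #     import numpy as np
190 | #     from pynumdiff.optimize.linear_model import savgoldiff
191 | #
192 | #     dt = 0.01
193 | #     t = np.arange(0, 2, dt)
194 | #     x = np.sin(2*np.pi*t) + 0.1*np.random.randn(len(t))
195 | #     dxdt_truth = 2*np.pi*np.cos(2*np.pi*t)
196 | #     params, val = savgoldiff(x, dt, dxdt_truth=dxdt_truth, metric='rmse')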
--------------------------------------------------------------------------------
/pynumdiff/optimize/smooth_finite_difference/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | import useful functions from __smooth_finite_difference__
3 | """
4 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import mediandiff
5 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import meandiff
6 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import gaussiandiff
7 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import friedrichsdiff
8 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import butterdiff
9 | from pynumdiff.optimize.smooth_finite_difference.__smooth_finite_difference__ import splinediff
10 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/smooth_finite_difference/__smooth_finite_difference__.py:
--------------------------------------------------------------------------------
1 | """
2 | functions for optimizing smoothed finite difference
3 | """
4 | import pynumdiff.smooth_finite_difference
5 | from pynumdiff.optimize.__optimize__ import __optimize__
6 |
7 | ####################
8 | # Helper functions #
9 | ####################
10 |
11 |
12 | def __kerneldiff__(func, x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2,
13 | padding='auto', metric='rmse'):
14 | """
15 | :param func:
16 | :param x:
17 | :param dt:
18 | :param params:
19 | :param options:
20 | :param dxdt_truth:
21 | :param tvgamma:
22 | :param padding:
23 | :param metric:
24 | :return:
25 | """
26 | # initial condition
27 | if params is None:
28 | if options['iterate'] is False:
29 | params = [[5], [15], [30], [50]]
30 | else:
31 | window_sizes = [5, 15, 30, 50]
32 | iterations = [1, 5, 10]
33 | params = []
34 | for window_size in window_sizes:
35 | for iteration in iterations:
36 | params.append([window_size, iteration])
37 |
38 | # param types and bounds
39 | if options['iterate'] is False:
40 | params_types = [int, ]
41 | params_low = [1, ]
42 | params_high = [1e6, ]
43 | else:
44 | params_types = [int, int]
45 | params_low = [1, 1]
46 | params_high = [len(x)-1, 1e2]
47 |
48 | # optimize
49 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
50 | opt_params, opt_val = __optimize__(params, args)
51 |
52 | return opt_params, opt_val
53 |
54 | ######################
55 | # Optimize functions #
56 | ######################
57 |
58 |
59 | def mediandiff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2,
60 | padding='auto', metric='rmse'):
61 | """
62 | Optimize the parameters for pynumdiff.smooth_finite_difference.mediandiff
63 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.mediandiff for detailed documentation.
64 | """
65 | func = pynumdiff.smooth_finite_difference.mediandiff
66 | opt_params, opt_val = __kerneldiff__(func, x, dt, params, options, dxdt_truth, tvgamma, padding, metric)
67 | return opt_params, opt_val
68 |
69 |
70 | def meandiff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2, padding='auto', metric='rmse'):
71 | """
72 | Optimize the parameters for pynumdiff.smooth_finite_difference.meandiff
73 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.meandiff for detailed documentation.
74 | """
75 | func = pynumdiff.smooth_finite_difference.meandiff
76 | opt_params, opt_val = __kerneldiff__(func, x, dt, params, options, dxdt_truth, tvgamma, padding, metric)
77 | return opt_params, opt_val
78 |
79 |
80 | def gaussiandiff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2,
81 | padding='auto', metric='rmse'):
82 | """
83 | Optimize the parameters for pynumdiff.smooth_finite_difference.gaussiandiff
84 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.gaussiandiff for detailed documentation.
85 | """
86 | func = pynumdiff.smooth_finite_difference.gaussiandiff
87 | opt_params, opt_val = __kerneldiff__(func, x, dt, params, options, dxdt_truth, tvgamma, padding, metric)
88 | return opt_params, opt_val
89 |
90 |
91 | def friedrichsdiff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2,
92 | padding='auto', metric='rmse'):
93 | """
94 | Optimize the parameters for pynumdiff.smooth_finite_difference.friedrichsdiff
95 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.friedrichsdiff for detailed documentation.
96 | """
97 | func = pynumdiff.smooth_finite_difference.friedrichsdiff
98 | opt_params, opt_val = __kerneldiff__(func, x, dt, params, options, dxdt_truth, tvgamma, padding, metric)
99 | return opt_params, opt_val
100 |
101 |
102 | def butterdiff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
103 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
104 | """
105 | Optimize the parameters for pynumdiff.smooth_finite_difference.butterdiff
106 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.butterdiff for detailed documentation.
107 | """
108 | # initial condition
109 | if params is None:
110 | ns = [1, 2, 3, 4, 5, 6, 7]
111 | wns = [0.0001, 0.001, 0.005, 0.01, 0.1, 0.5]
112 | if options['iterate'] is False:
113 | params = []
114 | for n in ns:
115 | for wn in wns:
116 | params.append([n, wn])
117 | else:
118 | iterations = [1, 5, 10]
119 | params = []
120 | for n in ns:
121 | for wn in wns:
122 | for i in iterations:
123 | params.append([n, wn, i])
124 |
125 | # param types and bounds
126 | if options['iterate'] is False:
127 | params_types = [int, float]
128 | params_low = [1, 1e-4]
129 | params_high = [10, 1-1e-2]
130 | else:
131 | params_types = [int, float, int]
132 | params_low = [3, 1e-4, 1]
133 | params_high = [10, 1, 1e3]
134 |
135 | # optimize
136 | func = pynumdiff.smooth_finite_difference.butterdiff
137 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
138 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
139 | optimization_options=optimization_options)
140 |
141 | return opt_params, opt_val
142 |
143 |
144 | def splinediff(x, dt, params=None, options={'iterate': False}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
145 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
146 | """
147 | Optimize the parameters for pynumdiff.smooth_finite_difference.splinediff
148 | See pynumdiff.optimize.__optimize__ and pynumdiff.smooth_finite_difference.splinediff for detailed documentation.
149 | """
150 | # initial condition
151 | if params is None:
152 | ks = [3, 5]
153 | ss = [0.5, 0.9, 0.95, 1, 10, 100]
154 | if options['iterate'] is False:
155 | params = []
156 | for s in ss:
157 | for k in ks:
158 | params.append([k, s])
159 | else:
160 | iterations = [1, 5, 10]
161 | params = []
162 | for s in ss:
163 | for k in ks:
164 | for i in iterations:
165 | params.append([k, s, i])
166 |
167 | # param types and bounds
168 | if options['iterate'] is False:
169 | params_types = [int, float]
170 | params_low = [3, 1e-2]
171 | params_high = [5, 1e6]
172 | else:
173 | params_types = [int, float, int]
174 | params_low = [3, 1e-2, 1]
175 | params_high = [5, 1e6, 10]
176 |
177 | # optimize
178 | func = pynumdiff.smooth_finite_difference.splinediff
179 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
180 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
181 | optimization_options=optimization_options)
182 |
183 | return opt_params, opt_val
184 |
--------------------------------------------------------------------------------
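
Taken together with the smoothers they wrap, these optimizers follow a two-step pattern: search for parameters, then differentiate with the winner. A minimal sketch (the synthetic signal and tvgamma value are illustrative, not from the package):

    import numpy as np
    import pynumdiff.smooth_finite_difference
    import pynumdiff.optimize.smooth_finite_difference

    dt = 0.01
    t = np.arange(0, 2, dt)
    x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)  # noisy test signal

    # search the candidate window sizes above, then apply the best one
    params, val = pynumdiff.optimize.smooth_finite_difference.mediandiff(
        x, dt, params=None, options={'iterate': False}, tvgamma=1e-2)
    x_hat, dxdt_hat = pynumdiff.smooth_finite_difference.mediandiff(x, dt, params)
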
/pynumdiff/optimize/total_variation_regularization/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | import useful functions from __total_variation_regularization__
3 | """
4 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import iterative_velocity
5 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import velocity
6 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import acceleration
7 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import jerk
8 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import jerk_sliding
9 | from pynumdiff.optimize.total_variation_regularization.__total_variation_regularization__ import smooth_acceleration
10 |
--------------------------------------------------------------------------------
/pynumdiff/optimize/total_variation_regularization/__total_variation_regularization__.py:
--------------------------------------------------------------------------------
1 | """
2 | functions for optimizing total variation regularization
3 | """
4 | import pynumdiff.total_variation_regularization
5 | from pynumdiff.optimize.__optimize__ import __optimize__
6 |
7 | ####################
8 | # Helper functions #
9 | ####################
10 |
11 |
12 | def __optimize_tvr__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
13 | optimization_method, optimization_options, metric):
14 | """
15 | :param func:
16 | :param x:
17 | :param dt:
18 | :param params:
19 | :param options:
20 | :param dxdt_truth:
21 | :param tvgamma:
22 | :param padding:
23 | :param optimization_method:
24 | :param optimization_options:
25 | :param metric:
26 | :return:
27 | """
28 | # initial condition
29 | if params is None:
30 | params = [[1e-2], [1e-1], [1], [10], [100], [1000]]
31 |
32 | # param types and bounds
33 | params_types = [float, ]
34 | params_low = [1e-4, ]
35 | params_high = [1e7, ]
36 |
37 | # optimize
38 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
39 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
40 | optimization_options=optimization_options)
41 |
42 | return opt_params, opt_val
43 |
44 | ######################
45 | # Optimize functions #
46 | ######################
47 |
48 |
49 | def iterative_velocity(x, dt, params=None, options={'cg_maxiter': 1000, 'scale': 'small'},
50 | dxdt_truth=None, tvgamma=1e-2, padding='auto',
51 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
52 | """
53 | Optimize the parameters for pynumdiff.total_variation_regularization.iterative_velocity
54 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.iterative_velocity for detailed documentation.
55 | """
56 |
57 | # optimize
58 | func = pynumdiff.total_variation_regularization.iterative_velocity
59 |
60 | # initial condition
61 | if params is None:
62 | params = [[1, 1e-2], [1, 1e-1], [1, 1], [1, 10], [1, 100], [1, 1000]]
63 |
64 | # param types and bounds
65 | params_types = [int, float, ]
66 | params_low = [1, 1e-4, ]
67 | params_high = [100, 1e7, ]
68 |
69 | # optimize
70 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
71 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
72 | optimization_options=optimization_options)
73 |
74 | return opt_params, opt_val
75 |
76 |
77 | def velocity(x, dt, params=None, options={'solver': 'MOSEK'}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
78 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
79 | """
80 | Optimize the parameters for pynumdiff.total_variation_regularization.velocity
81 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.velocity for detailed documentation.
82 | """
83 |
84 | # optimize
85 | func = pynumdiff.total_variation_regularization.velocity
86 | opt_params, opt_val = __optimize_tvr__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
87 | optimization_method, optimization_options, metric)
88 |
89 | return opt_params, opt_val
90 |
91 |
92 | def acceleration(x, dt, params=None, options={'solver': 'MOSEK'}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
93 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
94 | """
95 | Optimize the parameters for pynumdiff.total_variation_regularization.acceleration
96 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.acceleration for detailed documentation.
97 | """
98 |
99 | # optimize
100 | func = pynumdiff.total_variation_regularization.acceleration
101 | opt_params, opt_val = __optimize_tvr__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
102 | optimization_method, optimization_options, metric)
103 |
104 | return opt_params, opt_val
105 |
106 |
107 | def jerk(x, dt, params=None, options={'solver': 'MOSEK'}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
108 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
109 | """
110 | Optimize the parameters for pynumdiff.total_variation_regularization.jerk
111 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.jerk for detailed documentation.
112 | """
113 | # optimize
114 |     func = pynumdiff.total_variation_regularization.jerk
115 |     opt_params, opt_val = __optimize_tvr__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
116 | optimization_method, optimization_options, metric)
117 |
118 | return opt_params, opt_val
119 |
120 |
121 | def jerk_sliding(x, dt, params=None, options={'solver': 'MOSEK'}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
122 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
123 | """
124 | Optimize the parameters for pynumdiff.total_variation_regularization.jerk_sliding
125 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.jerk_sliding for detailed documentation.
126 | """
127 |
128 | # optimize
129 | func = pynumdiff.total_variation_regularization.jerk_sliding
130 | opt_params, opt_val = __optimize_tvr__(func, x, dt, params, options, dxdt_truth, tvgamma, padding,
131 | optimization_method, optimization_options, metric)
132 |
133 | return opt_params, opt_val
134 |
135 |
136 | def smooth_acceleration(x, dt, params=None, options={'solver': 'MOSEK'}, dxdt_truth=None, tvgamma=1e-2, padding='auto',
137 | optimization_method='Nelder-Mead', optimization_options={'maxiter': 20}, metric='rmse'):
138 | """
139 | Optimize the parameters for pynumdiff.total_variation_regularization.smooth_acceleration
140 | See pynumdiff.optimize.__optimize__ and pynumdiff.total_variation_regularization.smooth_acceleration for detailed documentation.
141 | """
142 | # initial condition
143 | if params is None:
144 | gammas = [1e-2, 1e-1, 1, 10, 100, 1000]
145 | window_sizes = [1, 10, 30, 50, 90, 130]
146 | params = []
147 | for gamma in gammas:
148 | for window_size in window_sizes:
149 | params.append([gamma, window_size])
150 |
151 | # param types and bounds
152 |     params_types = [float, int]  # gamma is continuous; casting it to int would truncate 1e-2 to 0
153 | params_low = [1e-4, 1]
154 | params_high = [1e7, 1e3]
155 |
156 | # optimize
157 | func = pynumdiff.total_variation_regularization.smooth_acceleration
158 | args = [func, x, dt, params_types, params_low, params_high, options, dxdt_truth, tvgamma, padding, metric]
159 | opt_params, opt_val = __optimize__(params, args, optimization_method=optimization_method,
160 | optimization_options=optimization_options)
161 |
162 | return opt_params, opt_val
163 |
--------------------------------------------------------------------------------
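
The cvxpy-backed variants follow the same pattern. A sketch, assuming cvxpy and the CVXOPT solver are installed (the default solver here is 'MOSEK', but the repo's own tests use 'CVXOPT'):

    import numpy as np
    import pynumdiff.total_variation_regularization
    import pynumdiff.optimize.total_variation_regularization as tvr_opt

    dt = 0.01
    t = np.arange(0, 2, dt)
    x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)

    params, val = tvr_opt.velocity(x, dt, params=None, options={'solver': 'CVXOPT'}, tvgamma=1e-2)
    x_hat, dxdt_hat = pynumdiff.total_variation_regularization.velocity(
        x, dt, params, options={'solver': 'CVXOPT'})
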
/pynumdiff/smooth_finite_difference/__init__.py:
--------------------------------------------------------------------------------
1 | """Apply smoothing method before finite difference.
2 | """
3 | from ._smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
4 |
5 | __all__ = ['mediandiff', 'meandiff', 'gaussiandiff', 'friedrichsdiff', 'butterdiff', 'splinediff'] # So these get treated as direct members of the module by sphinx
6 |
--------------------------------------------------------------------------------
/pynumdiff/smooth_finite_difference/_smooth_finite_difference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.signal
3 | import scipy.interpolate  # needed by splinediff below
4 |
5 | from pynumdiff.finite_difference import first_order as finite_difference
6 | from pynumdiff.utils import utility
7 |
8 |
9 | ################################
10 | # Smoothing finite differences #
11 | ################################
12 | def mediandiff(x, dt, params, options={}):
13 | """
14 | Perform median smoothing using scipy.signal.medfilt
15 | followed by first order finite difference
16 |
17 | :param x: array of time series to differentiate
18 | :type x: np.array (float)
19 |
20 | :param dt: time step size
21 | :type dt: float
22 |
23 |     :param params: [filter_window_size], or [filter_window_size, num_iterations] if options['iterate'] is True
24 |     :type params: list (int) or int
25 |
26 | :param options: an empty dictionary or a dictionary with 1 key value pair
27 |
28 | - 'iterate': whether to run multiple iterations of the smoother. Note: iterate does nothing for median smoother.
29 |
30 | :type options: dict {'iterate': (boolean)}
31 |
32 | :return: a tuple consisting of:
33 |
34 | - x_hat: estimated (smoothed) x
35 | - dxdt_hat: estimated derivative of x
36 |
37 |
38 | :rtype: tuple -> (np.array, np.array)
39 | """
40 |
41 | if 'iterate' in options.keys() and options['iterate'] is True:
42 | window_size, iterations = params
43 | else:
44 | iterations = 1
45 | if isinstance(params, list):
46 | window_size = params[0]
47 | else:
48 | window_size = params
49 |
50 | if not window_size % 2:
51 |         window_size += 1  # scipy.signal.medfilt requires an odd window size
52 |
53 | x_hat = x
54 | for _ in range(iterations):
55 | x_hat = scipy.signal.medfilt(x_hat, window_size)
56 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
57 |
58 | return x_hat, dxdt_hat
59 |
60 |
61 | def meandiff(x, dt, params, options={}):
62 | """
63 | Perform mean smoothing by convolving mean kernel with x
64 | followed by first order finite difference
65 |
66 | :param np.ndarray[float] x: array of time series to differentiate
67 | :param float dt: time step size
68 |
69 | :param params: [filter_window_size] or if 'iterate' in options:
70 | [filter_window_size, num_iterations]
71 |
72 | :type params: list (int)
73 |
74 | :param options: an empty dictionary or a dictionary with 1 key value pair
75 |
76 |     - 'iterate': whether to run multiple iterations of the smoother
77 |
78 | :type options: dict {'iterate': (boolean)}
79 |
80 | :return: a tuple consisting of:
81 |
82 | - x_hat: estimated (smoothed) x
83 | - dxdt_hat: estimated derivative of x
84 |
85 |
86 | :rtype: tuple -> (np.array, np.array)
87 | """
88 |
89 | if 'iterate' in options.keys() and options['iterate'] is True:
90 | window_size, iterations = params
91 | else:
92 | iterations = 1
93 | if isinstance(params, list):
94 | window_size = params[0]
95 | else:
96 | window_size = params
97 |
98 | kernel = utility._mean_kernel(window_size)
99 | x_hat = utility.convolutional_smoother(x, kernel, iterations)
100 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
101 |
102 | return x_hat, dxdt_hat
103 |
104 |
105 | def gaussiandiff(x, dt, params, options={}):
106 | """
107 | Perform gaussian smoothing by convolving gaussian kernel with x
108 | followed by first order finite difference
109 |
110 | :param x: array of time series to differentiate
111 | :type x: np.array (float)
112 |
113 | :param dt: time step size
114 | :type dt: float
115 |
116 | :param params: [filter_window_size] or if 'iterate' in options:
117 | [filter_window_size, num_iterations]
118 |
119 | :type params: list (int)
120 |
121 | :param options: an empty dictionary or a dictionary with 1 key value pair
122 |
123 |     - 'iterate': whether to run multiple iterations of the smoother
124 |
125 | :type options: dict {'iterate': (boolean)}
126 |
127 | :return: a tuple consisting of:
128 |
129 | - x_hat: estimated (smoothed) x
130 | - dxdt_hat: estimated derivative of x
131 |
132 |
133 | :rtype: tuple -> (np.array, np.array)
134 | """
135 | if 'iterate' in options.keys() and options['iterate'] is True:
136 | window_size, iterations = params
137 | else:
138 | iterations = 1
139 | if isinstance(params, list):
140 | window_size = params[0]
141 | else:
142 | window_size = params
143 |
144 | kernel = utility._gaussian_kernel(window_size)
145 | x_hat = utility.convolutional_smoother(x, kernel, iterations)
146 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
147 |
148 | return x_hat, dxdt_hat
149 |
150 |
151 | def friedrichsdiff(x, dt, params, options={}):
152 | """
153 | Perform friedrichs smoothing by convolving friedrichs kernel with x
154 | followed by first order finite difference
155 |
156 | :param x: array of time series to differentiate
157 | :type x: np.array (float)
158 |
159 | :param dt: time step size
160 | :type dt: float
161 |
162 | :param params: [filter_window_size] or if 'iterate' in options:
163 | [filter_window_size, num_iterations]
164 |
165 | :type params: list (int)
166 |
167 | :param options: an empty dictionary or a dictionary with 1 key value pair
168 |
169 |     - 'iterate': whether to run multiple iterations of the smoother
170 |
171 | :type options: dict {'iterate': (boolean)}
172 |
173 | :return: a tuple consisting of:
174 |
175 | - x_hat: estimated (smoothed) x
176 | - dxdt_hat: estimated derivative of x
177 |
178 |
179 | :rtype: tuple -> (np.array, np.array)
180 | """
181 |
182 | if 'iterate' in options.keys() and options['iterate'] is True:
183 | window_size, iterations = params
184 | else:
185 | iterations = 1
186 | if isinstance(params, list):
187 | window_size = params[0]
188 | else:
189 | window_size = params
190 |
191 | kernel = utility._friedrichs_kernel(window_size)
192 | x_hat = utility.convolutional_smoother(x, kernel, iterations)
193 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
194 |
195 | return x_hat, dxdt_hat
196 |
197 |
198 | def butterdiff(x, dt, params, options={}):
199 | """
200 | Perform butterworth smoothing on x with scipy.signal.filtfilt
201 | followed by first order finite difference
202 |
203 | :param x: array of time series to differentiate
204 | :type x: np.array (float)
205 |
206 | :param dt: time step size
207 | :type dt: float
208 |
209 | :param params: [n, wn], n = order of the filter; wn = Cutoff frequency.
210 | For a discrete timeseries, the value is normalized to the range 0-1,
211 | where 1 is the Nyquist frequency.
212 |
213 |     :type params: list -> [int, float]
214 |
215 | :param options: an empty dictionary or a dictionary with 2 key value pair
216 |
217 |     - 'iterate': whether to run multiple iterations of the smoother
218 |     - 'padmethod': "pad" or "gust", see scipy.signal.filtfilt (the implementation below currently always uses "pad")
219 |
220 | :type options: dict {'iterate': (boolean), 'padmethod': string}
221 |
222 | :return: a tuple consisting of:
223 |
224 | - x_hat: estimated (smoothed) x
225 | - dxdt_hat: estimated derivative of x
226 |
227 |
228 | :rtype: tuple -> (np.array, np.array)
229 | """
230 | if 'iterate' in options.keys() and options['iterate'] is True:
231 | n, wn, iterations = params
232 | else:
233 | iterations = 1
234 | n, wn = params
235 |
236 | b, a = scipy.signal.butter(n, wn)
237 |
238 | x_hat = x
239 | for _ in range(iterations):
240 | if len(x) < 9:
241 | x_hat = scipy.signal.filtfilt(b, a, x_hat, method="pad", padlen=len(x)-1)
242 | else:
243 | x_hat = scipy.signal.filtfilt(b, a, x_hat, method="pad")
244 |
245 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
246 |
247 | offset = np.mean(x) - np.mean(x_hat)
248 | x_hat = x_hat + offset
249 |
250 | return x_hat, dxdt_hat
251 |
252 |
253 | def splinediff(x, dt, params, options={}):
254 | """
255 | Perform spline smoothing on x with scipy.interpolate.UnivariateSpline
256 | followed by first order finite difference
257 |
258 | :param x: array of time series to differentiate
259 | :type x: np.array (float)
260 |
261 | :param dt: time step size
262 | :type dt: float
263 |
264 | :param params: [k, s], k: Order of the spline. A kth order spline can be differentiated k times.
265 | s: Positive smoothing factor used to choose the number of knots.
266 | Number of knots will be increased until the smoothing condition is satisfied:
267 | sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
268 |
269 |     :type params: list -> [int, float]
270 |
271 | :param options: an empty dictionary or a dictionary with 1 key value pair
272 |
273 |     - 'iterate': whether to run multiple iterations of the smoother
274 |
275 | :type options: dict {'iterate': (boolean)}
276 |
277 | :return: a tuple consisting of:
278 |
279 | - x_hat: estimated (smoothed) x
280 | - dxdt_hat: estimated derivative of x
281 |
282 |
283 | :rtype: tuple -> (np.array, np.array)
284 | """
285 | if 'iterate' in options.keys() and options['iterate'] is True:
286 | k, s, iterations = params
287 | else:
288 | iterations = 1
289 | k, s = params
290 |
291 |         t = np.arange(len(x)) * dt  # exact length; float steps in np.arange can add a spurious point
292 |
293 | x_hat = x
294 | for _ in range(iterations):
295 | spline = scipy.interpolate.UnivariateSpline(t, x_hat, k=k, s=s)
296 | x_hat = spline(t)
297 |
298 | x_hat, dxdt_hat = finite_difference(x_hat, dt)
299 |
300 | return x_hat, dxdt_hat
301 |
--------------------------------------------------------------------------------
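
As a usage sketch for the docstrings above: butterdiff takes params=[n, wn], with wn normalized so that 1 is the Nyquist frequency (the values here mirror the tests later in this repo):

    import numpy as np
    from pynumdiff.smooth_finite_difference import butterdiff

    dt = 0.01                    # 100 Hz sampling, so the Nyquist frequency is 50 Hz
    t = np.arange(0, 2, dt)
    x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)
    x_hat, dxdt_hat = butterdiff(x, dt, [3, 0.074])  # order-3 filter, cutoff at 0.074 * Nyquist
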
/pynumdiff/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/florisvb/PyNumDiff/b171ac5582732c703f24976e01651730a146f50c/pynumdiff/tests/__init__.py
--------------------------------------------------------------------------------
/pynumdiff/tests/test_finite_difference.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for finite difference methods
3 | """
4 | # pylint: skip-file
5 | import numpy as np
6 | from pynumdiff import first_order, second_order
7 |
8 |
9 | def test_first_order_1():
10 | x = np.array([1, 2, 3, 4, 5])
11 | dt = 0.01
12 | dxdt = np.array([100, 100, 100, 100, 100])
13 | _, dxdt_hat = first_order(x, dt)
14 | np.testing.assert_array_equal(dxdt_hat, dxdt)
15 |
16 | def test_first_order_2():
17 | x = np.array([8, 3, 14, 0, 9])
18 | dt = 0.01
19 | dxdt = np.array([-500, 300, -150, -250, 900])
20 | _, dxdt_hat = first_order(x, dt)
21 | np.testing.assert_array_equal(dxdt_hat, dxdt)
22 |
23 | def test_first_order_iterative():
24 | x = np.random.rand(100)
25 | dt = 0.01
26 | params = [100]
27 | _, dxdt_hat = first_order(x, dt, params, options={'iterate': True})
28 | assert x.shape == dxdt_hat.shape
29 |
30 | def test_second_order_1():
31 | x = np.array([1, 2, 3, 4, 5])
32 | dt = 0.01
33 | dxdt = np.array([100, 100, 100, 100, 100])
34 | _, dxdt_hat = second_order(x, dt)
35 | np.testing.assert_array_equal(dxdt_hat, dxdt)
36 |
37 | def test_second_order_2():
38 | x = np.array([8, 3, 14, 0, 9])
39 | dt = 0.01
40 | dxdt = np.array([-1300, 300, -150, -250, 2050])
41 | _, dxdt_hat = second_order(x, dt)
42 | np.testing.assert_array_equal(dxdt_hat, dxdt)
43 |
--------------------------------------------------------------------------------
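
The expected vectors above are consistent with central differences in the interior and one-sided differences at the ends. A quick property-style check along the same lines (tolerance chosen loosely by hand):

    import numpy as np
    from pynumdiff import first_order

    dt = 0.01
    t = np.arange(0, 1, dt)
    _, dxdt_hat = first_order(np.sin(t), dt)
    # interior points are second-order accurate, so the error should be tiny
    assert np.max(np.abs(dxdt_hat[1:-1] - np.cos(t[1:-1]))) < 1e-3
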
/pynumdiff/tests/test_kalman_smooth.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for kalman smoothing methods
3 | """
4 | # pylint: skip-file
5 |
6 | import numpy as np
7 | from pynumdiff.kalman_smooth import constant_velocity, constant_acceleration, \
8 | constant_jerk, known_dynamics
9 |
10 |
11 | x = np.array([1., 4., 9., 3., 20.,
12 | 8., 16., 2., 15., 10.,
13 | 15., 3., 5., 7., 4.])
14 | dt = 0.01
15 |
16 |
17 | def test_constant_velocity():
18 | params = [1e-4, 1e-5]
19 | x_hat, dxdt_hat = constant_velocity(x, dt, params)
20 | x_smooth = np.array([7.952849, 7.714494, 7.769948, 7.81768, 8.330625, 8.332996,
21 | 8.46594, 8.243244, 8.458473, 8.367324, 8.284892, 7.947729,
22 | 7.998362, 8.123646, 8.303191])
23 | dxdt = np.array([88.750804, 93.567378, 102.828004, 62.994815, 92.01605,
24 | 60.395089, 47.494064, 27.626483, 21.537133, 14.105156,
25 | 8.138253, 7.996629, 4.016616, -0.1122, 2.319358])
26 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
27 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
28 |
29 | def test_constant_acceleration():
30 | params = [1e-4, 1e-2]
31 | x_hat, dxdt_hat = constant_acceleration(x, dt, params)
32 | x_smooth = np.array([5.069524, 6.137091, 7.191819, 8.062104, 9.112695, 9.616349,
33 | 10.029422, 9.945811, 10.05048, 9.703503, 9.180588, 8.309991,
34 | 7.546839, 6.581819, 5.421122])
35 | dxdt = np.array([170.225553, 164.483647, 156.524187, 103.452558, 113.776639,
36 | 64.258467, 33.813842, -1.889904, -25.372839, -48.272303,
37 | -69.60202, -81.885049, -101.379641, -122.551681, -140.214842])
38 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
39 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
40 |
41 | def test_constant_jerk():
42 | params = [1e-4, 1e-4]
43 | x_hat, dxdt_hat = constant_jerk(x, dt, params)
44 | x_smooth = np.array([5.066536, 6.135826, 7.191131, 8.061294, 9.110784, 9.613802,
45 | 10.026445, 9.943029, 10.047933, 9.701807, 9.179971, 8.310492,
46 | 7.547672, 6.582594, 5.421728])
47 | dxdt = np.array([170.262874, 164.484367, 156.478206, 103.371112, 113.682324,
48 | 64.169044, 33.742701, -1.935552, -25.398252, -48.273806,
49 | -69.59001, -81.873115, -101.384521, -122.579907, -140.269899])
50 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
51 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
52 |
53 | def test_known_dynamics():
54 |     return  # TODO: we shouldn't have empty tests hanging around
55 |
--------------------------------------------------------------------------------
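
A sketch of calling the smoothers exercised above. The meaning of the two params entries is not documented in this file; reading them as noise-scale settings for the Kalman model is an assumption:

    import numpy as np
    from pynumdiff.kalman_smooth import constant_acceleration

    dt = 0.01
    t = np.arange(0, 2, dt)
    x = np.sin(t) + np.random.normal(0, 0.1, t.shape)
    # params values taken from test_constant_acceleration above
    x_hat, dxdt_hat = constant_acceleration(x, dt, [1e-4, 1e-2])
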
/pynumdiff/tests/test_linear_model.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for linear (smoothing) model
3 | """
4 | # pylint: skip-file
5 |
6 | import numpy as np
7 | import logging as _logging
8 |
9 | _logging.basicConfig(
10 | level=_logging.INFO,
11 | format="%(asctime)s [%(levelname)s] %(message)s",
12 | handlers=[
13 | _logging.FileHandler("debug.log"),
14 | _logging.StreamHandler()
15 | ]
16 | )
17 |
18 |
19 | from pynumdiff.linear_model import *
20 |
21 |
22 | x = np.array([1., 4., 9., 3., 20.,
23 | 8., 16., 2., 15., 10.,
24 | 15., 3., 5., 7., 4.])
25 | dt = 0.01
26 |
27 |
28 | def test_savgoldiff():
29 | params = [2, 4, 4]
30 | x_hat, dxdt_hat = savgoldiff(x, dt, params)
31 | x_smooth = np.array([4.669816, 4.374363, 6.46848, 8.899164, 10.606681, 11.059424,
32 | 10.519283, 10.058375, 10.191014, 10.193343, 9.208019, 7.445465,
33 | 5.880869, 5.49672, 6.930156])
34 | dxdt = np.array([-29.5453, 156.853147, 261.970245, 224.16657, 117.336993,
35 | -26.788542, -81.239512, -10.942197, 37.470096, -37.004311,
36 | -160.060586, -192.450136, -120.46908, 43.639278, 243.047964])
37 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
38 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
39 |
40 | def test_spectraldiff():
41 | params = [0.1]
42 | x_hat, dxdt_hat = spectraldiff(x, dt, params)
43 | x_smooth = np.array([3.99, 5.038, 6.635, 8.365, 9.971, 11.201, 11.86, 11.86,
44 | 11.231, 10.113, 8.722, 7.296, 6.047, 5.116, 4.556])
45 | dxdt = np.array([104.803, 147., 172.464, 173.547, 147.67, 98.194,
46 | 33.754, -33.769, -92.105, -131.479, -146.761, -138.333,
47 | -111.508, -74.752, -37.276])
48 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
49 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
50 |
51 | def test_polydiff():
52 | params = [2, 3]
53 | x_hat, dxdt_hat = polydiff(x, dt, params)
54 | x_smooth = np.array([1.16153, 4.506877, 6.407802, 8.544663, 13.431766, 14.051294,
55 | 10.115687, 7.674865, 10.471466, 13.612046, 11.363571, 5.68407,
56 | 4.443968, 6.213507, 4.695931])
57 | dxdt = np.array([330.730385, 284.267456, 299.891801, 305.441626, 205.475727,
58 | -145.229037, -279.41178, 15.428548, 244.252341, 20.343789,
59 | -326.727498, -288.988297, 33.647456, 27.861175, -344.695033])
60 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
61 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
62 |
63 | # def test_chebydiff(self):
64 | # try:
65 | # import pychebfun
66 | # except:
67 | # __warning__ = '\nCannot import pychebfun, skipping chebydiff test.'
68 | # _logging.info(__warning__)
69 | # return
70 |
71 | # params = [2, 3]
72 | # x_hat, dxdt_hat = chebydiff(x, dt, params)
73 | # x_smooth = np.array([1., 4.638844, 7.184256, 6.644655, 15.614775, 11.60484,
74 | # 12.284141, 6.082226, 12.000615, 12.058705, 12.462283, 5.018101,
75 | # 4.674378, 6.431201, 4.])
76 | # dxdt = np.array([202.732652, 346.950235, -140.713336, 498.719617, 212.717775,
77 | # -185.13847, -266.604056, -51.792587, 377.969849, -0.749768,
78 | # -297.654931, -455.876155, 197.575692, -24.809441, -150.109487])
79 | # np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
80 | # np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
81 |
82 | def test_lineardiff():
83 | try:
84 | import cvxpy
85 | except:
86 | __warning__ = '\nCannot import cvxpy, skipping lineardiff test.'
87 | _logging.info(__warning__)
88 | return
89 |
90 | params = [3, 5, 10]
91 | x_hat, dxdt_hat = lineardiff(x, dt, params, options={'solver': 'CVXOPT'})
92 | x_smooth = np.array([3.070975, 3.435072, 6.363585, 10.276584, 12.033974, 10.594136,
93 | 9.608228, 9.731326, 10.333255, 10.806791, 9.710448, 7.456045,
94 | 5.70695, 4.856271, 5.685251])
95 | dxdt = np.array([36.409751, 164.630545, 342.075623, 283.519415, 15.877598,
96 | -121.287252, -43.140514, 36.251305, 53.773231, -31.140351,
97 | -167.537258, -200.174883, -129.988725, -1.084955, 82.897991])
98 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
99 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
100 |
--------------------------------------------------------------------------------
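
For reference, a direct call to the Savitzky-Golay method tested above; params=[2, 4, 4] is copied from test_savgoldiff, and any reading of the three entries (polynomial order plus window sizes) is an assumption rather than documentation:

    import numpy as np
    from pynumdiff.linear_model import savgoldiff

    dt = 0.01
    t = np.arange(0, 2, dt)
    x = np.sin(t) + np.random.normal(0, 0.1, t.shape)
    x_hat, dxdt_hat = savgoldiff(x, dt, [2, 4, 4])  # params as in test_savgoldiff
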
/pynumdiff/tests/test_optimize.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for optimization module
3 | """
4 | # pylint: skip-file
5 | import numpy as np
6 | import pytest
7 |
8 | from pynumdiff.optimize.finite_difference import first_order
9 | from pynumdiff.optimize.smooth_finite_difference import mediandiff, meandiff, gaussiandiff, \
10 | friedrichsdiff, butterdiff, splinediff
11 | from pynumdiff.optimize.total_variation_regularization import *
12 | from pynumdiff.optimize.linear_model import *
13 | from pynumdiff.optimize.kalman_smooth import constant_velocity, constant_acceleration, \
14 | constant_jerk
15 | from pynumdiff.utils import simulate
16 |
17 |
18 | # simulation
19 | noise_type = 'normal'
20 | noise_parameters = [0, 0.01]
21 | dt = 0.01
22 | timeseries_length = 2
23 | problem = 'pi_control'
24 | x, x_truth, dxdt_truth, extras = simulate.__dict__[problem](timeseries_length,
25 | noise_parameters=noise_parameters,
26 | dt=dt)
27 | cutoff_frequency = 0.1
28 | log_gamma = -1.6 * np.log(cutoff_frequency) - 0.71 * np.log(dt) - 5.1
29 | tvgamma = np.exp(log_gamma)
30 |
31 | def get_err_msg(actual_params, desired_params):
32 | err_msg = 'Actual params were: ' + ', '.join(map(str, actual_params)) + ' instead of: ' + ', '.join(map(str, desired_params))
33 | return err_msg
34 |
35 |
36 | def test_first_order():
37 | params_1, val_1 = first_order(x, dt, params=None, options={'iterate': True},
38 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
39 | params_2, val_2 = first_order(x, dt, params=None, options={'iterate': True},
40 | tvgamma=0, dxdt_truth=None)
41 | assert params_1 == [5]
42 | assert params_2 == [1]
43 |
44 | def test_mediandiff():
45 | params_1, val_1 = mediandiff(x, dt, params=None, options={'iterate': False},
46 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
47 | params_2, val_2 = mediandiff(x, dt, params=None, options={'iterate': False},
48 | tvgamma=0, dxdt_truth=None)
49 | assert params_1 == [5]
50 | assert params_2 == [1]
51 |
52 | def test_meandiff():
53 | params_1, val_1 = meandiff(x, dt, params=None, options={'iterate': False},
54 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
55 | params_2, val_2 = meandiff(x, dt, params=None, options={'iterate': False},
56 | tvgamma=0, dxdt_truth=None)
57 | assert params_1 == [5]
58 | assert params_2 == [1]
59 |
60 | def test_gaussiandiff():
61 | params_1, val_1 = gaussiandiff(x, dt, params=None, options={'iterate': False},
62 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
63 | params_2, val_2 = gaussiandiff(x, dt, params=None, options={'iterate': False},
64 | tvgamma=0, dxdt_truth=None)
65 | assert params_1 == [9]
66 | assert params_2 == [1]
67 |
68 | def test_friedrichsdiff():
69 | params_1, val_1 = friedrichsdiff(x, dt, params=None, options={'iterate': False},
70 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
71 | params_2, val_2 = friedrichsdiff(x, dt, params=None, options={'iterate': False},
72 | tvgamma=0, dxdt_truth=None)
73 | assert params_1 == [9]
74 | assert params_2 == [1]
75 |
76 | def test_butterdiff():
77 | params_1, val_1 = butterdiff(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
78 | params_2, val_2 = butterdiff(x, dt, params=None, tvgamma=0, dxdt_truth=None)
79 |
80 | np.testing.assert_array_less( np.abs(params_1[0] - 9), 1.001, err_msg=get_err_msg(params_1, [9, 0.157]))
81 | np.testing.assert_array_less( np.abs(params_1[1] - 0.157), 0.01, err_msg=get_err_msg(params_1, [9, 0.157]))
82 | #np.testing.assert_almost_equal(params_1, [9, 0.157], decimal=3, err_msg=get_err_msg(params_1, [9, 0.157]))
83 | np.testing.assert_almost_equal(params_2, [2, 0.99], decimal=3, err_msg=get_err_msg(params_2, [2, 0.99]))
84 |
85 | def test_splinediff():
86 | params_1, val_1 = splinediff(x, dt, params=None, options={'iterate': True},
87 | tvgamma=tvgamma, dxdt_truth=dxdt_truth)
88 | params_2, val_2 = splinediff(x, dt, params=None, options={'iterate': True},
89 | tvgamma=0, dxdt_truth=None)
90 | np.testing.assert_almost_equal(params_1, [5, 0.0147, 1], decimal=2)
91 | np.testing.assert_almost_equal(params_2, [5, 0.0147, 1], decimal=2)
92 |
93 | def test_iterative_velocity():
94 | params_1, val_1 = iterative_velocity(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
95 | params_2, val_2 = iterative_velocity(x, dt, params=None, tvgamma=0, dxdt_truth=None)
96 | np.testing.assert_array_less( np.abs(params_1[0] - 2), 1.001)
97 | np.testing.assert_array_less( np.abs(params_2[0] - 2), 1.001)
98 |
99 | np.testing.assert_almost_equal(params_1[1], 0.0001, decimal=4)
100 | np.testing.assert_almost_equal(params_2[1], 0.0001, decimal=4)
101 |
102 | #self.assertListEqual(params_1, [2, 0.0001])
103 | #self.assertListEqual(params_2, [2, 0.0001])
104 |
105 | def test_velocity():
106 | try:
107 | import cvxpy
108 | except:
109 | pytest.skip("could not import cvxpy, skipping test_velocity", allow_module_level=True)
110 |
111 | params_1, val_1 = velocity(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
112 | params_2, val_2 = velocity(x, dt, params=None, tvgamma=0, dxdt_truth=None)
113 | param_1_error = np.abs(params_1[0] - 0.07218)
114 | param_2_error = np.abs(params_2[0] - 0.0001)
115 |
116 | np.testing.assert_array_less(param_1_error, 2)
117 | np.testing.assert_array_less(param_2_error, 2)
118 |
119 | def test_acceleration():
120 | try:
121 | import cvxpy
122 | except:
123 | pytest.skip("could not import cvxpy, skipping test_acceleration", allow_module_level=True)
124 |
125 | params_1, val_1 = acceleration(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
126 | params_2, val_2 = acceleration(x, dt, params=None, tvgamma=0, dxdt_truth=None)
127 | param_1_error = np.abs(params_1[0] - 0.1447)
128 | param_2_error = np.abs(params_2[0] - 0.0001)
129 |
130 | np.testing.assert_array_less(param_1_error, 2)
131 | np.testing.assert_array_less(param_2_error, 2)
132 |
133 | def test_savgoldiff():
134 | params_1, val_1 = savgoldiff(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
135 | params_2, val_2 = savgoldiff(x, dt, params=None, tvgamma=0, dxdt_truth=None)
136 | assert params_1 == [10, 59, 3]
137 | assert params_2 == [9, 3, 3]
138 |
139 | def test_spectraldiff():
140 | params_1, val_1 = spectraldiff(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
141 | params_2, val_2 = spectraldiff(x, dt, params=None, tvgamma=0, dxdt_truth=None)
142 | np.testing.assert_almost_equal(params_1, [0.0912], decimal=3)
143 | np.testing.assert_almost_equal(params_2, [0.575], decimal=3)
144 |
145 | def test_polydiff():
146 | params_1, val_1 = polydiff(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
147 | params_2, val_2 = polydiff(x, dt, params=None, tvgamma=0, dxdt_truth=None)
148 | assert params_1 == [6, 50]
149 | assert params_2 == [4, 10]
150 |
151 | # def test_chebydiff(self):
152 | # try:
153 | # import pychebfun
154 | # except:
155 | # pytest.skip("could not import pychebfun, skipping test_chebydiff", allow_module_level=True)
156 |
157 | # params_1, val_1 = chebydiff(x, dt, params=None, tvgamma=tvgamma, dxdt_truth=dxdt_truth)
158 | # params_2, val_2 = chebydiff(x, dt, params=None, tvgamma=0, dxdt_truth=None)
159 | # self.assertListEqual(params_1, [9, 108])
160 | # self.assertListEqual(params_2, [9, 94])
161 |
--------------------------------------------------------------------------------
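
The tvgamma these tests share comes from the log-linear heuristic near the top of the file; worked out for the settings used there:

    import numpy as np

    cutoff_frequency = 0.1
    dt = 0.01
    log_gamma = -1.6 * np.log(cutoff_frequency) - 0.71 * np.log(dt) - 5.1
    tvgamma = np.exp(log_gamma)  # about 6.38 for these settings
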
/pynumdiff/tests/test_smooth_finite_difference.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for smoothing + finite difference methods
3 | """
4 | # pylint: skip-file
5 | import numpy as np
6 | from pynumdiff.smooth_finite_difference import mediandiff, meandiff, gaussiandiff, \
7 | friedrichsdiff, butterdiff, splinediff
8 |
9 |
10 | x = np.array([1., 4., 9., 3., 20.,
11 | 8., 16., 2., 15., 10.,
12 | 15., 3., 5., 7., 4.])
13 | dt = 0.01
14 |
15 |
16 | def test_mediandiff():
17 | params = [3, 2]
18 | x_hat, dxdt_hat = mediandiff(x, dt, params, options={'iterate': True})
19 | x_smooth = np.array([1., 4., 4., 8., 9., 8., 15., 10., 15., 10., 10., 5., 5., 5., 4.])
20 | dxdt = np.array([300., 150., 200., 250., 0., 300., 100., 0.,
21 | 0., -250., -250., -250., 0., -50., -100.])
22 | np.testing.assert_array_equal(x_smooth, x_hat)
23 | np.testing.assert_array_equal(dxdt, dxdt_hat)
24 |
25 | def test_meandiff():
26 | params = [3, 2]
27 | x_hat, dxdt_hat = meandiff(x, dt, params, options={'iterate': True})
28 | x_smooth = np.array([2.889, 4., 6.889, 8.778, 11.889, 11.222, 11.444, 9.556,
29 | 11.111, 10.556, 10.111, 7.333, 6., 5.111, 5.111])
30 | dxdt = np.array([111.111, 200., 238.889, 250., 122.222, -22.222,
31 | -83.333, -16.667, 50., -50., -161.111, -205.556,
32 | -111.111, -44.444, 0.])
33 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
34 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
35 |
36 | def test_gaussiandiff():
37 | params = [5]
38 | x_hat, dxdt_hat = gaussiandiff(x, dt, params, options={'iterate': False})
39 | x_smooth = np.array([1.805, 4.377, 6.66, 8.066, 13.508, 12.177, 11.278, 8.044,
40 | 11.116, 11.955, 11.178, 6.187, 5.127, 5.819, 4.706])
41 | dxdt = np.array([257.235, 242.77, 184.438, 342.42, 205.553, -111.535,
42 | -206.61, -8.093, 195.509, 3.089, -288.392, -302.545,
43 | -18.409, -21.032, -111.263])
44 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
45 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
46 |
47 | def test_friedrichsdiff():
48 | params = [5]
49 | x_hat, dxdt_hat = friedrichsdiff(x, dt, params, options={'iterate': False})
50 | x_smooth = np.array([1.884, 4.589, 5.759, 9.776, 11.456, 13.892, 9.519, 9.954,
51 | 9.697, 12.946, 9.992, 7.124, 5., 5.527, 4.884])
52 | dxdt = np.array([270.539, 193.776, 259.335, 284.855, 205.809, -96.888,
53 | -196.888, 8.921, 149.586, 14.73, -291.079, -249.586,
54 | -79.875, -5.809, -64.316])
55 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
56 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
57 |
58 | def test_butterdiff():
59 | params = [3, 0.074]
60 | x_hat, dxdt_hat = butterdiff(x, dt, params, options={'iterate': False})
61 | x_smooth = np.array([3.445, 4.753, 5.997, 7.131, 8.114, 8.914, 9.51, 9.891,
62 | 10.058, 10.02, 9.798, 9.42, 8.919, 8.332, 7.699])
63 | dxdt = np.array([130.832, 127.617, 118.881, 105.827, 89.169, 69.833, 48.871,
64 | 27.381, 6.431, -12.992, -30.023, -43.972, -54.368, -60.98,
65 | -63.326])
66 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
67 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
68 |
69 | def test_splinediff():
70 | params = [5, 2]
71 | x_hat, dxdt_hat = splinediff(x, dt, params, options={'iterate': False})
72 | x_smooth = np.array([0.995, 4.035, 8.874, 3.279, 19.555, 8.564, 15.386, 2.603,
73 | 14.455, 10.45, 14.674, 3.193, 4.916, 7.023, 3.997])
74 | dxdt = np.array([303.996, 393.932, -37.815, 534.063, 264.225, -208.442,
75 | -298.051, -46.561, 392.365, 10.93, -362.858, -487.87,
76 | 191.508, -45.968, -302.579])
77 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=3)
78 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=3)
79 |
--------------------------------------------------------------------------------
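
These tests also show the params/options convention of the implementation: with options={'iterate': True}, params grows a trailing iteration count. A minimal sketch:

    import numpy as np
    from pynumdiff.smooth_finite_difference import mediandiff

    dt = 0.01
    x = np.cumsum(np.random.normal(0, 1, 100))  # noisy random-walk signal
    x_hat, dxdt_hat = mediandiff(x, dt, [3, 2], options={'iterate': True})  # window 3, two passes
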
/pynumdiff/tests/test_total_variation_regularization.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for total variation regularization
3 | """
4 | # pylint: skip-file
5 |
6 | import numpy as np
7 | import pytest
8 | from pynumdiff.total_variation_regularization import *
9 |
10 | x = np.array([1., 4., 9., 3., 20.,
11 | 8., 16., 2., 15., 10.,
12 | 15., 3., 5., 7., 4.])
13 | dt = 0.01
14 |
15 |
16 | def test_velocity():
17 | try:
18 | import cvxpy
19 | except:
20 | pytest.skip("could not import cvxpy, skipping test_velocity", allow_module_level=True)
21 |
22 | params = [0.5]
23 | x_hat, dxdt_hat = velocity(x, dt, params, options={'solver': 'CVXOPT'})
24 | x_smooth = np.array([1.60206974, 3.84254116, 6.08301239, 8.32348272, 14.76608638,
25 | 12.76589239, 10.76569864, 7.70248886, 11.43239643, 11.4325017,
26 | 11.43260691, 6.42354819, 5.78305309, 5.14255819, 4.50206322])
27 | dxdt = np.array([2.24047187e+02, 2.24047133e+02, 2.24047078e+02, 4.34153700e+02,
28 | 2.22120483e+02, -2.00019387e+02, -2.53170177e+02, 3.33348898e+01,
29 | 1.86500642e+02, 1.05238579e-02, -2.50447675e+02, -2.82477691e+02,
30 | -6.40494998e+01, -6.40494935e+01, -6.40494871e+01])
31 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
32 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
33 |
34 | def test_iterative_velocity():
35 | params = [1, 0.05]
36 | x_hat, dxdt_hat = iterative_velocity(x, dt, params)
37 | x_smooth = np.array([1.256, 3.254, 5.249, 7.197, 8.96, 10.287, 11.08, 11.407,
38 | 11.305, 10.875, 10.235, 9.371, 8.305, 7.174, 6.042])
39 | dxdt = np.array([199.802, 199.742, 199.222, 190.43, 162.105, 103.282,
40 | 55.311, 10.12, -30.571, -55.409, -72.603, -100.119,
41 | -113.097, -113.097, -113.464])
42 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
43 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
44 |
45 | def test_acceleration():
46 | try:
47 | import cvxpy
48 | except:
49 | pytest.skip("could not import cvxpy, skipping test_acceleration", allow_module_level=True)
50 |
51 | params = [1]
52 | x_hat, dxdt_hat = acceleration(x, dt, params, options={'solver': 'CVXOPT'})
53 | x_smooth = np.array([0.87728375, 4.44441238, 7.31141687, 9.47829719, 10.94505335,
54 | 11.7116852, 11.78131319, 11.5560333, 11.03584752, 10.2207553,
55 | 9.11075633, 7.7058506, 6.41426253, 5.23599238, 4.17104012])
56 | dxdt = np.array([391.71907211, 321.70665613, 251.69424015, 181.6818242,
57 | 111.66940057, 41.81299196, -7.78259499, -37.27328368,
58 | -66.76389967, -96.25455924, -125.74523529, -134.82469003,
59 | -123.49291116, -112.16112081, -100.82933046])
60 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
61 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
62 |
63 | def test_smooth_acceleration():
64 | try:
65 | import cvxpy
66 | except:
67 | pytest.skip("could not import cvxpy, skipping test_smooth_acceleration", allow_module_level=True)
68 |
69 | params = [5, 30]
70 | x_hat, dxdt_hat = smooth_acceleration(x, dt, params, options={'solver': 'CVXOPT'})
71 | x_smooth = np.array([4.16480747, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
72 | 9.5044844, 9.97926076, 10.20730827, 10.18728338, 9.92792114,
73 | 9.44728533, 8.77174094, 7.93472066, 6.97538656, 5.93725369])
74 | dxdt = np.array([136.43269721, 129.9388182, 118.30858578, 102.15166804,
75 | 82.27996127, 59.65074227, 35.30453082, 10.30497111,
76 | -14.30994982, -37.56249817, -58.56466324, -76.54421499,
77 | -90.85984169, -101.00697716, -106.61959829])
78 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
79 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
80 |
81 | def test_jerk():
82 | try:
83 | import cvxpy
84 | except:
85 | pytest.skip("could not import cvxpy, skipping test_jerk", allow_module_level=True)
86 |
87 | params = [10]
88 | x_hat, dxdt_hat = jerk(x, dt, params, options={'solver': 'CVXOPT'})
89 | x_smooth = np.array([0.71013072, 4.51405229, 7.42619407, 9.53278029, 10.92003519,
90 | 11.674183, 11.88144796, 11.6280543, 11.00022625, 10.08418804,
91 | 8.9661639, 7.73237808, 6.4690548, 5.2624183, 4.19869281])
92 | dxdt = np.array([420.66993476, 335.80316742, 250.93640008, 174.69205619,
93 | 107.07013572, 48.07063861, -2.30643522, -44.06108581,
94 | -77.19331317, -101.70311726, -117.59049798, -124.85545525,
95 | -123.49798898, -113.51809914, -103.5382093])
96 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
97 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
98 |
99 | def test_jerk_sliding():
100 | try:
101 | import cvxpy
102 | except:
103 | pytest.skip("could not import cvxpy, skipping test_jerk_sliding", allow_module_level=True)
104 |
105 | params = [10]
106 | x_hat, dxdt_hat = jerk_sliding(x, dt, params, options={'solver': 'CVXOPT'})
107 | x_smooth = np.array([0.71013072, 4.51405229, 7.42619407, 9.53278029, 10.92003519,
108 | 11.674183, 11.88144796, 11.6280543, 11.00022625, 10.08418804,
109 | 8.9661639, 7.73237808, 6.4690548, 5.2624183, 4.19869281])
110 | dxdt = np.array([420.66993476, 335.80316742, 250.93640008, 174.69205619,
111 | 107.07013572, 48.07063861, -2.30643522, -44.06108581,
112 | -77.19331317, -101.70311726, -117.59049798, -124.85545525,
113 | -123.49798898, -113.51809914, -103.5382093])
114 | np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
115 | np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
116 |
--------------------------------------------------------------------------------
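
Unlike the cvxpy-backed methods above, iterative_velocity needs only scipy. A minimal sketch; reading params as [iterations, regularization weight] is inferred from the optimizer's bounds earlier in this document, not from documentation:

    import numpy as np
    from pynumdiff.total_variation_regularization import iterative_velocity

    dt = 0.01
    t = np.arange(0, 2, dt)
    x = np.sin(2 * np.pi * t) + np.random.normal(0, 0.1, t.shape)
    x_hat, dxdt_hat = iterative_velocity(x, dt, [1, 0.05])  # params as in test_iterative_velocity
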
/pynumdiff/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for utility functions
3 | """
4 | # pylint: skip-file
5 |
6 | import numpy as np
7 | from pynumdiff.utils import utility, simulate, evaluate
8 |
9 |
10 | def test_utility():
11 |     return  # TODO: there is a lot of basic functionality here (integration, etc.) that deserves tests
12 |
13 | def test_simulate():
14 | return
15 |
16 | def test_evaluate():
17 | return
18 |
--------------------------------------------------------------------------------
/pynumdiff/total_variation_regularization/__chartrand_tvregdiff__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # pylint: skip-file
3 |
4 | # u = TVRegDiff( data, iter, alph, u0, scale, ep, dx, plotflag, diagflag );
5 | #
6 | # Rick Chartrand (rickc@lanl.gov), Apr. 10, 2011
7 | # Please cite Rick Chartrand, "Numerical differentiation of noisy,
8 | # nonsmooth data," ISRN Applied Mathematics, Vol. 2011, Article ID 164564,
9 | # 2011.
10 | #
11 | # Inputs: (First three required; omitting the final N parameters for N < 7
12 | # or passing in [] results in default values being used.)
13 | # data Vector of data to be differentiated.
14 | #
15 | # iter Number of iterations to run the main loop. A stopping
16 | # condition based on the norm of the gradient vector g
17 | # below would be an easy modification. No default value.
18 | #
19 | # alph Regularization parameter. This is the main parameter
20 | # to fiddle with. Start by varying by orders of
21 | # magnitude until reasonable results are obtained. A
22 | #                   value to the nearest power of 10 is usually adequate.
23 | #                   No default value. Higher values increase
24 | #                   regularization strength and improve conditioning.
25 | #
26 | # u0 Initialization of the iteration. Default value is the
27 | # naive derivative (without scaling), of appropriate
28 | # length (this being different for the two methods).
29 | # Although the solution is theoretically independent of
30 | # the initialization, a poor choice can exacerbate
31 | # conditioning issues when the linear system is solved.
32 | #
33 | # scale 'large' or 'small' (case insensitive). Default is
34 | # 'small'. 'small' has somewhat better boundary
35 | #                   behavior, but becomes unwieldy for data larger than
36 | # 1000 entries or so. 'large' has simpler numerics but
37 | # is more efficient for large-scale problems. 'large' is
38 | # more readily modified for higher-order derivatives,
39 | # since the implicit differentiation matrix is square.
40 | #
41 | # ep Parameter for avoiding division by zero. Default value
42 | # is 1e-6. Results should not be very sensitive to the
43 | # value. Larger values improve conditioning and
44 | # therefore speed, while smaller values give more
45 | # accurate results with sharper jumps.
46 | #
47 | # dx Grid spacing, used in the definition of the derivative
48 | # operators. Default is the reciprocal of the data size.
49 | #
50 | # plotflag Flag whether to display plot at each iteration.
51 | # Default is 1 (yes). Useful, but adds significant
52 | # running time.
53 | #
54 | # diagflag Flag whether to display diagnostics at each
55 | # iteration. Default is 1 (yes). Useful for diagnosing
56 | # preconditioning problems. When tolerance is not met,
57 | # an early iterate being best is more worrying than a
58 | # large relative residual.
59 | #
60 | # Output:
61 | #
62 | # u Estimate of the regularized derivative of data. Due to
63 | # different grid assumptions, length( u ) =
64 | # length( data ) + 1 if scale = 'small', otherwise
65 | # length( u ) = length( data ).
66 |
67 | # Copyright notice:
68 | # Copyright 2010. Los Alamos National Security, LLC. This material
69 | # was produced under U.S. Government contract DE-AC52-06NA25396 for
70 | # Los Alamos National Laboratory, which is operated by Los Alamos
71 | # National Security, LLC, for the U.S. Department of Energy. The
72 | # Government is granted for, itself and others acting on its
73 | # behalf, a paid-up, nonexclusive, irrevocable worldwide license in
74 | # this material to reproduce, prepare derivative works, and perform
75 | # publicly and display publicly. Beginning five (5) years after
76 | # (March 31, 2011) permission to assert copyright was obtained,
77 | # subject to additional five-year worldwide renewals, the
78 | # Government is granted for itself and others acting on its behalf
79 | # a paid-up, nonexclusive, irrevocable worldwide license in this
80 | # material to reproduce, prepare derivative works, distribute
81 | # copies to the public, perform publicly and display publicly, and
82 | # to permit others to do so. NEITHER THE UNITED STATES NOR THE
83 | # UNITED STATES DEPARTMENT OF ENERGY, NOR LOS ALAMOS NATIONAL
84 | # SECURITY, LLC, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY,
85 | # EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
86 | # RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF
87 | # ANY INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR
88 | # REPRESENTS THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED
89 | # RIGHTS.
90 |
91 | # BSD License notice:
92 | # Redistribution and use in source and binary forms, with or without
93 | # modification, are permitted provided that the following conditions
94 | # are met:
95 | #
96 | # Redistributions of source code must retain the above
97 | # copyright notice, this list of conditions and the following
98 | # disclaimer.
99 | # Redistributions in binary form must reproduce the above
100 | # copyright notice, this list of conditions and the following
101 | # disclaimer in the documentation and/or other materials
102 | # provided with the distribution.
103 | # Neither the name of Los Alamos National Security nor the names of its
104 | # contributors may be used to endorse or promote products
105 | # derived from this software without specific prior written
106 | # permission.
107 | #
108 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
109 | # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
110 | # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
111 | # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
112 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
113 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
114 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
115 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
116 | # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
117 | # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
118 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
119 | # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
120 | # POSSIBILITY OF SUCH DAMAGE.
121 | #
122 | #########################################################
123 | # #
124 | # Python translation by Simone Sturniolo #
125 | # Rutherford Appleton Laboratory, STFC, UK (2014) #
126 | # simonesturniolo@gmail.com #
127 | # #
128 | #########################################################
129 |
130 | #########################################################
131 | # #
132 | # Summary of changes made by Floris van Breugel: #
133 | # #
134 | # (1) #
135 | # scipy.sparse.linalg.cg seems to require more #
136 | # iterations to reach the same result as #
137 | # MATLAB's pcg. (scipy version 1.1.0) #
138 | # A good guess is the length of the data #
139 | # #
140 | # (2) #
141 | # Drop last entry of derivative (u) for small scale #
142 | # This way both small and large scale results are same #
143 | # length as data #
144 | # #
145 | #########################################################
146 |
147 | import sys
148 |
149 | try:
150 | import numpy as np
151 | import scipy as sp
152 | from scipy import sparse
153 | from scipy.sparse import linalg as splin
154 | except ImportError:
155 | sys.exit("Numpy and Scipy must be installed for TVRegDiag to work - "
156 | "aborting")
157 |
158 | _has_matplotlib = True
159 |
160 | try:
161 | import matplotlib.pyplot as plt
162 | except ImportError:
163 | _has_matplotlib = False
164 | print("Matplotlib is not installed - plotting functionality disabled")
165 |
166 | # Utility function.
167 |
168 |
169 | def chop(v):
170 | return v[1:]
171 |
172 |
173 | def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
174 | plotflag=_has_matplotlib, diagflag=1, maxit=None):
175 | if maxit is None:
176 | maxit = len(data)
177 |
178 | # code starts here
179 | # Make sure we have a column vector
180 | data = np.array(data)
181 | if (len(data.shape) != 1):
182 | print("Error - data is not a column vector")
183 | return
184 | # Get the data size.
185 | n = len(data)
186 |
187 | # Default checking. (u0 is done separately within each method.)
188 | if dx is None:
189 | dx = 1.0 / n
190 |
191 | # Different methods for small- and large-scale problems.
192 | if (scale.lower() == 'small'):
193 |
194 | # Construct differentiation matrix.
195 | c = np.ones(n + 1) / dx
196 | D = sparse.spdiags([-c, c], [0, 1], n, n + 1)
197 |
198 | DT = D.transpose()
199 |
200 | # Construct antidifferentiation operator and its adjoint.
201 | def A(x): return chop(np.cumsum(x) - 0.5 * (x + x[0])) * dx
202 |
203 | def AT(w): return (sum(w) * np.ones(n + 1) -
204 | np.transpose(np.concatenate(([sum(w) / 2.0],
205 | np.cumsum(w) -
206 | w / 2.0)))) * dx
207 |
208 | # Default initialization is naive derivative
209 |
210 | if u0 is None:
211 | u0 = np.concatenate(([0], np.diff(data), [0]))
212 |
213 | u = u0
214 | # Since Au( 0 ) = 0, we need to adjust.
215 | ofst = data[0]
216 | # Precompute.
217 | ATb = AT(ofst - data) # input: size n
218 |
219 | # Main loop.
220 | for ii in range(1, itern+1):
221 | # Diagonal matrix of weights, for linearizing E-L equation.
222 | Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
223 | # Linearized diffusion matrix, also approximation of Hessian.
224 | L = dx * DT * Q * D
225 |
226 |
227 | # Gradient of functional.
228 | g = AT(A(u)) + ATb + alph * L * u
229 |
230 | # Prepare to solve linear equation.
231 | rtol = 1e-4
232 | # Simple preconditioner.
233 | P = alph * sparse.spdiags(L.diagonal() + 1, 0, n + 1, n + 1)
234 |
235 | def linop(v): return (alph * L * v + AT(A(v)))
236 | linop = splin.LinearOperator((n + 1, n + 1), linop)
237 |
238 | if diagflag:
239 | [s, info_i] = sparse.linalg.cg(
240 | linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
241 | M=P, atol=0)
242 |                 print('iteration {0:4d}: relative change = {1:.3e}, '
243 |                       'gradient norm = {2:.3e}'.format(ii,
244 |                                                        np.linalg.norm(s) /
245 |                                                        np.linalg.norm(u),
246 |                                                        np.linalg.norm(g)))
247 |                 if info_i > 0:
248 |                     print("WARNING - convergence to tolerance "
249 |                           "not achieved!")
250 |                 elif info_i < 0:
251 |                     print("WARNING - illegal input or breakdown")
252 | else:
253 | [s, info_i] = sparse.linalg.cg(
254 | linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
255 | M=P, atol=0)
256 | # Update solution.
257 | u = u - s
258 | # Display plot.
259 | if plotflag:
260 | plt.plot(u)
261 | plt.show()
262 | u = u[0:-1]
263 |
264 | elif (scale.lower() == 'large'):
265 |
266 | # Construct antidifferentiation operator and its adjoint.
267 | def A(v): return np.cumsum(v)
268 |
269 | def AT(w): return (sum(w) * np.ones(len(w)) -
270 | np.transpose(np.concatenate(([0.0],
271 | np.cumsum(w[:-1])))))
272 | # Construct differentiation matrix.
273 | c = np.ones(n)
274 | D = sparse.spdiags([-c, c], [0, 1], n, n) / dx
275 | mask = np.ones((n, n))
276 | mask[-1, -1] = 0.0
277 | D = sparse.dia_matrix(D.multiply(mask))
278 | DT = D.transpose()
279 | # Since Au( 0 ) = 0, we need to adjust.
280 | data = data - data[0]
281 | # Default initialization is naive derivative.
282 | if u0 is None:
283 | u0 = np.concatenate(([0], np.diff(data)))
284 | u = u0
285 | # Precompute.
286 | ATd = AT(data)
287 |
288 | # Main loop.
289 | for ii in range(1, itern + 1):
290 | # Diagonal matrix of weights, for linearizing E-L equation.
291 | Q = sparse.spdiags(1. / np.sqrt((D*u)**2.0 + ep), 0, n, n)
292 | # Linearized diffusion matrix, also approximation of Hessian.
293 | L = DT*Q*D
294 | # Gradient of functional.
295 | g = AT(A(u)) - ATd
296 | g = g + alph * L * u
297 | # Build preconditioner.
298 | c = np.cumsum(range(n, 0, -1))
299 | B = alph * L + sparse.spdiags(c[::-1], 0, n, n)
300 | # droptol = 1.0e-2
301 | R = sparse.dia_matrix(np.linalg.cholesky(B.todense()))
302 | # Prepare to solve linear equation.
303 | rtol = 1.0e-4
304 |
305 | def linop(v): return (alph * L * v + AT(A(v)))
306 | linop = splin.LinearOperator((n, n), linop)
307 |
308 |
309 | if diagflag:
310 | [s, info_i] = sparse.linalg.cg(
311 | linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
312 | M=np.dot(R.transpose(), R), atol=0)
313 |                 print('iteration {0:4d}: relative change = {1:.3e}, '
314 |                       'gradient norm = {2:.3e}'.format(ii,
315 |                                                        np.linalg.norm(s) /
316 |                                                        np.linalg.norm(u),
317 |                                                        np.linalg.norm(g)))
318 | if (info_i > 0):
319 | print("WARNING - convergence to tolerance not achieved!")
320 | elif (info_i < 0):
321 | print("WARNING - illegal input or breakdown")
322 |
323 | else:
324 | [s, info_i] = sparse.linalg.cg(
325 | linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
326 | M=np.dot(R.transpose(), R), atol=0)
327 | # Update current solution
328 | u = u + s
329 | # Display plot.
330 | if plotflag:
331 | plt.plot(u/dx)
332 | plt.show()
333 |
334 | u = u/dx
335 |
336 | return u
337 |
338 | # Small testing script
339 |
340 |
341 | if __name__ == "__main__":
342 |
343 | dx = 0.05
344 | x0 = np.arange(0, 2.0*np.pi, dx)
345 |
346 | testf = np.sin(x0)
347 |
348 | testf = testf + np.random.normal(0.0, 0.04, x0.shape)
349 |
350 | deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx,
351 | ep=1e-1, scale='small', plotflag=0)
352 | deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx,
353 | ep=1e-2, scale='large', plotflag=0)
354 |
355 | if (_has_matplotlib):
356 | plt.plot(np.cos(x0), label='Analytical', c=(0,0,0))
357 | plt.plot(np.gradient(testf, dx), label='numpy.gradient')
358 | plt.plot(deriv_sm, label='TVRegDiff (small)')
359 | plt.plot(deriv_lrg, label='TVRegDiff (large)')
360 | plt.legend()
361 | plt.show()
--------------------------------------------------------------------------------
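For reference, the functional being minimized above (Chartrand 2011) can be written, in the notation of the code (A the antidifferentiation operator, f the data, and alpha the regularization weight `alph`):

    F(u) = \alpha \int |u'| \, dt + \frac{1}{2} \int (A u - f)^2 \, dt

Each pass of the main loop linearizes the Euler-Lagrange equation of F and solves a linear system in the operator \alpha L + A^T A (L being the linearized diffusion matrix) against the current gradient g by preconditioned conjugate gradients, then updates u with the resulting step.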
/pynumdiff/total_variation_regularization/__init__.py:
--------------------------------------------------------------------------------
1 | """This module implements some common total variation regularization methods
2 | """
3 | try:
4 | import cvxpy
5 | from ._total_variation_regularization import velocity, acceleration, jerk, jerk_sliding, smooth_acceleration
6 | except ImportError:
7 | from warnings import warn
8 | warn("""Limited Total Variation Regularization Support Detected! CVXPY is not installed.
9 | Many Total Variation Methods require CVXPY including: velocity, acceleration, jerk, jerk_sliding, smooth_acceleration.
10 | Please install CVXPY to use these methods. It is recommended to also install MOSEK and obtain a MOSEK license.
11 | You can still use: total_variation_regularization.iterative_velocity.""")
12 |
13 | from ._total_variation_regularization import iterative_velocity
14 |
15 | __all__ = ['velocity', 'acceleration', 'jerk', 'jerk_sliding', 'smooth_acceleration', 'iterative_velocity'] # So these get treated as direct members of the module by sphinx
16 |
17 |
--------------------------------------------------------------------------------
/pynumdiff/total_variation_regularization/_total_variation_regularization.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import numpy as np
3 |
4 | from pynumdiff.total_variation_regularization import __chartrand_tvregdiff__
5 | import pynumdiff.smooth_finite_difference
6 | from pynumdiff.utils import utility
7 |
8 | try:
9 | import cvxpy
10 | except ImportError:
11 | pass
12 |
13 | # Iterative total variation regularization
14 | def iterative_velocity(x, dt, params, options=None):
15 | """
16 | Use an iterative solver to find the total variation regularized 1st derivative.
17 | See __chartrand_tvregdiff__.py for details, author info, and license
18 | Methods described in: Rick Chartrand, "Numerical differentiation of noisy, nonsmooth data,"
19 | ISRN Applied Mathematics, Vol. 2011, Article ID 164564, 2011.
20 | Original code (MATLAB and python): https://sites.google.com/site/dnartrahckcir/home/tvdiff-code
21 |
22 | :param x: array of time series to differentiate
23 | :type x: np.array (float)
24 |
25 | :param dt: time step size
26 | :type dt: float
27 |
28 | :param params: a list consisting of:
29 |
30 |         - iterations: Number of iterations to run the solver. More iterations result in blockier derivatives, which approach the convex result
31 | - gamma: Regularization parameter.
32 |
33 | :type params: list (int, float)
34 |
35 | :param options: a dictionary with 2 key value pairs
36 |
37 |         - 'cg_maxiter': Max number of iterations to use in scipy.sparse.linalg.cg. Default here is 1000; if set to None, TVRegDiff falls back to maxiter = len(x), which works well in our test examples.
38 |         - 'scale': This method has two different numerical options. From __chartrand_tvregdiff__.py: 'large' or 'small' (case insensitive). Default is 'small'. 'small' has somewhat better boundary behavior, but becomes unwieldy for data larger than 1000 entries or so. 'large' has simpler numerics but is more efficient for large-scale problems. 'large' is more readily modified for higher-order derivatives, since the implicit differentiation matrix is square.
39 |
40 | :type options: dict {'cg_maxiter': (int), 'scale': (string)}, optional
41 |
42 | :return: a tuple consisting of:
43 |
44 | - x_hat: estimated (smoothed) x
45 | - dxdt_hat: estimated derivative of x
46 |
47 | :rtype: tuple -> (np.array, np.array)
48 | """
49 |
50 | if options is None:
51 | options = {'cg_maxiter': 1000, 'scale': 'small'}
52 |
53 | iterations, gamma = params
54 | dxdt_hat = __chartrand_tvregdiff__.TVRegDiff(x, iterations, gamma, dx=dt,
55 | maxit=options['cg_maxiter'], scale=options['scale'],
56 |                                                  ep=1e-6, u0=None, plotflag=False, diagflag=0)
57 | x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
58 | x0 = utility.estimate_initial_condition(x, x_hat)
59 | x_hat = x_hat + x0
60 |
61 | return x_hat, dxdt_hat
62 |
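# A minimal usage sketch for iterative_velocity (the parameter values here are
# illustrative, not tuned recommendations):
#
#     import numpy as np
#     from pynumdiff.total_variation_regularization import iterative_velocity
#
#     dt = 0.01
#     t = np.arange(0, 4, dt)
#     x = np.sin(2*np.pi*t) + np.random.normal(0, 0.05, t.shape)
#     x_hat, dxdt_hat = iterative_velocity(x, dt, [1, 0.05])  # params = [iterations, gamma]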
63 |
64 | # Generalized total variation regularized derivatives
65 | def __total_variation_regularized_derivative__(x, dt, N, gamma, solver='MOSEK'):
66 | """
67 | Use convex optimization (cvxpy) to solve for the Nth total variation regularized derivative.
68 | Default solver is MOSEK: https://www.mosek.com/
69 |
70 | :param x: (np.array of floats, 1xN) time series to differentiate
71 | :param dt: (float) time step
72 | :param N: (int) 1, 2, or 3, the Nth derivative to regularize
73 | :param gamma: (float) regularization parameter
74 | :param solver: (string) Solver to use. Solver options include: 'MOSEK' and 'CVXOPT',
75 | in testing, 'MOSEK' was the most robust.
76 | :return: x_hat : estimated (smoothed) x
77 | dxdt_hat : estimated derivative of x
78 | """
79 |
80 | # Normalize
81 | mean = np.mean(x)
82 | std = np.std(x)
83 | x = (x-mean)/std
84 |
85 | # Define the variables for the highest order derivative and the integration constants
86 | var = cvxpy.Variable(len(x) + N)
87 |
88 | # Recursively integrate the highest order derivative to get back to the position
89 | derivatives = [var[N:]]
90 | for i in range(N):
91 | d = cvxpy.cumsum(derivatives[-1]) + var[i]
92 | derivatives.append(d)
93 |
94 |     # Compare the recursively integrated position to the noisy position
95 | sum_squared_error = cvxpy.sum_squares(derivatives[-1] - x)
96 |
97 | # Total variation regularization on the highest order derivative
98 | r = cvxpy.sum(gamma*cvxpy.tv(derivatives[0]))
99 |
100 | # Set up and solve the optimization problem
101 | obj = cvxpy.Minimize(sum_squared_error + r)
102 | prob = cvxpy.Problem(obj)
103 | prob.solve(solver=solver)
104 |
105 | # Recursively calculate the value of each derivative
106 | final_derivative = var.value[N:]
107 | derivative_values = [final_derivative]
108 | for i in range(N):
109 | d = np.cumsum(derivative_values[-1]) + var.value[i]
110 | derivative_values.append(d)
111 | for i, _ in enumerate(derivative_values):
112 | derivative_values[i] = derivative_values[i]/(dt**(N-i))
113 |
114 | # Extract the velocity and smoothed position
115 | dxdt_hat = derivative_values[-2]
116 | x_hat = derivative_values[-1]
117 |
118 | dxdt_hat = (dxdt_hat[0:-1] + dxdt_hat[1:])/2
119 | ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2]
120 | dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f
121 | dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f))
122 |
123 | # fix first point
124 | d = dxdt_hat[2] - dxdt_hat[1]
125 | dxdt_hat[0] = dxdt_hat[1] - d
126 |
127 | return x_hat*std+mean, dxdt_hat*std
128 |
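# In discrete form, the problem solved above is (a sketch, following the code):
#
#     minimize over u:  || integrate_N(u) - x ||_2^2 + gamma * TV(u)
#
# where u is the Nth derivative, integrate_N denotes N repeated cumulative sums
# with free integration constants (the first N entries of `var`), and TV is
# cvxpy.tv. The mean/std normalization applied above is undone on return.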
129 |
130 | def velocity(x, dt, params, options=None):
131 | """
132 | Use convex optimization (cvxpy) to solve for the velocity total variation regularized derivative.
133 | Default solver is MOSEK: https://www.mosek.com/
134 |
135 | :param x: array of time series to differentiate
136 | :type x: np.array (float)
137 |
138 | :param dt: time step size
139 | :type dt: float
140 |
141 |     :param params: [gamma] or simply gamma, where gamma (float) is the
142 |         regularization parameter
143 |
144 | :type params: list (float) or float
145 |
146 | :param options: {'solver': SOLVER} SOLVER options include: 'MOSEK' and 'CVXOPT',
147 | in testing, 'MOSEK' was the most robust.
148 |
149 | :type options: dict {'solver': SOLVER}, optional
150 |
151 | :return: a tuple consisting of:
152 |
153 | - x_hat: estimated (smoothed) x
154 | - dxdt_hat: estimated derivative of x
155 |
156 | :rtype: tuple -> (np.array, np.array)
157 | """
158 |
159 | if options is None:
160 | options = {'solver': 'MOSEK'}
161 |
162 | if isinstance(params, list):
163 | gamma = params[0]
164 | else:
165 | gamma = params
166 |
167 | return __total_variation_regularized_derivative__(x, dt, 1, gamma, solver=options['solver'])
168 |
169 |
170 | def acceleration(x, dt, params, options=None):
171 | """
172 | Use convex optimization (cvxpy) to solve for the acceleration total variation regularized derivative.
173 | Default solver is MOSEK: https://www.mosek.com/
174 |
175 | :param x: array of time series to differentiate
176 | :type x: np.array (float)
177 |
178 | :param dt: time step size
179 | :type dt: float
180 |
181 |     :param params: [gamma] or simply gamma, where gamma (float) is the
182 |         regularization parameter
183 |
184 | :type params: list (float) or float
185 |
186 | :param options: {'solver': SOLVER} SOLVER options include: 'MOSEK' and 'CVXOPT',
187 | in testing, 'MOSEK' was the most robust.
188 |
189 | :type options: dict {'solver': SOLVER}, optional
190 |
191 | :return: a tuple consisting of:
192 |
193 | - x_hat: estimated (smoothed) x
194 | - dxdt_hat: estimated derivative of x
195 |
196 | :rtype: tuple -> (np.array, np.array)
197 | """
198 |
199 | if options is None:
200 | options = {'solver': 'MOSEK'}
201 |
202 | if isinstance(params, list):
203 | gamma = params[0]
204 | else:
205 | gamma = params
206 |
207 | return __total_variation_regularized_derivative__(x, dt, 2, gamma, solver=options['solver'])
208 |
209 |
210 | def jerk(x, dt, params, options=None):
211 | """
212 | Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative.
213 | Default solver is MOSEK: https://www.mosek.com/
214 |
215 | :param x: array of time series to differentiate
216 | :type x: np.array (float)
217 |
218 | :param dt: time step size
219 | :type dt: float
220 |
221 |     :param params: [gamma] or simply gamma, where gamma (float) is the
222 |         regularization parameter
223 |
224 | :type params: list (float) or float
225 |
226 | :param options: {'solver': SOLVER} SOLVER options include: 'MOSEK' and 'CVXOPT',
227 | in testing, 'MOSEK' was the most robust.
228 |
229 | :type options: dict {'solver': SOLVER}, optional
230 |
231 | :return: a tuple consisting of:
232 |
233 | - x_hat: estimated (smoothed) x
234 | - dxdt_hat: estimated derivative of x
235 |
236 | :rtype: tuple -> (np.array, np.array)
237 | """
238 |
239 | if options is None:
240 | options = {'solver': 'MOSEK'}
241 |
242 | if isinstance(params, list):
243 | gamma = params[0]
244 | else:
245 | gamma = params
246 |
247 | return __total_variation_regularized_derivative__(x, dt, 3, gamma, solver=options['solver'])
248 |
249 |
250 | def smooth_acceleration(x, dt, params, options=None):
251 | """
252 |     Use convex optimization (cvxpy) to solve for the acceleration total variation regularized derivative,
253 |     then apply a convolutional gaussian smoother to the resulting derivative to smooth out the peaks.
254 | The end result is similar to the jerk method, but can be more time-efficient.
255 |
256 | Default solver is MOSEK: https://www.mosek.com/
257 |
258 | :param x: time series to differentiate
259 | :type x: np.array of floats, 1xN
260 |
261 | :param dt: time step
262 | :type dt: float
263 |
264 | :param params: list with values [gamma, window_size], where gamma (float) is the regularization parameter, window_size (int) is the window_size to use for the gaussian kernel
265 | :type params: list -> [float, int]
266 |
267 | :param options: a dictionary indicating which SOLVER option to use, ie. 'MOSEK' or 'CVXOPT', in testing, 'MOSEK' was the most robust.
268 | :type options: dict {'solver': SOLVER}
269 |
270 | :return: a tuple consisting of:
271 | - x_hat: estimated (smoothed) x
272 | - dxdt_hat: estimated derivative of x
273 | :rtype: tuple -> (np.array, np.array)
274 |
275 | """
276 | if options is None:
277 | options = {'solver': 'MOSEK'}
278 |
279 | gamma, window_size = params
280 |
281 | x_hat, dxdt_hat = acceleration(x, dt, [gamma], options=options)
282 | kernel = utility._gaussian_kernel(window_size)
283 | dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel, 1)
284 |
285 | x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
286 | x0 = utility.estimate_initial_condition(x, x_hat)
287 | x_hat = x_hat + x0
288 |
289 | return x_hat, dxdt_hat
290 |
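# Usage sketch for smooth_acceleration (gamma and window_size are illustrative,
# and the 'CVXOPT' solver is assumed to be installed):
#
#     x_hat, dxdt_hat = smooth_acceleration(x, dt, [0.01, 51],
#                                           options={'solver': 'CVXOPT'})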
291 |
292 | def jerk_sliding(x, dt, params, options=None):
293 | """
294 |     Use convex optimization (cvxpy) to solve for the jerk total variation regularized derivative on
295 |     sliding windows (1000 points, 200-point stride, with linear-ramp blending of the overlaps), which keeps the optimization tractable for long time series. Default solver is MOSEK: https://www.mosek.com/
296 |
297 | :param x: array of time series to differentiate
298 | :type x: np.array (float)
299 |
300 | :param dt: time step size
301 | :type dt: float
302 |
303 |     :param params: [gamma] or simply gamma, where gamma (float) is the
304 |         regularization parameter
305 |
306 | :type params: list (float) or float
307 |
308 | :param options: {'solver': SOLVER} SOLVER options include: 'MOSEK' and 'CVXOPT',
309 | in testing, 'MOSEK' was the most robust.
310 |
311 | :type options: dict {'solver': SOLVER}, optional
312 |
313 | :return: a tuple consisting of:
314 |
315 | - x_hat: estimated (smoothed) x
316 | - dxdt_hat: estimated derivative of x
317 |
318 | :rtype: tuple -> (np.array, np.array)
319 | """
320 |
321 | if options is None:
322 | options = {'solver': 'MOSEK'}
323 |
324 | if isinstance(params, list):
325 | gamma = params[0]
326 | else:
327 | gamma = params
328 |
329 | window_size = 1000
330 | stride = 200
331 |
332 | if len(x) < window_size:
333 | return jerk(x, dt, params, options=options)
334 |
335 | # slide the jerk
336 | final_xsmooth = []
337 | final_xdot_hat = []
338 | first_idx = 0
339 | final_idx = first_idx + window_size
340 | last_loop = False
341 |
342 | final_weighting = []
343 |
344 | try:
345 | while not last_loop:
346 | xsmooth, xdot_hat = __total_variation_regularized_derivative__(x[first_idx:final_idx], dt, 3,
347 | gamma, solver=options['solver'])
348 |
349 | xsmooth = np.hstack(([0]*first_idx, xsmooth, [0]*(len(x)-final_idx)))
350 | final_xsmooth.append(xsmooth)
351 |
352 | xdot_hat = np.hstack(([0]*first_idx, xdot_hat, [0]*(len(x)-final_idx)))
353 | final_xdot_hat.append(xdot_hat)
354 |
355 | # blending
356 | w = np.hstack(([0]*first_idx,
357 | np.arange(1, 201)/200,
358 | [1]*(final_idx-first_idx-400),
359 | (np.arange(1, 201)/200)[::-1],
360 | [0]*(len(x)-final_idx)))
361 | final_weighting.append(w)
362 |
363 | if final_idx >= len(x):
364 | last_loop = True
365 | else:
366 | first_idx += stride
367 | final_idx += stride
368 | if final_idx > len(x):
369 | final_idx = len(x)
370 | if final_idx - first_idx < 200:
371 | first_idx -= (200 - (final_idx - first_idx))
372 |
373 | # normalize columns
374 | weights = np.vstack(final_weighting)
375 | for c in range(weights.shape[1]):
376 | weights[:, c] /= np.sum(weights[:, c])
377 |
378 | # weighted sums
379 | xsmooth = np.vstack(final_xsmooth)
380 | xsmooth = np.sum(xsmooth*weights, axis=0)
381 |
382 | xdot_hat = np.vstack(final_xdot_hat)
383 | xdot_hat = np.sum(xdot_hat*weights, axis=0)
384 |
385 | return xsmooth, xdot_hat
386 |
387 |     except ValueError:
388 |         print('Solver failed, returning finite difference instead')
389 |         from pynumdiff.finite_difference import first_order  # utility has no finite_difference
390 |         return first_order(x, dt)
--------------------------------------------------------------------------------
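A hedged end-to-end sketch of the cvxpy-based entry points (the gamma value is illustrative, and the 'CVXOPT' solver is assumed to be installed; 'MOSEK' additionally requires a license):

    import numpy as np
    from pynumdiff.total_variation_regularization import velocity

    dt = 0.01
    t = np.arange(0, 4, dt)
    x = np.cos(t) + np.random.normal(0, 0.02, t.shape)
    x_hat, dxdt_hat = velocity(x, dt, [0.1], options={'solver': 'CVXOPT'})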
/pynumdiff/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/florisvb/PyNumDiff/b171ac5582732c703f24976e01651730a146f50c/pynumdiff/utils/__init__.py
--------------------------------------------------------------------------------
/pynumdiff/utils/_pi_cruise_control.py:
--------------------------------------------------------------------------------
1 | import numpy as _np
2 |
3 | def run(timeseries_length=4, dt=0.01):
4 | """
5 | Simulate proportional integral control of a car attempting to maintain constant velocity while going up and down hills.
6 | This function is used for testing differentiation methods.
7 |
8 |     This is a linearized take on a model similar to the one described in Astrom and Murray 2008, Chapter 3.
9 |
10 | :param timeseries_length: number of seconds to simulate
11 | :type timeseries_length: float
12 |
13 | :param dt: timestep in seconds
14 | :type dt: float
15 |
16 |     :return: a tuple consisting of arrays of size [N, M], where M is the number of time steps:
17 | - state_vals: state of the car, i.e. position and velocity as a function of time
18 | - disturbances: disturbances, ie. hills, that the car is subjected to
19 | - controls: control inputs applied by the car
20 | :rtype: tuple -> (np.array, np.array, np.array)
21 | """
22 |
23 | t = _np.arange(0, timeseries_length+dt, dt)
24 |
25 | # disturbance
26 | hills = _np.sin(2*_np.pi*t) + 0.3*_np.sin(4*2*_np.pi*t + 0.5) + 1.2*_np.sin(1.7*2*_np.pi*t + 0.5)
27 | hills = 0.01*hills
28 |
29 | # parameters
30 | mg = 10000 # mass*gravity
31 | fr = 0.9 # friction
32 | ki = 5/0.01*dt # integral control
33 | kp = 25/0.01*dt # proportional control
34 | vd = 0.5 # desired velocity
35 |
36 | A = _np.array([[1, dt, 0, 0, 0],
37 | [0, 1, dt, 0, 0],
38 | [0, -fr, 0, -mg, ki],
39 | [0, 0, 0, 0, 0],
40 | [0, 0, 0, 0, 1]])
41 |
42 | B = _np.array([[0, 0],
43 | [0, 0],
44 | [0, kp],
45 | [1, 0],
46 | [0, 1]])
47 |
48 | x0 = _np.array([0, 0, 0, hills[0], 0]).reshape(A.shape[0], 1)
49 |
50 | # run simulation
51 | xs = [x0]
52 | us = [_np.array([0, 0]).reshape([2,1])]
53 | for i in range(1, len(hills)-1):
54 | u = _np.array([hills[i], vd - xs[-1][1,0]]).reshape([2,1])
55 | xnew = A@xs[-1] + B@u
56 | xs.append(xnew)
57 | us.append(u)
58 |
59 | xs = _np.hstack(xs)
60 | us = _np.hstack(us)
61 |
62 | if len(hills.shape) == 1:
63 | hills = _np.reshape(hills, [1, len(hills)])
64 |
65 | return xs, hills, us
66 |
--------------------------------------------------------------------------------
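A short usage sketch for the simulation above (the row indices follow the state layout in the code: position is row 0, velocity row 1):

    from pynumdiff.utils import _pi_cruise_control

    state_vals, disturbances, controls = _pi_cruise_control.run(timeseries_length=4, dt=0.01)
    position, velocity = state_vals[0], state_vals[1]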
/pynumdiff/utils/evaluate.py:
--------------------------------------------------------------------------------
1 | """
2 | Metrics for evaluating the quality of derivative estimates
3 | """
4 | import numpy as _np
5 | import matplotlib.pyplot as _plt
6 | import scipy.stats as _scipy_stats
7 |
8 | # local imports
9 | from pynumdiff.utils import utility as _utility
10 | from pynumdiff.finite_difference import first_order as _finite_difference
11 |
12 |
13 | # pylint: disable-msg=too-many-locals, too-many-arguments
14 | def plot(x, dt, x_hat, dxdt_hat, x_truth, dxdt_truth, xlim=None, ax_x=None, ax_dxdt=None,
15 | show_error=True, markersize=5):
16 | """
17 | Make comparison plots of 'x (blue) vs x_truth (black) vs x_hat (red)' and
18 | 'dxdt_truth (black) vs dxdt_hat (red)'
19 |
20 | :param x: array of noisy time series
21 | :type x: np.array (float)
22 |
23 | :param dt: a float number representing the time step size
24 | :type dt: float
25 |
26 | :param x_hat: array of smoothed estimation of x
27 | :type x_hat: np.array (float)
28 |
29 | :param dxdt_hat: array of estimated derivative
30 | :type dxdt_hat: np.array (float)
31 |
32 | :param x_truth: array of noise-free time series
33 | :type x_truth: np.array (float)
34 |
35 | :param dxdt_truth: array of true derivative
36 | :type dxdt_truth: np.array (float)
37 |
38 |     :param xlim: a list specifying the time range to display, [t_min, t_max]
39 |     :type xlim: list (2 floats), optional
40 |
41 | :param ax_x: axis of the first plot
42 | :type ax_x: :class:`matplotlib.axes`, optional
43 |
44 | :param ax_dxdt: axis of the second plot
45 | :type ax_dxdt: :class:`matplotlib.axes`, optional
46 |
47 | :param show_error: whether to show the rmse
48 | :type show_error: boolean, optional
49 |
50 | :param markersize: marker size of noisy observations
51 | :type markersize: int, optional
52 |
53 | :return: Display two plots
54 | :rtype: None
55 | """
56 | t = _np.arange(0, dt*len(x), dt)
57 | if ax_x is None and ax_dxdt is None:
58 | fig = _plt.figure(figsize=(20, 6))
59 | ax_x = fig.add_subplot(121)
60 | ax_dxdt = fig.add_subplot(122)
61 |
62 | if xlim is None:
63 | xlim = [t[0], t[-1]]
64 |
65 | if ax_x is not None:
66 | if x_hat is not None:
67 | ax_x.plot(t, x_hat, color='red')
68 | ax_x.plot(t, x_truth, '--', color='black')
69 | ax_x.plot(t, x, '.', color='blue', zorder=-100, markersize=markersize)
70 | ax_x.set_ylabel('Position', fontsize=20)
71 | ax_x.set_xlabel('Time', fontsize=20)
72 | ax_x.set_xlim(xlim[0], xlim[-1])
73 | ax_x.tick_params(axis='x', labelsize=15)
74 | ax_x.tick_params(axis='y', labelsize=15)
75 | ax_x.set_rasterization_zorder(0)
76 |
77 | if ax_dxdt is not None:
78 | ax_dxdt.plot(t, dxdt_hat, color='red')
79 | ax_dxdt.plot(t, dxdt_truth, '--', color='black', linewidth=3)
80 | ax_dxdt.set_ylabel('Velocity', fontsize=20)
81 | ax_dxdt.set_xlabel('Time', fontsize=20)
82 | ax_dxdt.set_xlim(xlim[0], xlim[-1])
83 | ax_dxdt.tick_params(axis='x', labelsize=15)
84 | ax_dxdt.tick_params(axis='y', labelsize=15)
85 | ax_dxdt.set_rasterization_zorder(0)
86 |
87 | if show_error:
88 | _, _, rms_dxdt = metrics(x, dt, x_hat, dxdt_hat, x_truth, dxdt_truth)
89 | print('RMS error in velocity: ', rms_dxdt)
90 |
91 |
92 | def __rms_error__(a, e):
93 | """
94 | Calculate rms error
95 |
96 | :param a: the first array
97 | :param e: the second array
98 | :return: a float number representing the rms error
99 | """
100 | if _np.max(_np.abs(a-e)) > 1e16:
101 | return 1e16
102 | s_error = _np.ravel((a - e))**2
103 | ms_error = _np.mean(s_error)
104 | rms_error = _np.sqrt(ms_error)
105 | return rms_error
106 |
107 |
108 | def metrics(x, dt, x_hat, dxdt_hat, x_truth=None, dxdt_truth=None, padding=None):
109 | """
110 | Evaluate x_hat based on various metrics, depending on whether dxdt_truth and x_truth are known or not.
111 |
112 | :param x: time series that was differentiated
113 | :type x: np.array
114 |
115 | :param dt: time step in seconds
116 | :type dt: float
117 |
118 | :param x_hat: estimated (smoothed) x
119 | :type x_hat: np.array
120 |
121 | :param dxdt_hat: estimated xdot
122 | :type dxdt_hat: np.array
123 |
124 | :param x_truth: true value of x, if known, optional
125 | :type x_truth: np.array like x or None
126 |
127 | :param dxdt_truth: true value of dxdt, if known, optional
128 | :type dxdt_truth: np.array like x or None
129 |
130 |     :param padding: number of snapshots on either side of the array to ignore when calculating the metric. If 'auto' or None, defaults to 2.5% of the size of x
131 |     :type padding: int, None, or 'auto'
132 |
133 | :return: a tuple containing the following:
134 | - rms_rec_x: RMS error between the integral of dxdt_hat and x
135 | - rms_x: RMS error between x_hat and x_truth, returns None if x_truth is None
136 |         - rms_dxdt: RMS error between dxdt_hat and dxdt_truth, returns None if dxdt_truth is None
137 | :rtype: tuple -> (float, float, float)
138 |
139 | """
140 | if padding is None or padding == 'auto':
141 | padding = int(0.025*len(x))
142 | padding = max(padding, 1)
143 | if _np.isnan(x_hat).any():
144 | return _np.nan, _np.nan, _np.nan
145 |
146 | # RMS dxdt
147 | if dxdt_truth is not None:
148 | rms_dxdt = __rms_error__(dxdt_hat[padding:-padding], dxdt_truth[padding:-padding])
149 | else:
150 | rms_dxdt = None
151 |
152 | # RMS x
153 | if x_truth is not None:
154 | rms_x = __rms_error__(x_hat[padding:-padding], x_truth[padding:-padding])
155 | else:
156 | rms_x = None
157 |
158 | # RMS reconstructed x
159 | rec_x = _utility.integrate_dxdt_hat(dxdt_hat, dt)
160 | x0 = _utility.estimate_initial_condition(x, rec_x)
161 | rec_x = rec_x + x0
162 | rms_rec_x = __rms_error__(rec_x[padding:-padding], x[padding:-padding])
163 |
164 | return rms_rec_x, rms_x, rms_dxdt
165 |
166 |
167 | def error_correlation(dxdt_hat, dxdt_truth, padding=None):
168 | """
169 |     Calculate the squared Pearson correlation coefficient between the estimation error (dxdt_hat - dxdt_truth) and the true dxdt
170 |
171 | :param dxdt_hat: estimated xdot
172 | :type dxdt_hat: np.array
173 |
174 | :param dxdt_truth: true value of dxdt, if known, optional
175 | :type dxdt_truth: np.array like x or None
176 |
177 |     :param padding: number of snapshots on either side of the array to ignore when calculating the metric. If 'auto' or None, defaults to 2.5% of the size of dxdt_hat
178 |     :type padding: int, None, or 'auto'
179 |
180 | :return: r-squared correlation coefficient
181 | :rtype: float
182 |
183 | """
184 | if padding is None or padding == 'auto':
185 | padding = int(0.025*len(dxdt_hat))
186 | padding = max(padding, 1)
187 | errors = (dxdt_hat[padding:-padding] - dxdt_truth[padding:-padding])
188 | r = _scipy_stats.linregress(dxdt_truth[padding:-padding] -
189 | _np.mean(dxdt_truth[padding:-padding]), errors)
190 | return r.rvalue**2
191 |
192 |
193 | def rmse(dxdt_hat, dxdt_truth, padding=None):
194 | """
195 | Calculate the Root Mean Squared Error between the estimated dxdt and true dxdt
196 |
197 | :param dxdt_hat: estimated xdot
198 | :type dxdt_hat: np.array
199 |
200 | :param dxdt_truth: true value of dxdt, if known, optional
201 | :type dxdt_truth: np.array like x or None
202 |
203 |     :param padding: number of snapshots on either side of the array to ignore when calculating the metric. If 'auto' or None, defaults to 2.5% of the size of dxdt_hat
204 |     :type padding: int, None, or 'auto'
205 |
206 | :return: Root Mean Squared Error
207 | :rtype: float
208 | """
209 | if padding is None or padding == 'auto':
210 | padding = int(0.025*len(dxdt_hat))
211 | padding = max(padding, 1)
212 | RMSE = _np.sqrt(_np.mean((dxdt_hat[padding:-padding] - dxdt_truth[padding:-padding])**2))
213 | return RMSE
214 |
--------------------------------------------------------------------------------
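Typical use of these metrics when ground truth is available (a sketch; x, x_hat, dxdt_hat, x_true, and dxdt_true stand for arrays produced elsewhere):

    from pynumdiff.utils import evaluate

    rms_rec_x, rms_x, rms_dxdt = evaluate.metrics(x, dt, x_hat, dxdt_hat,
                                                  x_truth=x_true, dxdt_truth=dxdt_true)
    r_squared = evaluate.error_correlation(dxdt_hat, dxdt_true)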
/pynumdiff/utils/old_pi_cruise_control.py:
--------------------------------------------------------------------------------
1 | """
2 | Simulation and PI control of a car's cruise controller (older implementation)
3 | """
4 |
5 | import numpy as np
6 | from pynumdiff.utils import utility
7 |
8 | parameters = {'omega_m': 420,
9 | 'T_m': 190,
10 | 'beta': 0.4,
11 | 'Cr': 0.01,
12 | 'Cd': 0.32,
13 | 'A': 2.4,
14 | 'g': 9.8,
15 | 'm': 3000,
16 | 'rho': 1.3,
17 | 'v_r': 30,
18 | 'k_p': 2,
19 | 'k_i': 2
20 | }
21 |
22 |
23 | def triangle(iterations, dt):
24 | """
25 | Create sawtooth pattern of hills for the car to drive over.
26 |
27 | :param iterations: number of time points in time series
28 | :type iterations: int
29 |
30 | :param dt: time step in seconds
31 | :type dt: float
32 |
33 | :return: time series of hills (angle of road as a function of time)
34 | :rtype: np.matrix
35 |
36 | """
37 | t = np.arange(0, iterations*dt, dt)
38 | continuous_x = np.sin(0.02*t*np.sqrt(t))
39 |
40 | # find peaks and valleys
41 | peaks, valleys = utility.peakdet(continuous_x, 0.1)
42 |
43 | # organize peaks and valleys
44 | if len(peaks) > 0:
45 | reversal_idxs = peaks[:, 0].astype(int).tolist()
46 | reversal_vals = peaks[:, 1].tolist()
47 | else:
48 | reversal_idxs = []
49 | reversal_vals = []
50 | if len(valleys) > 0:
51 | reversal_idxs.extend(valleys[:, 0].astype(int).tolist())
52 | reversal_vals.extend(valleys[:, 1].tolist())
53 |
54 | reversal_idxs.extend([0, len(continuous_x)-1])
55 | reversal_vals.extend([0, continuous_x[-1]])
56 |
57 | idx = np.argsort(reversal_idxs)
58 | reversal_idxs = np.array(reversal_idxs)[idx]
59 | reversal_vals = np.array(reversal_vals)[idx]
60 | reversal_ts = t[reversal_idxs]
61 |
62 | x = np.interp(t, reversal_ts, reversal_vals)
63 | x = np.matrix(x)
64 |
65 | return x
66 |
67 |
68 | def effective_wheel_radius(v=20):
69 | """
70 | Allow effective wheel radius to be a function of velocity.
71 |
72 |     :param v: velocity
73 |     :type v: float
74 |
75 |     :return: effective wheel radius; the current implementation simply returns v unchanged
76 |     :rtype: float
77 | """
78 | return v
79 |
80 |
81 | def torque(omega):
82 | """
83 | Convert throttle input to Torque. See Astrom and Murray 2008 Chapter 3.
84 |
85 | :param omega: throttle
86 | :type omega: float
87 |
88 | :return: motor torque
89 | :rtype: float
90 | """
91 | omega_m = parameters['omega_m']
92 | t_m = parameters['T_m']
93 | beta = parameters['beta']
94 | return t_m*(1 - beta*(omega / omega_m - 1)**2)
95 |
96 |
97 | # pylint: disable-msg=too-many-locals
98 | def step_forward(state_vals, disturbances, desired_v, dt):
99 | """
100 | One-step Euler integrator that takes the current state, disturbances, and desired velocity and calculates the subsequent state.
101 |
102 | :param state_vals: current state [position, velocity, road_angle]
103 | :type state_vals: np.matrix
104 |
105 | :param disturbances: current hill angle
106 | :type disturbances: np.matrix
107 |
108 |     :param desired_v: current desired velocity
109 |     :type desired_v: np.matrix
110 |
111 | :param dt: time step (seconds)
112 | :type dt: float
113 |
114 | :return: a tuple consisting of:
115 | - new_state: new state
116 | - u: control inputs
117 | :rtype: tuple -> (np.matrix, np.matrix)
118 | """
119 | p = state_vals[0, -1]
120 | v = state_vals[1, -1]
121 | theta = disturbances[2, -1]
122 | m = parameters['m']
123 | g = parameters['g']
124 | Cr = parameters['Cr']
125 | rho = parameters['rho']
126 | Cd = parameters['Cd']
127 | A = parameters['A']
128 | v_r = desired_v[0, -1]
129 | alpha_n = effective_wheel_radius(v)
130 | z = np.sum(desired_v[0, :] - state_vals[1, :])*dt
131 | k_p = parameters['k_p']
132 | k_i = parameters['k_i']
133 | u = k_p*(v_r-v) + k_i*z
134 |
135 | # rolling friction
136 | Fr = m*g*Cr*np.sign(v)
137 | # aerodynamic drag
138 | Fa = 0.5*rho*Cd*A*np.abs(v)*v
139 | # forces due to gravity
140 | Fg = m*g*np.sin(theta)
141 | # driving force
142 | Fd = alpha_n*u*torque(alpha_n*v)
143 | vdot = 1/m*(Fd - (Fr + Fa + Fg))
144 | new_state = np.matrix([[p + dt*v], [v + vdot*dt], [theta]])
145 | return new_state, np.matrix(u)
146 |
147 |
148 | # disturbance
149 | def hills(iterations, dt, factor):
150 | """
151 | Wrapper for creating a hill profile for the car to drive over that has an appropriate length and magnitude
152 |
153 | :param iterations: number of time points to simulate
154 | :type iterations: int
155 |
156 | :param dt: timestep of simulation in seconds
157 | :type dt: float
158 |
159 | :param factor: determines magnitude of the hills
160 | :type factor: int
161 |
162 |
163 | :return: hills, the output of the triangle function, a [1,M] matrix where M is the number of time points simulated
164 | :rtype: np.matrix
165 | """
166 | return triangle(iterations, dt)*0.3/factor
167 |
168 |
169 | # desired velocity
170 | def desired_velocity(n, factor):
171 | """
172 | Wrapper for defining the desired velocity as a matrix with size [1, M], where M is the number of time points to simulate
173 | See function "run" for how this function gets used.
174 |
175 | :param n: number of time points to simulate
176 | :type n: int
177 |
178 | :param factor: factor that determines the magnitude of the desired velocity
179 | :type factor: float
180 |
181 | :return: desired velocity as function of time, a [1,M] matrix, M is the number of time points to simulate
182 | :rtype: np.matrix
183 | """
184 | return np.matrix([2/factor]*n)
185 |
186 |
187 | def run(timeseries_length=4, dt=0.01):
188 | """
189 | Simulate proportional integral control of a car attempting to maintain constant velocity while going up and down hills.
190 | This function is used for testing differentiation methods.
191 |
192 | See Astrom and Murray 2008 Chapter 3.
193 |
194 | :param timeseries_length: number of seconds to simulate
195 | :type timeseries_length: float
196 |
197 | :param dt: timestep in seconds
198 | :type dt: float
199 |
200 | :return: a tuple consisting of arrays of size [N, M], where M is the number of time steps.:
201 | - state_vals: state of the car, i.e. position and velocity as a function of time
202 | - disturbances: disturbances, ie. hills, that the car is subjected to
203 | - controls: control inputs applied by the car
204 | :rtype: tuple -> (np.array, np.array, np.array)
205 | """
206 | t = np.arange(0, timeseries_length, dt)
207 | iterations = len(t)
208 |
209 | # hills
210 | disturbances = np.matrix(np.zeros([3, iterations+1]))
211 | h = hills(iterations+1, dt, factor=0.5*timeseries_length/2)
212 | disturbances[2, :] = h[:, 0:disturbances.shape[1]]
213 |
214 | # controls
215 | controls = np.matrix([[0]])
216 |
217 | # initial condition
218 | state_vals = np.matrix([[0], [0], [0]])
219 |
220 | # desired vel
221 | v_r = desired_velocity(iterations, factor=0.5*iterations*dt/2)
222 |
223 | for i in range(1, iterations+1):
224 | new_state, u = step_forward(state_vals, disturbances[:, 0:i], v_r[:, 0:i], dt)
225 | state_vals = np.hstack((state_vals, new_state))
226 | controls = np.hstack((controls, u))
227 |
228 | return state_vals[0:2, 1:], disturbances[2, 1:], controls
229 |
--------------------------------------------------------------------------------
/pynumdiff/utils/utility.py:
--------------------------------------------------------------------------------
1 | import sys, copy, scipy.integrate, scipy.optimize
2 | import numpy as np
3 |
4 |
5 | def hankel_matrix(x, num_delays, pad=False): # fixed delay step of 1
6 | """
7 | :param x: numpy array or matrix
8 | :param num_delays: int, number of times to 1-step shift data
9 | :param pad:
10 | :return: a Hankel Matrix m
11 |
12 | e.g. if
13 | x = [a, b, c, d, e] and num_delays = 3
14 | then with pad = False:
15 | m = [['a', 'b', 'c'],
16 | ['b', 'c', 'd'],
17 | ['c', 'd', 'e']]
18 | or pad = True:
19 | m = [['a', 'b', 'c', 'd', 'e'],
20 | ['b', 'c', 'd', 'e', 0],
21 | ['c', 'd', 'e', 0, 0]]
22 | """
23 | m = copy.copy(x)
24 | for d in range(1, num_delays):
25 | xi = x[:, d:]
26 | xi = np.pad(xi, ((0, 0), (0, x.shape[1]-xi.shape[1])), 'constant', constant_values=0)
27 | m = np.vstack((m, xi))
28 | if not pad:
29 | return m[:, 0:-1*num_delays]
30 | return m
31 |
32 |
33 | def matrix_inv(X, max_sigma=1e-16):
34 | """
35 | Stable (pseudo) matrix inversion using singular value decomposition
36 |
37 | :param X: matrix to invert
38 | :type X: np.matrix or np.array
39 |
40 | :param max_sigma: smallest singular values to take into account. matrix will be truncated prior to inversion based on this value.
41 | :type max_sigma: float
42 |
43 | :return: matrix pseudo inverse
44 | :rtype: np.array or np.matrix
45 | """
46 | U, Sigma, V = np.linalg.svd(X, full_matrices=False)
47 | Sigma_inv = Sigma**-1
48 | Sigma_inv[np.where(Sigma < max_sigma)[0]] = 0 # helps reduce instabilities
49 | return V.T.dot(np.diag(Sigma_inv)).dot(U.T)
50 |
51 |
52 | def total_variation(x):
53 | """
54 | Calculate the total variation of an array
55 |
56 | :param x: timeseries
57 | :type x: np.array
58 |
59 | :return: total variation
60 | :rtype: float
61 |
62 | """
63 | if np.isnan(x).any():
64 | return np.nan
65 | x1 = np.ravel(x)[0:-1]
66 | x2 = np.ravel(x)[1:]
67 | return np.sum(np.abs(x2-x1))/len(x1) # mostly equivalent to cvxpy.tv(x2-x1).value
68 |
69 |
70 | def peakdet(v, delta, x=None):
71 | """
72 | Find peaks and valleys of 1D array. A point is considered a maximum peak if it has the maximal value, and was preceded (to the left) by a value lower by delta.
73 |
74 | Converted from MATLAB script at http://billauer.co.il/peakdet.html
75 | % Eli Billauer, 3.4.05 (Explicitly not copyrighted).
76 | % This function is released to the public domain; Any use is allowed.
77 |
78 | :param v: array for which to find peaks and valleys
79 |     :type v: np.array
80 |
81 | :param delta: threshold for finding peaks and valleys. A point is considered a maximum peak if it has the maximal value, and was preceded (to the left) by a value lower by delta.
82 | :type delta: float
83 |
84 | :return: tuple of min and max locations and values:
85 | - maxtab: array with locations (column 1) and values of maxima (column 2)
86 | - mintab: array with locations (column 1) and values of minima (column 2)
87 | :rtype: tuple -> (np.array, np.array)
88 |
89 | """
90 | maxtab = []
91 | mintab = []
92 | if x is None:
93 | x = np.arange(len(v))
94 | v = np.asarray(v)
95 | if len(v) != len(x):
96 | sys.exit('Input vectors v and x must have same length')
97 | if not np.isscalar(delta):
98 | sys.exit('Input argument delta must be a scalar')
99 | if delta <= 0:
100 | sys.exit('Input argument delta must be positive')
101 |
102 |     mn, mx = np.inf, -np.inf
103 |     mnpos, mxpos = np.nan, np.nan
104 | lookformax = True
105 | for i in np.arange(len(v)):
106 | this = v[i]
107 | if this > mx:
108 | mx = this
109 | mxpos = x[i]
110 | if this < mn:
111 | mn = this
112 | mnpos = x[i]
113 | if lookformax:
114 | if this < mx-delta:
115 | maxtab.append((mxpos, mx))
116 | mn = this
117 | mnpos = x[i]
118 | lookformax = False
119 | else:
120 | if this > mn+delta:
121 | mintab.append((mnpos, mn))
122 | mx = this
123 | mxpos = x[i]
124 | lookformax = True
125 |
126 | return np.array(maxtab), np.array(mintab)
127 |
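# Usage sketch (delta=0.5 is an illustrative threshold):
#
#     t = np.arange(0, 10, 0.01)
#     maxtab, mintab = peakdet(np.sin(t), 0.5)
#     # each row of maxtab / mintab is (location, value)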
128 |
129 | # Trapezoidal integration, with an extrapolated initial point so that the lengths match.
130 | def integrate_dxdt_hat(dxdt_hat, dt):
131 | """Wrapper for scipy.integrate.cumulative_trapezoid to integrate dxdt_hat that ensures the integral has the same length
132 |
133 | :param np.array[float] dxdt_hat: estimate derivative of timeseries
134 | :param float dt: time step in seconds
135 |
136 | :return: **x_hat** (np.array[float]) -- integral of dxdt_hat
137 | """
138 | x = scipy.integrate.cumulative_trapezoid(dxdt_hat)
139 | first_value = x[0] - dxdt_hat[0]
140 | return np.hstack((first_value, x))*dt
141 |
142 |
143 | # Optimization routine to estimate the integration constant.
144 | def estimate_initial_condition(x, x_hat):
145 | """
146 | Integration leaves an unknown integration constant. This function finds a best fit integration constant given x, and x_hat (the integral of dxdt_hat)
147 |
148 | :param x: timeseries of measurements
149 | :type x: np.array
150 |
151 |     :param x_hat: smoothed estimate of x; for the purpose of this function this should have been determined by integrate_dxdt_hat
152 | :type x_hat: np.array
153 |
154 | :return: integration constant (i.e. initial condition) that best aligns x_hat with x
155 | :rtype: float
156 | """
157 | def f(x0, *args):
158 | x, x_hat = args[0]
159 | error = np.linalg.norm(x - (x_hat+x0))
160 | return error
161 | result = scipy.optimize.minimize(f, [0], args=[x, x_hat], method='SLSQP')
162 | return result.x
163 |
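# These two helpers are typically paired throughout the package, e.g.:
#
#     x_hat = integrate_dxdt_hat(dxdt_hat, dt)
#     x_hat = x_hat + estimate_initial_condition(x, x_hat)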
164 |
165 | # kernels
166 | def _mean_kernel(window_size):
167 | """A uniform boxcar of total integral 1
168 | """
169 | return np.ones(window_size)/window_size
170 |
171 |
172 | def _gaussian_kernel(window_size):
173 | """A truncated gaussian
174 | """
175 | sigma = window_size / 6.
176 | t = np.linspace(-2.7*sigma, 2.7*sigma, window_size)
177 | ker = 1/np.sqrt(2*np.pi*sigma**2) * np.exp(-(t**2)/(2*sigma**2)) # gaussian function itself
178 | return ker / np.sum(ker)
179 |
180 |
181 | def _friedrichs_kernel(window_size):
182 | """A bump function
183 | """
184 | x = np.linspace(-0.999, 0.999, window_size)
185 | ker = np.exp(-1/(1-x**2))
186 | return ker / np.sum(ker)
187 |
188 |
189 | def convolutional_smoother(x, kernel, iterations):
190 | """Perform smoothing by convolving x with a kernel.
191 |
192 | :param np.array[float] x: 1D data
193 | :param np.array[float] kernel: kernel to use in convolution
194 | :param int iterations: number of iterations, >=1
195 | :return: **x_hat** (np.array[float]) -- smoothed x
196 | """
197 | x_hat = np.hstack((x[::-1], x, x[::-1])) # pad
198 | w = np.arange(len(x_hat)) / (len(x_hat) - 1) # weights
199 |
200 | for _ in range(iterations):
201 | x_hat_f = np.convolve(x_hat, kernel, 'same')
202 | x_hat_b = np.convolve(x_hat[::-1], kernel, 'same')[::-1]
203 |
204 | x_hat = x_hat_f*w + x_hat_b*(1-w)
205 |
206 | return x_hat[len(x):len(x)*2]
207 |
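# Usage sketch, pairing a kernel with the smoother (a 31-sample window is
# illustrative; wider windows smooth more aggressively):
#
#     kernel = _gaussian_kernel(31)
#     x_smooth = convolutional_smoother(x, kernel, iterations=1)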
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "pynumdiff"
7 | dynamic = ["version"]
8 | description = "pynumdiff: numerical derivatives in python"
9 | readme = "README.md"
10 | license = {text = "MIT"}
11 | maintainers = [
12 | {name = "Floris van Breugel", email = "fvanbreugel@unr.edu"},
13 | {name = "Yuying Liu", email = "yliu814@uw.edu"},
14 | {name = "Pavel Komarov", email = "pvlkmrv@uw.edu"}
15 | ]
16 | keywords = ["derivative", "smoothing", "curve fitting", "optimization", "total variation"]
17 | classifiers = [
18 | "Development Status :: 3 - Alpha",
19 | "Environment :: Console",
20 | "Intended Audience :: Science/Research",
21 | "License :: OSI Approved :: MIT License",
22 | "Operating System :: OS Independent",
23 | "Programming Language :: Python",
24 | "Topic :: Scientific/Engineering"
25 | ]
26 | dependencies = [
27 | "numpy",
28 | "scipy",
29 | "matplotlib"
30 | ]
31 |
32 | [project.urls]
33 | homepage = "https://github.com/florisvb/PyNumDiff"
34 | documentation = "https://pynumdiff.readthedocs.io/"
35 | package = "https://pypi.org/project/pynumdiff/"
36 |
37 | [project.optional-dependencies]
38 | advanced = [
39 | "cvxpy",
40 | "MOSEK",
41 | ]
42 | dev = [
43 | "pylint",
44 | "pytest",
45 | "cvxopt",
46 | "cvxpy",
47 |     "MOSEK"
48 | ]
49 |
50 | [tool.setuptools_scm]
51 | write_to = "pynumdiff/_version.py"
52 |
--------------------------------------------------------------------------------