├── .gitattributes
├── .github
└── workflows
│ ├── pull_request.yml
│ └── push_branch.yml
├── .gitignore
├── .gitlab-ci.yml
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── LICENSE
├── MANIFEST.in
├── README.md
├── docs
├── Makefile
├── assets
│ └── style.css
├── conf.py
├── content
│ ├── api.rst
│ ├── assets
│ │ └── style.css
│ ├── atbd.rst
│ ├── getting_started.rst
│ ├── punpy_digital_effects_table.rst
│ ├── punpy_memory_and_speed.rst
│ ├── punpy_standalone.rst
│ └── user_guide.rst
├── figs
│ ├── Digital_effects_tables.jpg
│ ├── comet_logo.png
│ ├── image1.png
│ ├── image2.png
│ └── punpy.png
├── index.rst
└── make.bat
├── punpy
├── __init__.py
├── _version.py
├── digital_effects_table
│ ├── __init__.py
│ ├── digital_effects_table_templates.py
│ ├── measurement_function.py
│ ├── measurement_function_utils.py
│ └── tests
│ │ ├── __init__.py
│ │ ├── det_hypernets_cal.nc
│ │ ├── det_hypernets_l0.nc
│ │ ├── digital_effects_table_gaslaw_example.nc
│ │ ├── make_digital_effects_table_gaslaw.py
│ │ ├── make_hypernets_det.py
│ │ ├── propagate_ds_example.nc
│ │ ├── test_cal.nc
│ │ ├── test_fillvalues.py
│ │ ├── test_l0.nc
│ │ ├── test_l1.nc
│ │ ├── test_measurement_function.py
│ │ └── variables.py
├── lpu
│ ├── __init__.py
│ ├── lpu_propagation.py
│ └── tests
│ │ ├── __init__.py
│ │ └── test_lpu_propagation.py
├── main.py
├── mc
│ ├── __init__.py
│ ├── mc_propagation.py
│ └── tests
│ │ ├── __init__.py
│ │ └── test_mc_propagation.py
└── utilities
│ ├── __init__.py
│ ├── correlation_forms.py
│ ├── tests
│ ├── __init__.py
│ └── test_utilities.py
│ └── utilities.py
├── quality_documentation
├── .gitignore
├── QualityDocumentation.tex
├── base
│ ├── CookiecutterMacros.tex
│ ├── acronyms.tex
│ ├── history.tex
│ ├── metadata.tex
│ ├── pythoninputstyle.tex
│ ├── sil2.tex
│ ├── sil3.tex
│ ├── sources.bib
│ └── sources.tex
├── countpdfpages.sh
├── footnotehyper-sphinx.sty
├── punpy_QF-59.docx
├── punpy_requirements.docx
├── review_checklists
│ ├── qf-16a.docx
│ ├── qf-16b.docx
│ ├── qf-16c.docx
│ ├── qf-16d.docx
│ ├── qf-16e.docx
│ └── qf-16f.docx
├── softwareRequirements.sty
├── sphinx.sty
├── sphinxcyrillic.sty
├── sphinxhighlight.sty
├── sphinxhowto.cls
├── sphinxmanual.cls
├── sphinxmessages.sty
├── sphinxmulticell.sty
└── uml
│ ├── Diagram 2020-11-20 17-25-02.uxf
│ └── Diagram.png
├── requirements.txt
├── setup.cfg
├── setup.py
└── tox.ini
/.gitattributes:
--------------------------------------------------------------------------------
1 | punpy/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/.github/workflows/pull_request.yml:
--------------------------------------------------------------------------------
1 | # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
2 | name: Pull Request
3 |
4 | on:
5 | pull_request:
6 | branches:
7 | - main
8 | push:
9 | branches:
10 | - main
11 |
12 | jobs:
13 | lint_code:
14 | runs-on: ubuntu-latest
15 | strategy:
16 | matrix:
17 | python-version: ["3.11"]
18 | steps:
19 | - uses: actions/checkout@v3
20 | - name: Set up Python ${{ matrix.python-version }}
21 | uses: actions/setup-python@v3
22 | with:
23 | python-version: ${{ matrix.python-version }}
24 | - name: Install dependencies
25 | run: |
26 | python -m pip install --upgrade pip
27 | pip install .[dev]
28 | - name: Analysing the code with pre-commit lint checks
29 | run: |
30 | pre-commit run -a
31 |
32 | test_code_python3p8:
33 | runs-on: ubuntu-latest
34 | strategy:
35 | matrix:
36 | python-version: ["3.8"]
37 | steps:
38 | - uses: actions/checkout@v4
39 | - name: Set up Python ${{ matrix.python-version }}
40 | uses: actions/setup-python@v3
41 | with:
42 | python-version: ${{ matrix.python-version }}
43 | - name: Install dependencies
44 | run: |
45 | python -m pip install --upgrade pip
46 | pip install .[dev]
47 | - name: Test code
48 | run: |
49 | mkdir test_report
50 | tox
51 |
52 |
53 | test_code_and_coverage_report_python3p11:
54 | runs-on: ubuntu-latest
55 | strategy:
56 | matrix:
57 | python-version: ["3.11"]
58 | steps:
59 | - uses: actions/checkout@v4
60 | - name: Set up Python ${{ matrix.python-version }}
61 | uses: actions/setup-python@v3
62 | with:
63 | python-version: ${{ matrix.python-version }}
64 | - name: Install dependencies
65 | run: |
66 | python -m pip install --upgrade pip
67 | pip install .[dev]
68 | - name: Test code
69 | run: |
70 | mkdir test_report
71 | tox
72 | - name: html to pdf
73 | uses: fifsky/html-to-pdf-action@master
74 | with:
75 | htmlFile: test_report/cov_report/index.html
76 | outputFile: test_report/cov_report/cov_report.pdf
77 | pdfOptions: '{"format": "A4", "margin": {"top": "10mm", "left": "10mm", "right": "10mm", "bottom": "10mm"}}'
78 | - name: Archive code coverage results
79 | uses: actions/upload-artifact@v4
80 | with:
81 | name: code-coverage-report
82 | path: test_report/cov_report/cov_report.pdf
--------------------------------------------------------------------------------
/.github/workflows/push_branch.yml:
--------------------------------------------------------------------------------
1 | # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
2 | name: Push to branch
3 |
4 | on:
5 | push:
6 | branches:
7 | - '*'
8 |
9 | jobs:
10 | test_code:
11 | runs-on: ubuntu-latest
12 | strategy:
13 | matrix:
14 | python-version: ["3.12"]
15 | steps:
16 | - uses: actions/checkout@v4
17 | - name: Set up Python ${{ matrix.python-version }}
18 | uses: actions/setup-python@v3
19 | with:
20 | python-version: ${{ matrix.python-version }}
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install .[dev]
25 | - name: Test code
26 | run: |
27 | tox
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Mypy cache
2 | .mypy_cache
3 |
4 | # Compiles docs for gitlab pages
5 | public
6 | latex
7 |
8 | # Visual studio / pycharm folders
9 | .idea
10 | .vscode
11 |
12 | # Byte-compiled / optimized / DLL files
13 | __pycache__/
14 | *.py[cod]
15 | *$py.class
16 |
17 | # C extensions
18 | *.so
19 |
20 | # Distribution / packaging
21 | .Python
22 | env/
23 | build/
24 | develop-eggs/
25 | dist/
26 | downloads/
27 | eggs/
28 | .eggs/
29 | lib/
30 | lib64/
31 | parts/
32 | sdist/
33 | var/
34 | wheels/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *.cover
58 | .hypothesis/
59 | .pytest_cache/
60 |
61 | # Translations
62 | *.mo
63 | *.pot
64 |
65 | # Django stuff:
66 | *.log
67 | local_settings.py
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 |
79 | # PyBuilder
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # pyenv
86 | .python-version
87 |
88 | # celery beat schedule file
89 | celerybeat-schedule
90 |
91 | # SageMath parsed files
92 | *.sage.py
93 |
94 | # dotenv
95 | .env
96 |
97 | # virtualenv
98 | .venv
99 | venv/
100 | ENV/
101 |
102 | # Spyder project settings
103 | .spyderproject
104 | .spyproject
105 |
106 | # Rope project settings
107 | .ropeproject
108 |
109 | # mkdocs documentation
110 | /site
111 |
112 | # mypy
113 | .mypy_cache/
114 |
115 | # IDE settings
116 | .vscode/.git.old
117 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | # Default image, if not specified
2 | image: "python:3.8"
3 |
4 | stages:
5 | - Static Analysis
6 | - Readthedocs Documentation
7 | - Build latex Documentation
8 | - Publish latex Documentation
9 | - Test
10 | #- Full test
11 | - Publish test report
12 | - Combine quality documentation
13 |
14 |
15 | ### Setup cache ###
16 | # See https://docs.gitlab.com/ee/ci/caching/index.html
17 | #
18 | # Change pip's cache directory to be inside the project directory since we can
19 | # only cache local items.
20 | variables:
21 | PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
22 |
23 | # Pip's cache doesn't store the python packages
24 | # https://pip.pypa.io/en/stable/reference/pip_install/#caching
25 | #
26 | # If you want to also cache the installed packages, you have to install
27 | # them in a virtualenv and cache it as well.
28 | cache:
29 | key: ${CI_JOB_NAME}
30 | paths:
31 | - .cache/pip
32 | - .tox/
33 | - venv/
34 |
35 | # Make a template for steps that use a python virtual env
36 | .py_venv:
37 | before_script:
38 | # Store the CI_JOB_TOKEN so that jobs can access other repositories with the access rights of the triggering user:
39 | - echo -e "machine gitlab.npl.co.uk\nlogin gitlab-ci-token\npassword ${CI_JOB_TOKEN}" > ~/.netrc
40 | # Debug output
41 | - ls -la
42 | - pwd
43 | - python -c "import sys;print(sys.path)"
44 | - python --version
45 | # Set up git for test commits
46 | - git config --global user.name "Gitlabs CI Testing"
47 | - git config --global user.email "test@example.com"
48 | # Make a virtual environment to install deps into (this will be cached for each step)
49 | - python --version
50 | - pip install virtualenv
51 | - virtualenv venv
52 | - source venv/bin/activate
53 | - pip install .[dev]
54 |
55 | ### Linting ###
56 |
57 | black:
58 | image:
59 | name: cytopia/black
60 | entrypoint: [""]
61 | stage: Static Analysis
62 | script:
63 | - black --check punpy
64 | allow_failure: true
65 | needs: []
66 |
67 |
68 | mypy:
69 | image:
70 | name: grktsh/mypy
71 | entrypoint: [""]
72 | stage: Static Analysis
73 | script:
74 | - mypy punpy
75 | allow_failure: true
76 | needs: []
77 |
78 |
79 | ### Tests ###
80 | .tox:
81 | extends: .py_venv
82 | script:
83 | - pwd
84 | - ls -l
85 | - export PYTHONPATH="$PYTHONPATH:."
86 | - python -c "import sys;print(sys.path)"
87 | - pip install tox
88 | - mkdir test_report
89 | - tox
90 | coverage: '/^TOTAL.+?(\d+\%)$/'
91 | needs: []
92 | artifacts:
93 | when: always
94 | paths:
95 | - test_report/
96 | - test_report/cov_report
97 | expire_in: 1 hour
98 |
99 | tox-3.6:
100 | extends: ".tox"
101 | stage: "Test"
102 | image: "python:3.6"
103 | rules:
104 | - if: '$CI_COMMIT_TAG'
105 | when: always
106 |
107 | tox-3.7:
108 | extends: ".tox"
109 | stage: "Test"
110 | image: "python:3.7"
111 | # rules:
112 | # - if: '$CI_COMMIT_TAG'
113 | # when: always
114 |
115 | tox-3.8:
116 | extends: ".tox"
117 | stage: "Test"
118 | image: "python:3.8"
119 | rules:
120 | - if: '$CI_COMMIT_TAG'
121 | when: always
122 |
123 | tox-3.9:
124 | extends: ".tox"
125 | stage: "Test"
126 | image: "python:3.9"
127 |
128 |
129 | # tox_slowtests:
130 | # extends: .py_venv
131 | # stage: "Full test"
132 | # rules:
133 | # # Run for scheduled or triggered builds
134 | # - if: '$CI_PIPELINE_SOURCE == "trigger"'
135 | # when: always
136 | # - if: '$CI_PIPELINE_SOURCE == "schedule"'
137 | # when: always
138 | # # Run for tags
139 | # - if: '$CI_COMMIT_TAG'
140 | # when: always
141 | # # And allow manual runs
142 | # - if: '$CI_COMMIT_BRANCH'
143 | # when: manual
144 | # allow_failure: true
145 | # script:
146 | # - pwd
147 | # - ls -l
148 | # - export PYTHONPATH="$PYTHONPATH:."
149 | # - python -c "import sys;print(sys.path)"
150 | # - pip install tox
151 | # - tox -- --runslow
152 | # coverage: '/^TOTAL.+?(\d+\%)$/'
153 | # needs: []
154 |
155 | test_report:
156 | needs: ["tox-3.9"]
157 | stage: Publish test report
158 | image:
159 | name: oberonamsterdam/wkhtmltopdf
160 | entrypoint: [""]
161 | script:
162 | - wkhtmltopdf --enable-local-file-access file:////builds/eco/tools/punpy/test_report/report.html test_report.pdf
163 | - wkhtmltopdf --enable-local-file-access file:////builds/eco/tools/punpy/test_report/cov_report/index.html cov_report.pdf
164 | - mv test_report.pdf ./quality_documentation/test_report.pdf
165 | - mv cov_report.pdf ./quality_documentation/cov_report.pdf
166 | artifacts:
167 | paths:
168 | - quality_documentation
169 | expire_in: 1 hour
170 | rules:
171 | - if: '$CI_COMMIT_TAG'
172 | when: always
173 | ### Documentation ###
174 |
175 | # Upload the documentation to pages if it's a tagged commit
176 | pages:
177 | extends: .py_venv
178 | stage: Readthedocs Documentation
179 | needs: []
180 | script:
181 | - sphinx-build docs public -b html
182 | artifacts:
183 | paths:
184 | - public
185 | expire_in: 1 hour
186 |
187 | rules:
188 | - if: '$CI_COMMIT_TAG'
189 | when: always
190 | - if: '$CI_COMMIT_BRANCH == "master"'
191 | when: always
192 |
193 | pdf_make:
194 | extends: .py_venv
195 | stage: Build latex Documentation
196 | needs: []
197 | script:
198 | - sphinx-build docs latex -b latex
199 | - cp -r latex ./quality_documentation/latex
200 | artifacts:
201 | paths:
202 | - quality_documentation/latex
203 | expire_in: 1 hour
204 | rules:
205 | - if: '$CI_COMMIT_TAG'
206 | when: always
207 |
208 | pdf_compile:
209 | needs: ["pdf_make"]
210 | stage: Publish latex Documentation
211 | image: charlesbaynham/ctanfull
212 | artifacts:
213 | paths:
214 | - user_manual.pdf
215 | expose_as: "Manual"
216 | name: ${CI_COMMIT_REF_SLUG}_manual.pdf
217 | script:
218 | - ls
219 | - cd quality_documentation/latex
220 | - make
221 | - cp user_manual.pdf ../../user_manual.pdf
222 | rules:
223 | - if: '$CI_COMMIT_TAG'
224 | when: always
225 |
226 | pdf_compile_all:
227 | needs: ["pdf_make","test_report"]
228 | stage: Combine quality documentation
229 | image: charlesbaynham/ctanfull
230 | artifacts:
231 | paths:
232 | - QualityDocumentation.pdf
233 | expose_as: "QualityDocumentation"
234 | name: ${CI_COMMIT_REF_SLUG}_QualityDocumentation.pdf
235 | script:
236 | - cd quality_documentation/latex
237 | - ls
238 | - cd ..
239 | - cd uml
240 | - for i in *.png; do echo "\includegraphics[width=\textwidth]{$i}">>uml.tex; done
241 | - cd ..
242 | - latexmk -e '$pdflatex=q/pdflatex %O -shell-escape %S/' -pdf
243 | - cp QualityDocumentation.pdf ../.
244 | rules:
245 | - if: '$CI_COMMIT_TAG'
246 | when: always
247 | # Copied contents of Gitlab's Code-Quality.gitlab-ci.yml so I can edit the rules
248 |
249 | code_quality:
250 | stage: Static Analysis
251 | needs: []
252 | image: docker:19.03.5
253 | allow_failure: true
254 | services:
255 | - docker:19.03.5-dind
256 | variables:
257 | DOCKER_DRIVER: overlay2
258 | DOCKER_TLS_CERTDIR: ""
259 | CODE_QUALITY_IMAGE: "registry.gitlab.com/gitlab-org/ci-cd/codequality:0.85.9"
260 | script:
261 | - |
262 | if ! docker info &>/dev/null; then
263 | if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
264 | export DOCKER_HOST='tcp://localhost:2375'
265 | fi
266 | fi
267 | - docker pull --quiet "$CODE_QUALITY_IMAGE"
268 | - docker run
269 | --env SOURCE_CODE="$PWD"
270 | --volume "$PWD":/code
271 | --volume /var/run/docker.sock:/var/run/docker.sock
272 | "$CODE_QUALITY_IMAGE" /code
273 | artifacts:
274 | reports:
275 | codequality: gl-code-quality-report.json
276 | expire_in: 1 week
277 | dependencies: []
278 |
279 | rules:
280 | - if: '$CI_PROJECT_URL =~ /^.*gitlab.com.*/'
281 | when: always
282 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 |     rev: 22.3.0  # pin to a release tag; the mutable "stable" ref was removed by the black project
4 | hooks:
5 | - id: black
6 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the OS, Python version and other tools you might need
9 | build:
10 | os: ubuntu-22.04
11 | tools:
12 | python: "3.9"
13 |
14 | # Build documentation in the docs/ directory with Sphinx
15 | sphinx:
16 | configuration: docs/conf.py
17 |
18 |
19 | # Optionally build your docs in additional formats such as PDF
20 | formats:
21 | - pdf
22 |
23 | # Optionally set the version of Python and requirements required to build your docs
24 | python:
25 | install:
26 | - requirements: requirements.txt
27 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include punpy/_version.py
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # punpy: Propagating Uncertainties with PYthon
2 |
3 | **punpy** is a Python software package to propagate random, structured and systematic uncertainties through a given measurement function.
4 |
5 | punpy is part of the [CoMet Toolkit](https://www.comet-toolkit.org) (community metrology toolkit), and can be combined with [obsarray](https://obsarray.readthedocs.io/en/latest/) (measurement uncertainty handling in Python) for efficient definition, storage and interfacing of uncertainty information using standardised metadata.
6 |
7 | ## Installation
8 |
9 | punpy is installable via pip.
10 | ```
11 | pip install punpy
12 | ```
13 | ## Documentation
14 |
15 | For more information visit our [documentation](https://punpy.readthedocs.io/en/latest).
16 |
17 | ## License
18 |
19 | punpy is free software licensed under the
20 | [GNU Lesser General Public License (v3)](./LICENSE).
21 |
22 | ## Acknowledgements
23 |
24 | punpy has been developed by [Pieter De Vis](https://www.comet-toolkit.org/author/pieter-de-vis/).
25 |
26 | The development has been funded by:
27 |
28 | * The UK's Department for Business, Energy and Industrial Strategy's (BEIS) National Measurement System (NMS) programme
29 | * The IDEAS-QA4EO project funded by the European Space Agency.
30 |
31 | ## Project status
32 |
33 | punpy is under active development. It is beta software.
34 |
35 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = punpy
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/assets/style.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: Helvetica, Arial, sans-serif;
3 | font-size: 12px;
4 | /* do not increase min-width as some may use split screens */
5 | min-width: 800px;
6 | color: #999;
7 | }
8 |
9 | h1 {
10 | font-size: 24px;
11 | color: black;
12 | }
13 |
14 | h2 {
15 | font-size: 16px;
16 | color: black;
17 | }
18 |
19 | p {
20 | color: black;
21 | }
22 |
23 | a {
24 | color: #999;
25 | }
26 |
27 | table {
28 | border-collapse: collapse;
29 | }
30 |
31 | /******************************
32 | * SUMMARY INFORMATION
33 | ******************************/
34 |
35 | #environment td {
36 | padding: 5px;
37 | border: 1px solid #E6E6E6;
38 | }
39 |
40 | #environment tr:nth-child(odd) {
41 | background-color: #f6f6f6;
42 | }
43 |
44 | /******************************
45 | * TEST RESULT COLORS
46 | ******************************/
47 | span.passed, .passed .col-result {
48 | color: green;
49 | }
50 | span.skipped, span.xfailed, span.rerun, .skipped .col-result, .xfailed .col-result, .rerun .col-result {
51 | color: orange;
52 | }
53 | span.error, span.failed, span.xpassed, .error .col-result, .failed .col-result, .xpassed .col-result {
54 | color: red;
55 | }
56 |
57 |
58 | /******************************
59 | * RESULTS TABLE
60 | *
61 | * 1. Table Layout
62 | * 2. Extra
63 | * 3. Sorting items
64 | *
65 | ******************************/
66 |
67 | /*------------------
68 | * 1. Table Layout
69 | *------------------*/
70 |
71 | #results-table {
72 | border: 1px solid #e6e6e6;
73 | color: #999;
74 | font-size: 12px;
75 | width: 100%
76 | }
77 |
78 | #results-table th, #results-table td {
79 | padding: 5px;
80 | border: 1px solid #E6E6E6;
81 | text-align: left
82 | }
83 | #results-table th {
84 | font-weight: bold
85 | }
86 |
87 | /*------------------
88 | * 2. Extra
89 | *------------------*/
90 |
91 | .log:only-child {
92 | height: inherit
93 | }
94 | .log {
95 | background-color: #e6e6e6;
96 | border: 1px solid #e6e6e6;
97 | color: black;
98 | display: block;
99 | font-family: "Courier New", Courier, monospace;
100 | height: 230px;
101 | overflow-y: scroll;
102 | padding: 5px;
103 | white-space: pre-wrap
104 | }
105 | div.image {
106 | border: 1px solid #e6e6e6;
107 | float: right;
108 | height: 240px;
109 | margin-left: 5px;
110 | overflow: hidden;
111 | width: 320px
112 | }
113 | div.image img {
114 | width: 320px
115 | }
116 | div.video {
117 | border: 1px solid #e6e6e6;
118 | float: right;
119 | height: 240px;
120 | margin-left: 5px;
121 | overflow: hidden;
122 | width: 320px
123 | }
124 | div.video video {
125 | overflow: hidden;
126 | width: 320px;
127 | height: 240px;
128 | }
129 | .collapsed {
130 | display: none;
131 | }
132 | .expander::after {
133 | content: " (show details)";
134 | color: #BBB;
135 | font-style: italic;
136 | cursor: pointer;
137 | }
138 | .collapser::after {
139 | content: " (hide details)";
140 | color: #BBB;
141 | font-style: italic;
142 | cursor: pointer;
143 | }
144 |
145 | /*------------------
146 | * 3. Sorting items
147 | *------------------*/
148 | .sortable {
149 | cursor: pointer;
150 | }
151 |
152 | .sort-icon {
153 | font-size: 0px;
154 | float: left;
155 | margin-right: 5px;
156 | margin-top: 5px;
157 | /*triangle*/
158 | width: 0;
159 | height: 0;
160 | border-left: 8px solid transparent;
161 | border-right: 8px solid transparent;
162 | }
163 |
164 | .inactive .sort-icon {
165 | /*finish triangle*/
166 | border-top: 8px solid #E6E6E6;
167 | }
168 |
169 | .asc.active .sort-icon {
170 | /*finish triangle*/
171 | border-bottom: 8px solid #999;
172 | }
173 |
174 | .desc.active .sort-icon {
175 | /*finish triangle*/
176 | border-top: 8px solid #999;
177 | }
178 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # punpy documentation build configuration file, created by
4 | # cookiecutter
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 |
16 | # If extensions (or modules to document with autodoc) are in another
17 | # directory, add these directories to sys.path here. If the directory is
18 | # relative to the documentation root, use os.path.abspath to make it
19 | # absolute, like shown here.
20 |
21 | import sphinx_autosummary_accessors
22 | import punpy
23 |
24 | project_title = "punpy".replace("_", " ").title()
25 |
26 |
27 | # -- General configuration ---------------------------------------------
28 |
29 | # If your documentation needs a minimal Sphinx version, state it here.
30 | #
31 | # needs_sphinx = '1.0'
32 |
33 | # Attempt to make links automatically
34 | default_role = "code"
35 |
36 | # Add any Sphinx extension module names here, as strings. They can be
37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
38 | # CFAB added napoleon to support google-style docstrings
39 | extensions = [
40 | "sphinx.ext.autodoc",
41 | "sphinx.ext.autosummary",
42 | "sphinx.ext.intersphinx",
43 | "sphinx.ext.viewcode",
44 | "sphinx.ext.napoleon",
45 | "IPython.sphinxext.ipython_directive",
46 | "IPython.sphinxext.ipython_console_highlighting",
47 | "sphinx_design",
48 | "sphinx_autosummary_accessors",
49 | ]
50 |
51 | # Add any paths that contain templates here, relative to this directory.
52 | templates_path = ["_templates", sphinx_autosummary_accessors.templates_path]
53 |
54 | # The suffix(es) of source filenames.
55 | # You can specify multiple suffixes as a list of strings:
56 | #
57 | # source_suffix = ['.rst', '.md']
58 | source_suffix = ".rst"
59 |
60 | # The master toctree document.
61 | master_doc = "index"
62 |
63 | # General information about the project.
64 | project = project_title
65 | copyright = "CoMet Toolkit Team"
66 | author = "CoMet Toolkit Team"
67 |
68 | # The version info for the project you're documenting, acts as replacement
69 | # for |version| and |release|, also used in various other places throughout
70 | # the built documents.
71 | #
72 | # The short X.Y version.
73 | version = punpy.__version__
74 | # The full version, including alpha/beta/rc tags.
75 | release = punpy.__version__
76 |
77 | # The language for content autogenerated by Sphinx. Refer to documentation
78 | # for a list of supported languages.
79 | #
80 | # This is also used if you do content translation via gettext catalogs.
81 | # Usually you set "language" from the command line for these cases.
82 | language = "en"
83 |
84 | # List of patterns, relative to source directory, that match files and
85 | # directories to ignore when looking for source files.
86 | # These patterns also affect html_static_path and html_extra_path
87 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
88 |
89 | # The name of the Pygments (syntax highlighting) style to use.
90 | pygments_style = "sphinx"
91 |
92 | rst_prolog = """
93 | .. role:: python(code)
94 | :language: python
95 | :class: highlight
96 | """
97 |
98 | # If true, `todo` and `todoList` produce output, else they produce nothing.
99 | todo_include_todos = False
100 |
101 |
102 | # -- Options for HTML output -------------------------------------------
103 |
104 | # The theme to use for HTML and HTML Help pages. See the documentation for
105 | # a list of builtin themes.
106 | #
107 | html_theme = "sphinx_book_theme"
108 |
109 | # Theme options are theme-specific and customize the look and feel of a
110 | # theme further. For a list of options available for each theme, see the
111 | # documentation.
112 | #
113 | # html_theme_options = {}
114 |
115 | html_logo = "figs/punpy.png"
116 |
117 | # Add any paths that contain custom static files (such as style sheets) here,
118 | # relative to this directory. They are copied after the builtin static files,
119 | # so a file named "default.css" will overwrite the builtin "default.css".
120 | html_static_path = ["_static"]
121 |
122 | # -- Options for HTMLHelp output ---------------------------------------
123 |
124 | # Output file base name for HTML help builder.
125 | htmlhelp_basename = "punpydoc"
126 |
127 |
128 | # -- Options for LaTeX output ------------------------------------------
129 |
130 | latex_elements = {
131 | # The paper size ('letterpaper' or 'a4paper').
132 | #
133 | # 'papersize': 'letterpaper',
134 | # The font size ('10pt', '11pt' or '12pt').
135 | #
136 | # 'pointsize': '10pt',
137 | # Additional stuff for the LaTeX preamble.
138 | #
139 | # 'preamble': '',
140 | # Latex figure (float) alignment
141 | #
142 | # 'figure_align': 'htbp',
143 | }
144 |
145 | # Grouping the document tree into LaTeX files. List of tuples
146 | # (source start file, target name, title, author, documentclass
147 | # [howto, manual, or own class]).
148 | latex_documents = [
149 | (
150 | master_doc,
151 | "user_manual.tex",
152 | "{} Documentation".format(project_title),
153 | "CoMet Toolkit Team",
154 | "manual",
155 | ),
156 | ]
157 |
158 |
159 | # -- Options for manual page output ------------------------------------
160 |
161 | # One entry per manual page. List of tuples
162 | # (source start file, name, description, authors, manual section).
163 | man_pages = [(master_doc, "punpy", "punpy Documentation", [author], 1)]
164 |
165 |
166 | # -- Options for Texinfo output ----------------------------------------
167 |
168 | # Grouping the document tree into Texinfo files. List of tuples
169 | # (source start file, target name, title, author,
170 | # dir menu entry, description, category)
171 | texinfo_documents = [
172 | (
173 | master_doc,
174 | "punpy",
175 | "punpy Documentation",
176 | author,
177 | "punpy",
178 | "Tool for “Propagation of UNcertainties in Python” through any python function. ",
179 | "Miscellaneous",
180 | ),
181 | ]
182 |
--------------------------------------------------------------------------------
/docs/content/api.rst:
--------------------------------------------------------------------------------
1 | .. currentmodule:: punpy
2 |
3 | .. _api:
4 |
5 | #############
6 | API reference
7 | #############
8 |
9 | This page provides an auto-generated summary of **punpy**'s API. For more details
10 | and examples, refer to the relevant chapters in the main part of the
11 | documentation.
12 |
13 | MCPropagation
14 | ===================
15 |
16 | .. autosummary::
17 | :toctree: generated/
18 |
19 | mc.mc_propagation.MCPropagation
20 | mc.mc_propagation.MCPropagation.propagate_standard
21 | mc.mc_propagation.MCPropagation.propagate_random
22 | mc.mc_propagation.MCPropagation.propagate_systematic
23 | mc.mc_propagation.MCPropagation.propagate_cov
24 | mc.mc_propagation.MCPropagation.generate_MC_sample
25 | mc.mc_propagation.MCPropagation.generate_MC_sample_cov
26 | mc.mc_propagation.MCPropagation.propagate_cov_flattened
27 | mc.mc_propagation.MCPropagation.run_samples
28 | mc.mc_propagation.MCPropagation.combine_samples
29 | mc.mc_propagation.MCPropagation.process_samples
30 |
31 | LPUPropagation
32 | =====================
33 |
34 | .. autosummary::
35 | :toctree: generated/
36 |
37 | lpu.lpu_propagation.LPUPropagation
38 | lpu.lpu_propagation.LPUPropagation.propagate_standard
39 | lpu.lpu_propagation.LPUPropagation.propagate_random
40 | lpu.lpu_propagation.LPUPropagation.propagate_systematic
41 | lpu.lpu_propagation.LPUPropagation.propagate_cov
42 | lpu.lpu_propagation.LPUPropagation.propagate_flattened_cov
43 | lpu.lpu_propagation.LPUPropagation.process_jacobian
44 |
45 | Digital Effects Tables
46 | =======================
47 |
48 | .. autosummary::
49 | :toctree: generated/
50 |
51 | digital_effects_table.measurement_function.MeasurementFunction
52 | digital_effects_table.measurement_function.MeasurementFunction.meas_function
53 | digital_effects_table.measurement_function.MeasurementFunction.get_argument_names
54 | digital_effects_table.measurement_function.MeasurementFunction.get_measurand_name_and_unit
55 | digital_effects_table.measurement_function.MeasurementFunction.update_measurand
56 | digital_effects_table.measurement_function.MeasurementFunction.setup
57 | digital_effects_table.measurement_function.MeasurementFunction.propagate_ds
58 | digital_effects_table.measurement_function.MeasurementFunction.propagate_ds_total
59 | digital_effects_table.measurement_function.MeasurementFunction.propagate_ds_specific
60 | digital_effects_table.measurement_function.MeasurementFunction.propagate_ds_all
61 | digital_effects_table.measurement_function.MeasurementFunction.run
62 | digital_effects_table.measurement_function.MeasurementFunction.propagate_total
63 | digital_effects_table.measurement_function.MeasurementFunction.propagate_random
64 | digital_effects_table.measurement_function.MeasurementFunction.propagate_systematic
65 | digital_effects_table.measurement_function.MeasurementFunction.propagate_structured
66 | digital_effects_table.measurement_function.MeasurementFunction.propagate_specific
67 |
68 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils
69 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.find_comps
70 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.get_input_qty
71 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.get_input_unc
72 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.calculate_unc
73 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.calculate_unc_missingdim
74 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.get_input_corr
75 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.calculate_corr
76 | digital_effects_table.measurement_function_utils.MeasurementFunctionUtils.calculate_corr_missingdim
77 |
78 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates
79 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates.make_template_main
80 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates.make_template_tot
81 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates.make_template_specific
82 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates.remove_unc_component
83 | digital_effects_table.digital_effects_table_templates.DigitalEffectsTableTemplates.join_with_preexisting_ds
84 |
--------------------------------------------------------------------------------
/docs/content/assets/style.css:
--------------------------------------------------------------------------------
1 | body { /* page-wide typography defaults */
2 | font-family: Helvetica, Arial, sans-serif;
3 | font-size: 12px;
4 | /* do not increase min-width as some may use split screens */
5 | min-width: 800px;
6 | color: #999;
7 | }
8 |
9 | h1 {
10 | font-size: 24px;
11 | color: black;
12 | }
13 |
14 | h2 {
15 | font-size: 16px;
16 | color: black;
17 | }
18 |
19 | p {
20 | color: black;
21 | }
22 |
23 | a {
24 | color: #999;
25 | }
26 |
27 | table { /* collapse cell borders on every table */
28 | border-collapse: collapse;
29 | }
30 |
31 | /******************************
32 | * SUMMARY INFORMATION
33 | ******************************/
34 |
35 | #environment td {
36 | padding: 5px;
37 | border: 1px solid #E6E6E6;
38 | }
39 |
40 | #environment tr:nth-child(odd) { /* zebra-striped rows */
41 | background-color: #f6f6f6;
42 | }
43 |
44 | /******************************
45 | * TEST RESULT COLORS
46 | ******************************/
47 | span.passed, .passed .col-result {
48 | color: green;
49 | }
50 | span.skipped, span.xfailed, span.rerun, .skipped .col-result, .xfailed .col-result, .rerun .col-result {
51 | color: orange;
52 | }
53 | span.error, span.failed, span.xpassed, .error .col-result, .failed .col-result, .xpassed .col-result {
54 | color: red;
55 | }
56 |
57 |
58 | /******************************
59 | * RESULTS TABLE
60 | *
61 | * 1. Table Layout
62 | * 2. Extra
63 | * 3. Sorting items
64 | *
65 | ******************************/
66 |
67 | /*------------------
68 | * 1. Table Layout
69 | *------------------*/
70 |
71 | #results-table {
72 | border: 1px solid #e6e6e6;
73 | color: #999;
74 | font-size: 12px;
75 | width: 100%
76 | }
77 |
78 | #results-table th, #results-table td {
79 | padding: 5px;
80 | border: 1px solid #E6E6E6;
81 | text-align: left
82 | }
83 | #results-table th {
84 | font-weight: bold
85 | }
86 |
87 | /*------------------
88 | * 2. Extra
89 | *------------------*/
90 |
91 | .log:only-child { /* a lone log block inherits its container height */
92 | height: inherit
93 | }
94 | .log { /* monospaced, scrollable log panel */
95 | background-color: #e6e6e6;
96 | border: 1px solid #e6e6e6;
97 | color: black;
98 | display: block;
99 | font-family: "Courier New", Courier, monospace;
100 | height: 230px;
101 | overflow-y: scroll;
102 | padding: 5px;
103 | white-space: pre-wrap
104 | }
105 | div.image { /* right-floated 320x240 image box */
106 | border: 1px solid #e6e6e6;
107 | float: right;
108 | height: 240px;
109 | margin-left: 5px;
110 | overflow: hidden;
111 | width: 320px
112 | }
113 | div.image img {
114 | width: 320px
115 | }
116 | div.video { /* right-floated 320x240 video box (mirrors div.image) */
117 | border: 1px solid #e6e6e6;
118 | float: right;
119 | height: 240px;
120 | margin-left: 5px;
121 | overflow: hidden;
122 | width: 320px
123 | }
124 | div.video video {
125 | overflow: hidden;
126 | width: 320px;
127 | height: 240px;
128 | }
129 | .collapsed { /* hidden details row — presumably toggled by script via the expander/collapser classes below */
130 | display: none;
131 | }
132 | .expander::after {
133 | content: " (show details)";
134 | color: #BBB;
135 | font-style: italic;
136 | cursor: pointer;
137 | }
138 | .collapser::after {
139 | content: " (hide details)";
140 | color: #BBB;
141 | font-style: italic;
142 | cursor: pointer;
143 | }
144 |
145 | /*------------------
146 | * 3. Sorting items
147 | *------------------*/
148 | .sortable { /* pointer cursor marks clickable sort headers */
149 | cursor: pointer;
150 | }
151 |
152 | .sort-icon {
153 | font-size: 0px;
154 | float: left;
155 | margin-right: 5px;
156 | margin-top: 5px;
157 | /*triangle*/
158 | width: 0;
159 | height: 0;
160 | border-left: 8px solid transparent;
161 | border-right: 8px solid transparent;
162 | }
163 |
164 | .inactive .sort-icon {
165 | /*finish triangle*/
166 | border-top: 8px solid #E6E6E6;
167 | }
168 |
169 | .asc.active .sort-icon {
170 | /*finish triangle (border-bottom makes it point up = ascending)*/
171 | border-bottom: 8px solid #999;
172 | }
173 |
174 | .desc.active .sort-icon {
175 | /*finish triangle (border-top makes it point down = descending)*/
176 | border-top: 8px solid #999;
177 | }
178 |
--------------------------------------------------------------------------------
/docs/content/atbd.rst:
--------------------------------------------------------------------------------
1 | .. atbd - algorithm theoretical basis
2 | Author: Pieter De Vis
3 | Email: pieter.de.vis@npl.co.uk
4 | Created: 15/04/20
5 |
6 | .. _atbd:
7 |
8 | Algorithm Theoretical Basis
9 | ===========================
10 |
11 | Principles of Uncertainty Analysis
12 | ###################################
13 |
14 | The Guide to the expression of Uncertainty in Measurement (GUM 2008)
15 | provides a framework for how to determine and express the uncertainty of
16 | the measured value of a given measurand (the quantity which is being
17 | measured). The International Vocabulary of Metrology (VIM 2008) defines
18 | measurement uncertainty as:
19 |
20 | *"a non-negative parameter characterizing the dispersion of the
21 | quantity values being attributed to a measurand, based on the information used."*
22 |
23 | The standard uncertainty is the measurement uncertainty expressed as a
24 | standard deviation. Please note this is a separate concept to
25 | measurement error, which is also defined in the VIM as:
26 |
27 | *"the measured quantity value minus a reference quantity value."*
28 |
29 | Generally, the "reference quantity" is considered to be the "true value"
30 | of the measurand and is therefore unknown. Figure 1 illustrates these
31 | concepts.
32 |
33 | .. image:: ../figs/image1.png
34 |
35 | *Figure 1 - Diagram illustrating the different concepts of measured value and true value, uncertainty and error.*
36 |
37 | In a series of measurements (for example each pixel in a remote sensing
38 | Level 1 (L1) data product) it is vital to consider how the errors
39 | between the measurements in the series are correlated. This is crucial
40 | when evaluating the uncertainty of a result derived from these data (for
41 | example a Level 2 (L2) retrieval of geophysical parameter from a L1
42 | product). In their vocabulary the Horizon 2020 FIDUCEO [1]_ (Fidelity
43 | and uncertainty in climate data records from Earth observations) project
44 | (see FIDUCEO Vocabulary, 2018) define three broad categories of error
45 | correlation effects important to satellite data products, as follows:
46 |
47 | - **Random effects**: *"those causing errors that cannot be corrected
48 | for in a single measured value, even in principle, because the effect
49 | is stochastic. Random effects for a particular measurement process
50 | vary unpredictably from (one set of) measurement(s) to (another set
51 | of) measurement(s). These produce random errors which are entirely
52 | uncorrelated between measurements (or sets of measurements) and
53 | generally are reduced by averaging."*
54 |
55 |
56 | - **Structured random effects**: *"means those that across many
57 | observations there is a deterministic pattern of errors whose
58 | amplitude is stochastically drawn from an underlying probability
59 | distribution; "structured random" therefore implies "unpredictable"
60 | and "correlated across measurements"..."*
61 |
62 |
63 | - **Systematic (or common) effects**: *"those for a particular
64 | measurement process that do not vary (or vary coherently) from (one
65 | set of) measurement(s) to (another set of) measurement(s) and
66 | therefore produce systematic errors that cannot be reduced by
67 | averaging."*
68 |
69 | .. [1] See: https://www.fiduceo.eu
70 |
71 |
72 |
73 | .. _LPUMethod:
74 |
75 | Law of Propagation of Uncertainty Method
76 | #########################################
77 |
78 | Within the GUM framework uncertainty analysis begins with understanding
79 | the measurement function. The measurement function establishes the
80 | mathematical relationship between all known input quantities (e.g.
81 | instrument counts) and the measurand itself (e.g. radiance). Generally,
82 | this may be written as
83 |
84 | .. math:: Y = f\left( X_{i},\ldots,\ X_{N} \right)
85 |
86 | where:
87 |
88 | - :math:`Y` is the measurand;
89 |
90 | - :math:`X_{i}` are the input quantities.
91 |
92 | Uncertainty analysis is then performed by considering in turn each of
93 | these different input quantities to the measurement function, this
94 | process is represented in Figure 2. Each input quantity may be
95 | influenced by one or more error effects which are described by an
96 | uncertainty distribution. These separate distributions may then be
97 | combined to determine the uncertainty of the measurand,
98 | :math:`u^{2}(Y)`, using the *Law of Propagation of Uncertainties* (GUM,
99 | 2008),
100 |
101 | .. math:: u^{2}\left( Y \right) = \mathbf{\text{C\ S}}\left( \mathbf{X} \right)\mathbf{C}^{T}
102 |
103 | where:
104 |
105 | - :math:`\mathbf{C}` is the vector of sensitivity coefficients,
106 | :math:`\partial Y/\partial X_{i}`;
107 |
108 | - :math:`\mathbf{S(X)}` is the error covariance matrix for the input
109 | quantities.
110 |
111 | This can be extended to a measurement function with a measurand vector (rather than scalar) :math:`\mathbf{Y}=(Y_{i},\ldots,\ Y_{N})`.
112 | The uncertainties are then given by:
113 |
114 | .. math:: \mathbf{S(Y)}=\mathbf{J}\ \mathbf{S(X)} \mathbf{J}^T
115 |
116 | where:
117 |
118 | - :math:`\mathbf{J}` is the Jacobian matrix of sensitivity coefficients, :math:`J_{ni} = \partial Y_{n}/\partial X_{i}`;
119 | - :math:`\mathbf{S(Y)}` is the error covariance matrix (n*n) for the measurand;
120 | - :math:`\mathbf{S(X)}` is the error covariance matrix (i*i) for the input quantities.
121 |
122 | The error covariances matrices define the uncertainties (from the diagonal elements) as well as
123 | the correlation between the different quantities (off-diagonal elements).
124 |
125 | .. image:: ../figs/image2.png
126 |
127 | *Figure 2 - Conceptual process of uncertainty propagation.*
128 |
129 |
130 | .. _Monte Carlo Method:
131 |
132 | Monte Carlo Method
133 | ########################
134 | For a detailed description of the Monte Carlo (MC) method, we refer to `Supplement 1 to the
135 | "Guide to the expression of uncertainty in measurement" - Propagation of distributions
136 | using a Monte Carlo method <https://www.bipm.org/documents/20126/2071204/JCGM_101_2008_E.pdf>`_.
137 |
138 | Here we summarise the main steps and detail how these were implemented.
139 | The main stages consist of:
140 |
141 | - Formulation: Defining the measurand (output quantity Y), the input quantities :math:`X = (X_{i},\ldots,\ X_{N})`, and the measurement function (as a model relating Y and X). One also needs to assign Probability Density Functions (PDFs) to each of the input quantities, as well as define the correlation between them (through joint PDF).
142 |
143 | - Propagation: propagate the PDFs for the :math:`X_i` through the model to obtain the PDF for Y.
144 |
145 | - Summarizing: Use the PDF for Y to obtain the expectation of Y, the standard uncertainty u(Y) associated with Y (from the standard deviation), and the covariance between the different values in Y.
146 |
147 | The MC method implemented in punpy consists of generating joint PDF from the provided
148 | uncertainties and correlation matrices or covariances. Punpy then propagates the PDFs for the :math:`X_i` to Y
149 | and then summarises the results through returning the uncertainties and correlation matrices.
150 |
151 | As punpy is meant to be widely applicable, the user can define the measurand, input quantities
152 | and measurement function themselves. Within punpy, the input quantities and measurand will often
153 | be provided as python arrays (or scalars) and the measurement function in particular needs to be
154 | a python function that can take the input quantities as function arguments and returns the measurand.
155 |
156 | To generate the joint PDF, comet_maths is used. The ATBD for the comet_maths PDF generator is given
157 | `here <https://comet-maths.readthedocs.io/en/latest/>`_.
158 |
159 | The measurand pdf is then defined by processing each draw :math:`\xi_i` to Y:
160 |
161 | :math:`Y = f(\xi)`.
162 |
163 | The measurands for each of these draws are then concatenated into one array containing the full measurand sample.
164 | The uncertainties are then calculated by taking the standard deviation along these draws.
165 |
166 | When no `corr_dims` are specified, but `return_corr` is set to True, the correlation matrix is calculated
167 | by calculating the Pearson product-moment correlation coefficients of the full measurand sample along the draw dimension.
168 | When `corr_dims` are specified, the correlation matrix is calculated along a subset of the full measurand sample.
169 | This subset is made by taking only the first index along every dimension that is not the correlation dimension.
170 | We note that `corr_dims` should only be used when the error correlation matrices do not vary along the other dimension(s).
171 | A warning is raised if any element of the correlation matrix varies by more than 0.05 between the one using a subset taking
172 | only the first index along each other dimension and the one using a subset taking only the last index along each other dimension.
--------------------------------------------------------------------------------
/docs/content/getting_started.rst:
--------------------------------------------------------------------------------
1 | .. Getting Started
2 | Author: Pieter De Vis
3 | Email: pieter.de.vis@npl.co.uk
4 | Created: 15/04/20
5 |
6 | .. _getting_started:
7 |
8 | Getting Started
9 | ===============
10 |
11 | Dependencies
12 | #############
13 |
14 | Punpy has the following dependencies:
15 |
16 | * Python (3.7 or above)
17 | * `numpy <https://numpy.org>`_
18 | * `scipy <https://scipy.org>`_
19 | * `emcee <https://emcee.readthedocs.io>`_
20 | * `comet_maths <https://comet-maths.readthedocs.io>`_
21 | * `obsarray <https://obsarray.readthedocs.io>`_
22 |
23 |
24 | Installation
25 | #############
26 |
27 | The easiest way to install punpy is using pip::
28 |
29 | $ pip install punpy
30 |
31 | Ideally, it is recommended to do this inside a virtual environment (e.g. conda).
32 |
33 | Alternatively, for the latest development version, first go to the folder where you want to save punpy and clone the project repository from GitLab::
34 |
35 | $ git clone git@github.com:comet-toolkit/punpy.git
36 |
37 | Then go into the created directory and install the module with pip::
38 |
39 | $ cd punpy
40 | $ pip install -e .
41 |
42 | Example Usage
43 | ##############
44 |
45 | For examples on how to use punpy either as a standalone package or with digital effects tables, we refer to the `examples section `_ on the CoMet Website.
46 | There some jupyter notebooks (hosted on google colab) are available with examples.
47 |
48 | Below, we show an example for using punpy standalone for illustration purposes.
49 | For more complete examples with more detailed explanations, we refer to the CoMet website `examples `_.
50 |
51 | In this example, we use punpy to propagate uncertainties through a calibration process::
52 |
53 | import punpy
54 | import numpy as np
55 |
56 | # your measurement function
57 | def calibrate(L0,gains,dark):
58 | return (L0-dark)*gains
59 |
60 | # your data
61 | L0 = np.array([0.43,0.8,0.7,0.65,0.9])
62 | dark = np.array([0.05,0.03,0.04,0.05,0.06])
63 | gains = np.array([23,26,28,29,31])
64 |
65 | # your uncertainties
66 | L0_ur = L0*0.05 # 5% random uncertainty
67 | L0_us = L0*0.03 # 3% systematic uncertainty (used in propagate_systematic below)
68 | dark_ur = np.array([0.01,0.002,0.006,0.002,0.015]) # random uncertainty
69 | gains_ur = np.array([0.5,0.7,0.6,0.4,0.1]) # random uncertainty
70 | gains_us = np.array([0.1,0.2,0.1,0.4,0.3]) # systematic uncertainty
71 | # (different for each band but fully correlated)
72 | gains_utemp = gains*0.03
73 |
74 | corr_temp = []
75 |
76 | prop=punpy.MCPropagation(10000)
77 | L1=calibrate(L0,gains,dark)
78 | L1_ur=prop.propagate_random(calibrate,[L0,gains,dark],
79 | [L0_ur,gains_ur,dark_ur])
80 | L1_us=prop.propagate_systematic(calibrate,[L0,gains,dark],
81 | [L0_us,gains_us,np.zeros(5)])
82 | L1_ut=(L1_ur**2+L1_us**2)**0.5
83 | L1_cov=punpy.convert_corr_to_cov(np.eye(len(L1_ur)),L1_ur)+\
84 | punpy.convert_corr_to_cov(np.ones((len(L1_us),len(L1_us))),L1_us)
85 |
86 | print(L1)
87 | print(L1_ur)
88 | print(L1_us)
89 | print(L1_ut)
90 | print(L1_cov)
91 |
92 |
93 |
--------------------------------------------------------------------------------
/docs/content/punpy_digital_effects_table.rst:
--------------------------------------------------------------------------------
1 | .. Overview of method
2 | Author: Pieter De Vis
3 | Email: pieter.de.vis@npl.co.uk
4 | Created: 15/04/20
5 |
6 | .. _punpy_digital_effects_table:
7 |
8 | punpy in combination with digital effects tables
9 | =======================================================
10 | In this section we explain how punpy can be used for propagating uncertainties in digital effects tables through a measurement function.
11 | For details on how to create these digital effects tables, we refer to the `obsarray documentation `_.
12 | Once the digital effects tables are created, this is the most concise method for propagating uncertainties.
13 | The code in this section is just an illustration and we refer to the CoMet website `examples <https://www.comet-toolkit.org/examples/>`_ for examples with all required information for running punpy.
14 | The punpy package can propagate the various types of correlated uncertainties that can be stored in digital effects tables through a given measurement function. In the next subsection we discuss how these measurement functions need to be defined in order to use the digital effects tables.
15 |
16 | Digital Effects Tables
17 | #######################
18 | Digital Effects tables are created with the obsarray package. The `documentation for obsarray `_ is the reference for digital effect tables.
19 | Here we summarise the main concepts in order to give context to the rest of the Section.
20 |
21 | Digital effects tables are a digital version of the effects tables created as part of the `FIDUCEO project `_.
22 | Both FIDUCEO effects tables and digital effects tables store information on the uncertainties on a given variable, as well as its error-correlation information (see Figure below).
23 | The error-correlation information often needs to be specified along multiple different dimensions.
24 | For each of these dimensions (or for combinations of them), the correlation structure needs to be defined.
25 | This can be done using an error-correlation matrix, or using the `FIDUCEO correlation forms `_.
26 | These FIDUCEO correlation forms essentially provide a parametrisation of the error correlation matrix using a few parameters rather than a full matrix.
27 | These thus require much less memory and are typically the preferred option (though this is not always possible as not all error-correlation matrices can be parameterised in this way).
28 | Some correlation forms, such as e.g. "random" and "systematic" do not require any additional parameters.
29 | Others, such as "triangle_relative", require a parameter that e.g. sets the number of pixels/scanlines being averaged.
30 |
31 |
32 | .. image:: ../figs/Digital_effects_tables.jpg
33 |
34 | *Figure 1 - left: FIDUCEO effects table template. right: obsarray digital effects table for one uncertainty component.*
35 |
36 |
37 | The obsarray package which implements the digital effects tables, extends the commonly used xarray package.
38 | xarray objects typically have multiple variables with data defined on multiple dimensions and with attributes specifying additional information.
39 | In digital effects tables, each of these variables has one (or more) uncertainty variables associated with it.
40 | Each of these uncertainty components is clearly linked to its associated variable, and has the same dimensions.
41 | These uncertainty components, unsurprisingly, have uncertainties as the data values.
42 | As attributes, they have the information defining the error-correlation structure.
43 | If the FIDUCEO correlation forms can be used, the form name and optionally its parameters are stored directly in the attributes of the uncertainty component.
44 | If the FIDUCEO correlation forms cannot be used, the form in the attributes is listed as "err_corr_matrix" and as parameter it has the name of another variable in the xarray dataset that has the correlation matrix.
45 |
46 | Multiple uncertainty components can be added for the same data variable, and obsarray provide functionality to combine these uncertainties, either as the total uncertainties for a given variable, or as separate random, systematic, and structured components.
47 |
48 |
49 | Measurement Function
50 | ####################
51 | Generally, a measurement function can be written mathematically as:
52 |
53 | .. math:: y = f\left( x_{i},\ldots,\ x_{N} \right)
54 |
55 | where:
56 |
57 | * :math:`f` is the measurement function;
58 | * :math:`y` is the measurand;
59 | * :math:`x_{i}` are the input quantities.
60 |
61 | The measurand and input quantities are often vectors consisting of multiple numbers. Here, we choose an example of an ideal gas law equivalent:
62 |
63 | .. math:: V = \frac{8.314 \times n T}{P}
64 |
65 | where:
66 |
67 | * :math:`V` is the volume;
68 | * :math:`n` is the amount of substance (number of moles);
69 | * :math:`T` is the temperature;
70 | * :math:`P` is the pressure.
71 |
72 | Here :math:`V` is now the measurand, and :math:`n`, :math:`T` and :math:`P` are the input quantities.
73 | Digital effects tables for :math:`n`, :math:`T` and :math:`P` will thus need to be specified prior, and punpy will create a digital effects table for :math:`V` as output.
74 |
75 | In order to be able to do the uncertainty propagation with these digital effects tables, the measurement functions now need to be defined within a subclass of the MeasurementFunction class provided by punpy.
76 | In this subclass, one can then define the measurement function in python as a function called "function"::
77 |
78 | from punpy import MeasurementFunction
79 |
80 | class GasLaw(MeasurementFunction):
81 | def function(self, P, T, n):
82 |             return (8.314 * n * T)/P
83 |
84 | In some cases, it can also be useful to define the measurand name and input quantity names directly in this class::
85 |
86 | from punpy import MeasurementFunction
87 |
88 | class GasLaw(MeasurementFunction):
89 | def function(self, P, T, n):
90 |             return (8.314 * n * T)/P
91 |
92 | def get_measurand_name_and_unit(self):
93 | return "volume", "m^3"
94 |
95 | def get_argument_names(self):
96 | return ["pressure", "temperature", "n_moles"]
97 |
98 | These names will be used as variable names in the digital effects tables. This means they have to match the expected names in e.g. the input digital effects tables that are used.
99 | Providing the names of the input quantities and measurand in this way is not a requirement, as this information can also be provided when initialising the object of this class.
100 |
101 | Propagating uncertainties in digital effects tables
102 | ####################################################
103 | Once this kind of measurement function class is defined, we can initialise an object of this class.
104 | In principle there are no required arguments when creating an object of this class (all arguments have a default).
105 | However, in practice we will almost always provide at least some arguments.
106 | The first argument `prop` allows to pass a MCPropagation or LPUPropagation object. It thus specifies whether the Monte Carlo (MC) method (see Section :ref:`Monte Carlo Method`)
107 | or Law of Propagation of Uncertainties (LPU) method (see Section :ref:`LPUMethod`) should be used. These prop objects can be created with any of their options (such as parallel_cores)::
108 |
109 | prop = MCPropagation(1000, dtype="float32", verbose=False, parallel_cores=4)
110 |
111 | gl = IdealGasLaw(prop=prop)
112 |
113 | If no argument is provided for prop, a MCPropagation(100,parallel_cores=0) object is used.
114 | The next arguments are for providing the input quantity names and the measurand name and measurand unit respectively::
115 |
116 | gl = IdealGasLaw(prop=prop, xvariables=["pressure", "temperature", "n_moles"], yvariable="volume", yunit="m^3")
117 |
118 | In the `xvariables` argument, one needs to specify the names of each of the input quantities.
119 | These names have to be in the same order as in the specified function, and need to correspond to the names used for the variables in the digital effects tables.
120 | These variable names can be provided as optional arguments here, or alternatively using the get_argument_names() function in the class definition.
121 | If both options are provided, they are compared and an error is raised if they are different.
122 |
123 | Similarly, the `yvariable` gives the name of the measurand (or list of names if multiple measurands are returned by measurement function) and `yunit` specifies its associated unit(s).
124 | Alternatively, these can also be provided using the get_measurand_name_and_unit() function in the class definition (they will be cross-checked if both are provided).
125 | There are many more optional keywords that can be set to finetune the processing of the uncertainty propagation.
126 | These will be discussed in the :ref:`MeasurementFunctionOptions` section.
127 |
128 | Once this object is created, and a digital effects table has been provided (here as a NetCDF file), the uncertainties can be propagated easily::
129 |
130 | import xarray as xr
131 | ds_x1 = xr.open_dataset("digital_effects_table_gaslaw.nc")
132 | ds_y = gl.propagate_ds(ds_x1)
133 |
134 | This generates a digital effects table for the measurand, which could optionally be saved as a NetCDF file, or passed to the next stage of the processing.
135 | The measurand effects table will have separate contributions for the random, systematic and structured uncertainties, which can easily be combined into a single covariance matrix using the obsarray functionalities of the digital effects tables.
136 | It is quite common that not all the uncertainty information is available in a single digital effects table.
137 | In such cases, multiple digital effects tables can simply be provided to "propagate_ds".
138 | punpy will then search each of these effects tables for the input quantities provided when initialising the MeasurementFunction object.
139 | For example, if :math:`n`, :math:`T` and :math:`P`, each had their own digital effects tables, these could be propagated as::
140 |
141 | import xarray as xr
142 | ds_nmol = xr.open_dataset("n_moles.nc")
143 | ds_temp = xr.open_dataset("temperature.nc")
144 | ds_pres = xr.open_dataset("pressure.nc")
145 | ds_y = gl.propagate_ds(ds_pres, ds_nmol, ds_temp)
146 |
147 | These digital effects tables can be provided in any order. They can also contain numerous other quantities that are not relevant for the current measurement function.
148 | When multiple of these digital effects tables have a variable with the same name (which is used in the measurement function), an error is raised.
149 |
150 | Functions for propagating uncertainties
151 | ####################################################
152 | In the above example, we show an example of using the propagate_ds() function to obtain a
153 | measurand effects table that has separate contributions for the random, systematic and structured uncertainties.
154 | Depending on what uncertainty components one is interested in, there are a number of functions that can be used:
155 | * propagate_ds: measurand digital effects table with separate contributions for the random, systematic and structured uncertainties.
156 | * propagate_ds_tot: measurand digital effects table with one combined contribution for the total uncertainty (and error correlation matrix).
157 | * propagate_ds_specific: measurand digital effects table with separate contributions for a list of named uncertainty contributions provided by the user.
158 | * propagate_ds_all: measurand digital effects table with separate contributions for all the individual uncertainty contributions in the input quantities in the provided input digital effects tables.
159 |
160 | It is worth noting that the uncertainty components labelled in the measurand digital effect tables as
161 | "random" or "systematic" (either in propagate_ds, propagate_ds_specific or propagate_ds_all),
162 | will contain the propagated uncertainties for all uncertainty components on the input
163 | quantities that are random or systematic respectively along all the measurand dimensions.
164 | Any uncertainty components on the input quantities where this is not the case (e.g. because
165 | the error correlation along one dimension is random and along another is systematic;
166 | or because one of the error correlations is provided as a numerical error correlation matrix)
167 | will be propagated to the structured uncertainty components on the measurand.
168 |
169 | This is somewhat further complicated by the fact that the input quantity dimensions are
170 | not always the same as the measurand dimensions. If any of the measurand dimensions is
171 | not in the input quantity dimensions, some assumption needs to be made about how this input
172 | quantity will be correlated along that measurand dimension. Often, such a situation will
173 | simply mean that the same value of the input quantity will be used for every index along
174 | the measurand dimension (broadcasting). This often leads to a systematic correlation along this measurand
175 | dimension (a typical example would be the same spectral gains being applied to multiple
176 | spectral scans in a measurement, where the gains have a wavelength dimension and the
177 | spectral scans have wavelength and scan index dimensions; any error in the gains, will
178 | affect all scans equally). There are however also scenarios where
179 | the introduced error-correlation along the measurand dimension should be random (e.g. if
180 | a constant temperature is assumed and applied along the time dimension, but we know in
181 | reality the temperature is fluctuating randomly w.r.t. the assumed temperature). It can
182 | also be structured. Detailed understanding of the problem is thus required when the measurand
183 | dimensions are not all present in the input quantity dimensions. By default, punpy assumes that
184 | the error correlation along the missing dimensions is systematic. If another error correlation is required,
185 | this can be done by setting the `expand` keyword to True and the `broadcast_correlation` to the
186 | appropriate error correlation (either "rand", "syst" or an error correlation matrix as a numpy array).
187 | Depending on how this broadcast error correlation combines with
188 | the error correlations in the other dimensions, can also affect which measurand uncertainty component
189 | (random, systematic or structured) it contributes to when using propagate_ds.
190 |
191 | Sometimes one wants to propagate uncertainties one input quantity at a time.
192 | This can be the case no matter if we are propagating total uncertainties or individual components.
193 | When creating the MeasurementFunction object, it is possible to specify on which input quantities
194 | the uncertainties should be propagated using the `uncxvariables` keyword::
195 |
196 | gl = IdealGasLaw(prop=prop,
197 | xvariables=["pressure", "temperature", "n_moles"],
198 |                 uncxvariables=["pressure"],
199 | yvariable="volume",
200 | yunit="m^3")
201 | ds_y = gl.propagate_ds(ds_pres, ds_nmol, ds_temp)
202 |
203 | In the above example, only the uncertainties on pressure will be propagated.
204 | This behaviour could also be obtained by removing the unc_comps in the temperature and
205 | n_moles variables in their respective datasets, but the solution shown above is easier.
206 | If no uncxvariables are provided, the uncertainties on all input quantities are propagated.
207 |
208 |
209 | .. _MeasurementFunctionOptions:
210 |
211 | Options when creating MeasurementFunction object
212 | ##################################################
213 | A number of additional options are available when creating the MeasurementFunction object, and when running one of the propagate_ds functions.
214 | We refer to the API for a full list of the keywords, but here highlight some of the ones that were not previously explained.
215 |
216 | When creating the MeasurementFunction object, we previously discussed the `prop`, `xvariables`, `uncxvariables`, `yvariable` and
217 | `yunit` keywords. Next, there are a number of keywords that are the same as the keywords for using punpy as standalone. These are
218 | `corr_between`,`param_fixed`, `repeat_dims`, `corr_dims`, `seperate_corr_dims`, `allow_some_nans`. Here these keywords work in the same way as for standalone
219 | punpy and we refer to the :ref:`punpy_standalone` Section for further explanation. The one difference is that here, the repeat_dims and
220 | corr_dims can be provided as dimension names rather than dimension indices (dimension indices are also still allowed).
221 | If a string corr_dim is provided that is present in some but not all of the measurand dimensions (only relevant when there are multiple differently-shaped measurands), the `seperate_corr_dims` will automatically be set to True, and the appropriate separate corr_dims will be worked out automatically.
222 |
223 | The options we have not previously explained are the `ydims`, `refxvar` and `sizes_dict`. These all have to do with the handling of dimensions when they differ between input quantities (or between input quantities and measurand).
224 | In the typical punpy usecase, the dimensions of the measurand are the same as the dimensions of the input quantities.
225 | If this is not the case, the `ydims` keyword should be set to a list of the measurand dimensions (in order matching the shape).
226 | If one of these dimensions is not in the input quantities, one should also provide `sizes_dict`, which is a dictionary with all dimension names as keys, and the dimension size as the value.
227 | Alternatively, if the dimensions of the measurand match the dimensions of one (but not all) of the input quantities, the measurand shape can
228 | be automatically set if `refxvar` is provided, where `refxvar` is the name of the input quantity with matching shape.
229 |
230 | Finally the `use_err_corr_dict` is explained in the :ref:`punpy_memory_and_speed` Section.
231 |
232 | Options when running propagate_ds functions
233 | ##################################################
234 | There are also a few options when running the propagate_ds (or similar) functions.
235 | The `store_unc_percent` keyword simply indicates whether the measurand uncertainties should be stored in percent or in the measurand units (the latter is the default).
236 | The `expand` keyword indicates whether the input quantities should be expanded/broadcasted to the shape of the measurand, prior to passing to the measurement function (defaults to False).
237 |
238 | `ds_out_pre` allows to provide a pre-generated xarray dataset (typically made using obsarray) in which the results will be stored.
239 | This can be used to add additional variables to the dataset prior to running the uncertainty propagation, or to concatenate multiple uncertainty propagation results into one file.
240 | By default, the measurand variables and associated uncertainty and error correlation will be overwritten, but all other variables in the dataset remain.
241 | If one does not want to get punpy to work out the error correlations itself, but just use the ones in the template (e.g. because in complex cases, punpy can run into problems),
242 | this can be specified by setting the `use_ds_out_pre_unmodified` keyword to True. In this case, only the values of the variables will be changed, but none of the attributes.
243 |
244 | Finally the `include_corr` keyword can be set to False if error correlations should be omitted from the calculation.
245 | The latter results in faster processing but can lead to wrong results so should be used with caution.
--------------------------------------------------------------------------------
/docs/content/punpy_memory_and_speed.rst:
--------------------------------------------------------------------------------
1 | .. Overview of method
2 | Author: Pieter De Vis
3 | Email: pieter.de.vis@npl.co.uk
4 | Created: 15/04/20
5 |
6 | .. _punpy_memory_and_speed:
7 |
8 | Managing memory and processing speed in punpy
9 | ==============================================
10 |
11 | Storing error correlation matrices separately per dimension
12 | #############################################################
13 | Error correlation matrices typically take up a lot of memory as they give the error correlation coefficients between each pair of measurements.
14 | The MC samples themselves can also take up a lot of memory when the number of samples is large.
15 | One easy way to limit the amount of memory used is by setting the `dtype` of the MC samples (and resulting uncertainties and error correlation matrices)::
16 |
17 | prop = MCPropagation(20000, dtype=np.float32)
18 |
19 | Random and systematic uncertainty components take up relatively little space, as each of their error
20 | correlation dimensions are by definition parameterised as random or systematic.
21 | For structured components with error correlation matrices stored as separate variables, it is not
22 | uncommon for these matrices to take up a lot of memory. This is especially the case when
23 | each of the dimensions is not parametrised separately, and instead an error correlation
24 | matrix is provided along the combination of dimensions. E.g. for a variable with dimensions (x,y,z),
25 | which correspond to a shape of e.g. (20,30,40), the resulting total error correlation matrix will have shape
26 | (20*30*40,20*30*40) which would contain 576 million elements. The shape chosen here as an example is
27 | quite moderate, so it is clear this could be an issue when using larger datasets.
28 |
29 | The solution to this is to avoid storing the full (x*y*z,x*y*z) error correlation matrix when possible.
30 | In many cases, even though the errors for pixels along a certain dimension (e.g. x) might
31 | be correlated, this error correlation w.r.t x does not change for different values of y or z.
32 | In that case, the error correlation for x can be separated and stored as a matrix of shape (x,x).
33 |
34 | One way to reduce the memory load is by separately storing the error-correlation matrices in the output dataset.
35 | In the :ref:`punpy_standalone` section, we showed that the `corr_dims` keyword can be used to output the error correlation matrix for a single dimension rather than the full error correlation matrix.
36 | This can also be used to separately store the error correlation for each dimension by passing a list of all dimension indices for `corr_dims`::
37 |
38 | ur_y, corr_y = prop.propagate_random(measurement_function,
39 | [x1, x2, x3], [ur_x1, ur_x2, ur_x3], return_corr=True, corr_dims=[0,1])
40 |
41 | where now corr_y will have two elements, the first of which will be a (k,k) shaped matrix and the second one a (l,l) matrix.
42 | Saving the error correlations in this manner often requires less memory than storing the full error correlation matrix.
43 | However, because of the averaging over the other dimension(s) when calculating these one-dimensional error correlation matrices, some information is lost.
44 | In fact, this approach should only be done when the error correlation matrices do not vary along the other dimension(s).
45 | Whether this method can be applied thus depends on the measurement function itself and it should be used with caution (a good rule of thumb to decide whether this approach can be used is whether the measurement function could be applied without passing data along the averaged dimension(s) at once).
46 | There are cases where the measurement function can only partially be decomposed (e.g. a measurement function where the first and second dimensions of the measurand have some complex correlation, but the third dimension can easily be separated out as effectively the calculation could be done entirely separate for each index along this third dimension).
47 | In such cases, the corr_dims keyword for the dimensions that cannot be separated can be given a string with the dimension indices separated by a dot::
48 |
49 | ur_y, corr_y = prop.propagate_random(measurement_function,
50 | [x1, x2, x3], [ur_x1, ur_x2, ur_x3], return_corr=True, corr_dims=["0.1",2])
51 |
52 | Here, if the measurand (and ur_y) are of shape (k,l,m), corr_y will have two elements. The first element will be of shape (k*l,k*l) and the second element of shape (m,m).
53 |
54 | We note that for the digital effects table use case, when creating the MeasurementFunction object, it is possible to provide the `corr_dims` keyword as strings with the dimension names rather than dimension indices (both options work).
55 | When using dimension names as strings, they can still be separated by a dot to indicate combined error correlation matrices in the outputs::
56 |
57 | gl = GasLaw(prop, ["n_moles", "temperature", "pressure"], corr_dims=["x.y","time"])
58 |
59 | Using error correlation dictionaries
60 | ######################################
61 | In addition to the large error correlation matrices in the output, another memory issue comes from the calculation of the error-correlation for each of the input quantities (which often have the same dimensions).
62 | When using punpy as standalone, one can pass the error correlation for separate dimensions using a dictionary::
63 |
64 | ufb, ucorrb = prop.propagate_systematic(
65 | functionb,
66 | xsb,
67 | xerrsb,
68 | corr_x=[{"0":np.eye(len(xerrb)),"1":"syst"}, "syst"],
69 | return_corr=True,
70 | )
71 |
72 | This avoids having to pass the correlation matrix as one large array.
73 | When using digital effects tables with punpy, the use of these correlation dictionaries can be handled internally.
74 | This can be achieved by setting the use_err_corr_dict keyword::
75 |
76 | gl = IdealGasLaw(
77 | prop=prop,
78 | xvariables=["pressure", "temperature", "n_moles"],
79 | yvariable="volume",
80 | yunit="m^3",
81 | use_err_corr_dict=True,
82 | )
83 |
84 | Input quantities with repeated measurements along one axis
85 | ###############################################################
86 | In general, random uncertainties are uncorrelated between repeated measurements, and systematic
87 | uncertainties are fully correlated between repeated measurements.
88 | If the input quantities are arrays and no further information is specified, punpy assumes that all the different
89 | values in the array are repeated measurements, and the correlation between the values is treated accordingly.
90 |
91 | However, it is also possible that the arrays provided in the input quantities have multiple dimensions,
92 | one of which is for repeated measurements, and one is another dimension. E.g. when propagating uncertainties
93 | in spectra, often one of the input quantities is a 2D array where along one dimension there are repeated
94 | measurements and along another there are different wavelengths. In this case the `repeat_dims` keyword can
95 | be set to an integer indicating which dimension has repeated measurements.
96 | When the `repeat_dims` keyword is set, punpy also splits the calculations and does them separately per repeated measurement.
97 | This reduces the memory requirements and as a result speeds up the calculations. It is however possible that
98 | not all of the input quantities have repeated measurements. E.g. one of the input quantities could be an array of three
99 | calibration coefficients, whereas another input quantity is an array with repeated spectral measurements which are being calibrated.
100 | If the `repeat_dims` keyword does not apply to one of the input quantities, this can be specified by the `param_fixed` keyword.
101 | This keyword then needs to be set to a list of bools where each bool indicates whether the corresponding input quantity
102 | should remain fixed (True) or should be split along repeat_dims (False).
103 |
104 | If x1, x2, us_x1, us_x2 are all arrays with shape (n_wav,n_repeats) where n_wav is the
105 | number of wavelengths and n_repeats is the number of repeated
106 | measurements, and x3 is an array with some calibration coefficients (with uncertainties us_x3)::
107 |
108 | import numpy as np
109 |
110 | corr_wav_x1= np.eye(len(wavelengths)) # This is a diagonal (i.e.
111 | # uncorrelated) correlation matrix with shape (n_wav,n_wav) where
112 | # n_wav is the number of wavelengths.
113 |
114 | corr_wav_x2= np.ones((len(wavelengths),len(wavelengths))) # This is
115 | # a correlation matrix of ones (i.e. fully correlated) with shape
116 | #(n_wav,n_wav) where n_wav is the number of wavelengths.
117 |
118 | corr_wav_x3= None # When set to None, the correlation between
119 | # wavelength defaults to the same as the correlation between repeated
120 | # wavelengths (i.e. fully correlated for propagate_systematic()).
121 |
122 | param_fixed_x1x2x3 = [False,False,True] # indicates that x1 and x2
123 | # have repeated measurements along repeat_dims and calculations will
124 | # be split up accordingly, and x3 will remain fixed and not split up
125 | # (x3 does not have the right shape to be split up)
126 |
127 | us_y, corr_y = prop.propagate_systematic(measurement_function,
128 | [x1, x2, x3], [us_x1, us_x2, us_x3],
129 | corr_x=[corr_wav_x1,corr_wav_x2,corr_wav_x3],
130 |     param_fixed=param_fixed_x1x2x3, return_corr=True,
131 | repeat_dims=1, corr_dims=0)
132 |
133 | Here only one matrix is returned for corr_y with a shape matching the provided corr_dims, rather than a correlation matrix per repeated measurement. The matrices for each repeated measurement have been averaged.
134 | We note that if no corr_dims are set, the default option is to return a combined error correlation matrix for all dimensions that are not in repeat_dims.
135 |
136 | In some cases, when there are multiple measurands with different shapes, it is not clear what dimension the repeat_dim refers to.
137 | In such cases, the `refyvar` keyword should be set to the index of the measurand with the most dimensions and the repeat_dims indexes should correspond to this measurand.
138 |
139 |
140 | Processing the MC samples in parallel
141 | ######################################
142 | At the start of this section we already saw that the optional `parallel_cores` keyword can be used to run the MC
143 | samples one-by-one through the measurement function rather than all at once as in the standard case. It is also possible
144 | to use the same keyword to use parallel processing. Here, only the processing of the input quantities through the measurement
145 | function is done in parallel. Generating the samples and calculating the covariance matrix etc is still done as normal.
146 | Punpy uses the multiprocessing module which comes standard with your python distribution.
147 | The gain by using parallel processing only really outweighs the overhead if the measurement function is relatively slow
148 | (of the order of 0.1 s or slower for one set of input quantities).
149 |
150 | Parallel processing for MC can be done as follows::
151 |
152 | if __name__ == "__main__":
153 | prop = punpy.MCPropagation(10000,parallel_cores=4)
154 | ur_y = prop.propagate_random(measurement_function, [x1, x2, x3],
155 | [ur_x1, ur_x2, ur_x3])
156 | us_y = prop.propagate_systematic(measurement_function, [x1, x2, x3],
157 | [us_x1, us_x2, us_x3])
158 |
159 | Note that the use of 'if __name__ == "__main__":' is required when using a Windows machine for multiprocessing and is generally good practice.
160 | When processing in parallel, child processes are generated from the parent code, and the above statement is necessary in Windows to avoid the child processes to generate children themselves.
161 | Everything using the results of the multiprocessing needs to be inside the 'if __name__ == "__main__"'.
162 | However the measurement function itself needs to be outside this since the child processes need to find this.
163 |
164 | One other important aspect is that in order for the parallel processing to work, the measurement function cannot be a normal function of a class.
165 | It can however be a static function of a class.
166 | This means that if we want to do parallel processing for a measurement function in a punpy MeasurementFunction class in order to use digital effects tables, we need to define it as a static function::
167 |
168 | # Define your measurement function inside a subclass of MeasurementFunction
169 | class IdealGasLaw(MeasurementFunction):
170 | @staticmethod
171 | def meas_function(pres, temp, n):
172 |             return (n * temp * 8.314) / pres
173 |
174 | Measurement functions for which multiprocessing can be used can thus not have self as their first argument.
175 |
176 | For the LPU method, it is also possible to use parallel processing, though only if the `repeat_dims` keyword is set.
177 | In this case each of the repeated measurements is processed in parallel::
178 |
179 | if __name__ == "__main__":
180 | prop = punpy.LPUPropagation(parallel_cores=4)
181 | ur_y = prop.propagate_random(measurement_function, [x1, x2, x3],
182 | [ur_x1, ur_x2, ur_x3],repeat_dims=0)
183 | us_y = prop.propagate_systematic(measurement_function, [x1, x2, x3],
184 | [us_x1, us_x2, us_x3],repeat_dims=0)
185 |
186 | Separating MC propagation in different stages
187 | ###############################################
188 | In some cases, it is necessary to run a large MC sample but the measurement function requires too much memory to run all the MC samples in one single run.
189 | In such cases it is possible to break up the punpy processing in different stages. Generally, there are 4 stages:
190 | - Generating the MC sample of the input quantities.
191 | - Running these samples through the measurement function.
192 | - Combining the MC samples of measurands.
193 | - Processing the MC measurand sample to produce the required outputs (such as uncertainties and error correlation matrices).
194 |
195 | In code, this looks like::
196 |
197 | MC_x = prop.generate_MC_sample(xsd, xerrsd, corrd)
198 | MC_y1 = prop.run_samples(functiond, MC_x, output_vars=2, start=0, end=10000)
199 | MC_y2 = prop.run_samples(functiond, MC_x, output_vars=2, start=10000, end=20000)
200 | MC_y = prop.combine_samples([MC_y1, MC_y2])
201 |
202 | ufd, ucorrd, corr_out = prop.process_samples(
203 | MC_x, MC_y, return_corr=True, corr_dims=0, output_vars=2
204 | )
205 |
206 | Here the run has been broken up into two separate calls to run the samples, which can be controlled by specifying the start and end indices of the MC sample of input quantities (i.e. which MC iterations should be processed by this call).
207 | This can be broken up into any number of samples. The running of these samples through the measurement function can even be distributed on different computers. The different measurand samples could then simply be stored in files, before bringing them all together and analysing the combined measurand MC sample.
208 | This also allows detailed control (e.g. quality checks) on the measurand MC samples, prior to processing the samples.
209 |
210 | Additional options
211 | #####################
212 | For both MC and LPU methods there are some cases, when there is only one correlation matrix contributing to the measurand (e.g. a complicated
213 | measurement function where all but one of the input quantities are known with perfect precision, i.e. without uncertainty),
214 | it can be beneficial to just copy this correlation matrix to the measurand rather than calculating it (since copying is faster
215 | and does not introduce MC noise). When the `fixed_corr_var` is set to True, punpy automatically detects if there is only one
216 | term of uncertainty, and if so copies the relevant correlation matrix to the output instead of calculating it. If `fixed_corr_var`
217 | is set to an integer, the correlation matrix corresponding to that input quantity is copied without any checks::
218 |
219 | prop = punpy.MCPropagation(10000)
220 | ur_y = prop.propagate_random(
221 | measurement_function, [x1, x2, x3], [ur_x1, ur_x2, ur_x3],
222 | corr_between=corr_x1x2x3, fixed_corr_var=True)
223 |
--------------------------------------------------------------------------------
/docs/content/user_guide.rst:
--------------------------------------------------------------------------------
1 | ###########
2 | User Guide
3 | ###########
4 |
5 | In this user guide, you will find detailed descriptions and examples that describe many common tasks that you can accomplish with **punpy**.
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 |
10 | punpy_standalone
11 | punpy_digital_effects_table
12 | punpy_memory_and_speed
13 |
--------------------------------------------------------------------------------
/docs/figs/Digital_effects_tables.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/docs/figs/Digital_effects_tables.jpg
--------------------------------------------------------------------------------
/docs/figs/comet_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/docs/figs/comet_logo.png
--------------------------------------------------------------------------------
/docs/figs/image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/docs/figs/image1.png
--------------------------------------------------------------------------------
/docs/figs/image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/docs/figs/image2.png
--------------------------------------------------------------------------------
/docs/figs/punpy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/docs/figs/punpy.png
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Punpy: Propagating Uncertainties with PYthon
2 | ==================================================
3 |
4 | The **punpy** module is a Python software package to propagate random, structured and systematic uncertainties through a given measurement function.
5 |
6 | **punpy** can be used as a standalone tool, where the input uncertainties are inputted manually.
7 | Alternatively, **punpy** can also be used in combination with digital effects tables created with **obsarray**.
8 | This documentation provides general information on how to use the module (with some examples), as well as a detailed API of the included classes and functions.
9 |
10 | .. grid:: 2
11 | :gutter: 2
12 |
13 | .. grid-item-card:: Quickstart Guide
14 | :link: content/getting_started
15 | :link-type: doc
16 |
17 | New to *punpy*? Check out the quickstart guide for an introduction.
18 |
19 | .. grid-item-card:: User Guide
20 | :link: content/user_guide
21 | :link-type: doc
22 |
23 | The user guide provides documentation and examples of how to use **punpy** either standalone or in combination with *obsarray* digital effects tables.
24 |
25 | .. grid-item-card:: ATBD
26 | :link: content/atbd
27 | :link-type: doc
28 |
29 | ATBD mathematical description of **punpy** (under development).
30 |
31 | .. grid-item-card:: API Reference
32 | :link: content/api
33 | :link-type: doc
34 |
35 | The API Reference contains a description of each of the **punpy** classes and functions.
36 |
37 |
38 | Acknowledgements
39 | ----------------
40 |
41 | **punpy** has been developed by Pieter De Vis.
42 |
43 | The development has been funded by:
44 |
45 | * The UK's Department for Business, Energy and Industrial Strategy's (BEIS) National Measurement System (NMS) programme
46 | * The IDEAS-QA4EO project funded by the European Space Agency.
47 |
48 | Project status
49 | --------------
50 |
51 | **punpy** is under active development. It is beta software.
52 |
53 | .. toctree::
54 | :maxdepth: 2
55 | :hidden:
56 | :caption: For users
57 |
58 | Quickstart
59 | User Guide
60 | ATBD
61 | API Reference
62 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation
REM Usage: make.bat <target>   (e.g. "make.bat html")
REM Running without arguments prints the list of available build targets.

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=python -msphinx
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=punpy

if "%1" == "" goto help

REM Probe that Sphinx is importable; errorlevel 9009 means "command not found".
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The Sphinx module was not found. Make sure you have Sphinx installed,
	echo.then set the SPHINXBUILD environment variable to point to the full
	echo.path of the 'sphinx-build' executable. Alternatively you may add the
	echo.Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Delegate the requested target to sphinx-build's make-mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
37 |
--------------------------------------------------------------------------------
/punpy/__init__.py:
--------------------------------------------------------------------------------
# Public API of the punpy package: expose the propagation classes and the
# MeasurementFunction base class at the top level.
from punpy.digital_effects_table.measurement_function import MeasurementFunction
from punpy.lpu.lpu_propagation import LPUPropagation
from punpy.mc.mc_propagation import MCPropagation

# Star imports deliberately re-export the public helpers of the utility modules.
from punpy.utilities.correlation_forms import *
from punpy.utilities.utilities import *
from ._version import __version__

"""___Authorship___"""
__author__ = "Pieter De Vis"
__created__ = "01/03/2020"
__maintainer__ = "Pieter De Vis"
__email__ = "pieter.de.vis@npl.co.uk"
__status__ = "Development"
14 |
--------------------------------------------------------------------------------
/punpy/_version.py:
--------------------------------------------------------------------------------
# Single source of truth for the package version (re-exported in punpy/__init__.py).
__version__ = "1.0.1"
2 |
--------------------------------------------------------------------------------
/punpy/digital_effects_table/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/__init__.py
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/__init__.py
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/det_hypernets_cal.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/det_hypernets_cal.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/det_hypernets_l0.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/det_hypernets_l0.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/digital_effects_table_gaslaw_example.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/digital_effects_table_gaslaw_example.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/make_digital_effects_table_gaslaw.py:
--------------------------------------------------------------------------------
"""Create the example ideal-gas-law digital effects table used by the tests.

Builds an obsarray template for temperature, pressure, n_moles and the molar
gas constant R (each with uncertainty components and error-correlation
definitions), fills it with constant example values, and writes the result to
``digital_effects_table_gaslaw_example.nc`` next to this script.
"""
import numpy as np
import obsarray
import os

# define ds variables
# Each "u_*" entry is an uncertainty component of its parent variable; its
# "err_corr" list declares the error-correlation form along each dimension.
template = {
    "temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "K",
            "unc_comps": ["u_ran_temperature", "u_sys_temperature"],
        },
    },
    # random (uncorrelated) temperature uncertainty along all dimensions
    "u_ran_temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "K",
            "err_corr": [
                {"dim": "x", "form": "random", "params": [], "units": []},
                {"dim": "y", "form": "random", "params": [], "units": []},
                {"dim": "time", "form": "random", "params": [], "units": []},
            ],
        },
    },
    # fully systematic (fully correlated) temperature uncertainty
    "u_sys_temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "K",
            "err_corr": [
                {"dim": "x", "form": "systematic", "params": [], "units": []},
                {"dim": "y", "form": "systematic", "params": [], "units": []},
                {"dim": "time", "form": "systematic", "params": [], "units": []},
            ],
        },
    },
    "pressure": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {"units": "Pa", "unc_comps": ["u_str_pressure"]},
    },
    # structured pressure uncertainty: random in x, an explicit correlation
    # matrix in y (stored in "err_corr_str_pressure_y"), systematic in time
    "u_str_pressure": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "Pa",
            "err_corr": [
                {"dim": "x", "form": "random", "params": [], "units": []},
                {
                    "dim": "y",
                    "form": "err_corr_matrix",
                    "params": "err_corr_str_pressure_y",
                    "units": [],
                },
                {"dim": "time", "form": "systematic", "params": [], "units": []},
            ],
        },
    },
    # error-correlation matrix along y referenced by u_str_pressure above
    "err_corr_str_pressure_y": {
        "dtype": np.float32,
        "dim": ["y", "y"],
        "attributes": {"units": ""},
    },
    "n_moles": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {"units": "", "unc_comps": ["u_ran_n_moles"]},
    },
    "u_ran_n_moles": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "",
            "err_corr": [
                {"dim": "x", "form": "random", "params": [], "units": []},
                {"dim": "y", "form": "random", "params": [], "units": []},
                {"dim": "time", "form": "random", "params": [], "units": []},
            ],
        },
    },
    # scalar (dimensionless-shape) variable: the molar gas constant
    "R": {
        "full_name": "molar gas constant (defined as the Avogadro constant multiplied by the Boltzmann constant)",
        "dtype": np.float32,
        "dim": [],
        "attributes": {"units": "J K^-1 mol^-1", "unc_comps": ["u_R"]},
    },
    # relative uncertainty on R, expressed in percent
    "u_R": {
        "full_name": "molar gas constant (defined as the Avogadro constant multiplied by the Boltzmann constant)",
        "dtype": np.float32,
        "dim": [],
        "attributes": {
            "units": "%",
            "err_corr": [],
        },
    },
}

# define dim_size_dict to specify size of arrays
dim_sizes = {"x": 20, "y": 30, "time": 6}

# create dataset template
ds = obsarray.create_ds(template, dim_sizes)

# populate with example data (constant fields so expected outputs are easy
# to compute analytically in the tests)
ds["temperature"].values = 293 * np.ones((20, 30, 6))
ds["u_ran_temperature"].values = 1 * np.ones((20, 30, 6))
ds["u_sys_temperature"].values = 0.4 * np.ones((20, 30, 6))
ds["pressure"].values = 10**5 * np.ones((20, 30, 6))
ds["u_str_pressure"].values = 10 * np.ones((20, 30, 6))
# 0.5 everywhere off-diagonal, 1.0 on the diagonal
ds["err_corr_str_pressure_y"].values = 0.5 * np.ones((30, 30)) + 0.5 * np.eye(30)
ds["n_moles"].values = 40 * np.ones((20, 30, 6))
ds["u_ran_n_moles"].values = 8.31446261815324 if False else 8.31446261815324 if False else 40 * 0 + 1 * np.ones((20, 30, 6)) if False else 1 * np.ones((20, 30, 6))
ds["R"].values = 8.31446261815324
ds["u_R"].values = 10**-6

# store example file
dir_path = os.path.dirname(os.path.realpath(__file__))
ds.to_netcdf(os.path.join(dir_path, "digital_effects_table_gaslaw_example.nc"))
121 |
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/make_hypernets_det.py:
--------------------------------------------------------------------------------
"""Build the HYPERNETS L0 / calibration digital effects tables for the tests.

Reads the raw test datasets (``test_l0.nc`` / ``test_cal.nc``), converts
relative uncertainties to percent, re-creates the datasets from the obsarray
templates defined in ``variables.py`` and stores them as
``det_hypernets_l0.nc`` / ``det_hypernets_cal.nc``.
"""
import obsarray
import xarray as xr
from variables import L0_IRR_VARIABLES, CAL_VARIABLES
import numpy as np

ds_in = xr.open_dataset("test_l0.nc")  # raw L0 digital effects table
ds_cal = xr.open_dataset("test_cal.nc")  # raw calibration digital effects table

# relative uncertainties should be in percent
for var in ds_in.variables:
    if "u_rel_" in var:
        ds_in[var].values = ds_in[var].values * 100

for var in ds_cal.variables:
    if (
        "u_rel_" in var
        and "units" in ds_cal[var].attrs
        and ds_cal[var].attrs["units"] == "-"
    ):
        ds_cal[var].values = ds_cal[var].values * 100
        # BUGFIX: this line previously used "==", which only compared the
        # units and discarded the result, leaving the attribute at "-".
        ds_cal[var].attrs["units"] = "%"

# define dim_size_dict to specify size of arrays
dim_sizes = {
    "wavelength": len(ds_in.wavelength),
    "scan": len(ds_in.scan),
}

dim_sizes_cal = {
    "wavelength": len(ds_cal.wavelength),
    "calibrationdates": 1,
    "nonlinearcoef": 13,
    "nonlineardates": 1,
    "wavcoef": 5,
    "wavdates": 1,
}

# create dataset templates and copy the raw values into them
ds_out = obsarray.create_ds(L0_IRR_VARIABLES, dim_sizes)
ds_cal_out = obsarray.create_ds(CAL_VARIABLES, dim_sizes_cal)

print(ds_out.digital_number)

for key in ds_out.keys():
    ds_out[key].values = ds_in[key].values

# BUGFIX: Dataset.assign_coords returns a NEW dataset rather than modifying
# in place; the returned objects were previously discarded, so the output
# file was written without the wavelength/scan coordinates.
ds_out = ds_out.assign_coords(wavelength=ds_in.wavelength, scan=ds_in.scan)

for key in ds_cal_out.keys():
    # raw calibration variables may have a different (flattened) layout
    ds_cal_out[key].values = ds_cal[key].values.reshape(ds_cal_out[key].shape)

# store example files
ds_out.to_netcdf("det_hypernets_l0.nc")
ds_cal_out.to_netcdf("det_hypernets_cal.nc")
57 |
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/propagate_ds_example.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/propagate_ds_example.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/test_cal.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/test_cal.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/test_fillvalues.py:
--------------------------------------------------------------------------------
"""
Tests for fill-value and encoding handling when writing obsarray datasets
to netCDF.
"""

import os.path
import unittest

import numpy as np
import numpy.testing as npt
import xarray as xr
import obsarray

from punpy import MeasurementFunction, MCPropagation

"""___Authorship___"""
__author__ = "Pieter De Vis"
__created__ = "28/07/2021"
__maintainer__ = "Pieter De Vis"
__email__ = "pieter.de.vis@npl.co.uk"
__status__ = "Development"

dir_path = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): ds_gaslaw is loaded here but not referenced by any test in
# this module — possibly left over from an earlier version; confirm before
# removing.
ds_gaslaw = xr.open_dataset(
    os.path.join(dir_path, "digital_effects_table_gaslaw_example.nc")
)

# define dim_size_dict to specify size of arrays
dim_sizes = {"x": 20, "y": 30, "time": 6}

# define ds variables: a temperature field with a single random uncertainty
# component, used as the minimal round-trip fixture
template = {
    "temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {"units": "K", "unc_comps": ["u_ran_temperature"]},
    },
    "u_ran_temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "K",
            "err_corr": [
                {"dim": "x", "form": "random", "params": [], "units": []},
                {"dim": "y", "form": "random", "params": [], "units": []},
                {"dim": "time", "form": "random", "params": [], "units": []},
            ],
        },
    },
}

# same fixture but with an explicit netCDF encoding (packed uint16 with a
# scale factor), used by the encoding-specific tests
template_encoding = {
    "temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {"units": "K", "unc_comps": ["u_ran_temperature"]},
        "encoding": {"dtype": np.uint16, "scale_factor": 0.01},
    },
    "u_ran_temperature": {
        "dtype": np.float32,
        "dim": ["x", "y", "time"],
        "attributes": {
            "units": "K",
            "err_corr": [
                {"dim": "x", "form": "random", "params": [], "units": []},
                {"dim": "y", "form": "random", "params": [], "units": []},
                {"dim": "time", "form": "random", "params": [], "units": []},
            ],
        },
    },
}
71 |
72 |
class TestFillValue(unittest.TestCase):
    """
    Unit tests for _FillValue handling when round-tripping obsarray datasets
    through netCDF, with and without explicit encodings.

    All four tests previously duplicated the same dataset construction and
    write/read cycle; that shared code is factored into the two helpers below.
    """

    @staticmethod
    def _make_example_ds(tmpl):
        """Create a dataset from *tmpl* with NaN at [0,0,0] and 0 at [0,0,1]."""
        ds = obsarray.create_ds(tmpl, dim_sizes)
        ds["temperature"].values = 293 * np.ones((20, 30, 6))
        ds["temperature"].values[0, 0, 0] = np.nan
        ds["temperature"].values[0, 0, 1] = 0
        return ds

    @staticmethod
    def _roundtrip(ds):
        """Write *ds* to a scratch netCDF file and read it back."""
        path = os.path.join(dir_path, "test_encoding.nc")
        ds.to_netcdf(path)
        return xr.open_dataset(path)
        # the scratch file is intentionally left on disk (as in the original
        # tests, where os.remove was commented out)

    def test_to_netcdf_encoding_fixed(self):
        # with the default encoding, NaN survives the round trip and 0 is kept
        ds_load = self._roundtrip(self._make_example_ds(template_encoding))
        assert np.isnan(ds_load.temperature.values[0, 0, 0])
        assert ds_load.temperature.values[0, 0, 1] == 0

    def test_to_netcdf_encoding_popencoding(self):
        ds = self._make_example_ds(template_encoding)
        ds["temperature"].encoding.pop("_FillValue")
        ds_load = self._roundtrip(ds)
        # here NaNs are replaced by 0, which is the wrong fill value
        # (i.e. the encoding fillvalue should not be removed)
        assert ds_load.temperature.values[0, 0, 0] == 0
        assert ds_load.temperature.values[0, 0, 1] == 0

    def test_to_netcdf_popattr(self):
        # removing the _FillValue *attribute* (not the encoding) is harmless
        ds = self._make_example_ds(template)
        ds["temperature"].attrs.pop("_FillValue")
        ds_load = self._roundtrip(ds)
        assert np.isnan(ds_load.temperature.values[0, 0, 0])
        assert ds_load.temperature.values[0, 0, 1] == 0

    def test_to_netcdf(self):
        # plain round trip without touching encodings or attributes
        ds_load = self._roundtrip(self._make_example_ds(template))
        assert np.isnan(ds_load.temperature.values[0, 0, 0])
        assert ds_load.temperature.values[0, 0, 1] == 0
149 |
150 |
# allow running this test module directly with "python test_fillvalues.py"
if __name__ == "__main__":
    unittest.main()
153 |
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/test_l0.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/test_l0.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/test_l1.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/digital_effects_table/tests/test_l1.nc
--------------------------------------------------------------------------------
/punpy/digital_effects_table/tests/test_measurement_function.py:
--------------------------------------------------------------------------------
"""
Tests for the MeasurementFunction (digital effects table) interface.
"""

import os.path
import unittest

import numpy as np
import numpy.testing as npt
import xarray as xr

# seed the global RNG before punpy is imported so the MC draws in these
# tests are reproducible
np.random.seed(2222)

from punpy import MeasurementFunction, MCPropagation

"""___Authorship___"""
__author__ = "Pieter De Vis"
__created__ = "28/07/2021"
__maintainer__ = "Pieter De Vis"
__email__ = "pieter.de.vis@npl.co.uk"
__status__ = "Development"
22 |
23 |
class HypernetsMF(MeasurementFunction):
    """Measurement function converting HYPERNETS L0 digital numbers to
    (ir)radiance: dark subtraction, non-linearity correction, gains and
    integration-time normalisation."""

    def setup(self, min_value):
        # Value substituted for zero dark-corrected counts, so the
        # non-linearity division below never divides a zero numerator.
        self.min_value = min_value

    def meas_function(self, digital_number, gains, dark_signal, non_linear, int_time):
        counts = digital_number - dark_signal
        counts[counts == 0] = self.min_value

        # Non-linearity correction: divide by a degree-7 polynomial in the
        # dark-corrected counts (coefficients ordered constant-first).
        # Terms are accumulated in the same order as the original explicit
        # sum, so float results are identical.
        denominator = non_linear[0]
        for order in range(1, 8):
            denominator = denominator + non_linear[order] * counts**order
        corrected_counts = counts / denominator

        if gains.ndim == 1:
            # broadcast 1D gains over the scan dimension
            return gains[:, None] * corrected_counts / int_time * 1000
        return gains * corrected_counts / int_time * 1000

    def get_argument_names(self):
        # names of the dataset variables matching the meas_function arguments
        return [
            "digital_number",
            "gains",
            "dark_signal",
            "non_linearity_coefficients",
            "integration_time",
        ]
54 |
55 |
# load the HYPERNETS example calibration / L0 digital effects tables and the
# reference L1 product used for comparison in the tests below
dir_path = os.path.dirname(os.path.realpath(__file__))
calib_data = xr.open_dataset(os.path.join(dir_path, "det_hypernets_cal.nc"))
L0data = xr.open_dataset(os.path.join(dir_path, "det_hypernets_l0.nc"))
L1data = xr.open_dataset(os.path.join(dir_path, "test_l1.nc"))
60 |
61 |
62 | # Define your measurement function inside a subclass of MeasurementFunction
class IdealGasLaw(MeasurementFunction):
    """Volume from the ideal gas law, V = n*R*T/p.

    NOTE(review): the constant 8.134 differs from the CODATA molar gas
    constant (8.314...), but the expected values in these tests are computed
    with the same number, so it is deliberately left unchanged.
    """

    @staticmethod
    def meas_function(pres, temp, n):
        numerator = n * temp * 8.134
        return numerator / pres
67 |
68 |
69 | # Define your measurement function inside a subclass of MeasurementFunction
class IdealGasLaw_R(MeasurementFunction):
    """Ideal gas law with the molar gas constant R supplied as an input
    quantity (so its uncertainty can be propagated as well)."""

    @staticmethod
    def meas_function(pres, temp, n, R):
        numerator = n * temp * R
        return numerator / pres
74 |
75 |
76 | # Define your measurement function inside a subclass of MeasurementFunction
class IdealGasLaw_2out(MeasurementFunction):
    """Variant returning two outputs on the same grid: the volume and the
    pressure/temperature ratio."""

    def meas_function(self, pres, temp, n):
        volume = (n * temp * 8.134) / pres
        pressure_over_temp = pres / temp
        return volume, pressure_over_temp
80 |
81 |
82 | # Define your measurement function inside a subclass of MeasurementFunction
class IdealGasLaw_2out_diffdim(MeasurementFunction):
    """Two outputs with different dimensions: the volume on the full grid,
    and p/T averaged over the first axis (dropping that dimension)."""

    def meas_function(self, pres, temp, n):
        volume = (n * temp * 8.134) / pres
        ratio_mean = np.mean(pres / temp, axis=0)
        return volume, ratio_mean
86 |
87 |
# gas-law example DET and the analytically expected outputs
# (NOTE(review): dir_path is recomputed here; it is identical to the value
# computed above)
dir_path = os.path.dirname(os.path.realpath(__file__))
ds = xr.open_dataset(os.path.join(dir_path, "digital_effects_table_gaslaw_example.nc"))

# expected volume: V = n*T*8.134/p with n=40, T=293 K, p=1e5 Pa -> 0.9533
volume = np.ones(ds["temperature"].values.shape) * 0.9533
# total relative uncertainty: quadrature sum of the pressure (10/1e5),
# random temperature (1/293), systematic temperature (0.4/293) and
# n_moles (1/40) contributions
u_tot_volume = (
    np.ones(ds["temperature"].values.shape)
    * 0.9533
    * ((1 / 10000) ** 2 + (1 / 293) ** 2 + (0.4 / 293) ** 2 + (1 / 40) ** 2) ** 0.5
)

# random-only contribution (random temperature + n_moles)
u_ran_volume = (
    np.ones(ds["temperature"].values.shape)
    * 0.9533
    * ((1 / 293) ** 2 + (1 / 40) ** 2) ** 0.5
)
# systematic temperature contribution
u_sys_volume = np.ones(ds["temperature"].values.shape) * 0.9533 * (0.4 / 293)
# structured pressure contribution
u_str_volume = np.ones(ds["temperature"].values.shape) * 0.9533 * (1 / 10000)
105 |
106 |
107 | class TestMeasurementFunction(unittest.TestCase):
108 | """
109 | Class for unit tests
110 | """
111 |
112 | def test_gaslaw(self):
113 |
114 | prop = MCPropagation(1000, dtype="float32", verbose=False, parallel_cores=4)
115 |
116 | gl = IdealGasLaw(
117 | prop,
118 | ["pressure", "temperature", "n_moles"],
119 | yvariable="volume",
120 | yunit="m^3",
121 | )
122 | ds_y_tot = gl.propagate_ds_total(ds)
123 |
124 | npt.assert_(
125 | ds_y_tot["u_tot_volume"].attrs["err_corr_1_params"][0]
126 | in list(ds_y_tot.variables)
127 | )
128 |
129 | npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)
130 |
131 | npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.1)
132 |
133 | prop = MCPropagation(1000, dtype="float32", verbose=False, parallel_cores=1)
134 |
135 | gl = IdealGasLaw(
136 | prop,
137 | ["pressure", "temperature", "n_moles"],
138 | yvariable="volume",
139 | yunit="m^3",
140 | use_err_corr_dict=True,
141 | )
142 | ds_y_tot = gl.propagate_ds_total(ds)
143 |
144 | prop = MCPropagation(3000, dtype="float32", verbose=False)
145 | gl = IdealGasLaw(
146 | prop,
147 | ["pressure", "temperature", "n_moles"],
148 | yvariable="volume",
149 | yunit="m^3",
150 | repeat_dims=[0, 2],
151 | )
152 | ds_y = gl.propagate_ds(ds)
153 |
154 | npt.assert_(
155 | ds_y["u_str_volume"].attrs["err_corr_3_params"][0] in list(ds_y.variables)
156 | )
157 |
158 | npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
159 | npt.assert_allclose(ds_y["u_ran_volume"].values, u_ran_volume, rtol=0.08)
160 | npt.assert_allclose(ds_y["u_sys_volume"].values, u_sys_volume, rtol=0.08)
161 | npt.assert_allclose(ds_y["u_str_volume"].values, u_str_volume, rtol=0.08)
162 | npt.assert_allclose(ds_y["u_str_volume"].values, u_str_volume, rtol=0.08)
163 | npt.assert_allclose(
164 | ds_y.unc["volume"].total_unc().values, u_tot_volume, rtol=0.12
165 | )
166 |
167 | gl = IdealGasLaw(
168 | prop,
169 | ["pressure", "temperature", "n_moles"],
170 | uncxvariables=["pressure"],
171 | yvariable="volume",
172 | yunit="m^3",
173 | repeat_dims=[0, 2],
174 | )
175 | ds_y = gl.propagate_ds(ds)
176 |
177 | npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
178 |
    def test_gaslaw_scalar(self):
        """Gas law with a scalar input quantity (R) propagated alongside arrays."""
        prop = MCPropagation(100, dtype="float32", verbose=True, parallel_cores=1)

        gl = IdealGasLaw_R(
            prop,
            ["pressure", "temperature", "n_moles", "R"],
            yvariable="volume",
            yunit="m^3",
        )
        ds_y_tot = gl.propagate_ds_total(ds)
        # only 100 MC samples, hence the looser tolerance than the other tests
        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.03)
190 |
    def test_gaslaw_2out(self):
        """Measurement function with two outputs sharing the same dimensions."""
        prop = MCPropagation(1000, dtype="float32", verbose=True)

        gl = IdealGasLaw_2out(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable=["volume", "P/T"],
            yunit=["m^3", "Pa/K"],
        )
        ds_y_tot = gl.propagate_ds_total(ds)

        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)
        # full error-correlation matrices over the flattened 20*30*6 grid
        npt.assert_equal(ds_y_tot["err_corr_tot_volume"].values.shape, (3600, 3600))
        npt.assert_equal(ds_y_tot["err_corr_tot_P/T"].values.shape, (3600, 3600))

        npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.1)
207 |
    def test_gaslaw_2out_diffdim(self):
        """Two outputs with different dimensions, exercising ydims and the
        various corr_dims options (per-output, named, and combined dims)."""
        prop = MCPropagation(100, dtype="float32", verbose=False, parallel_cores=1)

        gl = IdealGasLaw_2out_diffdim(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable=["volume", "P/T"],
            yunit=["m^3", "Pa/K"],
            ydims=[["x", "y", "time"], ["y", "time"]],
        )
        ds_y = gl.propagate_ds(ds)

        npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
        # err_corr over the full grid for volume (3600) and the reduced
        # y*time grid for P/T (180)
        npt.assert_equal(ds_y["err_corr_str_volume"].values.shape, (3600, 3600))
        npt.assert_equal(ds_y["err_corr_str_P/T"].values.shape, (180, 180))

        # separate corr_dims per output: only the x dimension for volume
        gl = IdealGasLaw_2out_diffdim(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable=["volume", "P/T"],
            yunit=["m^3", "Pa/K"],
            ydims=[["x", "y", "time"], ["y", "time"]],
            corr_dims=["x", None],
            separate_corr_dims=True,
        )
        ds_y = gl.propagate_ds(ds)

        npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
        npt.assert_equal(ds_y["err_corr_str_volume_x"].values.shape, (20, 20))

        # single shared corr_dim for all outputs
        gl = IdealGasLaw_2out_diffdim(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable=["volume", "P/T"],
            yunit=["m^3", "Pa/K"],
            ydims=[["x", "y", "time"], ["y", "time"]],
            corr_dims=["x"],
        )
        ds_y = gl.propagate_ds(ds)

        npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
        npt.assert_equal(ds_y["err_corr_str_volume_x"].values.shape, (20, 20))

        # combined dimension syntax "y.time" (y and time treated as one
        # flattened correlation dimension)
        gl = IdealGasLaw_2out_diffdim(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable=["volume", "P/T"],
            yunit=["m^3", "Pa/K"],
            ydims=[["x", "y", "time"], ["y", "time"]],
            corr_dims=["x", "y.time"],
        )
        ds_y_tot = gl.propagate_ds_total(ds)

        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)
        npt.assert_equal(ds_y_tot["err_corr_tot_volume_x"].values.shape, (20, 20))
        npt.assert_equal(
            ds_y_tot["err_corr_tot_volume_y.time"].values.shape, (180, 180)
        )
        npt.assert_equal(ds_y_tot["err_corr_tot_P/T_y.time"].values.shape, (180, 180))

        npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.4)
269 |
    def test_gaslaw_errcorrdict(self):
        """Propagation with use_err_corr_dict=True, compared against the
        same analytic expectations as test_gaslaw."""
        prop = MCPropagation(1000, dtype="float32", verbose=False)

        gl = IdealGasLaw(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable="volume",
            yunit="m^3",
            use_err_corr_dict=True,
        )
        ds_y_tot = gl.propagate_ds_total(ds)

        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)

        npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.1)

        # per-component propagation with repeated dims (more MC samples,
        # hence the tighter tolerances below)
        prop = MCPropagation(3000, dtype="float32", verbose=False)
        gl = IdealGasLaw(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable="volume",
            yunit="m^3",
            repeat_dims=[0, 2],
        )
        ds_y = gl.propagate_ds(ds)

        npt.assert_allclose(ds_y["volume"].values, volume, rtol=0.002)
        npt.assert_allclose(ds_y["u_ran_volume"].values, u_ran_volume, rtol=0.06)
        npt.assert_allclose(ds_y["u_sys_volume"].values, u_sys_volume, rtol=0.06)
        npt.assert_allclose(ds_y["u_str_volume"].values, u_str_volume, rtol=0.07)
        npt.assert_allclose(
            ds_y.unc["volume"].total_unc().values, u_tot_volume, rtol=0.06
        )
303 |
    def test_gaslaw_corrdim(self):
        """Exercise corr_dims given as integer-index combinations ("0.1")
        and as named-dimension combinations ("x.y")."""
        prop = MCPropagation(1000, dtype="float32", verbose=False)

        # dimensions 0 and 1 combined, dimension 2 separate (index form)
        gl = IdealGasLaw(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable="volume",
            yunit="m^3",
            corr_dims=["0.1", 2],
        )
        ds_y_tot = gl.propagate_ds_total(ds)

        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)

        npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.1)

        # same combination expressed with dimension names
        gl = IdealGasLaw(
            prop,
            ["pressure", "temperature", "n_moles"],
            yvariable="volume",
            yunit="m^3",
            corr_dims=["x.y", "time"],
        )
        ds_y_tot = gl.propagate_ds_total(ds)

        npt.assert_allclose(ds_y_tot["volume"].values, volume, rtol=0.002)

        npt.assert_allclose(ds_y_tot["u_tot_volume"].values, u_tot_volume, rtol=0.1)
332 |
333 | def test_hypernets(self):
334 | prop = MCPropagation(3000, dtype="float32", parallel_cores=1, verbose=False)
335 |
336 | hmf = HypernetsMF(
337 | prop=prop,
338 | xvariables=[
339 | "digital_number",
340 | "gains",
341 | "dark_signal",
342 | "non_linearity_coefficients",
343 | "integration_time",
344 | ],
345 | yvariable="irradiance",
346 | yunit="W m^-2",
347 | corr_between=None,
348 | param_fixed=None,
349 | )
350 |
351 | hmf.setup(0.1)
352 | y = hmf.run(calib_data, L0data.variables, L0data)
353 | u_y_rand = hmf.propagate_random(L0data, calib_data)
354 | print(u_y_rand)
355 | # print(u_y_rand,L0data)
356 | mask = np.where(
357 | (
358 | (L1data["wavelength"].values < 1350)
359 | | (L1data["wavelength"].values > 1450)
360 | )
361 | )
362 |
363 | u_y_syst_indep = hmf.propagate_specific("systematic_indep", L0data, calib_data)
364 | u_y_syst_corr = hmf.propagate_specific(
365 | "u_rel_systematic_corr_rad_irr", L0data, calib_data
366 | )
367 |
368 | u_y_syst = (u_y_syst_indep**2 + u_y_syst_corr**2) ** 0.5
369 | u_y_tot = (u_y_syst_indep**2 + u_y_syst_corr**2 + u_y_rand**2) ** 0.5
370 |
371 | ds_tot = hmf.propagate_ds_total(L0data, calib_data, store_unc_percent=True)
372 | mask = np.where(
373 | (np.isfinite(u_y_tot / y) & np.isfinite(ds_tot["u_rel_tot_irradiance"]))
374 | )[0]
375 | npt.assert_allclose(
376 | ds_tot["u_rel_tot_irradiance"][mask],
377 | u_y_tot[mask] / y[mask] * 100,
378 | rtol=0.07,
379 | atol=0.07,
380 | )
381 |
    def test_hypernets_expand(self):
        """Check the effect of the ``expand`` option on the output error
        correlation: expanded broadcast correlation gives (near-)identity
        blocks, unexpanded gives fully-correlated blocks."""
        prop = MCPropagation(3000, dtype="float32", parallel_cores=1, verbose=False)

        hmf = HypernetsMF(
            prop=prop,
            xvariables=[
                "digital_number",
                "gains",
                "dark_signal",
                "non_linearity_coefficients",
                "integration_time",
            ],
            uncxvariables=["gains"],
            yvariable="irradiance",
            yunit="W m^-2",
            corr_between=None,
            param_fixed=[False, False, False, True, False],
            broadcast_correlation="rand",
        )

        hmf.setup(0.1)

        # expand=True: random broadcast correlation -> identity-like block
        ds_tot = hmf.propagate_ds_total(
            L0data, calib_data, store_unc_percent=True, expand=True
        )
        npt.assert_allclose(
            ds_tot["err_corr_tot_irradiance"].values[:9, :9], np.eye(9), atol=0.07
        )

        # expand=False: fully correlated block of ones
        ds_tot = hmf.propagate_ds_total(
            L0data, calib_data, store_unc_percent=True, expand=False
        )
        npt.assert_allclose(
            ds_tot["err_corr_tot_irradiance"].values[:9, :9], np.ones((9, 9)), atol=0.07
        )
417 |
418 | def test_hypernets_repeat_dim(self):
419 | prop = MCPropagation(3000, dtype="float32", parallel_cores=0, verbose=False)
420 |
421 | hmf = HypernetsMF(
422 | prop=prop,
423 | yvariable="irradiance",
424 | yunit="W m^-2",
425 | corr_between=None,
426 | repeat_dims="scan",
427 | corr_dims=-99,
428 | )
429 | hmf.setup(0.1)
430 | y = hmf.run(calib_data, L0data)
431 | u_y_rand = hmf.propagate_random(L0data, calib_data)
432 | # print(list(L1data.variables))
433 | mask = np.where(
434 | (
435 | (L1data["wavelength"].values < 1350)
436 | | (L1data["wavelength"].values > 1450)
437 | )
438 | )
439 |
440 | npt.assert_allclose(L1data["irradiance"].values, y, rtol=0.03)
441 |
442 | npt.assert_allclose(
443 | L1data["u_rel_random_irradiance"].values[mask][
444 | np.where(np.isfinite(u_y_rand[mask]))
445 | ],
446 | (u_y_rand[mask] / y[mask] * 100)[np.where(np.isfinite(u_y_rand[mask]))],
447 | rtol=0.03,
448 | atol=0.2,
449 | )
450 |
451 | ds_all = hmf.propagate_ds_all(L0data, calib_data, store_unc_percent=True)
452 | ds_main = hmf.propagate_ds(L0data, calib_data, store_unc_percent=True)
453 | ds_spec = hmf.propagate_ds_specific(
454 | ["random", "systematic_indep", "systematic_corr_rad_irr"],
455 | L0data,
456 | calib_data,
457 | store_unc_percent=True,
458 | )
459 |
460 | ds_main.to_netcdf("propagate_ds_example.nc")
461 |
462 | u_y_syst_indep = hmf.propagate_specific(
463 | "u_rel_systematic_indep", L0data, calib_data
464 | )
465 | u_y_syst_corr = hmf.propagate_specific(
466 | "u_rel_systematic_corr_rad_irr", L0data, calib_data, return_corr=False
467 | )
468 |
469 | u_y_syst = (u_y_syst_indep**2 + u_y_syst_corr**2) ** 0.5
470 | u_y_tot = (u_y_syst_indep**2 + u_y_syst_corr**2 + u_y_rand**2) ** 0.5
471 |
472 | npt.assert_allclose(
473 | L1data["u_rel_systematic_indep_irradiance"].values[mask],
474 | u_y_syst_indep[mask] / y[mask] * 100,
475 | rtol=0.03,
476 | atol=0.2,
477 | )
478 |
479 | npt.assert_allclose(
480 | L1data["u_rel_systematic_corr_rad_irr_irradiance"].values[mask],
481 | u_y_syst_corr[mask] / y[mask] * 100,
482 | rtol=0.03,
483 | atol=0.3,
484 | )
485 |
486 | npt.assert_allclose(
487 | L1data["u_rel_systematic_indep_irradiance"].values[mask],
488 | ds_spec["u_rel_systematic_indep_irradiance"].values[mask],
489 | rtol=0.03,
490 | atol=0.2,
491 | )
492 |
493 | npt.assert_allclose(
494 | L1data["u_rel_systematic_corr_rad_irr_irradiance"].values[mask],
495 | ds_spec["u_rel_systematic_corr_rad_irr_irradiance"].values[mask],
496 | rtol=0.03,
497 | atol=0.3,
498 | )
499 |
500 | npt.assert_allclose(
501 | L1data["u_rel_systematic_indep_irradiance"].values[mask],
502 | ds_all["u_rel_systematic_indep_irradiance"].values[mask],
503 | rtol=0.03,
504 | atol=0.2,
505 | )
506 |
507 | npt.assert_allclose(
508 | L1data["u_rel_systematic_corr_rad_irr_irradiance"].values[mask],
509 | ds_all["u_rel_systematic_corr_rad_irr_irradiance"].values[mask],
510 | rtol=0.03,
511 | atol=0.3,
512 | )
513 | # plt.plot(
514 | # L1data["wavelength"][mask],
515 | # ds_main["u_rel_str_irradiance"][mask] - (u_y_syst[mask] / y[mask]),
516 | # "r-",
517 | # )
518 | # plt.show()
519 |
520 | npt.assert_allclose(
521 | ds_main["u_rel_ran_irradiance"][mask],
522 | u_y_rand[mask] / y[mask] * 100,
523 | rtol=0.03,
524 | atol=0.2,
525 | )
526 | npt.assert_allclose(
527 | ds_main["u_rel_str_irradiance"][mask],
528 | u_y_syst[mask] / y[mask] * 100,
529 | rtol=0.03,
530 | atol=0.2,
531 | )
532 |
533 |
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
536 |
--------------------------------------------------------------------------------
/punpy/lpu/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/lpu/__init__.py
--------------------------------------------------------------------------------
/punpy/lpu/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/lpu/tests/__init__.py
--------------------------------------------------------------------------------
/punpy/main.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/main.py
--------------------------------------------------------------------------------
/punpy/mc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/mc/__init__.py
--------------------------------------------------------------------------------
/punpy/mc/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/mc/tests/__init__.py
--------------------------------------------------------------------------------
/punpy/utilities/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/utilities/__init__.py
--------------------------------------------------------------------------------
/punpy/utilities/correlation_forms.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | """___Authorship___"""
4 | __author__ = "Pieter De Vis"
5 | __created__ = "30/03/2019"
6 | __maintainer__ = "Pieter De Vis"
7 | __email__ = "pieter.de.vis@npl.co.uk"
8 | __status__ = "Development"
9 |
10 |
def bell_relative(len, n, sigma=None):
    """
    Build a (len x len) error-correlation matrix with a Gaussian ("bell"-shaped)
    fall-off between neighbouring elements.

    Elements separated by fewer than n indices are assigned correlation
    exp(-0.5*(d/sigma)**2) for separation d; elements further apart are
    uncorrelated (0). The diagonal is always 1.

    :param len: dimension of the (square) correlation matrix
        (NOTE: shadows the builtin ``len``; name kept for backwards compatibility)
    :param n: width of the correlated band (separations >= n are set to 0)
    :param sigma: width of the Gaussian; defaults to (n/2 - 1)/sqrt(3)
    :return: correlation matrix
    """
    if sigma is None:
        sigma = (n / 2 - 1) / 3**0.5
    # |i-j| separation between every pair of indices (vectorised replacement
    # of the previous per-diagonal python loop)
    dist = np.abs(np.arange(len)[:, None] - np.arange(len)[None, :])
    # inside the band: Gaussian fall-off; outside: zero.
    # (the dist == 0 term keeps the diagonal at 1 even when n == 0)
    return np.where(
        (dist < n) | (dist == 0), np.exp(-0.5 * (dist / sigma) ** 2), 0.0
    )
21 |
22 |
def triangular_relative(len, n):
    """
    Build a (len x len) error-correlation matrix with a triangular fall-off:
    correlation (n - d)/n for index separation d < n, and 0 beyond.
    The diagonal is always 1.

    :param len: dimension of the (square) correlation matrix
        (NOTE: shadows the builtin ``len``; name kept for backwards compatibility)
    :param n: width of the correlated band
    :return: correlation matrix
    """
    if n < 1:
        # degenerate band width: only the diagonal is correlated
        # (matches the previous loop, which did not run for n == 0,
        # and avoids a division by zero below)
        return np.eye(len)
    # |i-j| separation between every pair of indices (vectorised replacement
    # of the previous per-diagonal python loop)
    dist = np.abs(np.arange(len)[:, None] - np.arange(len)[None, :])
    return np.where(dist < n, (n - dist) / n, 0.0)
31 |
--------------------------------------------------------------------------------
/punpy/utilities/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/punpy/utilities/tests/__init__.py
--------------------------------------------------------------------------------
/punpy/utilities/tests/test_utilities.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for mc propagation class
3 | """
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | import numpy.testing as npt
9 |
10 | import punpy.utilities.utilities as util
11 |
12 | """___Authorship___"""
13 | __author__ = "Pieter De Vis"
14 | __created__ = "14/4/2020"
15 | __maintainer__ = "Pieter De Vis"
16 | __email__ = "pieter.de.vis@npl.co.uk"
17 | __status__ = "Development"
18 |
19 |
def function(x1, x2):
    """Quadratic test measurement function: y = x1**2 - 10*x2 + 30."""
    quad_term = x1**2
    return quad_term - 10 * x2 + 30.0
22 |
23 |
# Inputs for `function`: 200 repeated measurements with uncertainties
# u(x1) = 1 and u(x2) = 2.
x1 = np.ones(200) * 10
x2 = np.ones(200) * 30
x1err = np.ones(200)
x2err = 2 * np.ones(200)

xs = [x1, x2]
xerrs = [x1err, x2err]

# below, the higher order Taylor expansion terms have been taken into account, and amount to 2.
# uncorrelated: (df/dx1*u1)**2 + (df/dx2*u2)**2 = (2*10*1)**2 + (10*2)**2 = 800, +2 -> 802
# fully correlated: (2*10*1 - 10*2)**2 = 0, +2 -> 2
yerr_uncorr = 802**0.5 * np.ones(200)
yerr_corr = 2**0.5 * np.ones(200)
35 |
36 |
def functionb(x1, x2):
    """Linear test measurement function: y = 2*x1 - x2."""
    doubled = 2 * x1
    return doubled - x2
39 |
40 |
# Inputs for `functionb`: 20x30 grids with uncertainties u(x1) = 1, u(x2) = 2.
x1b = np.ones((20, 30)) * 50
x2b = np.ones((20, 30)) * 30
x1errb = np.ones((20, 30))
x2errb = 2 * np.ones((20, 30))

xsb = np.array([x1b, x2b])
xerrsb = np.array([x1errb, x2errb])

# uncorrelated: (2*1)**2 + (1*2)**2 = 8; fully correlated: (2*1 - 1*2)**2 = 0
yerr_uncorrb = 8**0.5 * np.ones((20, 30))
yerr_corrb = np.zeros((20, 30))
52 |
def functionc(x1, x2, x3):
    """Linear three-input test measurement function: y = x1 + 4*x2 - 2*x3."""
    partial_sum = x1 + 4 * x2
    return partial_sum - 2 * x3
55 |
56 |
# Inputs for `functionc`: u(x1) = 12, u(x2) = 5, u(x3) = 0, with x1 and x2
# (almost) fully correlated through corr_c.
x1c = np.ones(200) * 10
x2c = np.ones(200) * 10
x3c = np.ones(200) * 10

x1errc = 12 * np.ones(200)
x2errc = 5 * np.ones(200)
x3errc = np.zeros(200)

xsc = np.array([x1c, x2c, x3c])
xerrsc = np.array([x1errc, x2errc, x3errc])
# near-unity correlation between x1 and x2; x3 independent
corr_c = np.array([[1, 0.9999999, 0], [0.99999999, 1.0, 0], [0.0, 0.0, 1.0]])
# uncorrelated: 12**2 + (4*5)**2 = 544; x1-x2 correlated: (12 + 4*5)**2 = 1024
yerr_uncorrc = 544**0.5 * np.ones(200)
yerr_corrc = 1024**0.5 * np.ones(200)
70 |
71 |
def functiond(x1, x2):
    """Two-output linear test function: y1 = 2*x1 - x2, y2 = 2*x1 + x2."""
    doubled = 2 * x1
    return doubled - x2, doubled + x2
74 |
75 |
# Inputs for `functiond` (two output quantities): 3D arrays with
# uncertainties u(x1) = 1, u(x2) = 2.
x1d = np.ones((20, 3, 4)) * 50
x2d = np.ones((20, 3, 4)) * 30
x1errd = np.ones((20, 3, 4))
x2errd = 2 * np.ones((20, 3, 4))

xsd = [x1d, x2d]
xerrsd = [x1errd, x2errd]
# full correlation between the two input quantities
corr_d = np.ones(
    (2, 2)
)  # np.array([[1,0.9999999,0.9999999],[0.99999999,1.,0.99999999],[0.9999999,0.9999999,1.]])

# uncorrelated: (2*1)**2 + (1*2)**2 = 8 for both outputs;
# fully correlated: (2-2)**2 = 0 for output 1 and (2+2)**2 = 16 for output 2
yerr_uncorrd = [
    np.array(8**0.5 * np.ones((20, 3, 4))),
    np.array(8**0.5 * np.ones((20, 3, 4))),
]
yerr_corrd = [np.zeros((20, 3, 4)), 16**0.5 * np.ones((20, 3, 4))]
92 |
93 |
class TestUtilities(unittest.TestCase):
    """Unit tests for the punpy.utilities.utilities helper functions."""

    def test_select_repeated_x(self):
        """select_repeated_x keeps fixed (scalar) inputs whole and selects
        the requested repeat index from array inputs."""
        mixed_xs = [x1b, 1.0]
        mixed_u_xs = [x1errb, 2.0]

        xs_sel, u_xs_sel = util.select_repeated_x(
            mixed_xs, mixed_u_xs, [False, True], 0, [0], x1b.shape
        )

        # the array input is sliced at repeat index 0 along axis 0
        npt.assert_allclose(xs_sel[0], x1b[0], atol=0.06)
        npt.assert_allclose(u_xs_sel[0], x1errb[0], atol=0.06)
        # the fixed scalar input is passed through unchanged
        npt.assert_allclose(xs_sel[1], 1, atol=0.06)
        npt.assert_allclose(u_xs_sel[1], 2.0, atol=0.06)
108 |
109 |
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
112 |
--------------------------------------------------------------------------------
/punpy/utilities/utilities.py:
--------------------------------------------------------------------------------
1 | """Use Monte Carlo to propagate uncertainties"""
2 |
3 | import numpy as np
4 | import comet_maths as cm
5 |
6 | """___Authorship___"""
7 | __author__ = "Pieter De Vis"
8 | __created__ = "30/03/2019"
9 | __maintainer__ = "Pieter De Vis"
10 | __email__ = "pieter.de.vis@npl.co.uk"
11 | __status__ = "Development"
12 |
13 |
def select_repeated_x(x, u_x, param_fixed, i, repeat_dims, repeat_shape):
    """
    Select one (index i) of multiple repeated entries and return the input quantities and uncertainties for that entry.

    :param x: list of input quantities (usually numpy arrays)
    :type x: list[array]
    :param u_x: list of uncertainties/covariances on input quantities (usually numpy arrays)
    :type u_x: list[array]
    :param param_fixed: when repeat_dims>=0, set to true or false to indicate for each input quantity whether it has repeated measurements that should be split (param_fixed=False) or whether the input is fixed (param fixed=True), defaults to None (no inputs fixed).
    :type param_fixed: list of bools, optional
    :param i: index of the repeated measurement
    :type i: int
    :param repeat_dims: dimension along which the measurements are repeated
    :type repeat_dims: int
    :param repeat_shape: shape of measurements along which to select repeats
    :type repeat_shape: tuple
    :return: list of input quantities, list of uncertainties for single measurement
    :rtype: list[array]. list[array]
    """
    xb = np.zeros(len(x), dtype=object)
    u_xb = np.zeros(len(u_x), dtype=object)
    # multi-dimensional index of the i-th repeated measurement within
    # repeat_shape; loop-invariant, so computed once instead of per input
    index = list(np.ndindex(repeat_shape))[i]
    for j in range(len(x)):
        if param_fixed is not None and param_fixed[j]:
            # fixed input quantity: pass through unchanged
            xb[j] = x[j]
            u_xb[j] = u_x[j]
            continue
        xb[j] = x[j]
        u_xb[j] = u_x[j]
        for idim in range(len(repeat_dims)):
            repeat_axis = repeat_dims[idim]
            ii = index[idim]
            if len(xb[j].shape) > repeat_axis:
                # build a slice that picks index ii along repeat_axis and
                # keeps all other axes whole
                sli = tuple(
                    ii if (ax == repeat_axis) else slice(None)
                    for ax in range(xb[j].ndim)
                )
                xb[j] = xb[j][sli]
                u_xb[j] = u_xb[j][sli]
            elif len(xb[j]) > 1:
                try:
                    xb[j] = xb[j][ii]
                    u_xb[j] = u_xb[j][ii]
                except Exception:
                    # best-effort: leave the input unsliced if it cannot
                    # be indexed along the repeat dimension
                    pass

    return xb, u_xb
66 |
67 |
def convert_corr_to_cov(corr: np.ndarray, u: np.ndarray) -> np.ndarray:
    """
    Convert correlation matrix to covariance matrix (uses comet_maths.convert_corr_to_cov())

    :param corr: correlation matrix
    :param u: uncertainties
    :return: covariance matrix
    """
    # Thin wrapper kept for backwards compatibility; the implementation
    # lives in the comet_maths package.
    return cm.convert_corr_to_cov(corr, u)
77 |
78 |
def convert_cov_to_corr(cov: np.ndarray, u: np.ndarray) -> np.ndarray:
    """
    Convert covariance matrix to correlation matrix (uses comet_maths.convert_cov_to_corr())

    :param cov: covariance matrix
    :param u: uncertainties
    :return: correlation matrix
    """
    # Thin wrapper kept for backwards compatibility; the implementation
    # lives in the comet_maths package.
    return cm.convert_cov_to_corr(cov, u)
88 |
89 |
def correlation_from_covariance(covariance: np.ndarray) -> np.ndarray:
    """
    Convert covariance matrix to correlation matrix (uses comet_maths.correlation_from_covariance())

    :param covariance: Covariance matrix
    :return: Correlation matrix
    """
    # Thin wrapper kept for backwards compatibility; the implementation
    # lives in the comet_maths package.
    return cm.correlation_from_covariance(covariance)
98 |
99 |
def uncertainty_from_covariance(covariance: np.ndarray) -> np.ndarray:
    """
    Convert covariance matrix to uncertainty (uses comet_maths.uncertainty_from_covariance())

    :param covariance: Covariance matrix
    :return: uncertainties
    """
    # Thin wrapper kept for backwards compatibility; the implementation
    # lives in the comet_maths package.
    return cm.uncertainty_from_covariance(covariance)
108 |
--------------------------------------------------------------------------------
/quality_documentation/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.toptal.com/developers/gitignore/api/latex
3 | # Edit at https://www.toptal.com/developers/gitignore?templates=latex
4 |
5 | ### LaTeX ###
6 | ## Core latex/pdflatex auxiliary files:
7 | *.aux
8 | *.lof
9 | *.log
10 | *.lot
11 | *.fls
12 | *.out
13 | *.toc
14 | *.fmt
15 | *.fot
16 | *.cb
17 | *.cb2
18 | .*.lb
19 |
20 | ## Intermediate documents:
21 | *.dvi
22 | *.xdv
23 | *-converted-to.*
24 | # these rules might exclude image files for figures etc.
25 | # *.ps
26 | # *.eps
27 | # *.pdf
28 |
29 | ## Generated if empty string is given at "Please type another file name for output:"
30 | .pdf
31 |
32 | ## Bibliography auxiliary files (bibtex/biblatex/biber):
33 | *.bbl
34 | *.bcf
35 | *.blg
36 | *-blx.aux
37 | *-blx.bib
38 | *.run.xml
39 |
40 | ## Build tool auxiliary files:
41 | *.fdb_latexmk
42 | *.synctex
43 | *.synctex(busy)
44 | *.synctex.gz
45 | *.synctex.gz(busy)
46 | *.pdfsync
47 |
48 | ## Build tool directories for auxiliary files
49 | # latexrun
50 | latex.out/
51 |
52 | ## Auxiliary and intermediate files from other packages:
53 | # algorithms
54 | *.alg
55 | *.loa
56 |
57 | # achemso
58 | acs-*.bib
59 |
60 | # amsthm
61 | *.thm
62 |
63 | # beamer
64 | *.nav
65 | *.pre
66 | *.snm
67 | *.vrb
68 |
69 | # changes
70 | *.soc
71 |
72 | # comment
73 | *.cut
74 |
75 | # cprotect
76 | *.cpt
77 |
78 | # elsarticle (documentclass of Elsevier journals)
79 | *.spl
80 |
81 | # endnotes
82 | *.ent
83 |
84 | # fixme
85 | *.lox
86 |
87 | # feynmf/feynmp
88 | *.mf
89 | *.mp
90 | *.t[1-9]
91 | *.t[1-9][0-9]
92 | *.tfm
93 |
94 | #(r)(e)ledmac/(r)(e)ledpar
95 | *.end
96 | *.?end
97 | *.[1-9]
98 | *.[1-9][0-9]
99 | *.[1-9][0-9][0-9]
100 | *.[1-9]R
101 | *.[1-9][0-9]R
102 | *.[1-9][0-9][0-9]R
103 | *.eledsec[1-9]
104 | *.eledsec[1-9]R
105 | *.eledsec[1-9][0-9]
106 | *.eledsec[1-9][0-9]R
107 | *.eledsec[1-9][0-9][0-9]
108 | *.eledsec[1-9][0-9][0-9]R
109 |
110 | # glossaries
111 | *.acn
112 | *.acr
113 | *.glg
114 | *.glo
115 | *.gls
116 | *.glsdefs
117 | *.lzo
118 | *.lzs
119 |
120 | # uncomment this for glossaries-extra (will ignore makeindex's style files!)
121 | # *.ist
122 |
123 | # gnuplottex
124 | *-gnuplottex-*
125 |
126 | # gregoriotex
127 | *.gaux
128 | *.gtex
129 |
130 | # htlatex
131 | *.4ct
132 | *.4tc
133 | *.idv
134 | *.lg
135 | *.trc
136 | *.xref
137 |
138 | # hyperref
139 | *.brf
140 |
141 | # knitr
142 | *-concordance.tex
143 | # TODO Comment the next line if you want to keep your tikz graphics files
144 | *.tikz
145 | *-tikzDictionary
146 |
147 | # listings
148 | *.lol
149 |
150 | # luatexja-ruby
151 | *.ltjruby
152 |
153 | # makeidx
154 | *.idx
155 | *.ilg
156 | *.ind
157 |
158 | # minitoc
159 | *.maf
160 | *.mlf
161 | *.mlt
162 | *.mtc[0-9]*
163 | *.slf[0-9]*
164 | *.slt[0-9]*
165 | *.stc[0-9]*
166 |
167 | # minted
168 | _minted*
169 | *.pyg
170 |
171 | # morewrites
172 | *.mw
173 |
174 | # nomencl
175 | *.nlg
176 | *.nlo
177 | *.nls
178 |
179 | # pax
180 | *.pax
181 |
182 | # pdfpcnotes
183 | *.pdfpc
184 |
185 | # sagetex
186 | *.sagetex.sage
187 | *.sagetex.py
188 | *.sagetex.scmd
189 |
190 | # scrwfile
191 | *.wrt
192 |
193 | # sympy
194 | *.sout
195 | *.sympy
196 | sympy-plots-for-*.tex/
197 |
198 | # pdfcomment
199 | *.upa
200 | *.upb
201 |
202 | # pythontex
203 | *.pytxcode
204 | pythontex-files-*/
205 |
206 | # tcolorbox
207 | *.listing
208 |
209 | # thmtools
210 | *.loe
211 |
212 | # TikZ & PGF
213 | *.dpth
214 | *.md5
215 | *.auxlock
216 |
217 | # todonotes
218 | *.tdo
219 |
220 | # vhistory
221 | *.hst
222 | *.ver
223 |
224 | # easy-todo
225 | *.lod
226 |
227 | # xcolor
228 | *.xcp
229 |
230 | # xmpincl
231 | *.xmpi
232 |
233 | # xindy
234 | *.xdy
235 |
236 | # xypic precompiled matrices and outlines
237 | *.xyc
238 | *.xyd
239 |
240 | # endfloat
241 | *.ttt
242 | *.fff
243 |
244 | # Latexian
245 | TSWLatexianTemp*
246 |
247 | ## Editors:
248 | # WinEdt
249 | *.bak
250 | *.sav
251 |
252 | # Texpad
253 | .texpadtmp
254 |
255 | # LyX
256 | *.lyx~
257 |
258 | # Kile
259 | *.backup
260 |
261 | # gummi
262 | .*.swp
263 |
264 | # KBibTeX
265 | *~[0-9]*
266 |
267 | # TeXnicCenter
268 | *.tps
269 |
270 | # auto folder when using emacs and auctex
271 | ./auto/*
272 | *.el
273 |
274 | # expex forward references with \gathertags
275 | *-tags.tex
276 |
277 | # standalone packages
278 | *.sta
279 |
280 | # Makeindex log files
281 | *.lpz
282 |
283 | # REVTeX puts footnotes in the bibliography by default, unless the nofootinbib
284 | # option is specified. Footnotes are the stored in a file with suffix Notes.bib.
285 | # Uncomment the next line to have this generated file ignored.
286 | #*Notes.bib
287 |
288 | ### LaTeX Patch ###
289 | # LIPIcs / OASIcs
290 | *.vtc
291 |
292 | # glossaries
293 | *.glstex
294 |
295 | # End of https://www.toptal.com/developers/gitignore/api/latex
296 |
--------------------------------------------------------------------------------
/quality_documentation/QualityDocumentation.tex:
--------------------------------------------------------------------------------
1 | %!TEX options = --shell-escape
2 | % This is a magic comment that tells LatexTools to enable shell-escape
3 | % If you're not using LatexTools to compile, you will need to enable shell escape on your editor
4 |
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %
7 | %
8 | % by Charles Baynham
9 | % 30 May, 2018
10 | %
11 | % Based on template by Charles Baynham,
12 | % modified from template by Ajeet Sandu
13 | % https://www.overleaf.com/latex/templates/requirements-specification-layout/vbrqbjpzcmfy
14 | %
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 |
17 | \documentclass{article}
18 |
19 | \usepackage{softwareRequirements}
20 | \usepackage{standalone}
21 | \usepackage{sphinx}
22 | \usepackage{pgffor}
23 | \usepackage{verbdef}
24 |
25 | \newcounter{pdfpages}
26 | \newcommand*{\getpdfpages}[1]{%
27 | \begingroup
28 | \sbox0{%
29 | \includegraphics{#1}%
30 | \setcounter{pdfpages}{\pdflastximagepages}%
31 | }%
32 | \endgroup
33 | }
34 |
35 | \input{base/CookiecutterMacros}
36 | \begin{document}
37 |
38 | \pagenumbering{roman}
39 | \DeclareGraphicsExtensions{.pdf,.jpg,.png}
40 |
41 | %% Table of contents
42 | \tableofcontents
43 |
44 | %% Version table insertion
45 | \input{base/history}
46 |
47 | \clearpage
48 | \pagestyle{long}
49 |
50 | \pagenumbering{arabic}
51 |
52 | % The content:
53 | \graphicspath{{uml/}{latex/}}
54 |
55 | \section{Introduction}\label{introduction}
56 |
This is the quality documentation for the \packagename\ software package.
\packagedescription\ The \packagename\ package is hosted on \packageurl.
59 |
60 | The Quality documentation consists of the following elements:
61 | \begin{itemize}
62 | \item Software quality plan: Included as `\packagename\_sqp.docx' (a separate document in this folder).
63 |
64 | \item User requirements: Included as Section 2 in `\packagename\_requirements.docx' (a separate document in this folder).
65 |
66 | \item Functional requirements: Included as Section 3 in `\packagename\_requirements.docx' (a separate document in this folder).
67 |
\item SIL assessment: The Software Integrity Level (SIL) is determined from assessments of the criticality of usage and the complexity of the program in Section \ref{SIL-assessment} of this document.
69 |
70 | \item Software design: Described using UML diagrams in Section \ref{design}.
71 |
72 | \item Test report: An automated test report is given in Section \ref{testreport}. This includes:
73 | \begin{itemize}
74 | \item A table detailing the environment that was used during testing.
75 | \item A summary of the results, indicating how many tests passed and how long it took.
76 | \item A table showing each of the tests that was run, how long they took, and what output was captured during testing.
77 | \item A test coverage report, showing how many lines of the software were covered by the combined tests.
78 | \end{itemize}
\item User Manual: The \packagename\ documentation is included as Appendix \ref{UserManual}. This includes:
80 | \begin{itemize}
81 | \item Installation guidelines.
82 | \item Overview of methods.
83 | \item Examples of use.
84 | \item Algorithm theoretical Basis.
85 | \item Sphinx automated API documentation from docstrings in python code.
86 | \end{itemize}
87 | \end{itemize}
88 |
89 | \clearpage
90 | \input{base/\sil}
91 |
92 | \clearpage
93 | \section{Software design}\label{design}
94 | The software design is specified using the following UML diagrams\footnote{These UML diagrams were made using http://www.umlet.com/umletino/umletino.html}.
95 |
96 | \vspace*{1cm}
97 | \input{uml/uml}
98 |
99 | \clearpage
100 | \section{Test report}\label{testreport}
101 | \getpdfpages{test_report.pdf}
102 | \foreach \x in {1,...,\value{pdfpages}} {
103 | \includegraphics[page=\x,trim= 10mm 10mm 10mm 10mm,width=\textwidth]{test_report.pdf}%
104 | \clearpage
105 |
106 | }
107 | \getpdfpages{cov_report.pdf}
108 | \foreach \x in {1,...,\value{pdfpages}} {
109 | \includegraphics[page=\x,trim= 10mm 10mm 10mm 10mm,width=\textwidth]{cov_report.pdf}%
110 | \clearpage
111 | }
112 |
113 | \part*{User Manual}
114 | \phantomsection
115 | \addcontentsline{toc}{part}{User Manual}
116 | \appendix
117 | \label{UserManual}
118 | \def\maketitle{}
119 | \def\tableofcontents{}
120 | \input{./latex/user_manual.tex}
121 |
122 | \end{document}
123 |
--------------------------------------------------------------------------------
/quality_documentation/base/CookiecutterMacros.tex:
--------------------------------------------------------------------------------
1 | \verbdef\packagename{punpy}
2 | \verbdef\packageurl{https://gitlab.npl.co.uk/eco/tools/punpy}
3 | \verbdef\authorname{Pieter De Vis}
4 | \verbdef\authoremail{pieter.de.vis@npl.co.uk}
5 | \newcommand{\packagedescription}{The punpy module is a Python software package to propagate random, structured and systematic uncertainties through a given measurement function.}
6 | \newcommand{\sil}{sil3}
--------------------------------------------------------------------------------
/quality_documentation/base/acronyms.tex:
--------------------------------------------------------------------------------
1 | \vspace{1cm}
2 |
3 | % % Wenn nicht im Inhaltsverzeichnis stehen soll:
4 | \chapter*{Acronyms}
5 | \addcontentsline{toc}{chapter}{Acronyms}
6 |
7 |
8 | \begin{acronym}[ABCD]
9 |
10 | \end{acronym}
11 |
--------------------------------------------------------------------------------
/quality_documentation/base/history.tex:
--------------------------------------------------------------------------------
1 | %!TEX root = ../second_app_planning.tex
2 |
3 | % This document defines the version history. Entries should include the git
4 | % hash of the appropriate milestones
5 |
6 | \section*{Revision History}
7 | \addcontentsline{toc}{section}{Revision History}
8 | \begin{versionhistory}
9 | \vhEntry{0.1 (\#58a9b4)}{2018/05/30}{CFAB}{Automatically generated quality documentation}
10 | \end{versionhistory}
--------------------------------------------------------------------------------
/quality_documentation/base/metadata.tex:
--------------------------------------------------------------------------------
1 | %!TEX root = ../second_app_planning.tex
2 |
3 | \def\Company{National Physical Laboratory}
4 |
5 | \def\BoldTitle{Quality documentation}
6 |
7 | \def\Subtitle{for \\ punpy software \\}
8 | \def\Authors{Prepared by Pieter De Vis}
9 | \def\Shortname{P De Vis}
10 |
11 |
12 | \title{\textbf{\BoldTitle}\\\Subtitle}
13 | \author{\Authors}
14 | \date{\today}
15 |
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 | %% Creation of pdf information
18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
19 | \hypersetup{pdfinfo={
20 | Title={Title},
21 | Author={TR},
22 | Subject={Report}
23 | }}
--------------------------------------------------------------------------------
/quality_documentation/base/pythoninputstyle.tex:
--------------------------------------------------------------------------------
1 | %%Listing style for java.
2 | \definecolor{dkgreen}{rgb}{0,0.6,0}
3 | \definecolor{gray}{rgb}{0.5,0.5,0.5}
4 | \definecolor{mauve}{rgb}{0.58,0,0.82}
5 | \lstset{frame=tb,
6 | language=Python,
7 | aboveskip=3mm,
8 | belowskip=3mm,
9 | showstringspaces=false,
10 | columns=flexible,
11 | basicstyle={\small\ttfamily},
12 | numbers=left,
13 | numberstyle=\tiny\color{gray},
14 | keywordstyle=\color{blue},
15 | commentstyle=\color{dkgreen},
16 | stringstyle=\color{mauve},
17 | breaklines=true,
18 | breakatwhitespace=true,
19 | tabsize=3
20 | }
21 |
--------------------------------------------------------------------------------
/quality_documentation/base/sil2.tex:
--------------------------------------------------------------------------------
1 | \section{SIL Assessment}\label{SIL-assessment}
2 | An assessment is made of the criticality of usage (CU) and the complexity of the program (CP) using the NPL guidelines (see paragraph 6.3 of procedure QPNPL/M/013). The criticality of usage relates to the degree of impact that the software has on the service. The complexity of program relates to how the development team views the program. The Software Integrity Level (SIL) is then derived as being level 2:
3 |
4 | \begin{tabular}{lll}
5 | \toprule
6 | Criticality & Significant & CU 2 \\
7 | Complexity & Simple functionality & CP 2 \\
8 | Moderating factors & Modular approach & Decrease \\
9 | & More than one person developing the software & Increase \\ \midrule
10 | \textbf{Assessment} & & \textbf{SIL2} \\ \bottomrule
11 | \end{tabular}
--------------------------------------------------------------------------------
/quality_documentation/base/sil3.tex:
--------------------------------------------------------------------------------
1 | \section{SIL Assessment}\label{SIL-assessment}
2 | An assessment is made of the criticality of usage (CU) and the complexity of the program (CP) using the NPL guidelines (see paragraph 6.3 of procedure QPNPL/M/013). The criticality of usage relates to the degree of impact that the software has on the service. The complexity of program relates to how the development team views the program. The Software Integrity Level (SIL) is then derived as being level 3 (see also the software plan included in this folder):
3 |
4 | \begin{tabular}{lll}
5 | \toprule
6 | Criticality & Substantial & CU 3 \\
7 | Complexity & Moderate & CP 3 \\
8 | Moderating factors & Modular approach & Decrease \\
9 | & More than one person developing the software & Increase \\ \midrule
10 | \textbf{Assessment} & & \textbf{SIL3} \\ \bottomrule
11 | \end{tabular}
--------------------------------------------------------------------------------
/quality_documentation/base/sources.bib:
--------------------------------------------------------------------------------
1 | @BOOK{MBA,
2 | title="The ten Day MBA",
3 | author="Steven Silbiger",
4 | publisher="Harper Business",
5 | YEAR="2012",
6 | }
7 |
8 | @article{greenwade93,
9 | author = "George D. Greenwade",
10 | title = "The {C}omprehensive {T}ex {A}rchive {N}etwork ({CTAN})",
11 | year = "1993",
12 | journal = "TUGBoat",
13 | volume = "14",
14 | number = "3",
15 | pages = "342--351"
16 | }
17 |
18 | @misc{Hashing,
19 | author = "Wikipedia",
20 | title = "Hashfunktion",
21 | url = "https://de.wikipedia.org/wiki/Hashfunktion#Erkl.C3.A4rung"
22 | }
--------------------------------------------------------------------------------
/quality_documentation/base/sources.tex:
--------------------------------------------------------------------------------
1 | \bibliographystyle{alpha}
2 |
3 | \renewcommand\bibname{References}
4 | \bibliography{base/sources}
5 |
6 |
--------------------------------------------------------------------------------
/quality_documentation/countpdfpages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Count the number of pages in a PDF file using ImageMagick's `identify`.
#
# Usage: ./countpdfpages.sh <file.pdf>
pdfFile=$1
echo "Processing $pdfFile"
numberOfPages=$(/usr/local/bin/identify "$pdfFile" 2>/dev/null | wc -l | tr -d ' ')
#Identify gets info for each page, dump stderr to dev null
#count the lines of output
#trim the whitespace from the wc -l output
echo "The number of pages is: $numberOfPages"
--------------------------------------------------------------------------------
/quality_documentation/footnotehyper-sphinx.sty:
--------------------------------------------------------------------------------
1 | \NeedsTeXFormat{LaTeX2e}
2 | \ProvidesPackage{footnotehyper-sphinx}%
3 | [2017/10/27 v1.7 hyperref aware footnote.sty for sphinx (JFB)]
4 | %%
5 | %% Package: footnotehyper-sphinx
6 | %% Version: based on footnotehyper.sty 2017/03/07 v1.0
7 | %% as available at https://www.ctan.org/pkg/footnotehyper
8 | %% License: the one applying to Sphinx
9 | %%
10 | %% Refer to the PDF documentation at https://www.ctan.org/pkg/footnotehyper for
11 | %% the code comments.
12 | %%
13 | %% Differences:
14 | %% 1. a partial tabulary compatibility layer added (enough for Sphinx mark-up),
15 | %% 2. use of \spx@opt@BeforeFootnote from sphinx.sty,
16 | %% 3. use of \sphinxunactivateextrasandspace from sphinx.sty,
17 | %% 4. macro definition \sphinxfootnotemark,
18 | %% 5. macro definition \sphinxlongtablepatch
19 | %% 6. replaced an \undefined by \@undefined
20 | \DeclareOption*{\PackageWarning{footnotehyper-sphinx}{Option `\CurrentOption' is unknown}}%
21 | \ProcessOptions\relax
22 | \newbox\FNH@notes % box register that accumulates deferred footnote texts
23 | \newdimen\FNH@width % snapshot of \columnwidth taken when saving starts
24 | \let\FNH@colwidth\columnwidth
25 | \newif\ifFNH@savingnotes % true while footnotes are being deferred
26 | \AtBeginDocument {%
27 | \let\FNH@latex@footnote \footnote
28 | \let\FNH@latex@footnotetext\footnotetext
29 | \let\FNH@H@@footnotetext \@footnotetext
30 | \newenvironment{savenotes}
31 | {\FNH@savenotes\ignorespaces}{\FNH@spewnotes\ignorespacesafterend}%
32 | \let\spewnotes \FNH@spewnotes
33 | \let\footnote \FNH@footnote
34 | \let\footnotetext \FNH@footnotetext
35 | \let\endfootnote \FNH@endfntext
36 | \let\endfootnotetext\FNH@endfntext
37 | \@ifpackageloaded{hyperref}
38 | {\ifHy@hyperfootnotes
39 | \let\FNH@H@@footnotetext\H@@footnotetext
40 | \else
41 | \let\FNH@hyper@fntext\FNH@nohyp@fntext
42 | \fi}%
43 | {\let\FNH@hyper@fntext\FNH@nohyp@fntext}%
44 | }% end \AtBeginDocument setup
45 | \def\FNH@hyper@fntext{\FNH@fntext\FNH@hyper@fntext@i}%
46 | \def\FNH@nohyp@fntext{\FNH@fntext\FNH@nohyp@fntext@i}%
47 | \def\FNH@fntext #1{%
48 | \ifx\ifmeasuring@\@undefined
49 | \expandafter\@secondoftwo\else\expandafter\@firstofone\fi
50 | % these two lines modified for Sphinx (tabulary compatibility):
51 | {\ifmeasuring@\expandafter\@gobbletwo\else\expandafter\@firstofone\fi}%
52 | {\ifx\equation$\expandafter\@gobbletwo\fi #1}%$
53 | }%
54 | \long\def\FNH@hyper@fntext@i#1{%
55 | \global\setbox\FNH@notes\vbox
56 | {\unvbox\FNH@notes
57 | \FNH@startnote
58 | \@makefntext
59 | {\rule\z@\footnotesep\ignorespaces
60 | \ifHy@nesting\expandafter\ltx@firstoftwo
61 | \else\expandafter\ltx@secondoftwo
62 | \fi
63 | {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}{#1}}%
64 | {\Hy@raisedlink
65 | {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}%
66 | {\relax}}%
67 | \let\@currentHref\Hy@footnote@currentHref
68 | \let\@currentlabelname\@empty
69 | #1}%
70 | \@finalstrut\strutbox
71 | }%
72 | \FNH@endnote
73 | }%
74 | }%
75 | \long\def\FNH@nohyp@fntext@i#1{%
76 | \global\setbox\FNH@notes\vbox
77 | {\unvbox\FNH@notes
78 | \FNH@startnote
79 | \@makefntext{\rule\z@\footnotesep\ignorespaces#1\@finalstrut\strutbox}%
80 | \FNH@endnote
81 | }%
82 | }%
83 | \def\FNH@startnote{%
84 | \hsize\FNH@colwidth
85 | \interlinepenalty\interfootnotelinepenalty
86 | \reset@font\footnotesize
87 | \floatingpenalty\@MM
88 | \@parboxrestore
89 | \protected@edef\@currentlabel{\csname p@\@mpfn\endcsname\@thefnmark}%
90 | \color@begingroup
91 | }%
92 | \def\FNH@endnote{\color@endgroup}% closes the group opened by \FNH@startnote
93 | \def\FNH@savenotes{% start diverting footnotes into the \FNH@notes box
94 | \begingroup
95 | \ifFNH@savingnotes\else
96 | \FNH@savingnotestrue
97 | \let\@footnotetext \FNH@hyper@fntext
98 | \let\@mpfootnotetext \FNH@hyper@fntext
99 | \let\H@@mpfootnotetext\FNH@nohyp@fntext
100 | \FNH@width\columnwidth
101 | \let\FNH@colwidth\FNH@width
102 | \global\setbox\FNH@notes\box\voidb@x
103 | \let\FNH@thempfn\thempfn
104 | \let\FNH@mpfn\@mpfn
105 | \ifx\@minipagerestore\relax\let\@minipagerestore\@empty\fi
106 | \expandafter\def\expandafter\@minipagerestore\expandafter{%
107 | \@minipagerestore
108 | \let\thempfn\FNH@thempfn
109 | \let\@mpfn\FNH@mpfn
110 | }%
111 | \fi
112 | }%
113 | \def\FNH@spewnotes {% emit any diverted footnotes, then stop saving
114 | \endgroup
115 | \ifFNH@savingnotes\else
116 | \ifvoid\FNH@notes\else
117 | \begingroup
118 | \let\@makefntext\@empty
119 | \let\@finalstrut\@gobble
120 | \let\rule\@gobbletwo
121 | \FNH@H@@footnotetext{\unvbox\FNH@notes}%
122 | \endgroup
123 | \fi
124 | \fi
125 | }%
126 | \def\FNH@footnote@envname {footnote}%
127 | \def\FNH@footnotetext@envname{footnotetext}%
128 | \def\FNH@footnote{%
129 | % this line added for Sphinx:
130 | \spx@opt@BeforeFootnote
131 | \ifx\@currenvir\FNH@footnote@envname
132 | \expandafter\FNH@footnoteenv
133 | \else
134 | \expandafter\FNH@latex@footnote
135 | \fi
136 | }%
137 | \def\FNH@footnoteenv{%
138 | % this line added for Sphinx (footnotes in parsed literal blocks):
139 | \catcode13=5 \sphinxunactivateextrasandspace
140 | \@ifnextchar[%
141 | \FNH@footnoteenv@i %]
142 | {\stepcounter\@mpfn
143 | \protected@xdef\@thefnmark{\thempfn}%
144 | \@footnotemark
145 | \def\FNH@endfntext@fntext{\@footnotetext}%
146 | \FNH@startfntext}%
147 | }%
148 | \def\FNH@footnoteenv@i[#1]{%
149 | \begingroup
150 | \csname c@\@mpfn\endcsname #1\relax
151 | \unrestored@protected@xdef\@thefnmark{\thempfn}%
152 | \endgroup
153 | \@footnotemark
154 | \def\FNH@endfntext@fntext{\@footnotetext}%
155 | \FNH@startfntext
156 | }%
157 | \def\FNH@footnotetext{%
158 | \ifx\@currenvir\FNH@footnotetext@envname
159 | \expandafter\FNH@footnotetextenv
160 | \else
161 | \expandafter\FNH@latex@footnotetext
162 | \fi
163 | }%
164 | \def\FNH@footnotetextenv{%
165 | \@ifnextchar[%
166 | \FNH@footnotetextenv@i %]
167 | {\protected@xdef\@thefnmark{\thempfn}%
168 | \def\FNH@endfntext@fntext{\@footnotetext}%
169 | \FNH@startfntext}%
170 | }%
171 | \def\FNH@footnotetextenv@i[#1]{%
172 | \begingroup
173 | \csname c@\@mpfn\endcsname #1\relax
174 | \unrestored@protected@xdef\@thefnmark{\thempfn}%
175 | \endgroup
176 | \ifFNH@savingnotes
177 | \def\FNH@endfntext@fntext{\FNH@nohyp@fntext}%
178 | \else
179 | \def\FNH@endfntext@fntext{\FNH@H@@footnotetext}%
180 | \fi
181 | \FNH@startfntext
182 | }%
183 | \def\FNH@startfntext{% begin collecting an environment-form footnote body
184 | \setbox\z@\vbox\bgroup
185 | \FNH@startnote
186 | \FNH@prefntext
187 | \rule\z@\footnotesep\ignorespaces
188 | }%
189 | \def\FNH@endfntext {% finish and dispatch an environment-form footnote body
190 | \@finalstrut\strutbox
191 | \FNH@postfntext
192 | \FNH@endnote
193 | \egroup
194 | \begingroup
195 | \let\@makefntext\@empty\let\@finalstrut\@gobble\let\rule\@gobbletwo
196 | \FNH@endfntext@fntext {\unvbox\z@}%
197 | \endgroup
198 | }%
199 | \AtBeginDocument{%
200 | \let\FNH@@makefntext\@makefntext
201 | \ifx\@makefntextFB\@undefined
202 | \expandafter\@gobble\else\expandafter\@firstofone\fi
203 | {\ifFBFrenchFootnotes \let\FNH@@makefntext\@makefntextFB \else
204 | \let\FNH@@makefntext\@makefntextORI\fi}%
205 | \expandafter\FNH@check@a\FNH@@makefntext{1.2!3?4,}%
206 | \FNH@@@1.2!3?4,\FNH@@@\relax
207 | }%
208 | \long\def\FNH@check@a #11.2!3?4,#2\FNH@@@#3{%
209 | \ifx\relax#3\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
210 | \FNH@bad@makefntext@alert
211 | {\def\FNH@prefntext{#1}\def\FNH@postfntext{#2}\FNH@check@b}%
212 | }%
213 | \def\FNH@check@b #1\relax{%
214 | \expandafter\expandafter\expandafter\FNH@check@c
215 | \expandafter\meaning\expandafter\FNH@prefntext
216 | \meaning\FNH@postfntext1.2!3?4,\FNH@check@c\relax
217 | }%
218 | \def\FNH@check@c #11.2!3?4,#2#3\relax{%
219 | \ifx\FNH@check@c#2\expandafter\@gobble\fi\FNH@bad@makefntext@alert
220 | }%
221 | % slight reformulation for Sphinx
222 | \def\FNH@bad@makefntext@alert{%
223 | \PackageWarningNoLine{footnotehyper-sphinx}%
224 | {Footnotes will be sub-optimal, sorry. This is due to the document class or^^J
225 | some package modifying macro \string\@makefntext.^^J
226 | You can try to report this incompatibility at^^J
227 | https://github.com/sphinx-doc/sphinx with this info:}%
228 | \typeout{\meaning\@makefntext}%
229 | \let\FNH@prefntext\@empty\let\FNH@postfntext\@empty
230 | }%
231 | % this macro from original footnote.sty is not used anymore by Sphinx
232 | % but for simplicity sake let's just keep it as is
233 | \def\makesavenoteenv{\@ifnextchar[\FNH@msne@ii\FNH@msne@i}%]
234 | \def\FNH@msne@i #1{%
235 | \expandafter\let\csname FNH$#1\expandafter\endcsname %$
236 | \csname #1\endcsname
237 | \expandafter\let\csname endFNH$#1\expandafter\endcsname %$
238 | \csname end#1\endcsname
239 | \FNH@msne@ii[#1]{FNH$#1}%$
240 | }%
241 | \def\FNH@msne@ii[#1]#2{%
242 | \expandafter\edef\csname#1\endcsname{%
243 | \noexpand\savenotes
244 | \expandafter\noexpand\csname#2\endcsname
245 | }%
246 | \expandafter\edef\csname end#1\endcsname{%
247 | \expandafter\noexpand\csname end#2\endcsname
248 | \noexpand\expandafter
249 | \noexpand\spewnotes
250 | \noexpand\if@endpe\noexpand\@endpetrue\noexpand\fi
251 | }%
252 | }%
253 | % end of footnotehyper 2017/02/16 v0.99
254 | % some extras for Sphinx :
255 | % \sphinxfootnotemark: usable in section titles and silently removed from TOCs.
256 | \def\sphinxfootnotemark [#1]%
257 | {\ifx\thepage\relax\else\protect\spx@opt@BeforeFootnote
258 | \protect\footnotemark[#1]\fi}%
259 | \AtBeginDocument{%
260 | % make hyperref complain less
261 | \pdfstringdefDisableCommands{\def\sphinxfootnotemark [#1]{}}%
262 | % to obtain hyperlinked footnotes in longtable environment we must replace
263 | % hyperref's patch of longtable's patch of \@footnotetext by our own
264 | \let\LT@p@ftntext\FNH@hyper@fntext
265 | % this *requires* longtable to be used always wrapped in savenotes environment
266 | }%
267 | \endinput
268 | %%
269 | %% End of file `footnotehyper-sphinx.sty'.
270 |
--------------------------------------------------------------------------------
/quality_documentation/punpy_QF-59.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/punpy_QF-59.docx
--------------------------------------------------------------------------------
/quality_documentation/punpy_requirements.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/punpy_requirements.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16a.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16a.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16b.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16b.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16c.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16c.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16d.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16d.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16e.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16e.docx
--------------------------------------------------------------------------------
/quality_documentation/review_checklists/qf-16f.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/review_checklists/qf-16f.docx
--------------------------------------------------------------------------------
/quality_documentation/softwareRequirements.sty:
--------------------------------------------------------------------------------
1 | %!TEX root = second_app_planning.tex
2 |
3 | \usepackage[colorlinks=false,hidelinks]{hyperref}%%The hyperref package is used to handle cross-referencing commands in LATEX to produce hypertext links in the document.
4 | \usepackage[english]{babel}
5 | \usepackage{amsmath}
6 | \usepackage[T1]{fontenc}
7 | \usepackage[utf8]{inputenc}
8 | \usepackage[pdftex]{graphicx} %%Graphics in pdfLaTeX
9 | \usepackage{pdfpages}
10 | \usepackage{longtable} %%For tables that exceed a page width
11 | % \usepackage{pdflscape} %%Adds PDF support to the landscape environment of package
12 | \usepackage{caption} %%Provides many ways to customise the captions in floating environments like figure and table
13 | \usepackage{float} %%Improves the interface for defining floating objects such as figures and tables
14 | \usepackage[tablegrid,nochapter]{vhistory} %%Vhistory simplifies the creation of a history of versions of a document
15 | \usepackage{cite} %%The package supports compressed, sorted lists of numerical citations, and also deals with various punctuation and other issues of representation, including comprehensive management of break points
16 | \usepackage[]{acronym} %%This package ensures that all acronyms used in the text are spelled out in full at least once. It also provides an environment to build a list of acronyms used
17 |
18 | % This package provides an easy and flexible user interface to customize page layout, implementing auto-centering and auto-balancing mechanisms so that the users have only to give the least description for the page layout. For example, if you want to set each margin 2cm without header space, what you need is just \usepackage[margin=2cm,nohead]{geometry}.
19 | \usepackage[margin=4cm]{geometry}
20 |
21 | \usepackage{booktabs}
22 |
23 | % \usepackage{layout} %%The package defines a command \layout, which will show a summary of the layout of the current document
24 | % \usepackage{subfigure} %%Provides support for the manipulation and reference of small or ‘sub’ figures and tables within a single figure or table environment.
25 | % \usepackage[toc]{glossaries} %%The glossaries package supports acronyms and multiple glossaries, and has provision for operation in several languages (using the facilities of either babel or polyglossia).
26 | \usepackage[left,pagewise,modulo]{lineno} %%Adds line numbers to selected paragraphs with reference possible through the LATEX \ref and \pageref cross reference mechanism
27 |
28 | \usepackage{metainfo}
29 | %\usepackage[pagestyles,raggedright]{titlesec}
30 |
31 | \usepackage{siunitx}
32 |
33 | \usepackage{cleveref}
34 |
35 | \usepackage{etoolbox}
36 | \usepackage{%
37 | array, %%An extended implementation of the array and tabular environments which extends the options for column formats, and provides "programmable" format specifications
38 | booktabs, %%The package enhances the quality of tables in LATEX, providing extra commands as well as behind-the-scenes optimisation
39 | dcolumn, %%Aligns tabular column entries on a decimal marker
40 | rotating,
41 | shortvrb,
42 | units,
43 | url,
44 | lastpage,
45 | longtable,
46 | lscape,
47 | qtree,
48 | skmath,
49 | }
50 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
51 | %% Python --> latex
52 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
53 | \usepackage{listings}
54 | \usepackage{color}
55 | \definecolor{pblue}{rgb}{0.13,0.13,1}
56 | \definecolor{pgreen}{rgb}{0,0.5,0}
57 | \definecolor{pred}{rgb}{0.9,0,0}
58 | \definecolor{pgrey}{rgb}{0.46,0.45,0.48}
59 |
60 | \input{base/pythoninputstyle}
61 |
62 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
63 | %% Make an environment for requirements lists
64 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
65 | \newenvironment{reqs}
66 | {\begin{enumerate}[label={\thesubsection.\arabic*}]} % items numbered <subsection>.<n>
67 | {\end{enumerate}} % the label= key needs enumitem (loaded just below)
68 |
69 | \usepackage{enumitem} % Allows for referencing to individual \items
70 |
71 | \crefalias{enumi}{requirement}
72 | \crefname{requirement}{}{}
73 | \creflabelformat{requirement}{#2R#1#3}%
74 | \crefmultiformat{requirement}{#2R#1#3}%
75 | { \& #2R#1#3}{, #2R#1#3}{ \&~#2R#1#3}
76 | \Crefmultiformat{requirement}{#2#1#3}%
77 | { \& #2R#1#3}{, #2R#1#3}{ \&~#2R#1#3}
78 |
79 |
80 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
81 | %% Inserting the metadata
82 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
83 | \input{base/metadata}
84 |
85 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
86 | %% Creating the frontpage
87 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
88 | \AtBeginDocument{
89 | \maketitle
90 | \thispagestyle{empty}
91 | } % typeset the title page (without a page number) as soon as the document starts
92 |
93 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
94 | %% Creation of the header
95 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
96 | \patchcmd{\chapter}{plain}{short}{}{} %$ <-- the header on chapter 1
97 |
98 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
99 | %% Creation of page-styles
100 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
101 | % \newpagestyle{long}{%
102 | % \sethead[\thepage][][\chaptername\ \thechapter:\ \chaptertitle]{\chaptername\ \thechapter:\ \chaptertitle}{}{\thepage}
103 | % \headrule
104 | % }
105 |
106 | % \newpagestyle{short}{%
107 | % \sethead[\thepage][][]{}{}{\thepage}
108 | % \headrule
109 | % }
110 |
111 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
112 | %% Git stuff
113 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
114 |
115 | \usepackage{fancyhdr}
116 |
117 | % Get current git version
118 | % Note: this command requires a) git installed and b) shell escape in latex enabled
119 | % \immediate\write18{git describe --always --tags --dirty --long --abbrev=6 > \jobname_desc.nogit.txt }
120 | \immediate\write18{ git describe --match nopenopenope --always --long --dirty --abbrev=6 > \jobname_desc.nogit.txt }
121 |
122 | \IfFileExists{\jobname_desc.nogit.txt}{
123 | \usepackage{catchfile}
124 | \CatchFileDef{\gitVer}{\jobname_desc.nogit.txt}{}
125 | }{
126 | \newcommand\gitVer{unknown}
127 | } % \gitVer: git description read from the write18-generated file, or "unknown"
128 |
129 | % Add the git version and the date/time to the header
130 | % \newcommand{\versionBox}{\fbox{Manuscript version: \textit{\#\gitVer} - \today{} \currenttime{} }}
131 |
132 | % \renewcommand{\sectionmark}[1]{\markright{\thesection.\ #1}}
133 |
134 | \fancypagestyle{short}{
135 | \fancyhf{}
136 | % \lfoot{\versionBox{}}
137 | \rfoot{\thepage}
138 | \lhead{\scshape\rightmark}
139 | \rhead{\textit{\#\gitVer}}
140 | } % page number bottom-right, small-caps section mark top-left, git version top-right
141 |
142 | \fancypagestyle{long}{
143 | \fancyhf{}
144 | % \lfoot{\versionBox{}}
145 | \rfoot{\thepage}
146 | \lhead{\scshape\rightmark}
147 | \rhead{\textit{\#\gitVer}}
148 | } % currently identical to the "short" page style
149 |
150 | % Put it into the metadata too
151 | \hypersetup{
152 | pdfsubject={Version \#\gitVer{}}
153 | }
--------------------------------------------------------------------------------
/quality_documentation/sphinxcyrillic.sty:
--------------------------------------------------------------------------------
1 | %% CYRILLIC IN NON-CYRILLIC DOCUMENTS (pdflatex only)
2 | %
3 | % refs: https://tex.stackexchange.com/q/460271/
4 | \ProvidesPackage{sphinxcyrillic}%
5 | [2018/11/21 v2.0 support for Cyrillic in non-Cyrillic documents]
6 | \RequirePackage{kvoptions}
7 | \SetupKeyvalOptions{prefix=spx@cyropt@} % use \spx@cyropt@ prefix
8 | \DeclareBoolOption[false]{Xtwo} % package option: declare glyph defaults in X2 encoding
9 | \DeclareBoolOption[false]{TtwoA} % package option: declare glyph defaults in T2A encoding
10 | \DeclareDefaultOption{\@unknownoptionerror}
11 | \ProcessLocalKeyvalOptions* % ignore class options
12 |
13 | \ifspx@cyropt@Xtwo
14 | % original code by tex.sx user egreg (updated 2019/10/28):
15 | % https://tex.stackexchange.com/a/460325/
16 | % 159 Cyrillic glyphs as available in X2 TeX 8bit font encoding
17 | % This assumes inputenc loaded with utf8 option, or LaTeX release
18 | % as recent as 2018/04/01 which does it automatically.
19 | \@tfor\next:=%
20 | {Ё}{Ђ}{Є}{Ѕ}{І}{Ј}{Љ}{Њ}{Ћ}{Ў}{Џ}{А}{Б}{В}{Г}{Д}{Е}{Ж}{З}{И}{Й}%
21 | {К}{Л}{М}{Н}{О}{П}{Р}{С}{Т}{У}{Ф}{Х}{Ц}{Ч}{Ш}{Щ}{Ъ}{Ы}{Ь}{Э}{Ю}%
22 | {Я}{а}{б}{в}{г}{д}{е}{ж}{з}{и}{й}{к}{л}{м}{н}{о}{п}{р}{с}{т}{у}%
23 | {ф}{х}{ц}{ч}{ш}{щ}{ъ}{ы}{ь}{э}{ю}{я}{ё}{ђ}{є}{ѕ}{і}{ј}{љ}{њ}{ћ}%
24 | {ў}{џ}{Ѣ}{ѣ}{Ѫ}{ѫ}{Ѵ}{ѵ}{Ґ}{ґ}{Ғ}{ғ}{Ҕ}{ҕ}{Җ}{җ}{Ҙ}{ҙ}{Қ}{қ}{Ҝ}{ҝ}%
25 | {Ҟ}{ҟ}{Ҡ}{ҡ}{Ң}{ң}{Ҥ}{ҥ}{Ҧ}{ҧ}{Ҩ}{ҩ}{Ҫ}{ҫ}{Ҭ}{ҭ}{Ү}{ү}{Ұ}{ұ}{Ҳ}{ҳ}%
26 | {Ҵ}{ҵ}{Ҷ}{ҷ}{Ҹ}{ҹ}{Һ}{һ}{Ҽ}{ҽ}{Ҿ}{ҿ}{Ӏ}{Ӄ}{ӄ}{Ӆ}{ӆ}{Ӈ}{ӈ}{Ӌ}{ӌ}%
27 | {Ӎ}{ӎ}{Ӕ}{ӕ}{Ә}{ә}{Ӡ}{ӡ}{Ө}{ө}\do
28 | {%
29 | \begingroup\def\IeC{\protect\DeclareTextSymbolDefault}%
30 | \protected@edef\@temp{\endgroup
31 | \@ifl@t@r{\fmtversion}{2019/10/01}{\csname u8:\next\endcsname}{\next}}%
32 | \@temp{X2}%
33 | }%
34 | \else
35 | \ifspx@cyropt@TtwoA
36 | % original code by tex.sx user jfbu:
37 | % https://tex.stackexchange.com/a/460305/
38 | % 63*2+1=127 Cyrillic glyphs as found in T2A 8bit TeX font-encoding
39 | \@tfor\@tempa:=%
40 | {ae}{a}{b}{chrdsc}{chvcrs}{ch}{c}{dje}{dze}{dzhe}{d}{erev}{ery}{e}%
41 | {f}{ghcrs}{gup}{g}{hdsc}{hrdsn}{h}{ie}{ii}{ishrt}{i}{je}%
42 | {kbeak}{kdsc}{kvcrs}{k}{lje}{l}{m}{ndsc}{ng}{nje}{n}{otld}{o}{p}{r}%
43 | {schwa}{sdsc}{sftsn}{shch}{shha}{sh}{s}{tshe}{t}{ushrt}{u}{v}%
44 | {ya}{yhcrs}{yi}{yo}{yu}{y}{zdsc}{zhdsc}{zh}{z}\do
45 | {%
46 | \expandafter\DeclareTextSymbolDefault\expandafter
47 | {\csname cyr\@tempa\endcsname}{T2A}%
48 | \expandafter\uppercase\expandafter{\expandafter
49 | \def\expandafter\@tempa\expandafter{\@tempa}}%
50 | \expandafter\DeclareTextSymbolDefault\expandafter
51 | {\csname CYR\@tempa\endcsname}{T2A}%
52 | }%
53 | \DeclareTextSymbolDefault{\CYRpalochka}{T2A}% palochka has no lowercase counterpart
54 | \fi\fi % end TtwoA branch / end Xtwo branch
55 | \endinput
56 |
--------------------------------------------------------------------------------
/quality_documentation/sphinxhighlight.sty:
--------------------------------------------------------------------------------
1 | \NeedsTeXFormat{LaTeX2e}[1995/12/01]
2 | \ProvidesPackage{sphinxhighlight}[2016/05/29 stylesheet for highlighting with pygments]
3 |
4 |
5 | \makeatletter
6 | \def\PYG@reset{\let\PYG@it=\relax \let\PYG@bf=\relax% reset all style hooks to no-ops
7 | \let\PYG@ul=\relax \let\PYG@tc=\relax%
8 | \let\PYG@bc=\relax \let\PYG@ff=\relax}
9 | \def\PYG@tok#1{\csname PYG@tok@#1\endcsname} % invoke the handler for one token class
10 | \def\PYG@toks#1+{\ifx\relax#1\empty\else%
11 | \PYG@tok{#1}\expandafter\PYG@toks\fi} % recurse over "+"-separated token classes
12 | \def\PYG@do#1{\PYG@bc{\PYG@tc{\PYG@ul{%
13 | \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}}
14 | \def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}} % main entry: render #2 with the styles of classes #1
15 |
16 | \expandafter\def\csname PYG@tok@w\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
17 | \expandafter\def\csname PYG@tok@c\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}}
18 | \expandafter\def\csname PYG@tok@cp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
19 | \expandafter\def\csname PYG@tok@cs\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}
20 | \expandafter\def\csname PYG@tok@k\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
21 | \expandafter\def\csname PYG@tok@kp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
22 | \expandafter\def\csname PYG@tok@kt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.56,0.13,0.00}{##1}}}
23 | \expandafter\def\csname PYG@tok@o\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
24 | \expandafter\def\csname PYG@tok@ow\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
25 | \expandafter\def\csname PYG@tok@nb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
26 | \expandafter\def\csname PYG@tok@nf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.49}{##1}}}
27 | \expandafter\def\csname PYG@tok@nc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}}
28 | \expandafter\def\csname PYG@tok@nn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}}
29 | \expandafter\def\csname PYG@tok@ne\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
30 | \expandafter\def\csname PYG@tok@nv\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}}
31 | \expandafter\def\csname PYG@tok@no\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.38,0.68,0.84}{##1}}}
32 | \expandafter\def\csname PYG@tok@nl\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.13,0.44}{##1}}}
33 | \expandafter\def\csname PYG@tok@ni\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.84,0.33,0.22}{##1}}}
34 | \expandafter\def\csname PYG@tok@na\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
35 | \expandafter\def\csname PYG@tok@nt\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.45}{##1}}}
36 | \expandafter\def\csname PYG@tok@nd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}}
37 | \expandafter\def\csname PYG@tok@s\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
38 | \expandafter\def\csname PYG@tok@sd\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
39 | \expandafter\def\csname PYG@tok@si\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.44,0.63,0.82}{##1}}}
40 | \expandafter\def\csname PYG@tok@se\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
41 | \expandafter\def\csname PYG@tok@sr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.14,0.33,0.53}{##1}}}
42 | \expandafter\def\csname PYG@tok@ss\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.32,0.47,0.09}{##1}}}
43 | \expandafter\def\csname PYG@tok@sx\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}}
44 | \expandafter\def\csname PYG@tok@m\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
45 | \expandafter\def\csname PYG@tok@gh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
46 | \expandafter\def\csname PYG@tok@gu\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
47 | \expandafter\def\csname PYG@tok@gd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
48 | \expandafter\def\csname PYG@tok@gi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
49 | \expandafter\def\csname PYG@tok@gr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
50 | \expandafter\def\csname PYG@tok@ge\endcsname{\let\PYG@it=\textit}
51 | \expandafter\def\csname PYG@tok@gs\endcsname{\let\PYG@bf=\textbf}
52 | \expandafter\def\csname PYG@tok@gp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}}
53 | \expandafter\def\csname PYG@tok@go\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.20}{##1}}}
54 | \expandafter\def\csname PYG@tok@gt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
55 | \expandafter\def\csname PYG@tok@err\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
56 | \expandafter\def\csname PYG@tok@kc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
57 | \expandafter\def\csname PYG@tok@kd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
58 | \expandafter\def\csname PYG@tok@kn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
59 | \expandafter\def\csname PYG@tok@kr\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
60 | \expandafter\def\csname PYG@tok@bp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}}
61 | \expandafter\def\csname PYG@tok@fm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.49}{##1}}}
62 | \expandafter\def\csname PYG@tok@vc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}}
63 | \expandafter\def\csname PYG@tok@vg\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}}
64 | \expandafter\def\csname PYG@tok@vi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}}
65 | \expandafter\def\csname PYG@tok@vm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}}
66 | \expandafter\def\csname PYG@tok@sa\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
67 | \expandafter\def\csname PYG@tok@sb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
68 | \expandafter\def\csname PYG@tok@sc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
69 | \expandafter\def\csname PYG@tok@dl\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
70 | \expandafter\def\csname PYG@tok@s2\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
71 | \expandafter\def\csname PYG@tok@sh\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
72 | \expandafter\def\csname PYG@tok@s1\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}}
73 | \expandafter\def\csname PYG@tok@mb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
74 | \expandafter\def\csname PYG@tok@mf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
75 | \expandafter\def\csname PYG@tok@mh\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
76 | \expandafter\def\csname PYG@tok@mi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
77 | \expandafter\def\csname PYG@tok@il\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
78 | \expandafter\def\csname PYG@tok@mo\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}}
79 | \expandafter\def\csname PYG@tok@ch\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}}
80 | \expandafter\def\csname PYG@tok@cm\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}}
81 | \expandafter\def\csname PYG@tok@cpf\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}}
82 | \expandafter\def\csname PYG@tok@c1\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}}
83 |
84 | \def\PYGZbs{\char`\\} % literal characters that Pygments escapes in LaTeX output
85 | \def\PYGZus{\char`\_}
86 | \def\PYGZob{\char`\{}
87 | \def\PYGZcb{\char`\}}
88 | \def\PYGZca{\char`\^}
89 | \def\PYGZam{\char`\&}
90 | \def\PYGZlt{\char`\<}
91 | \def\PYGZgt{\char`\>}
92 | \def\PYGZsh{\char`\#}
93 | \def\PYGZpc{\char`\%}
94 | \def\PYGZdl{\char`\$}
95 | \def\PYGZhy{\char`\-}
96 | \def\PYGZsq{\char`\'}
97 | \def\PYGZdq{\char`\"}
98 | \def\PYGZti{\char`\~}
99 | % for compatibility with earlier versions
100 | \def\PYGZat{@}
101 | \def\PYGZlb{[}
102 | \def\PYGZrb{]}
103 | \makeatother
104 |
105 | \renewcommand\PYGZsq{\textquotesingle} % prefer an upright single quote in code
106 |
--------------------------------------------------------------------------------
/quality_documentation/sphinxhowto.cls:
--------------------------------------------------------------------------------
1 | %
2 | % sphinxhowto.cls for Sphinx (http://sphinx-doc.org/)
3 | %
4 |
5 | \NeedsTeXFormat{LaTeX2e}[1995/12/01]
6 | \ProvidesClass{sphinxhowto}[2019/12/01 v2.3.0 Document class (Sphinx howto)]
7 |
8 | % 'oneside' option overriding the 'twoside' default
9 | \newif\if@oneside
10 | \DeclareOption{oneside}{\@onesidetrue}
11 | % Pass remaining document options to the parent class.
12 | \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}}
13 | \ProcessOptions\relax
14 |
15 | % Default to two-side document
16 | \if@oneside
17 | % nothing to do (oneside is the default)
18 | \else
19 | \PassOptionsToClass{twoside}{\sphinxdocclass}
20 | \fi
21 |
22 | \LoadClass{\sphinxdocclass}
23 |
24 | % Set some sane defaults for section numbering depth and TOC depth. You can
25 | % reset these counters in your preamble.
26 | %
27 | \setcounter{secnumdepth}{2}
28 | \setcounter{tocdepth}{2}% i.e. section and subsection
29 |
30 | % Adapt \and command to the flushright context of \sphinxmaketitle, to
31 | % avoid ragged line endings if author names do not fit all on one single line
32 | \DeclareRobustCommand{\and}{%
33 | \end{tabular}\kern-\tabcolsep
34 | \allowbreak
35 | \hskip\dimexpr1em+\tabcolsep\@plus.17fil\begin{tabular}[t]{c}%
36 | }%
37 | % If it is desired that each author name be on its own line, use in preamble:
38 | %\DeclareRobustCommand{\and}{%
39 | % \end{tabular}\kern-\tabcolsep\\\begin{tabular}[t]{c}%
40 | %}%
41 | % Change the title page to look a bit better, and fit in with the fncychap
42 | % ``Bjarne'' style a bit better.
43 | %
44 | \newcommand{\sphinxmaketitle}{%
45 | \noindent\rule{\textwidth}{1pt}\par
46 | \begingroup % for PDF information dictionary
47 | \def\endgraf{ }\def\and{\& }%
48 | \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup
49 | \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}%
50 | \endgroup
51 | \begin{flushright}
52 | \sphinxlogo
53 | \py@HeaderFamily
54 | {\Huge \@title }\par
55 | {\itshape\large \py@release \releaseinfo}\par
56 | \vspace{25pt}
57 | {\Large
58 | \begin{tabular}[t]{c}
59 | \@author
60 | \end{tabular}\kern-\tabcolsep}\par
61 | \vspace{25pt}
62 | \@date \par
63 | \py@authoraddress \par
64 | \end{flushright}
65 | \@thanks
66 | \setcounter{footnote}{0}
67 | \let\thanks\relax\let\maketitle\relax
68 | %\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
69 | }
70 |
71 | \newcommand{\sphinxtableofcontents}{%
72 | \begingroup
73 | \parskip \z@skip
74 | \sphinxtableofcontentshook
75 | \tableofcontents
76 | \endgroup
77 | \noindent\rule{\textwidth}{1pt}\par
78 | \vspace{12pt}%
79 | }
80 | \newcommand\sphinxtableofcontentshook{}
81 | \pagenumbering{arabic}
82 |
83 | % Fix the bibliography environment to add an entry to the Table of
84 | % Contents.
85 | % For an article document class this environment is a section,
86 | % so no page break before it.
87 | %
88 | \newenvironment{sphinxthebibliography}[1]{%
89 | % \phantomsection % not needed here since TeXLive 2010's hyperref
90 | \begin{thebibliography}{#1}%
91 | \addcontentsline{toc}{section}{\ifdefined\refname\refname\else\ifdefined\bibname\bibname\fi\fi}}{\end{thebibliography}}
92 |
93 |
94 | % Same for the indices.
95 | % The memoir class already does this, so we don't duplicate it in that case.
96 | %
97 | \@ifclassloaded{memoir}
98 | {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}
99 | {\newenvironment{sphinxtheindex}{%
100 | \phantomsection % needed because no chapter, section, ... is created by theindex
101 | \begin{theindex}%
102 | \addcontentsline{toc}{section}{\indexname}}{\end{theindex}}}
103 |
--------------------------------------------------------------------------------
/quality_documentation/sphinxmanual.cls:
--------------------------------------------------------------------------------
1 | %
2 | % sphinxmanual.cls for Sphinx (http://sphinx-doc.org/)
3 | %
4 |
5 | \NeedsTeXFormat{LaTeX2e}[1995/12/01]
6 | \ProvidesClass{sphinxmanual}[2019/12/01 v2.3.0 Document class (Sphinx manual)]
7 |
8 | % chapters starting at odd pages (overridden by 'openany' document option)
9 | \PassOptionsToClass{openright}{\sphinxdocclass}
10 |
11 | % 'oneside' option overriding the 'twoside' default
12 | \newif\if@oneside
13 | \DeclareOption{oneside}{\@onesidetrue}
14 | % Pass remaining document options to the parent class.
15 | \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}}
16 | \ProcessOptions\relax
17 |
18 | % Default to two-side document
19 | \if@oneside
20 | % nothing to do (oneside is the default)
21 | \else
22 | \PassOptionsToClass{twoside}{\sphinxdocclass}
23 | \fi
24 |
25 | \LoadClass{\sphinxdocclass}
26 |
27 | % Set some sane defaults for section numbering depth and TOC depth. You can
28 | % reset these counters in your preamble.
29 | %
30 | \setcounter{secnumdepth}{2}
31 | \setcounter{tocdepth}{1}
32 |
33 | % Adapt \and command to the flushright context of \sphinxmaketitle, to
34 | % avoid ragged line endings if author names do not fit all on one single line
35 | \DeclareRobustCommand{\and}{%
36 | \end{tabular}\kern-\tabcolsep
37 | \allowbreak
38 | \hskip\dimexpr1em+\tabcolsep\@plus.17fil\begin{tabular}[t]{c}%
39 | }%
40 | % If it is desired that each author name be on its own line, use in preamble:
41 | %\DeclareRobustCommand{\and}{%
42 | % \end{tabular}\kern-\tabcolsep\\\begin{tabular}[t]{c}%
43 | %}%
44 | % Change the title page to look a bit better, and fit in with the fncychap
45 | % ``Bjarne'' style a bit better.
46 | %
47 | \newcommand{\sphinxmaketitle}{%
48 | \let\sphinxrestorepageanchorsetting\relax
49 | \ifHy@pageanchor\def\sphinxrestorepageanchorsetting{\Hy@pageanchortrue}\fi
50 | \hypersetup{pageanchor=false}% avoid duplicate destination warnings
51 | \begin{titlepage}%
52 | \let\footnotesize\small
53 | \let\footnoterule\relax
54 | \noindent\rule{\textwidth}{1pt}\par
55 | \begingroup % for PDF information dictionary
56 | \def\endgraf{ }\def\and{\& }%
57 | \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup
58 | \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}%
59 | \endgroup
60 | \begin{flushright}%
61 | \sphinxlogo
62 | \py@HeaderFamily
63 | {\Huge \@title \par}
64 | {\itshape\LARGE \py@release\releaseinfo \par}
65 | \vfill
66 | {\LARGE
67 | \begin{tabular}[t]{c}
68 | \@author
69 | \end{tabular}\kern-\tabcolsep
70 | \par}
71 | \vfill\vfill
72 | {\large
73 | \@date \par
74 | \vfill
75 | \py@authoraddress \par
76 | }%
77 | \end{flushright}%\par
78 | \@thanks
79 | \end{titlepage}%
80 | \setcounter{footnote}{0}%
81 | \let\thanks\relax\let\maketitle\relax
82 | %\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
83 | \clearpage
84 | \ifdefined\sphinxbackoftitlepage\sphinxbackoftitlepage\fi
85 | \if@openright\cleardoublepage\else\clearpage\fi
86 | \sphinxrestorepageanchorsetting
87 | }
88 |
89 | \newcommand{\sphinxtableofcontents}{%
90 | \pagenumbering{roman}%
91 | \begingroup
92 | \parskip \z@skip
93 | \sphinxtableofcontentshook
94 | \tableofcontents
95 | \endgroup
96 | % before resetting page counter, let's do the right thing.
97 | \if@openright\cleardoublepage\else\clearpage\fi
98 | \pagenumbering{arabic}%
99 | }
100 |
101 | % This is needed to get the width of the section # area wide enough in the
102 | % library reference. Doing it here keeps it the same for all the manuals.
103 | %
104 | \newcommand{\sphinxtableofcontentshook}{%
105 | \renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}}%
106 | \renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}}%
107 | }
108 |
109 | % Fix the bibliography environment to add an entry to the Table of
110 | % Contents.
111 | % For a report document class this environment is a chapter.
112 | %
113 | \newenvironment{sphinxthebibliography}[1]{%
114 | \if@openright\cleardoublepage\else\clearpage\fi
115 | % \phantomsection % not needed here since TeXLive 2010's hyperref
116 | \begin{thebibliography}{#1}%
117 | \addcontentsline{toc}{chapter}{\bibname}}{\end{thebibliography}}
118 |
119 | % Same for the indices.
120 | % The memoir class already does this, so we don't duplicate it in that case.
121 | %
122 | \@ifclassloaded{memoir}
123 | {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}
124 | {\newenvironment{sphinxtheindex}{%
125 | \if@openright\cleardoublepage\else\clearpage\fi
126 | \phantomsection % needed as no chapter, section, ... created
127 | \begin{theindex}%
128 | \addcontentsline{toc}{chapter}{\indexname}}{\end{theindex}}}
129 |
--------------------------------------------------------------------------------
/quality_documentation/sphinxmessages.sty:
--------------------------------------------------------------------------------
1 | %
2 | % sphinxmessages.sty
3 | %
4 | % message resources for Sphinx
5 | %
6 | \ProvidesPackage{sphinxmessages}[2019/01/04 v2.0 Localized LaTeX macros (Sphinx team)]
7 |
8 | \renewcommand{\literalblockcontinuedname}{continued from previous page}
9 | \renewcommand{\literalblockcontinuesname}{continues on next page}
10 | \renewcommand{\sphinxnonalphabeticalgroupname}{Non\sphinxhyphen{}alphabetical}
11 | \renewcommand{\sphinxsymbolsname}{Symbols}
12 | \renewcommand{\sphinxnumbersname}{Numbers}
13 | \def\pageautorefname{page}
14 |
15 | \addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }}
16 | \def\fnum@figure{\figurename\thefigure{}}
17 |
18 | \addto\captionsenglish{\renewcommand{\tablename}{Table }}
19 | \def\fnum@table{\tablename\thetable{}}
20 |
21 | \addto\captionsenglish{\renewcommand{\literalblockname}{Listing}}
--------------------------------------------------------------------------------
/quality_documentation/sphinxmulticell.sty:
--------------------------------------------------------------------------------
1 | \NeedsTeXFormat{LaTeX2e}
2 | \ProvidesPackage{sphinxmulticell}%
3 | [2017/02/23 v1.6 better span rows and columns of a table (Sphinx team)]%
4 | \DeclareOption*{\PackageWarning{sphinxmulticell}{Option `\CurrentOption' is unknown}}%
5 | \ProcessOptions\relax
6 | %
7 | % --- MULTICOLUMN ---
8 | % standard LaTeX's \multicolumn
9 | % 1. does not allow verbatim contents,
10 | % 2. interacts very poorly with tabulary.
11 | %
12 | % It is needed to write own macros for Sphinx: to allow code-blocks in merged
13 | % cells rendered by tabular/longtable, and to allow multi-column cells with
14 | % paragraphs to be taken into account sanely by tabulary algorithm for column
15 | % widths.
16 | %
17 | % This requires quite a bit of hacking. First, in Sphinx, the multi-column
18 | % contents will *always* be wrapped in a varwidth environment. The issue
19 | % becomes to pass it the correct target width. We must trick tabulary into
20 | % believing the multicolumn is simply separate columns, else tabulary does not
21 | % incorporate the contents in its algorithm. But then we must clear the
22 | % vertical rules...
23 | %
24 | % configuration of tabulary
25 | \setlength{\tymin}{3\fontcharwd\font`0 }% minimal width of "squeezed" columns
26 | \setlength{\tymax}{10000pt}% allow enough room for paragraphs to "compete"
27 | % we need access to tabulary's final computed width. \@tempdima is too volatile
28 | % to hope it has kept tabulary's value when \sphinxcolwidth needs it.
29 | \newdimen\sphinx@TY@tablewidth
30 | \def\tabulary{%
31 | \def\TY@final{\sphinx@TY@tablewidth\@tempdima\tabular}%
32 | \let\endTY@final\endtabular
33 | \TY@tabular}%
34 | % next hack is needed only if user has set latex_use_latex_multicolumn to True:
35 | % it fixes tabulary's bug with \multicolumn defined "short" in first pass. (if
36 | % upstream tabulary adds a \long, our extra one causes no harm)
37 | \def\sphinx@tempa #1\def\multicolumn#2#3#4#5#6#7#8#9\sphinx@tempa
38 | {\def\TY@tab{#1\long\def\multicolumn####1####2####3{\multispan####1\relax}#9}}%
39 | \expandafter\sphinx@tempa\TY@tab\sphinx@tempa
40 | %
41 | % TN. 1: as \omit is never executed, Sphinx multicolumn does not need to worry
42 | % like standard multicolumn about |l| vs l|. On the other hand it assumes
43 | % columns are separated by a | ... (if not it will add extraneous
44 | % \arrayrulewidth space for each column separation in its estimate of available
45 | % width).
46 | %
47 | % TN. 1b: as Sphinx multicolumn uses neither \omit nor \span, it can not
48 | % (easily) get rid of extra macros from >{...} or <{...} between columns. At
49 | % least, it has been made compatible with colortbl's \columncolor.
50 | %
51 | % TN. 2: tabulary's second pass is handled like tabular/longtable's single
52 | % pass, with the difference that we hacked \TY@final to set in
53 | % \sphinx@TY@tablewidth the final target width as computed by tabulary. This is
54 | % needed only to handle columns with a "horizontal" specifier: "p" type columns
55 | % (inclusive of tabulary's LJRC) holds the target column width in the
56 | % \linewidth dimension.
57 | %
58 | % TN. 3: use of \begin{sphinxmulticolumn}...\end{sphinxmulticolumn} mark-up
59 | % would need some hacking around the fact that groups can not span across table
60 | % cells (the code does insert & tokens, see TN1b). It was decided to keep it
61 | % simple with \sphinxstartmulticolumn...\sphinxstopmulticolumn.
62 | %
63 | % MEMO about nesting: if sphinxmulticolumn is encountered in a nested tabular
64 | % inside a tabulary it will think to be at top level in the tabulary. But
65 | % Sphinx generates no nested tables, and if some LaTeX macro uses internally a
66 | % tabular this will not have a \sphinxstartmulticolumn within it!
67 | %
68 | \def\sphinxstartmulticolumn{%
69 | \ifx\equation$% $ tabulary's first pass
70 | \expandafter\sphinx@TYI@start@multicolumn
71 | \else % either not tabulary or tabulary's second pass
72 | \expandafter\sphinx@start@multicolumn
73 | \fi
74 | }%
75 | \def\sphinxstopmulticolumn{%
76 | \ifx\equation$% $ tabulary's first pass
77 | \expandafter\sphinx@TYI@stop@multicolumn
78 | \else % either not tabulary or tabulary's second pass
79 | \ignorespaces
80 | \fi
81 | }%
82 | \def\sphinx@TYI@start@multicolumn#1{%
83 | % use \gdef always to avoid stack space build up
84 | \gdef\sphinx@tempa{#1}\begingroup\setbox\z@\hbox\bgroup
85 | }%
86 | \def\sphinx@TYI@stop@multicolumn{\egroup % varwidth was used with \tymax
87 | \xdef\sphinx@tempb{\the\dimexpr\wd\z@/\sphinx@tempa}% per column width
88 | \endgroup
89 | \expandafter\sphinx@TYI@multispan\expandafter{\sphinx@tempa}%
90 | }%
91 | \def\sphinx@TYI@multispan #1{%
92 | \kern\sphinx@tempb\ignorespaces % the per column occupied width
93 | \ifnum#1>\@ne % repeat, taking into account subtleties of TeX's & ...
94 | \expandafter\sphinx@TYI@multispan@next\expandafter{\the\numexpr#1-\@ne\expandafter}%
95 | \fi
96 | }%
97 | \def\sphinx@TYI@multispan@next{&\relax\sphinx@TYI@multispan}%
98 | %
99 | % Now the branch handling either the second pass of tabulary or the single pass
100 | % of tabular/longtable. This is the delicate part where we gather the
101 | % dimensions from the p columns either set-up by tabulary or by user p column
102 | % or Sphinx \X, \Y columns. The difficulty is that to get the said width, the
103 | % template must be inserted (other hacks would be horribly complicated except
104 | % if we rewrote crucial parts of LaTeX's \@array !) and we can not do
105 | % \omit\span like standard \multicolumn's easy approach. Thus we must cancel
106 | % the \vrule separators. Also, perhaps the column specifier is of the l, c, r
107 | % type, then we attempt an ad hoc rescue to give varwidth a reasonable target
108 | % width.
109 | \def\sphinx@start@multicolumn#1{%
110 | \gdef\sphinx@multiwidth{0pt}\gdef\sphinx@tempa{#1}\sphinx@multispan{#1}%
111 | }%
112 | \def\sphinx@multispan #1{%
113 | \ifnum#1=\@ne\expandafter\sphinx@multispan@end
114 | \else\expandafter\sphinx@multispan@next
115 | \fi {#1}%
116 | }%
117 | \def\sphinx@multispan@next #1{%
118 | % trick to recognize L, C, R, J or p, m, b type columns
119 | \ifdim\baselineskip>\z@
120 | \gdef\sphinx@tempb{\linewidth}%
121 | \else
122 | % if in an l, r, c type column, try and hope for the best
123 | \xdef\sphinx@tempb{\the\dimexpr(\ifx\TY@final\@undefined\linewidth\else
124 | \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa
125 | -\tw@\tabcolsep-\arrayrulewidth\relax}%
126 | \fi
127 | \noindent\kern\sphinx@tempb\relax
128 | \xdef\sphinx@multiwidth
129 | {\the\dimexpr\sphinx@multiwidth+\sphinx@tempb+\tw@\tabcolsep+\arrayrulewidth}%
130 | % hack the \vline and the colortbl macros
131 | \sphinx@hack@vline\sphinx@hack@CT&\relax
132 | % repeat
133 | \expandafter\sphinx@multispan\expandafter{\the\numexpr#1-\@ne}%
134 | }%
135 | % packages like colortbl add group levels, we need to "climb back up" to be
136 | % able to hack the \vline and also the colortbl inserted tokens. This creates
137 | % empty space whether or not the columns were | separated:
138 | \def\sphinx@hack@vline{\ifnum\currentgrouptype=6\relax
139 | \kern\arrayrulewidth\arrayrulewidth\z@\else\aftergroup\sphinx@hack@vline\fi}%
140 | \def\sphinx@hack@CT{\ifnum\currentgrouptype=6\relax
141 | \let\CT@setup\sphinx@CT@setup\else\aftergroup\sphinx@hack@CT\fi}%
142 | % It turns out \CT@row@color is not expanded contrarily to \CT@column@color
143 | % during LaTeX+colortbl preamble preparation, hence it would be possible for
144 | % \sphinx@CT@setup to discard only the column color and choose to obey or not
145 | % row color and cell color. It would even be possible to propagate cell color
146 | % to row color for the duration of the Sphinx multicolumn... the (provisional?)
147 | % choice has been made to cancel the colortbl colours for the multicolumn
148 | % duration.
149 | \def\sphinx@CT@setup #1\endgroup{\endgroup}% hack to remove colour commands
150 | \def\sphinx@multispan@end#1{%
151 | % first, trace back our steps horizontally
152 | \noindent\kern-\dimexpr\sphinx@multiwidth\relax
153 | % and now we set the final computed width for the varwidth environment
154 | \ifdim\baselineskip>\z@
155 | \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+\linewidth}%
156 | \else
157 | \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+
158 | (\ifx\TY@final\@undefined\linewidth\else
159 | \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa
160 | -\tw@\tabcolsep-\arrayrulewidth\relax}%
161 | \fi
162 | % we need to remove colour set-up also for last cell of the multi-column
163 | \aftergroup\sphinx@hack@CT
164 | }%
165 | \newcommand*\sphinxcolwidth[2]{%
166 | % this dimension will always be used for varwidth, and serves as maximum
167 | % width when cells are merged either via multirow or multicolumn or both,
168 | % as always their contents is wrapped in varwidth environment.
169 | \ifnum#1>\@ne % multi-column (and possibly also multi-row)
170 | % we wrote our own multicolumn code especially to handle that (and allow
171 | % verbatim contents)
172 | \ifx\equation$%$
173 | \tymax % first pass of tabulary (cf MEMO above regarding nesting)
174 | \else % the \@gobble thing is for compatibility with standard \multicolumn
175 | \sphinx@multiwidth\@gobble{#1/#2}%
176 | \fi
177 | \else % single column multirow
178 | \ifx\TY@final\@undefined % not a tabulary.
179 | \ifdim\baselineskip>\z@
180 | % in a p{..} type column, \linewidth is the target box width
181 | \linewidth
182 | \else
183 | % l, c, r columns. Do our best.
184 | \dimexpr(\linewidth-\arrayrulewidth)/#2-
185 | \tw@\tabcolsep-\arrayrulewidth\relax
186 | \fi
187 | \else % in tabulary
188 | \ifx\equation$%$% first pass
189 | \tymax % it is set to a big value so that paragraphs can express themselves
190 | \else
191 | % second pass.
192 | \ifdim\baselineskip>\z@
193 | \linewidth % in a L, R, C, J column or a p, \X, \Y ...
194 | \else
195 | % we have hacked \TY@final to put in \sphinx@TY@tablewidth the table width
196 | \dimexpr(\sphinx@TY@tablewidth-\arrayrulewidth)/#2-
197 | \tw@\tabcolsep-\arrayrulewidth\relax
198 | \fi
199 | \fi
200 | \fi
201 | \fi
202 | }%
203 | % fallback default in case user has set latex_use_latex_multicolumn to True:
204 | % \sphinxcolwidth will use this only inside LaTeX's standard \multicolumn
205 | \def\sphinx@multiwidth #1#2{\dimexpr % #1 to gobble the \@gobble (!)
206 | (\ifx\TY@final\@undefined\linewidth\else\sphinx@TY@tablewidth\fi
207 | -\arrayrulewidth)*#2-\tw@\tabcolsep-\arrayrulewidth\relax}%
208 | %
209 | % --- MULTIROW ---
210 | % standard \multirow
211 | % 1. does not allow verbatim contents,
212 | % 2. does not allow blank lines in its argument,
213 | % 3. its * specifier means to typeset "horizontally" which is very
214 | % bad for paragraph content. 2016 version has = specifier but it
215 | % must be used with p type columns only, else results are bad,
216 | % 4. it requires manual intervention if the contents is too long to fit
217 | % in the asked-for number of rows.
218 | % 5. colour panels (either from \rowcolor or \columncolor) will hide
219 | % the bottom part of multirow text, hence manual tuning is needed
220 | % to put the multirow insertion at the _bottom_.
221 | %
222 | % The Sphinx solution consists in always having contents wrapped
223 | % in a varwidth environment so that it makes sense to estimate how many
224 | % lines it will occupy, and then ensure by insertion of suitable struts
225 | % that the table rows have the needed height. The needed mark-up is done
226 | % by LaTeX writer, which has its own id for the merged cells.
227 | %
228 | % The colour issue is solved by clearing colour panels in all cells,
229 | % whether or not the multirow is single-column or multi-column.
230 | %
231 | % In passing we obtain baseline alignments across rows (only if
232 | % \arraystretch is 1, as LaTeX's does not obey \arraystretch in "p"
233 | % multi-line contents, only first and last line...)
234 | %
235 | % TODO: examine the situation with \arraystretch > 1. The \extrarowheight
236 | % is hopeless for multirow anyhow, it makes baseline alignment strictly
237 | % impossible.
238 | \newcommand\sphinxmultirow[2]{\begingroup
239 | % #1 = nb of spanned rows, #2 = Sphinx id of "cell", #3 = contents
240 | % but let's fetch #3 in a way allowing verbatim contents !
241 | \def\sphinx@nbofrows{#1}\def\sphinx@cellid{#2}%
242 | \afterassignment\sphinx@multirow\let\next=
243 | }%
244 | \def\sphinx@multirow {%
245 | \setbox\z@\hbox\bgroup\aftergroup\sphinx@@multirow\strut
246 | }%
247 | \def\sphinx@@multirow {%
248 | % The contents, which is a varwidth environment, has been captured in
249 | % \box0 (a \hbox).
250 | % We have with \sphinx@cellid an assigned unique id. The goal is to give
251 | % about the same height to all the involved rows.
252 | % For this Sphinx will insert a \sphinxtablestrut{cell_id} mark-up
253 | % in LaTeX file and the expansion of the latter will do the suitable thing.
254 | \dimen@\dp\z@
255 | \dimen\tw@\ht\@arstrutbox
256 | \advance\dimen@\dimen\tw@
257 | \advance\dimen\tw@\dp\@arstrutbox
258 | \count@=\dimen@ % type conversion dim -> int
259 | \count\tw@=\dimen\tw@
260 | \divide\count@\count\tw@ % TeX division truncates
261 | \advance\dimen@-\count@\dimen\tw@
262 | % 1300sp is about 0.02pt. For comparison a rule default width is 0.4pt.
263 | % (note that if \count@ holds 0, surely \dimen@>1300sp)
264 | \ifdim\dimen@>1300sp \advance\count@\@ne \fi
265 | % now \count@ holds the count L of needed "lines"
266 | % and \sphinx@nbofrows holds the number N of rows
267 | % we have L >= 1 and N >= 1
268 | % if L is a multiple of N, ... clear what to do !
269 | % else write L = qN + r, 1 <= r < N and we will
270 | % arrange for each row to have enough space for:
271 | % q+1 "lines" in each of the first r rows
272 | % q "lines" in each of the (N-r) bottom rows
273 | % for a total of (q+1) * r + q * (N-r) = q * N + r = L
274 | % It is possible that q == 0.
275 | \count\tw@\count@
276 | % the TeX division truncates
277 | \divide\count\tw@\sphinx@nbofrows\relax
278 | \count4\count\tw@ % q
279 | \multiply\count\tw@\sphinx@nbofrows\relax
280 | \advance\count@-\count\tw@ % r
281 | \expandafter\xdef\csname sphinx@tablestrut_\sphinx@cellid\endcsname
282 | {\noexpand\sphinx@tablestrut{\the\count4}{\the\count@}{\sphinx@cellid}}%
283 | \dp\z@\z@
284 | % this will use the real height if it is >\ht\@arstrutbox
285 | \sphinxtablestrut{\sphinx@cellid}\box\z@
286 | \endgroup % group was opened in \sphinxmultirow
287 | }%
288 | \newcommand*\sphinxtablestrut[1]{%
289 | % #1 is a "cell_id", i.e. the id of a merged group of table cells
290 | \csname sphinx@tablestrut_#1\endcsname
291 | }%
292 | % LaTeX typesets the table row by row, hence each execution can do
293 | % an update for the next row.
294 | \newcommand*\sphinx@tablestrut[3]{\begingroup
295 | % #1 = q, #2 = (initially) r, #3 = cell_id, q+1 lines in first r rows
296 | % if #2 = 0, create space for max(q,1) table lines
297 | % if #2 > 0, create space for q+1 lines and decrement #2
298 | \leavevmode
299 | \count@#1\relax
300 | \ifnum#2=\z@
301 | \ifnum\count@=\z@\count@\@ne\fi
302 | \else
303 | % next row will be with a #2 decremented by one
304 | \expandafter\xdef\csname sphinx@tablestrut_#3\endcsname
305 | {\noexpand\sphinx@tablestrut{#1}{\the\numexpr#2-\@ne}{#3}}%
306 | \advance\count@\@ne
307 | \fi
308 | \vrule\@height\ht\@arstrutbox
309 | \@depth\dimexpr\count@\ht\@arstrutbox+\count@\dp\@arstrutbox-\ht\@arstrutbox\relax
310 | \@width\z@
311 | \endgroup
312 | % we need this to avoid colour panels hiding bottom parts of multirow text
313 | \sphinx@hack@CT
314 | }%
315 | \endinput
316 | %%
317 | %% End of file `sphinxmulticell.sty'.
318 |
--------------------------------------------------------------------------------
/quality_documentation/uml/Diagram 2020-11-20 17-25-02.uxf:
--------------------------------------------------------------------------------
1 | 10UMLClass3805021070punpy package
2 | --
3 | mc module
4 | jacobian module
5 | utilities moduleUMLClass13017021080mc module
6 | --
7 | tests
8 | MCPropagation class
9 | MCMCRetrieval classUMLClass62017021070jacobian module
10 | --
11 | tests
12 | JacobianPropagation class
13 | JacobianRetrieval class
14 | Relation4701103080lt=-10;60;10;10Relation23011027080lt=-10;60;250;10Relation47011026080lt=-240;60;10;10UMLClass370170210120utilities module
15 | --
16 | tests
17 | calculate_Jacobian()
18 | convert_corr_to_cov()
19 | convert_cov_to_corr()
20 | nearestPD_cholesky()
21 | isPD()UMLClass500360210130JacobianPropagation class
22 | --
23 | parallel_cores: int
24 | --
25 | propagate_random()
26 | propagate_systematic()
27 | propagate_both()
28 | propagate_cov()
29 | process_jacobian()UMLClass730360210190JacobianRetrieval class
30 | --
31 | measurement_function: function
32 | observed: array
33 | rand_uncertainty: array
34 | syst_uncertainty: array
35 | invcov: array
36 | uplims: array
37 | downlims: array
38 | --
39 | run_retrieval()
40 | process_inverse_jacobian()
41 | find_chisum()UMLClass20360210270MCPropagation class
42 | --
43 | parallel_cores: int
44 | MCsteps: int
45 | pool: multiprocessing pool
46 | --
47 | propagate_random()
48 | propagate_systematic()
49 | propagate_cov()
50 | _perform_checks()
51 | select_repeated_x()
52 | _combine_repeated_outs()
53 | process_samples()
54 | calculate_corr()
55 | generate_samples_random()
56 | generate_samples_systematic()
57 | generate_samples_cov()
58 | correlate_samples_corr()UMLClass250360210240MCMCRetrieval class
59 | --
60 | measurement_function: function
61 | observed: array
62 | rand_uncertainty: array
63 | syst_uncertainty: array
64 | invcov: array
65 | uplims: array
66 | downlims: array
67 | parallel_cores: int
68 | --
69 | run_retrieval()
70 | find_chisum()
71 | lnlike()
72 | lnprior()
73 | lnprob()
74 |
75 | Relation600230130150lt=-10;130;110;10Relation710230130150lt=-110;130;10;10Relation230240140140lt=-120;120;10;10Relation100240160140lt=-10;120;140;10Relation51028070100lt=<..10;10;50;80Relation37028040100lt=<..20;10;10;80Relation180260210120lt=<..190;10;10;100Relation570260210120lt=<..10;10;190;100
--------------------------------------------------------------------------------
/quality_documentation/uml/Diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comet-toolkit/punpy/2f246de69f60e6f47097d0cd1f248aa2ec322881/quality_documentation/uml/Diagram.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # required modules
2 | numpy >= 1.18.1
3 | matplotlib >= 3.1.0
4 | xarray
5 | scipy
6 | obsarray >= 1.0.0
7 | comet_maths >= 1.0.0
8 |
9 | # for docs
10 | sphinx
11 | sphinx_design
12 | ipython
13 | sphinx_autosummary_accessors
14 | sphinx_book_theme
15 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 |
2 |
3 | [flake8]
4 | exclude = docs
5 | max-line-length = 120
6 | extend-ignore = W291
7 |
8 |
9 | [mypy]
10 | allow_untyped_globals = True
11 | ignore_missing_imports = True
12 |
13 |
14 | [mypy-setup]
15 | ignore_errors = True
16 |
17 | [mypy-tests]
18 | ignore_errors = True
19 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import re
4 | from distutils.core import setup
5 |
6 | from setuptools import find_packages
7 |
8 | exec(open("punpy/_version.py").read())
9 |
10 | # from Cython.Build import cythonize
11 |
12 |
def read(filename):
    """Read *filename* (resolved relative to this setup.py) and return its text.

    Sphinx-style roles such as ``:class:`~foo.Bar``` are rewritten to plain
    double-backtick literals (````foo.Bar````) so the long description renders
    on PyPI, which does not understand Sphinx markup.

    :param filename: path relative to the directory containing setup.py
    :return: file contents with Sphinx roles rewritten
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    # Historical note: this used to wrap the regex strings in
    # ``text_type = type("")`` for Python 2/3 compatibility; on Python 3
    # that was a no-op, so the relic has been dropped.
    with io.open(path, mode="r", encoding="utf-8") as fd:
        # :role:`~target` / :role:`target`  ->  ``target``
        return re.sub(r":[a-z]+:`~?(.*?)`", r"``\1``", fd.read())
18 |
19 |
20 | # extensions = [Extension(
21 | # name="utilities",
22 | # sources=["punpy/utilities/utilities.pyx"],
23 | # include_dirs=[numpy.get_include()],
24 | # )
25 | # ]
26 |
# Package metadata for punpy. ``__version__`` is injected by the
# exec() of punpy/_version.py earlier in this file.
# NOTE(review): install_requires pins comet_maths>=1.0.1 / obsarray>=1.0.1,
# which is stricter than requirements.txt (>=1.0.0) — confirm which floor
# is intended and align the two files.
27 | setup(
28 |     version=__version__,
29 |     name="punpy",
30 |     url="https://github.com/comet-toolkit/punpy",
31 |     license="LGPLv3",
32 |     author="CoMet Toolkit Team",
33 |     author_email="team@comet-toolkit.org",
34 |     description="Propagating UNcertainties in PYthon",
35 |     long_description=read("README.md"),
36 |     packages=find_packages(exclude=("tests",)),
37 |     install_requires=[
38 |         "comet_maths>=1.0.1",
39 |         "obsarray>=1.0.1",
40 |     ],
41 |     extras_require={
42 |         "dev": [
43 |             "pre-commit",
44 |             "tox",
45 |             "sphinx",
46 |             "sphinx_design",
47 |             "sphinx_book_theme",
48 |             "ipython",
49 |             "sphinx_autosummary_accessors",
50 |         ],
51 |         # ':python_version >= "3.9"': "xarray>=2023.6.0",
52 |         # ':python_version < "3.9"': "xarray==0.19.0",
53 |     },
54 |     # ext_modules=cythonize(extensions),
55 | )
56 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [coverage:run]
2 | omit =
3 |     punpy/_*
4 |
5 |
6 | [pytest]
7 | addopts = -p no:warnings
8 |
9 | [testenv]
# renamed from "whitelist_externals": that spelling was deprecated in
# tox 3.18 and removed in tox 4, where it breaks the env setup
10 | allowlist_externals = git
11 | setenv =
12 |     PYTHONPATH = {toxinidir}
13 | passenv = *
14 | deps =
15 |     pytest-html
16 |     pytest-cov
17 | commands =
18 |     pytest --html=test_report/report.html
19 |     pytest --cov-report html:test_report/cov_report --cov=punpy
--------------------------------------------------------------------------------