├── .devcontainer
│   ├── devcontainer.json
│   └── docker-compose.yml
├── .github
│   └── workflows
│       └── pypi_deployer.yml
├── .gitignore
├── .travis.yml
├── .vscode
│   ├── launch.json
│   └── settings.json
├── LICENCE
├── MANIFEST.in
├── README.rst
├── docker-compose.yml
├── docs
│   ├── .gitignore
│   ├── Ketchametal200GMRpreprint.pdf
│   ├── examples
│   │   ├── .gitignore
│   │   ├── Age Calculations.ipynb
│   │   ├── Chi2 Tests.ipynb
│   │   ├── Manual.ipynb
│   │   ├── Sample.ipynb
│   │   ├── Sample_datasets.ipynb
│   │   ├── The_External_Detector_Method.ipynb
│   │   ├── ThermalHistory.ipynb
│   │   ├── ThermalHistory_Generator.ipynb
│   │   ├── Viewer.ipynb
│   │   └── WOLF_Histories_ForwardModels.ipynb
│   ├── images
│   │   ├── image1.png
│   │   └── logo.png
│   └── readthedocs
│       ├── Makefile
│       ├── make.bat
│       ├── requirements.txt
│       └── src
│           ├── Installation.rst
│           ├── UserGuide.rst
│           ├── conf.py
│           ├── img
│           │   └── logo.png
│           └── index.rst
├── joss
│   ├── paper.bib
│   └── paper.md
├── pyFTracks
│   ├── .gitignore
│   ├── __init__.py
│   ├── age_calculations.py
│   ├── annealing.pyx
│   ├── path_generators.py
│   ├── radialplot
│   │   ├── FTradialplot.py
│   │   ├── __init__.py
│   │   ├── age_calculations.py
│   │   ├── radialplot.py
│   │   └── utilities.py
│   ├── ressources
│   │   ├── .gitignore
│   │   ├── Gleadow.h5
│   │   ├── Miller.h5
│   │   ├── __init__.py
│   │   └── __pycache__
│   │       └── __init__.cpython-37.pyc
│   ├── structures.py
│   ├── thermal_history.pyx
│   ├── utilities.py
│   └── viewer.py
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests
    ├── __init__.py
    └── test_simple.py
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | // If you want to run as a non-root user in the container, see .devcontainer/docker-compose.yml.
2 | {
3 | "name": "Existing Docker Compose (Extend)",
4 |
5 | // Update the 'dockerComposeFile' list if you have more compose files or use different names.
6 | // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
7 | "dockerComposeFile": [
8 | "../docker-compose.yml",
9 | "docker-compose.yml"
10 | ],
11 |
12 | // The 'service' property is the name of the service for the container that VS Code should
13 | // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
14 | "service": "pyFTracks",
15 |
16 | // The optional 'workspaceFolder' property is the path VS Code should open by default when
17 | // connected. This is typically a file mount in .devcontainer/docker-compose.yml
18 | "workspaceFolder": "/opt/pyFTracks",
19 |
20 | // Use 'settings' to set *default* container specific settings.json values on container create.
21 | // You can edit these settings after create using File > Preferences > Settings > Remote.
22 | "settings": {
23 | // This will ignore your local shell user setting for Linux since shells like zsh are typically
24 | // not in base container images. You can also update this to a specific shell to ensure VS Code
25 | // uses the right one for terminals and tasks. For example, /bin/bash (or /bin/ash for Alpine).
26 | "terminal.integrated.shell.linux": null
27 | },
28 |
29 | // Uncomment the next line if you want to start specific services in your Docker Compose config.
30 | // "runServices": [],
31 |
32 | // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
33 | // "shutdownAction": "none",
34 |
35 | // Uncomment the next line to run commands after the container is created - for example installing git.
36 | // "postCreateCommand": "apt-get update && apt-get install -y git",
37 |
38 | // Add the IDs of extensions you want installed when the container is created in the array below.
39 | "extensions": ["ms-python.python", "ms-vscode.cpptools"]
40 | }
41 |
--------------------------------------------------------------------------------
/.devcontainer/docker-compose.yml:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
4 | #-------------------------------------------------------------------------------------------------------------
5 |
6 | version: '3.7'
7 | services:
8 | # Update this to the name of the service you want to work with in your docker-compose.yml file
9 | pyFTracks:
10 | # You may want to add a non-root user to your Dockerfile. On Linux, this will prevent
11 | # new files getting created as root. See https://aka.ms/vscode-remote/containers/non-root-user
12 | # for the needed Dockerfile updates and then uncomment the next line.
13 | user: root
14 | privileged: true
15 |
16 | # Uncomment if you want to add a different Dockerfile in the .devcontainer folder
17 | # build:
18 | # context: .
19 | # dockerfile: Dockerfile
20 |
21 | # Uncomment if you want to expose any additional ports. The snippet below exposes port 3000.
22 | # ports:
23 | # - 3000:3000
24 |
25 | volumes:
26 | # Update this to wherever you want VS Code to mount the folder of your project
27 | - .:/opt/pyFTracks
28 |
29 | # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-in-docker-compose for details.
30 | # - /var/run/docker.sock:/var/run/docker.sock
31 |
32 | # Uncomment the next four lines if you will use a ptrace-based debugger with languages like C++, Go, and Rust.
33 | # cap_add:
34 | # - SYS_PTRACE
35 | # security_opt:
36 | # - seccomp:unconfined
37 |
38 | # Overrides default command so things don't shut down after the process ends.
39 | command: /bin/sh -c "while sleep 1000; do :; done"
40 |
41 |
--------------------------------------------------------------------------------
/.github/workflows/pypi_deployer.yml:
--------------------------------------------------------------------------------
1 | name: pypi deployer
2 | on:
3 | push:
4 | tags:
5 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
6 | jobs:
7 | # Release new version on github releases
8 | Github-release:
9 | name: Create Release
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v2
14 | - name: Create Release
15 | id: create_release
16 | uses: actions/create-release@v1
17 | env:
18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
19 | with:
20 | tag_name: ${{ github.ref }}
21 | release_name: Release ${{ github.ref }}
22 | draft: false
23 | prerelease: true
24 |
25 | # Build and deploy manylinux wheel
26 | Linux-build:
27 | runs-on: ubuntu-latest
28 | env:
29 | TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
30 | TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
31 | steps:
32 | - uses: actions/checkout@v2
33 | - name: build and upload manylinux wheels
34 | uses: Niraj-Kamdar/manylinux-wheel-builder@master
35 | with:
36 | python-versions: "3.*"
37 | # if true then github actions won't stop even if build for this job fails
38 | continue-on-error: true
39 |
40 | # deploy source distribution
41 | Source-dist:
42 | runs-on: windows-latest
43 | env:
44 | TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
45 | TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
46 | steps:
47 | - uses: actions/checkout@v2
48 | - name: Set up Python
49 | uses: actions/setup-python@v2
50 | with:
51 | python-version: 3.7
52 | - name: create source distribution
53 | run: python setup.py sdist
54 | - name: upload source distribution
55 | run: |
56 | pip install twine
57 | twine upload dist/*
58 | continue-on-error: true
59 |
60 | # Build and deploy wheels for macOS and Windows using the setup-python action.
61 | # This has nothing to do with manylinux-wheel-builder.
62 | # They are included here for completeness.
63 | Matrix-build:
64 | runs-on: ${{ matrix.os }}
65 | env:
66 | TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
67 | TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
68 | strategy:
69 | matrix:
70 | os: [macos-latest, windows-latest]
71 | python-version: [3.6, 3.7, 3.8]
72 | steps:
73 | - uses: actions/checkout@v2
74 | - name: Set up Python
75 | uses: actions/setup-python@v2
76 | with:
77 | python-version: ${{ matrix.python-version }}
78 | - name: build wheel
79 | run: |
80 | pip install wheel
81 | python setup.py bdist_wheel
82 | - name: upload wheel
83 | run: |
84 | pip install twine
85 | twine upload dist/*
86 | continue-on-error: true
87 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | dist
3 | *.so
4 | .eggs
5 | *.egg-info
6 | material
7 | *.pyc
8 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # As written, this configuration will build your wheels on every
2 | # commit, but will only push to PyPI on tagged commits.
3 |
4 | language: python
5 |
6 | jobs:
7 | include:
8 | # perform a linux build
9 | - services: docker
10 | # and a mac build
11 | - os: osx
12 | language: shell
13 | # and a windows build
14 | - os: windows
15 | language: shell
16 | before_install:
17 | - choco install python --version 3.8.0
18 | - export PATH="/c/Python38:/c/Python38/Scripts:$PATH"
19 |
20 | env:
21 | global:
22 | # Skip building on Python 2.7 on all platforms
23 | - CIBW_SKIP="cp27-*"
24 | - TWINE_USERNAME=__token__
25 | # Note: TWINE_PASSWORD is set to a PyPI API token in Travis settings
26 |
27 | install:
28 | - python -m pip install twine cibuildwheel==1.1.0
29 |
30 | script:
31 | # build the wheels, put them into './wheelhouse'
32 | - python -m cibuildwheel --output-dir wheelhouse
33 |
34 | after_success:
35 | # if the release was tagged, upload them to PyPI
36 | - |
37 | if [[ $TRAVIS_TAG ]]; then
38 | python -m twine upload wheelhouse/*.whl
39 | fi
40 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "GDB Attach",
9 | "type": "cppdbg",
10 | "request": "attach",
11 | "program": "/usr/bin/python3",
12 | "processId": "${command:pickProcess}",
13 | "MIMode": "gdb",
14 | "setupCommands": [
15 | {
16 | "description": "Enable pretty-printing for gdb",
17 | "text": "-enable-pretty-printing",
18 | "ignoreFailures": false
19 | }
20 | ]
21 | },
22 | {
23 | "name": "Python: Current File",
24 | "type": "python",
25 | "request": "launch",
26 | "program": "${file}",
27 | "console": "integratedTerminal",
28 | "justMyCode": false,
29 | },
30 | ]
31 | }
32 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.pythonPath": "/usr/bin/python",
3 | "restructuredtext.confPath": "",
4 | "python.testing.pytestArgs": [
5 | "tests"
6 | ],
7 | "python.testing.unittestEnabled": false,
8 | "python.testing.nosetestsEnabled": false,
9 | "python.testing.pytestEnabled": true
10 | }
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Romain Beucher
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENCE
2 | include docs/*
3 | include requirements.txt
4 | include README.rst
5 | include setup.cfg
6 | include pyFTracks/ressources/*.h5
7 | recursive-include pyFTracks *.c *.h *.pyx
8 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 |
2 | .. image:: https://raw.githubusercontent.com/rbeucher/pyFTracks/master/docs/images/logo.png
3 | :align: center
4 |
5 | ================================================
6 | Fission Track Modelling and Analysis with Python
7 | ================================================
8 |
9 | .. image:: https://img.shields.io/pypi/v/pyftracks.svg
10 | :target: https://pypi.python.org/pypi/pyftracks
11 | :alt: Pip
12 | .. image:: https://www.travis-ci.org/rbeucher/pyFTracks.svg?branch=master
13 | :alt: Travis
14 | .. image:: https://mybinder.org/badge_logo.svg
15 | :target: https://mybinder.org/v2/gh/rbeucher/pyFTracks.git/master
16 | :alt: Logo
17 |
18 |
19 | pyFTracks is a Python utility which predicts fission-track ages and track-length
20 | distributions for given thermal histories and kinetic parameters.
21 | It is an open-source alternative to programs such as AFTSolve or HeFTy, developed by
22 | Richard Ketcham and described in Ketcham (2000, 2005).
23 |
24 | We provide the code in the hope that it will be useful to the community.
25 |
26 | We have chosen Python to allow interaction with the broad range of scientific libraries
27 | available in that language. Python has become a language of choice for teaching programming,
28 | and it also has many advantages for research workflows, such as rapid prototyping and interactivity.
29 |
30 |
31 | .. image:: https://raw.githubusercontent.com/rbeucher/pyFTracks/master/docs/images/image1.png
32 | :align: center
33 |
34 |
35 | ------------
36 | Installation
37 | ------------
38 |
39 | The code is available on PyPI and should work on any Linux distribution, macOS and Windows 10.
40 | To install it, just run:
41 |
42 | .. code:: bash
43 |
44 | pip install pyFTracks
45 |
46 | in the console.
47 |
48 | You can also install the package from the latest GitHub source with:
49 |
50 | .. code:: bash
51 |
52 | pip install git+https://github.com/rbeucher/pyFTracks.git
53 |
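A quick way to check the installation is to reproduce the first steps of the
``docs/examples/Age Calculations.ipynb`` notebook bundled with the package: load the
Miller dataset shipped in ``pyFTracks.ressources`` and compute a central age from its
track counts (the zeta, rhod and Nd values below are the ones used in that notebook).

.. code:: python

    from pyFTracks.ressources import Miller
    from pyFTracks.age_calculations import calculate_central_age

    Ns = Miller["Ns"]   # spontaneous track counts
    Ni = Miller["Ni"]   # induced track counts

    zeta, zeta_err = 350., 10. / 350.   # zeta calibration factor and its relative error
    rhod, Nd = 1.304, 2936              # dosimeter track density and track count

    print(calculate_central_age(Ns, Ni, zeta, zeta_err, rhod, Nd))
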
54 | ------------
55 | Dependencies
56 | ------------
57 |
58 | The pip install should take care of the dependencies; if not, you may want to
59 | check that you have the following packages installed on your system:
60 |
61 | - Python >= 3.5.x
62 | - Cython >= 0.29.14
63 | - matplotlib >= 3.1.1
64 | - numpy >= 1.17.4
65 | - scipy >= 1.3.2
66 | - pandas >= 0.25.3
67 | - tables >= 3.6.1
68 |
69 | -----------
70 | Recommended
71 | -----------
72 | We recommend using Jupyter:
73 |
74 | - jupyter
75 |
76 | ---------
77 | Licensing
78 | ---------
79 |
80 | pyFTracks is an open-source project licensed under the MIT License. See the LICENCE file for details.
81 |
82 | ------------
83 | Contributing
84 | ------------
85 |
86 | -------
87 | Contact
88 | -------
89 |
90 | Dr Romain BEUCHER,
91 | The Australian National University
92 | romain.beucher@anu.edu.au
93 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 | services:
3 |
4 | pyFTracks:
5 | build:
6 | context: .
7 | dockerfile: ./Dockerfile
8 | container_name: pyFTracks
9 | image: rbeucher/pyftracks:latest
10 | command: "jupyter notebook --ip=0.0.0.0 --no-browser"
11 | ports:
12 | - "8888:8888"
13 | volumes:
14 | - $PWD:/opt/pyFTracks
15 | - $PWD/docs:/home/jovyan
16 | - /tmp/.X11-unix:/tmp/.X11-unix
17 | # Link display (set xhost + on localhost)
18 | environment:
19 | - DISPLAY=unix$DISPLAY
20 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 |
--------------------------------------------------------------------------------
/docs/Ketchametal200GMRpreprint.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/docs/Ketchametal200GMRpreprint.pdf
--------------------------------------------------------------------------------
/docs/examples/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 |
--------------------------------------------------------------------------------
/docs/examples/Age Calculations.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from pyFTracks.ressources import Miller"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 3,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "data": {
19 | "text/html": [
20 | "
\n",
21 | "\n",
34 | "
\n",
35 | " \n",
36 | " \n",
37 | " | \n",
38 | " Ns | \n",
39 | " Ni | \n",
40 | " A | \n",
41 | " Ns/Ni | \n",
42 | " RhoS | \n",
43 | " RhoI | \n",
44 | " Ages | \n",
45 | " Ages Errors | \n",
46 | "
\n",
47 | " \n",
48 | " \n",
49 | " \n",
50 | " 0 | \n",
51 | " 31 | \n",
52 | " 41 | \n",
53 | " 40 | \n",
54 | " 0.756098 | \n",
55 | " 0.861111 | \n",
56 | " 1.138889 | \n",
57 | " 170.272777 | \n",
58 | " 40.938206 | \n",
59 | "
\n",
60 | " \n",
61 | " 1 | \n",
62 | " 19 | \n",
63 | " 22 | \n",
64 | " 20 | \n",
65 | " 0.863636 | \n",
66 | " 1.055556 | \n",
67 | " 1.222222 | \n",
68 | " 194.129222 | \n",
69 | " 61.156267 | \n",
70 | "
\n",
71 | " \n",
72 | " 2 | \n",
73 | " 56 | \n",
74 | " 63 | \n",
75 | " 60 | \n",
76 | " 0.888889 | \n",
77 | " 1.037037 | \n",
78 | " 1.166667 | \n",
79 | " 199.718474 | \n",
80 | " 37.303586 | \n",
81 | "
\n",
82 | " \n",
83 | " 3 | \n",
84 | " 67 | \n",
85 | " 71 | \n",
86 | " 80 | \n",
87 | " 0.943662 | \n",
88 | " 0.930556 | \n",
89 | " 0.986111 | \n",
90 | " 211.825011 | \n",
91 | " 36.791026 | \n",
92 | "
\n",
93 | " \n",
94 | " 4 | \n",
95 | " 88 | \n",
96 | " 90 | \n",
97 | " 90 | \n",
98 | " 0.977778 | \n",
99 | " 1.086420 | \n",
100 | " 1.111111 | \n",
101 | " 219.354179 | \n",
102 | " 33.720434 | \n",
103 | "
\n",
104 | " \n",
105 | " 5 | \n",
106 | " 6 | \n",
107 | " 7 | \n",
108 | " 15 | \n",
109 | " 0.857143 | \n",
110 | " 0.444444 | \n",
111 | " 0.518519 | \n",
112 | " 192.691202 | \n",
113 | " 107.403652 | \n",
114 | "
\n",
115 | " \n",
116 | " 6 | \n",
117 | " 18 | \n",
118 | " 14 | \n",
119 | " 20 | \n",
120 | " 1.285714 | \n",
121 | " 1.000000 | \n",
122 | " 0.777778 | \n",
123 | " 286.919063 | \n",
124 | " 102.707829 | \n",
125 | "
\n",
126 | " \n",
127 | " 7 | \n",
128 | " 40 | \n",
129 | " 41 | \n",
130 | " 40 | \n",
131 | " 0.975610 | \n",
132 | " 1.111111 | \n",
133 | " 1.138889 | \n",
134 | " 218.875970 | \n",
135 | " 49.209223 | \n",
136 | "
\n",
137 | " \n",
138 | " 8 | \n",
139 | " 36 | \n",
140 | " 49 | \n",
141 | " 40 | \n",
142 | " 0.734694 | \n",
143 | " 1.000000 | \n",
144 | " 1.361111 | \n",
145 | " 165.514024 | \n",
146 | " 36.766051 | \n",
147 | "
\n",
148 | " \n",
149 | " 9 | \n",
150 | " 54 | \n",
151 | " 79 | \n",
152 | " 60 | \n",
153 | " 0.683544 | \n",
154 | " 1.000000 | \n",
155 | " 1.462963 | \n",
156 | " 154.127518 | \n",
157 | " 27.714542 | \n",
158 | "
\n",
159 | " \n",
160 | " 10 | \n",
161 | " 35 | \n",
162 | " 52 | \n",
163 | " 40 | \n",
164 | " 0.673077 | \n",
165 | " 0.972222 | \n",
166 | " 1.444444 | \n",
167 | " 151.794873 | \n",
168 | " 33.587209 | \n",
169 | "
\n",
170 | " \n",
171 | " 11 | \n",
172 | " 52 | \n",
173 | " 76 | \n",
174 | " 70 | \n",
175 | " 0.684211 | \n",
176 | " 0.825397 | \n",
177 | " 1.206349 | \n",
178 | " 154.275956 | \n",
179 | " 28.256344 | \n",
180 | "
\n",
181 | " \n",
182 | " 12 | \n",
183 | " 51 | \n",
184 | " 74 | \n",
185 | " 49 | \n",
186 | " 0.689189 | \n",
187 | " 1.156463 | \n",
188 | " 1.678005 | \n",
189 | " 155.385125 | \n",
190 | " 28.768616 | \n",
191 | "
\n",
192 | " \n",
193 | " 13 | \n",
194 | " 47 | \n",
195 | " 66 | \n",
196 | " 50 | \n",
197 | " 0.712121 | \n",
198 | " 1.044444 | \n",
199 | " 1.466667 | \n",
200 | " 160.491558 | \n",
201 | " 31.114275 | \n",
202 | "
\n",
203 | " \n",
204 | " 14 | \n",
205 | " 27 | \n",
206 | " 39 | \n",
207 | " 36 | \n",
208 | " 0.692308 | \n",
209 | " 0.833333 | \n",
210 | " 1.203704 | \n",
211 | " 156.079781 | \n",
212 | " 39.434473 | \n",
213 | "
\n",
214 | " \n",
215 | " 15 | \n",
216 | " 36 | \n",
217 | " 44 | \n",
218 | " 40 | \n",
219 | " 0.818182 | \n",
220 | " 1.000000 | \n",
221 | " 1.222222 | \n",
222 | " 184.056340 | \n",
223 | " 41.834664 | \n",
224 | "
\n",
225 | " \n",
226 | " 16 | \n",
227 | " 64 | \n",
228 | " 86 | \n",
229 | " 50 | \n",
230 | " 0.744186 | \n",
231 | " 1.422222 | \n",
232 | " 1.911111 | \n",
233 | " 167.624884 | \n",
234 | " 28.253537 | \n",
235 | "
\n",
236 | " \n",
237 | " 17 | \n",
238 | " 68 | \n",
239 | " 90 | \n",
240 | " 50 | \n",
241 | " 0.755556 | \n",
242 | " 1.511111 | \n",
243 | " 2.000000 | \n",
244 | " 170.152315 | \n",
245 | " 27.945372 | \n",
246 | "
\n",
247 | " \n",
248 | " 18 | \n",
249 | " 61 | \n",
250 | " 91 | \n",
251 | " 60 | \n",
252 | " 0.670330 | \n",
253 | " 1.129630 | \n",
254 | " 1.685185 | \n",
255 | " 151.182510 | \n",
256 | " 25.540172 | \n",
257 | "
\n",
258 | " \n",
259 | " 19 | \n",
260 | " 30 | \n",
261 | " 41 | \n",
262 | " 30 | \n",
263 | " 0.731707 | \n",
264 | " 1.111111 | \n",
265 | " 1.518519 | \n",
266 | " 164.849733 | \n",
267 | " 40.001291 | \n",
268 | "
\n",
269 | " \n",
270 | "
\n",
271 | "
"
272 | ],
273 | "text/plain": [
274 | " Ns Ni A Ns/Ni RhoS RhoI Ages Ages Errors\n",
275 | "0 31 41 40 0.756098 0.861111 1.138889 170.272777 40.938206\n",
276 | "1 19 22 20 0.863636 1.055556 1.222222 194.129222 61.156267\n",
277 | "2 56 63 60 0.888889 1.037037 1.166667 199.718474 37.303586\n",
278 | "3 67 71 80 0.943662 0.930556 0.986111 211.825011 36.791026\n",
279 | "4 88 90 90 0.977778 1.086420 1.111111 219.354179 33.720434\n",
280 | "5 6 7 15 0.857143 0.444444 0.518519 192.691202 107.403652\n",
281 | "6 18 14 20 1.285714 1.000000 0.777778 286.919063 102.707829\n",
282 | "7 40 41 40 0.975610 1.111111 1.138889 218.875970 49.209223\n",
283 | "8 36 49 40 0.734694 1.000000 1.361111 165.514024 36.766051\n",
284 | "9 54 79 60 0.683544 1.000000 1.462963 154.127518 27.714542\n",
285 | "10 35 52 40 0.673077 0.972222 1.444444 151.794873 33.587209\n",
286 | "11 52 76 70 0.684211 0.825397 1.206349 154.275956 28.256344\n",
287 | "12 51 74 49 0.689189 1.156463 1.678005 155.385125 28.768616\n",
288 | "13 47 66 50 0.712121 1.044444 1.466667 160.491558 31.114275\n",
289 | "14 27 39 36 0.692308 0.833333 1.203704 156.079781 39.434473\n",
290 | "15 36 44 40 0.818182 1.000000 1.222222 184.056340 41.834664\n",
291 | "16 64 86 50 0.744186 1.422222 1.911111 167.624884 28.253537\n",
292 | "17 68 90 50 0.755556 1.511111 2.000000 170.152315 27.945372\n",
293 | "18 61 91 60 0.670330 1.129630 1.685185 151.182510 25.540172\n",
294 | "19 30 41 30 0.731707 1.111111 1.518519 164.849733 40.001291"
295 | ]
296 | },
297 | "execution_count": 3,
298 | "metadata": {},
299 | "output_type": "execute_result"
300 | }
301 | ],
302 | "source": [
303 | "Miller"
304 | ]
305 | },
306 | {
307 | "cell_type": "code",
308 | "execution_count": 6,
309 | "metadata": {},
310 | "outputs": [],
311 | "source": [
312 | "Ns = Miller[\"Ns\"]\n",
313 | "Ni = Miller[\"Ni\"]\n",
314 | "\n",
315 | "zeta = 350.\n",
316 | "zeta_err = 10. / 350.\n",
317 | "\n",
318 | "rhod = 1.304\n",
319 | "rhod_err = 0\n",
320 | "Nd = 2936"
321 | ]
322 | },
323 | {
324 | "cell_type": "code",
325 | "execution_count": 7,
326 | "metadata": {},
327 | "outputs": [],
328 | "source": [
329 | "from pyFTracks.age_calculations import calculate_central_age\n",
330 | "from pyFTracks.age_calculations import calculate_pooled_age\n",
331 | "from pyFTracks.age_calculations import calculate_ages"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": 8,
337 | "metadata": {},
338 | "outputs": [
339 | {
340 | "data": {
341 | "text/plain": [
342 | "{'Central': 175.5672998835018,\n",
343 | " 'se': 8.510137020148957,\n",
344 | " 'sigma': 5.1978616298287776e-05}"
345 | ]
346 | },
347 | "execution_count": 8,
348 | "metadata": {},
349 | "output_type": "execute_result"
350 | }
351 | ],
352 | "source": [
353 | "calculate_central_age(Ns, Ni, zeta, zeta_err, rhod, Nd)"
354 | ]
355 | },
356 | {
357 | "cell_type": "code",
358 | "execution_count": 9,
359 | "metadata": {},
360 | "outputs": [
361 | {
362 | "data": {
363 | "text/plain": [
364 | "{'Pooled Age': 175.56729987574315, 'se': 9.878490119317705}"
365 | ]
366 | },
367 | "execution_count": 9,
368 | "metadata": {},
369 | "output_type": "execute_result"
370 | }
371 | ],
372 | "source": [
373 | "calculate_pooled_age(Ns, Ni, zeta, zeta_err, rhod, Nd)"
374 | ]
375 | },
376 | {
377 | "cell_type": "code",
378 | "execution_count": 10,
379 | "metadata": {},
380 | "outputs": [
381 | {
382 | "data": {
383 | "text/plain": [
384 | "{'Age(s)': array([170.27277726, 194.12922188, 199.71847392, 211.82501094,\n",
385 | " 219.35417906, 192.69120201, 286.91906312, 218.87597033,\n",
386 | " 165.51402406, 154.12751783, 151.7948727 , 154.27595627,\n",
387 | " 155.38512454, 160.49155828, 156.07978109, 184.05633985,\n",
388 | " 167.62488355, 170.15231471, 151.18251036, 164.84973261]),\n",
389 | " 'se(s)': array([ 40.93820562, 61.15626671, 37.30358567, 36.79102639,\n",
390 | " 33.72043367, 107.40365187, 102.70782874, 49.20922289,\n",
391 | " 36.7660512 , 27.71454167, 33.5872093 , 28.25634367,\n",
392 | " 28.7686159 , 31.11427488, 39.43447289, 41.83466447,\n",
393 | " 28.25353735, 27.94537164, 25.54017193, 40.0012906 ])}"
394 | ]
395 | },
396 | "execution_count": 10,
397 | "metadata": {},
398 | "output_type": "execute_result"
399 | }
400 | ],
401 | "source": [
402 | "calculate_ages(Ns, Ni, zeta, zeta_err, rhod, Nd)"
403 | ]
404 | },
405 | {
406 | "cell_type": "code",
407 | "execution_count": 11,
408 | "metadata": {},
409 | "outputs": [],
410 | "source": [
411 | "from pyFTracks.ressources import Gleadow"
412 | ]
413 | },
414 | {
415 | "cell_type": "code",
416 | "execution_count": 12,
417 | "metadata": {},
418 | "outputs": [],
419 | "source": [
420 | "Ns = Gleadow[\"Ns\"]\n",
421 | "Ni = Gleadow[\"Ni\"]\n",
422 | "zeta = 380.\n",
423 | "zeta_err = 5. / 380.\n",
424 | "\n",
425 | "rhod = 1.257\n",
426 | "rhod_err = 0\n",
427 | "Nd = 8188"
428 | ]
429 | },
430 | {
431 | "cell_type": "code",
432 | "execution_count": 13,
433 | "metadata": {},
434 | "outputs": [
435 | {
436 | "data": {
437 | "text/plain": [
438 | "{'Central': 94.09935886162337,\n",
439 | " 'se': 10.157833459811043,\n",
440 | " 'sigma': 0.446112700671759}"
441 | ]
442 | },
443 | "execution_count": 13,
444 | "metadata": {},
445 | "output_type": "execute_result"
446 | }
447 | ],
448 | "source": [
449 | "calculate_central_age(Ns, Ni, zeta, zeta_err, rhod, Nd)"
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": 14,
455 | "metadata": {},
456 | "outputs": [
457 | {
458 | "data": {
459 | "text/plain": [
460 | "{'Pooled Age': 124.98724612328856, 'se': 6.0944585636007576}"
461 | ]
462 | },
463 | "execution_count": 14,
464 | "metadata": {},
465 | "output_type": "execute_result"
466 | }
467 | ],
468 | "source": [
469 | "calculate_pooled_age(Ns, Ni, zeta, zeta_err, rhod, Nd)"
470 | ]
471 | },
472 | {
473 | "cell_type": "code",
474 | "execution_count": 15,
475 | "metadata": {},
476 | "outputs": [
477 | {
478 | "data": {
479 | "text/plain": [
480 | "{'Age(s)': array([ 0. , 43.27803705, 151.73374642, 118.32243524,\n",
481 | " 30.54674248, 32.48573611, 118.32243524, 83.2599968 ,\n",
482 | " 95.56075553, 67.87851655, 26.482197 , 88.94480733,\n",
483 | " 272.08754802, 92.0203823 , 64.80858573, 138.97927265,\n",
484 | " 94.83105306, 72.28064785, 23.8388675 , 77.29336818,\n",
485 | " 80.90943697, 132.37628366, 67.87851655, 75.70245065,\n",
486 | " 130.43964242, 157.28550116, 59.43268641, 157.28550116,\n",
487 | " 139.60420528, 140.85095558]),\n",
488 | " 'se(s)': array([ 0. , 33.27640223, 45.91421583, 102.49040302,\n",
489 | " 10.27369072, 20.00137932, 72.48591729, 21.68585213,\n",
490 | " 15.78285539, 54.43638673, 27.91839535, 42.6065056 ,\n",
491 | " 25.44704475, 15.11733177, 42.22704272, 55.43841021,\n",
492 | " 79.3580827 , 31.22579735, 25.00577049, 23.82086375,\n",
493 | " 24.23088369, 44.2469251 , 27.23693002, 18.58068178,\n",
494 | " 40.68337352, 34.92831442, 17.78825114, 82.94078305,\n",
495 | " 48.89593844, 15.9753248 ])}"
496 | ]
497 | },
498 | "execution_count": 15,
499 | "metadata": {},
500 | "output_type": "execute_result"
501 | }
502 | ],
503 | "source": [
504 | "calculate_ages(Ns, Ni, zeta, zeta_err, rhod, Nd)"
505 | ]
506 | }
507 | ],
508 | "metadata": {
509 | "kernelspec": {
510 | "display_name": "Python 3",
511 | "language": "python",
512 | "name": "python3"
513 | },
514 | "language_info": {
515 | "codemirror_mode": {
516 | "name": "ipython",
517 | "version": 3
518 | },
519 | "file_extension": ".py",
520 | "mimetype": "text/x-python",
521 | "name": "python",
522 | "nbconvert_exporter": "python",
523 | "pygments_lexer": "ipython3",
524 | "version": "3.8.3"
525 | }
526 | },
527 | "nbformat": 4,
528 | "nbformat_minor": 4
529 | }
530 |
--------------------------------------------------------------------------------
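The single-grain ages reported by ``calculate_ages`` in the notebook above follow the
standard zeta-calibration age equation, t = (1/lambda_D) * ln(1 + lambda_D * zeta * g * rhod * Ns/Ni).
The sketch below is not the package's internal implementation; it assumes the usual
constants (total 238U decay constant lambda_D = 1.55125e-10 /yr, geometry factor g = 0.5
for the external detector method) and that rhod is given in 10^6 tracks/cm^2, which
reproduces the first Miller grain age printed above.

.. code:: python

    import numpy as np

    LAMBDA_D = 1.55125e-10   # total decay constant of 238U (1/yr)

    def single_grain_age(Ns, Ni, zeta, rhod, g=0.5):
        """Single-grain fission-track age (Ma) from the zeta-calibration equation."""
        ratio = Ns / Ni
        t_years = np.log(1.0 + LAMBDA_D * zeta * g * rhod * 1e6 * ratio) / LAMBDA_D
        return t_years / 1e6   # convert years to Ma

    # First Miller grain: Ns=31, Ni=41, zeta=350, rhod=1.304 -> ~170.3 Ma,
    # matching the notebook output above.
    print(single_grain_age(31, 41, 350., 1.304))
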
/docs/examples/Chi2 Tests.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from pyFTracks.age_calculations import chi_square\n",
10 | "from pyFTracks.ressources import Miller1995\n",
11 | "from pyFTracks.ressources import Gleadow"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "data": {
21 | "text/html": [
22 | "\n",
23 | "\n",
36 | "
\n",
37 | " \n",
38 | " \n",
39 | " | \n",
40 | " Ns | \n",
41 | " Ni | \n",
42 | " A | \n",
43 | " Ns/Ni | \n",
44 | " RhoS | \n",
45 | " RhoI | \n",
46 | " Ages | \n",
47 | " Ages Errors | \n",
48 | "
\n",
49 | " \n",
50 | " \n",
51 | " \n",
52 | " 0 | \n",
53 | " 31 | \n",
54 | " 41 | \n",
55 | " 40 | \n",
56 | " 0.756098 | \n",
57 | " 8.602509e+05 | \n",
58 | " 1.137751e+06 | \n",
59 | " 170.272777 | \n",
60 | " 40.938206 | \n",
61 | "
\n",
62 | " \n",
63 | " 1 | \n",
64 | " 19 | \n",
65 | " 22 | \n",
66 | " 20 | \n",
67 | " 0.863636 | \n",
68 | " 1.054501e+06 | \n",
69 | " 1.221001e+06 | \n",
70 | " 194.129222 | \n",
71 | " 61.156267 | \n",
72 | "
\n",
73 | " \n",
74 | " 2 | \n",
75 | " 56 | \n",
76 | " 63 | \n",
77 | " 60 | \n",
78 | " 0.888889 | \n",
79 | " 1.036001e+06 | \n",
80 | " 1.165501e+06 | \n",
81 | " 199.718474 | \n",
82 | " 37.303586 | \n",
83 | "
\n",
84 | " \n",
85 | " 3 | \n",
86 | " 67 | \n",
87 | " 71 | \n",
88 | " 80 | \n",
89 | " 0.943662 | \n",
90 | " 9.296259e+05 | \n",
91 | " 9.851260e+05 | \n",
92 | " 211.825011 | \n",
93 | " 36.791026 | \n",
94 | "
\n",
95 | " \n",
96 | " 4 | \n",
97 | " 88 | \n",
98 | " 90 | \n",
99 | " 90 | \n",
100 | " 0.977778 | \n",
101 | " 1.085334e+06 | \n",
102 | " 1.110001e+06 | \n",
103 | " 219.354179 | \n",
104 | " 33.720434 | \n",
105 | "
\n",
106 | " \n",
107 | " 5 | \n",
108 | " 6 | \n",
109 | " 7 | \n",
110 | " 15 | \n",
111 | " 0.857143 | \n",
112 | " 4.440004e+05 | \n",
113 | " 5.180005e+05 | \n",
114 | " 192.691202 | \n",
115 | " 107.403652 | \n",
116 | "
\n",
117 | " \n",
118 | " 6 | \n",
119 | " 18 | \n",
120 | " 14 | \n",
121 | " 20 | \n",
122 | " 1.285714 | \n",
123 | " 9.990010e+05 | \n",
124 | " 7.770008e+05 | \n",
125 | " 286.919063 | \n",
126 | " 102.707829 | \n",
127 | "
\n",
128 | " \n",
129 | " 7 | \n",
130 | " 40 | \n",
131 | " 41 | \n",
132 | " 40 | \n",
133 | " 0.975610 | \n",
134 | " 1.110001e+06 | \n",
135 | " 1.137751e+06 | \n",
136 | " 218.875970 | \n",
137 | " 49.209223 | \n",
138 | "
\n",
139 | " \n",
140 | " 8 | \n",
141 | " 36 | \n",
142 | " 49 | \n",
143 | " 40 | \n",
144 | " 0.734694 | \n",
145 | " 9.990010e+05 | \n",
146 | " 1.359751e+06 | \n",
147 | " 165.514024 | \n",
148 | " 36.766051 | \n",
149 | "
\n",
150 | " \n",
151 | " 9 | \n",
152 | " 54 | \n",
153 | " 79 | \n",
154 | " 60 | \n",
155 | " 0.683544 | \n",
156 | " 9.990010e+05 | \n",
157 | " 1.461501e+06 | \n",
158 | " 154.127518 | \n",
159 | " 27.714542 | \n",
160 | "
\n",
161 | " \n",
162 | " 10 | \n",
163 | " 35 | \n",
164 | " 52 | \n",
165 | " 40 | \n",
166 | " 0.673077 | \n",
167 | " 9.712510e+05 | \n",
168 | " 1.443001e+06 | \n",
169 | " 151.794873 | \n",
170 | " 33.587209 | \n",
171 | "
\n",
172 | " \n",
173 | " 11 | \n",
174 | " 52 | \n",
175 | " 76 | \n",
176 | " 70 | \n",
177 | " 0.684211 | \n",
178 | " 8.245723e+05 | \n",
179 | " 1.205144e+06 | \n",
180 | " 154.275956 | \n",
181 | " 28.256344 | \n",
182 | "
\n",
183 | " \n",
184 | " 12 | \n",
185 | " 51 | \n",
186 | " 74 | \n",
187 | " 49 | \n",
188 | " 0.689189 | \n",
189 | " 1.155307e+06 | \n",
190 | " 1.676328e+06 | \n",
191 | " 155.385125 | \n",
192 | " 28.768616 | \n",
193 | "
\n",
194 | " \n",
195 | " 13 | \n",
196 | " 47 | \n",
197 | " 66 | \n",
198 | " 50 | \n",
199 | " 0.712121 | \n",
200 | " 1.043401e+06 | \n",
201 | " 1.465201e+06 | \n",
202 | " 160.491558 | \n",
203 | " 31.114275 | \n",
204 | "
\n",
205 | " \n",
206 | " 14 | \n",
207 | " 27 | \n",
208 | " 39 | \n",
209 | " 36 | \n",
210 | " 0.692308 | \n",
211 | " 8.325008e+05 | \n",
212 | " 1.202501e+06 | \n",
213 | " 156.079781 | \n",
214 | " 39.434473 | \n",
215 | "
\n",
216 | " \n",
217 | " 15 | \n",
218 | " 36 | \n",
219 | " 44 | \n",
220 | " 40 | \n",
221 | " 0.818182 | \n",
222 | " 9.990010e+05 | \n",
223 | " 1.221001e+06 | \n",
224 | " 184.056340 | \n",
225 | " 41.834664 | \n",
226 | "
\n",
227 | " \n",
228 | " 16 | \n",
229 | " 64 | \n",
230 | " 86 | \n",
231 | " 50 | \n",
232 | " 0.744186 | \n",
233 | " 1.420801e+06 | \n",
234 | " 1.909202e+06 | \n",
235 | " 167.624884 | \n",
236 | " 28.253537 | \n",
237 | "
\n",
238 | " \n",
239 | " 17 | \n",
240 | " 68 | \n",
241 | " 90 | \n",
242 | " 50 | \n",
243 | " 0.755556 | \n",
244 | " 1.509602e+06 | \n",
245 | " 1.998002e+06 | \n",
246 | " 170.152315 | \n",
247 | " 27.945372 | \n",
248 | "
\n",
249 | " \n",
250 | " 18 | \n",
251 | " 61 | \n",
252 | " 91 | \n",
253 | " 60 | \n",
254 | " 0.670330 | \n",
255 | " 1.128501e+06 | \n",
256 | " 1.683502e+06 | \n",
257 | " 151.182510 | \n",
258 | " 25.540172 | \n",
259 | "
\n",
260 | " \n",
261 | " 19 | \n",
262 | " 30 | \n",
263 | " 41 | \n",
264 | " 30 | \n",
265 | " 0.731707 | \n",
266 | " 1.110001e+06 | \n",
267 | " 1.517002e+06 | \n",
268 | " 164.849733 | \n",
269 | " 40.001291 | \n",
270 | "
\n",
271 | " \n",
272 | "
\n",
273 | "
"
274 | ],
275 | "text/plain": [
276 | " Ns Ni A Ns/Ni RhoS RhoI Ages Ages Errors\n",
277 | "0 31 41 40 0.756098 8.602509e+05 1.137751e+06 170.272777 40.938206\n",
278 | "1 19 22 20 0.863636 1.054501e+06 1.221001e+06 194.129222 61.156267\n",
279 | "2 56 63 60 0.888889 1.036001e+06 1.165501e+06 199.718474 37.303586\n",
280 | "3 67 71 80 0.943662 9.296259e+05 9.851260e+05 211.825011 36.791026\n",
281 | "4 88 90 90 0.977778 1.085334e+06 1.110001e+06 219.354179 33.720434\n",
282 | "5 6 7 15 0.857143 4.440004e+05 5.180005e+05 192.691202 107.403652\n",
283 | "6 18 14 20 1.285714 9.990010e+05 7.770008e+05 286.919063 102.707829\n",
284 | "7 40 41 40 0.975610 1.110001e+06 1.137751e+06 218.875970 49.209223\n",
285 | "8 36 49 40 0.734694 9.990010e+05 1.359751e+06 165.514024 36.766051\n",
286 | "9 54 79 60 0.683544 9.990010e+05 1.461501e+06 154.127518 27.714542\n",
287 | "10 35 52 40 0.673077 9.712510e+05 1.443001e+06 151.794873 33.587209\n",
288 | "11 52 76 70 0.684211 8.245723e+05 1.205144e+06 154.275956 28.256344\n",
289 | "12 51 74 49 0.689189 1.155307e+06 1.676328e+06 155.385125 28.768616\n",
290 | "13 47 66 50 0.712121 1.043401e+06 1.465201e+06 160.491558 31.114275\n",
291 | "14 27 39 36 0.692308 8.325008e+05 1.202501e+06 156.079781 39.434473\n",
292 | "15 36 44 40 0.818182 9.990010e+05 1.221001e+06 184.056340 41.834664\n",
293 | "16 64 86 50 0.744186 1.420801e+06 1.909202e+06 167.624884 28.253537\n",
294 | "17 68 90 50 0.755556 1.509602e+06 1.998002e+06 170.152315 27.945372\n",
295 | "18 61 91 60 0.670330 1.128501e+06 1.683502e+06 151.182510 25.540172\n",
296 | "19 30 41 30 0.731707 1.110001e+06 1.517002e+06 164.849733 40.001291"
297 | ]
298 | },
299 | "execution_count": 2,
300 | "metadata": {},
301 | "output_type": "execute_result"
302 | }
303 | ],
304 | "source": [
305 | "Miller1995"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": 3,
311 | "metadata": {},
312 | "outputs": [],
313 | "source": [
314 | "Ns = Miller1995[\"Ns\"]\n",
315 | "Ni = Miller1995[\"Ni\"]"
316 | ]
317 | },
318 | {
319 | "cell_type": "code",
320 | "execution_count": 4,
321 | "metadata": {},
322 | "outputs": [
323 | {
324 | "data": {
325 | "text/plain": [
326 | "0.9292129985459694"
327 | ]
328 | },
329 | "execution_count": 4,
330 | "metadata": {},
331 | "output_type": "execute_result"
332 | }
333 | ],
334 | "source": [
335 | "chi_square(Ns, Ni)"
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": 5,
341 | "metadata": {},
342 | "outputs": [
343 | {
344 | "data": {
345 | "text/html": [
346 | "\n",
347 | "\n",
360 | "
\n",
361 | " \n",
362 | " \n",
363 | " | \n",
364 | " Ns | \n",
365 | " Ni | \n",
366 | " A | \n",
367 | " Ns/Ni | \n",
368 | " RhoS | \n",
369 | " RhoI | \n",
370 | " Ages | \n",
371 | " Ages Errors | \n",
372 | "
\n",
373 | " \n",
374 | " \n",
375 | " \n",
376 | " 0 | \n",
377 | " 0 | \n",
378 | " 11 | \n",
379 | " 24 | \n",
380 | " 0.000000 | \n",
381 | " 0.000000e+00 | \n",
382 | " 5.876068e+05 | \n",
383 | " 0.000000 | \n",
384 | " 0.000000 | \n",
385 | "
\n",
386 | " \n",
387 | " 1 | \n",
388 | " 2 | \n",
389 | " 11 | \n",
390 | " 44 | \n",
391 | " 0.181818 | \n",
392 | " 5.827506e+04 | \n",
393 | " 3.205128e+05 | \n",
394 | " 43.278037 | \n",
395 | " 33.277678 | \n",
396 | "
\n",
397 | " \n",
398 | " 2 | \n",
399 | " 18 | \n",
400 | " 28 | \n",
401 | " 32 | \n",
402 | " 0.642857 | \n",
403 | " 7.211538e+05 | \n",
404 | " 1.121795e+06 | \n",
405 | " 151.733746 | \n",
406 | " 45.925582 | \n",
407 | "
\n",
408 | " \n",
409 | " 3 | \n",
410 | " 2 | \n",
411 | " 4 | \n",
412 | " 40 | \n",
413 | " 0.500000 | \n",
414 | " 6.410256e+04 | \n",
415 | " 1.282051e+05 | \n",
416 | " 118.322435 | \n",
417 | " 102.493500 | \n",
418 | "
\n",
419 | " \n",
420 | " 4 | \n",
421 | " 10 | \n",
422 | " 78 | \n",
423 | " 90 | \n",
424 | " 0.128205 | \n",
425 | " 1.424501e+05 | \n",
426 | " 1.111111e+06 | \n",
427 | " 30.546742 | \n",
428 | " 10.275749 | \n",
429 | "
\n",
430 | " \n",
431 | " 5 | \n",
432 | " 3 | \n",
433 | " 22 | \n",
434 | " 30 | \n",
435 | " 0.136364 | \n",
436 | " 1.282051e+05 | \n",
437 | " 9.401709e+05 | \n",
438 | " 32.485736 | \n",
439 | " 20.002575 | \n",
440 | "
\n",
441 | " \n",
442 | " 6 | \n",
443 | " 4 | \n",
444 | " 8 | \n",
445 | " 40 | \n",
446 | " 0.500000 | \n",
447 | " 1.282051e+05 | \n",
448 | " 2.564103e+05 | \n",
449 | " 118.322435 | \n",
450 | " 72.490296 | \n",
451 | "
\n",
452 | " \n",
453 | " 7 | \n",
454 | " 20 | \n",
455 | " 57 | \n",
456 | " 50 | \n",
457 | " 0.350877 | \n",
458 | " 5.128205e+05 | \n",
459 | " 1.461538e+06 | \n",
460 | " 83.259997 | \n",
461 | " 21.693098 | \n",
462 | "
\n",
463 | " \n",
464 | " 8 | \n",
465 | " 52 | \n",
466 | " 129 | \n",
467 | " 20 | \n",
468 | " 0.403101 | \n",
469 | " 3.333333e+06 | \n",
470 | " 8.269231e+06 | \n",
471 | " 95.560756 | \n",
472 | " 15.795966 | \n",
473 | "
\n",
474 | " \n",
475 | " 9 | \n",
476 | " 2 | \n",
477 | " 7 | \n",
478 | " 45 | \n",
479 | " 0.285714 | \n",
480 | " 5.698006e+04 | \n",
481 | " 1.994302e+05 | \n",
482 | " 67.878517 | \n",
483 | " 54.438305 | \n",
484 | "
\n",
485 | " \n",
486 | " 10 | \n",
487 | " 1 | \n",
488 | " 9 | \n",
489 | " 35 | \n",
490 | " 0.111111 | \n",
491 | " 3.663004e+04 | \n",
492 | " 3.296703e+05 | \n",
493 | " 26.482197 | \n",
494 | " 27.918965 | \n",
495 | "
\n",
496 | " \n",
497 | " 11 | \n",
498 | " 6 | \n",
499 | " 16 | \n",
500 | " 50 | \n",
501 | " 0.375000 | \n",
502 | " 1.538462e+05 | \n",
503 | " 4.102564e+05 | \n",
504 | " 88.944807 | \n",
505 | " 42.610715 | \n",
506 | "
\n",
507 | " \n",
508 | " 12 | \n",
509 | " 256 | \n",
510 | " 220 | \n",
511 | " 100 | \n",
512 | " 1.163636 | \n",
513 | " 3.282051e+06 | \n",
514 | " 2.820513e+06 | \n",
515 | " 272.087548 | \n",
516 | " 25.512911 | \n",
517 | "
\n",
518 | " \n",
519 | " 13 | \n",
520 | " 52 | \n",
521 | " 134 | \n",
522 | " 24 | \n",
523 | " 0.388060 | \n",
524 | " 2.777778e+06 | \n",
525 | " 7.158120e+06 | \n",
526 | " 92.020382 | \n",
527 | " 15.130025 | \n",
528 | "
\n",
529 | " \n",
530 | " 14 | \n",
531 | " 3 | \n",
532 | " 11 | \n",
533 | " 35 | \n",
534 | " 0.272727 | \n",
535 | " 1.098901e+05 | \n",
536 | " 4.029304e+05 | \n",
537 | " 64.808586 | \n",
538 | " 42.229298 | \n",
539 | "
\n",
540 | " \n",
541 | " 15 | \n",
542 | " 10 | \n",
543 | " 17 | \n",
544 | " 16 | \n",
545 | " 0.588235 | \n",
546 | " 8.012821e+05 | \n",
547 | " 1.362179e+06 | \n",
548 | " 138.979273 | \n",
549 | " 55.446308 | \n",
550 | "
\n",
551 | " \n",
552 | " 16 | \n",
553 | " 2 | \n",
554 | " 5 | \n",
555 | " 12 | \n",
556 | " 0.400000 | \n",
557 | " 2.136752e+05 | \n",
558 | " 5.341880e+05 | \n",
559 | " 94.831053 | \n",
560 | " 79.360652 | \n",
561 | "
\n",
562 | " \n",
563 | " 17 | \n",
564 | " 7 | \n",
565 | " 23 | \n",
566 | " 40 | \n",
567 | " 0.304348 | \n",
568 | " 2.243590e+05 | \n",
569 | " 7.371795e+05 | \n",
570 | " 72.280648 | \n",
571 | " 31.229590 | \n",
572 | "
\n",
573 | " \n",
574 | " 18 | \n",
575 | " 1 | \n",
576 | " 10 | \n",
577 | " 60 | \n",
578 | " 0.100000 | \n",
579 | " 2.136752e+04 | \n",
580 | " 2.136752e+05 | \n",
581 | " 23.838867 | \n",
582 | " 25.006286 | \n",
583 | "
\n",
584 | " \n",
585 | " 19 | \n",
586 | " 14 | \n",
587 | " 43 | \n",
588 | " 24 | \n",
589 | " 0.325581 | \n",
590 | " 7.478632e+05 | \n",
591 | " 2.297009e+06 | \n",
592 | " 77.293368 | \n",
593 | " 23.826549 | \n",
594 | "
\n",
595 | " \n",
596 | " 20 | \n",
597 | " 15 | \n",
598 | " 44 | \n",
599 | " 49 | \n",
600 | " 0.340909 | \n",
601 | " 3.924647e+05 | \n",
602 | " 1.151230e+06 | \n",
603 | " 80.909437 | \n",
604 | " 24.237007 | \n",
605 | "
\n",
606 | " \n",
607 | " 21 | \n",
608 | " 14 | \n",
609 | " 25 | \n",
610 | " 48 | \n",
611 | " 0.560000 | \n",
612 | " 3.739316e+05 | \n",
613 | " 6.677350e+05 | \n",
614 | " 132.376284 | \n",
615 | " 44.255902 | \n",
616 | "
\n",
617 | " \n",
618 | " 22 | \n",
619 | " 8 | \n",
620 | " 28 | \n",
621 | " 32 | \n",
622 | " 0.285714 | \n",
623 | " 3.205128e+05 | \n",
624 | " 1.121795e+06 | \n",
625 | " 67.878517 | \n",
626 | " 27.240765 | \n",
627 | "
\n",
628 | " \n",
629 | " 23 | \n",
630 | " 22 | \n",
631 | " 69 | \n",
632 | " 45 | \n",
633 | " 0.318841 | \n",
634 | " 6.267806e+05 | \n",
635 | " 1.965812e+06 | \n",
636 | " 75.702451 | \n",
637 | " 18.587672 | \n",
638 | "
\n",
639 | " \n",
640 | " 24 | \n",
641 | " 16 | \n",
642 | " 29 | \n",
643 | " 44 | \n",
644 | " 0.551724 | \n",
645 | " 4.662005e+05 | \n",
646 | " 8.449883e+05 | \n",
647 | " 130.439642 | \n",
648 | " 40.692853 | \n",
649 | "
\n",
650 | " \n",
651 | " 25 | \n",
652 | " 34 | \n",
653 | " 51 | \n",
654 | " 35 | \n",
655 | " 0.666667 | \n",
656 | " 1.245421e+06 | \n",
657 | " 1.868132e+06 | \n",
658 | " 157.285501 | \n",
659 | " 34.944367 | \n",
660 | "
\n",
661 | " \n",
662 | " 26 | \n",
663 | " 14 | \n",
664 | " 56 | \n",
665 | " 40 | \n",
666 | " 0.250000 | \n",
667 | " 4.487179e+05 | \n",
668 | " 1.794872e+06 | \n",
669 | " 59.432686 | \n",
670 | " 17.792752 | \n",
671 | "
\n",
672 | " \n",
673 | " 27 | \n",
674 | " 6 | \n",
675 | " 9 | \n",
676 | " 5 | \n",
677 | " 0.666667 | \n",
678 | " 1.538462e+06 | \n",
679 | " 2.307692e+06 | \n",
680 | " 157.285501 | \n",
681 | " 82.947544 | \n",
682 | "
\n",
683 | " \n",
684 | " 28 | \n",
685 | " 13 | \n",
686 | " 22 | \n",
687 | " 32 | \n",
688 | " 0.590909 | \n",
689 | " 5.208333e+05 | \n",
690 | " 8.814103e+05 | \n",
691 | " 139.604205 | \n",
692 | " 48.904973 | \n",
693 | "
\n",
694 | " \n",
695 | " 29 | \n",
696 | " 127 | \n",
697 | " 213 | \n",
698 | " 28 | \n",
699 | " 0.596244 | \n",
700 | " 5.815018e+06 | \n",
701 | " 9.752747e+06 | \n",
702 | " 140.850956 | \n",
703 | " 16.003452 | \n",
704 | "
\n",
705 | " \n",
706 | "
\n",
707 | "
"
708 | ],
709 | "text/plain": [
710 | " Ns Ni A Ns/Ni RhoS RhoI Ages \\\n",
711 | "0 0 11 24 0.000000 0.000000e+00 5.876068e+05 0.000000 \n",
712 | "1 2 11 44 0.181818 5.827506e+04 3.205128e+05 43.278037 \n",
713 | "2 18 28 32 0.642857 7.211538e+05 1.121795e+06 151.733746 \n",
714 | "3 2 4 40 0.500000 6.410256e+04 1.282051e+05 118.322435 \n",
715 | "4 10 78 90 0.128205 1.424501e+05 1.111111e+06 30.546742 \n",
716 | "5 3 22 30 0.136364 1.282051e+05 9.401709e+05 32.485736 \n",
717 | "6 4 8 40 0.500000 1.282051e+05 2.564103e+05 118.322435 \n",
718 | "7 20 57 50 0.350877 5.128205e+05 1.461538e+06 83.259997 \n",
719 | "8 52 129 20 0.403101 3.333333e+06 8.269231e+06 95.560756 \n",
720 | "9 2 7 45 0.285714 5.698006e+04 1.994302e+05 67.878517 \n",
721 | "10 1 9 35 0.111111 3.663004e+04 3.296703e+05 26.482197 \n",
722 | "11 6 16 50 0.375000 1.538462e+05 4.102564e+05 88.944807 \n",
723 | "12 256 220 100 1.163636 3.282051e+06 2.820513e+06 272.087548 \n",
724 | "13 52 134 24 0.388060 2.777778e+06 7.158120e+06 92.020382 \n",
725 | "14 3 11 35 0.272727 1.098901e+05 4.029304e+05 64.808586 \n",
726 | "15 10 17 16 0.588235 8.012821e+05 1.362179e+06 138.979273 \n",
727 | "16 2 5 12 0.400000 2.136752e+05 5.341880e+05 94.831053 \n",
728 | "17 7 23 40 0.304348 2.243590e+05 7.371795e+05 72.280648 \n",
729 | "18 1 10 60 0.100000 2.136752e+04 2.136752e+05 23.838867 \n",
730 | "19 14 43 24 0.325581 7.478632e+05 2.297009e+06 77.293368 \n",
731 | "20 15 44 49 0.340909 3.924647e+05 1.151230e+06 80.909437 \n",
732 | "21 14 25 48 0.560000 3.739316e+05 6.677350e+05 132.376284 \n",
733 | "22 8 28 32 0.285714 3.205128e+05 1.121795e+06 67.878517 \n",
734 | "23 22 69 45 0.318841 6.267806e+05 1.965812e+06 75.702451 \n",
735 | "24 16 29 44 0.551724 4.662005e+05 8.449883e+05 130.439642 \n",
736 | "25 34 51 35 0.666667 1.245421e+06 1.868132e+06 157.285501 \n",
737 | "26 14 56 40 0.250000 4.487179e+05 1.794872e+06 59.432686 \n",
738 | "27 6 9 5 0.666667 1.538462e+06 2.307692e+06 157.285501 \n",
739 | "28 13 22 32 0.590909 5.208333e+05 8.814103e+05 139.604205 \n",
740 | "29 127 213 28 0.596244 5.815018e+06 9.752747e+06 140.850956 \n",
741 | "\n",
742 | " Ages Errors \n",
743 | "0 0.000000 \n",
744 | "1 33.277678 \n",
745 | "2 45.925582 \n",
746 | "3 102.493500 \n",
747 | "4 10.275749 \n",
748 | "5 20.002575 \n",
749 | "6 72.490296 \n",
750 | "7 21.693098 \n",
751 | "8 15.795966 \n",
752 | "9 54.438305 \n",
753 | "10 27.918965 \n",
754 | "11 42.610715 \n",
755 | "12 25.512911 \n",
756 | "13 15.130025 \n",
757 | "14 42.229298 \n",
758 | "15 55.446308 \n",
759 | "16 79.360652 \n",
760 | "17 31.229590 \n",
761 | "18 25.006286 \n",
762 | "19 23.826549 \n",
763 | "20 24.237007 \n",
764 | "21 44.255902 \n",
765 | "22 27.240765 \n",
766 | "23 18.587672 \n",
767 | "24 40.692853 \n",
768 | "25 34.944367 \n",
769 | "26 17.792752 \n",
770 | "27 82.947544 \n",
771 | "28 48.904973 \n",
772 | "29 16.003452 "
773 | ]
774 | },
775 | "execution_count": 5,
776 | "metadata": {},
777 | "output_type": "execute_result"
778 | }
779 | ],
780 | "source": [
781 | "Gleadow"
782 | ]
783 | },
784 | {
785 | "cell_type": "code",
786 | "execution_count": 6,
787 | "metadata": {},
788 | "outputs": [],
789 | "source": [
790 | "Ns = Gleadow[\"Ns\"]\n",
791 | "Ni = Gleadow[\"Ni\"]"
792 | ]
793 | },
794 | {
795 | "cell_type": "code",
796 | "execution_count": 7,
797 | "metadata": {},
798 | "outputs": [
799 | {
800 | "data": {
801 | "text/plain": [
802 | "0.0"
803 | ]
804 | },
805 | "execution_count": 7,
806 | "metadata": {},
807 | "output_type": "execute_result"
808 | }
809 | ],
810 | "source": [
811 | "chi_square(Ns, Ni)"
812 | ]
813 | }
814 | ],
815 | "metadata": {
816 | "kernelspec": {
817 | "display_name": "Python 3",
818 | "language": "python",
819 | "name": "python3"
820 | },
821 | "language_info": {
822 | "codemirror_mode": {
823 | "name": "ipython",
824 | "version": 3
825 | },
826 | "file_extension": ".py",
827 | "mimetype": "text/x-python",
828 | "name": "python",
829 | "nbconvert_exporter": "python",
830 | "pygments_lexer": "ipython3",
831 | "version": "3.7.4"
832 | }
833 | },
834 | "nbformat": 4,
835 | "nbformat_minor": 2
836 | }
837 |
--------------------------------------------------------------------------------
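The values returned by ``chi_square`` in the notebook above (about 0.93 for the Miller
counts, 0.0 for the Gleadow counts) are p-values of a chi-square test for homogeneity of
the Ns/Ni ratios across grains. A minimal sketch of that test, assuming Galbraith's
standard formulation (this is not necessarily the package's own implementation):

.. code:: python

    import numpy as np
    from scipy.stats import chi2

    def chi2_homogeneity_pvalue(Ns, Ni):
        """P-value of the chi-square test that all grains share a common Ns/Ni ratio."""
        Ns = np.asarray(Ns, dtype=float)
        Ni = np.asarray(Ni, dtype=float)
        ns_total, ni_total = Ns.sum(), Ni.sum()
        # Galbraith's chi-square statistic, with n - 1 degrees of freedom
        stat = np.sum((Ns * ni_total - Ni * ns_total) ** 2 / (Ns + Ni)) / (ns_total * ni_total)
        return chi2.sf(stat, df=len(Ns) - 1)

    # Applied to the Miller counts this gives ~0.93; the Gleadow counts give ~0,
    # consistent with the notebook outputs above.
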
/docs/examples/Sample_datasets.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Sample Datasets"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import numpy as np\n",
17 | "import pandas as pd\n",
18 | "\n",
19 | "def h5store(filename, df, **kwargs):\n",
20 | " store = pd.HDFStore(filename)\n",
21 | " store.put('mydata', df)\n",
22 | " store.get_storer('mydata').attrs.metadata = kwargs\n",
23 | " store.close()\n",
24 | "\n",
25 | "def h5load(store):\n",
26 | " data = store['mydata']\n",
27 | " metadata = store.get_storer('mydata').attrs.metadata\n",
28 | " return data, metadata"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "# Gleadow Dataset"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 2,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "data = pd.DataFrame()"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 3,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "data[\"Ns\"] = [0, 2, 18, 2, 10, 3, 4, 20, 52, 2, 1, 6, 256, 52, 3, 10, 2, 7, 1, 14, 15, 14, 8, 22, 16, 34, 14, 6, 13, 127]\n",
54 | "data[\"Ni\"] = [11, 11, 28 ,4, 78, 22, 8, 57, 129, 7, 9, 16, 220, 134, 11, 17, 5, 23, 10, 43, 44, 25, 28, 69, 29, 51, 56, 9, 22, 213]\n",
55 | "data[\"A\"] = [24, 44, 32, 40, 90, 30, 40, 50, 20, 45, 35, 50, 100, 24, 35, 16, 12, 40, 60, 24, 49, 48, 32, 45, 44, 35, 40, 5, 32, 28]"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 4,
61 | "metadata": {},
62 | "outputs": [
63 | {
64 | "data": {
65 | "text/html": [
66 | "\n",
67 | "\n",
80 | "
\n",
81 | " \n",
82 | " \n",
83 | " | \n",
84 | " Ns | \n",
85 | " Ni | \n",
86 | " A | \n",
87 | "
\n",
88 | " \n",
89 | " \n",
90 | " \n",
91 | " 0 | \n",
92 | " 0 | \n",
93 | " 11 | \n",
94 | " 24 | \n",
95 | "
\n",
96 | " \n",
97 | " 1 | \n",
98 | " 2 | \n",
99 | " 11 | \n",
100 | " 44 | \n",
101 | "
\n",
102 | " \n",
103 | " 2 | \n",
104 | " 18 | \n",
105 | " 28 | \n",
106 | " 32 | \n",
107 | "
\n",
108 | " \n",
109 | " 3 | \n",
110 | " 2 | \n",
111 | " 4 | \n",
112 | " 40 | \n",
113 | "
\n",
114 | " \n",
115 | " 4 | \n",
116 | " 10 | \n",
117 | " 78 | \n",
118 | " 90 | \n",
119 | "
\n",
120 | " \n",
121 | " 5 | \n",
122 | " 3 | \n",
123 | " 22 | \n",
124 | " 30 | \n",
125 | "
\n",
126 | " \n",
127 | " 6 | \n",
128 | " 4 | \n",
129 | " 8 | \n",
130 | " 40 | \n",
131 | "
\n",
132 | " \n",
133 | " 7 | \n",
134 | " 20 | \n",
135 | " 57 | \n",
136 | " 50 | \n",
137 | "
\n",
138 | " \n",
139 | " 8 | \n",
140 | " 52 | \n",
141 | " 129 | \n",
142 | " 20 | \n",
143 | "
\n",
144 | " \n",
145 | " 9 | \n",
146 | " 2 | \n",
147 | " 7 | \n",
148 | " 45 | \n",
149 | "
\n",
150 | " \n",
151 | " 10 | \n",
152 | " 1 | \n",
153 | " 9 | \n",
154 | " 35 | \n",
155 | "
\n",
156 | " \n",
157 | " 11 | \n",
158 | " 6 | \n",
159 | " 16 | \n",
160 | " 50 | \n",
161 | "
\n",
162 | " \n",
163 | " 12 | \n",
164 | " 256 | \n",
165 | " 220 | \n",
166 | " 100 | \n",
167 | "
\n",
168 | " \n",
169 | " 13 | \n",
170 | " 52 | \n",
171 | " 134 | \n",
172 | " 24 | \n",
173 | "
\n",
174 | " \n",
175 | " 14 | \n",
176 | " 3 | \n",
177 | " 11 | \n",
178 | " 35 | \n",
179 | "
\n",
180 | " \n",
181 | " 15 | \n",
182 | " 10 | \n",
183 | " 17 | \n",
184 | " 16 | \n",
185 | "
\n",
186 | " \n",
187 | " 16 | \n",
188 | " 2 | \n",
189 | " 5 | \n",
190 | " 12 | \n",
191 | "
\n",
192 | " \n",
193 | " 17 | \n",
194 | " 7 | \n",
195 | " 23 | \n",
196 | " 40 | \n",
197 | "
\n",
198 | " \n",
199 | " 18 | \n",
200 | " 1 | \n",
201 | " 10 | \n",
202 | " 60 | \n",
203 | "
\n",
204 | " \n",
205 | " 19 | \n",
206 | " 14 | \n",
207 | " 43 | \n",
208 | " 24 | \n",
209 | "
\n",
210 | " \n",
211 | " 20 | \n",
212 | " 15 | \n",
213 | " 44 | \n",
214 | " 49 | \n",
215 | "
\n",
216 | " \n",
217 | " 21 | \n",
218 | " 14 | \n",
219 | " 25 | \n",
220 | " 48 | \n",
221 | "
\n",
222 | " \n",
223 | " 22 | \n",
224 | " 8 | \n",
225 | " 28 | \n",
226 | " 32 | \n",
227 | "
\n",
228 | " \n",
229 | " 23 | \n",
230 | " 22 | \n",
231 | " 69 | \n",
232 | " 45 | \n",
233 | "
\n",
234 | " \n",
235 | " 24 | \n",
236 | " 16 | \n",
237 | " 29 | \n",
238 | " 44 | \n",
239 | "
\n",
240 | " \n",
241 | " 25 | \n",
242 | " 34 | \n",
243 | " 51 | \n",
244 | " 35 | \n",
245 | "
\n",
246 | " \n",
247 | " 26 | \n",
248 | " 14 | \n",
249 | " 56 | \n",
250 | " 40 | \n",
251 | "
\n",
252 | " \n",
253 | " 27 | \n",
254 | " 6 | \n",
255 | " 9 | \n",
256 | " 5 | \n",
257 | "
\n",
258 | " \n",
259 | " 28 | \n",
260 | " 13 | \n",
261 | " 22 | \n",
262 | " 32 | \n",
263 | "
\n",
264 | " \n",
265 | " 29 | \n",
266 | " 127 | \n",
267 | " 213 | \n",
268 | " 28 | \n",
269 | "
\n",
270 | " \n",
271 | "
\n",
272 | "
"
273 | ],
274 | "text/plain": [
275 | " Ns Ni A\n",
276 | "0 0 11 24\n",
277 | "1 2 11 44\n",
278 | "2 18 28 32\n",
279 | "3 2 4 40\n",
280 | "4 10 78 90\n",
281 | "5 3 22 30\n",
282 | "6 4 8 40\n",
283 | "7 20 57 50\n",
284 | "8 52 129 20\n",
285 | "9 2 7 45\n",
286 | "10 1 9 35\n",
287 | "11 6 16 50\n",
288 | "12 256 220 100\n",
289 | "13 52 134 24\n",
290 | "14 3 11 35\n",
291 | "15 10 17 16\n",
292 | "16 2 5 12\n",
293 | "17 7 23 40\n",
294 | "18 1 10 60\n",
295 | "19 14 43 24\n",
296 | "20 15 44 49\n",
297 | "21 14 25 48\n",
298 | "22 8 28 32\n",
299 | "23 22 69 45\n",
300 | "24 16 29 44\n",
301 | "25 34 51 35\n",
302 | "26 14 56 40\n",
303 | "27 6 9 5\n",
304 | "28 13 22 32\n",
305 | "29 127 213 28"
306 | ]
307 | },
308 | "execution_count": 4,
309 | "metadata": {},
310 | "output_type": "execute_result"
311 | }
312 | ],
313 | "source": [
314 | "data"
315 | ]
316 | },
317 | {
318 | "cell_type": "code",
319 | "execution_count": 5,
320 | "metadata": {},
321 | "outputs": [],
322 | "source": [
323 | "description = \"\"\"Spontaneous and Induced track counts from 30 Apatites grains from the Mahe Granite, Seychelles. Table 3.3 from Galbraith book. Data originally provided by A. Gleadow\"\"\"\n",
324 | "\n",
325 | "metadata = {\"zeta\": 380., \n",
326 | " \"zeta error\": 5.0 / 380., \n",
327 | " \"nd\": 8188, \n",
328 | " \"rhod\": 1.257, \n",
329 | " \"unit area graticule\": 0.78,\n",
330 | " \"Ns\":\"Number of spontaneous tracks\",\n",
331 | " \"Ni\":\"Number of induced tracks in the detector\",\n",
332 | " \"Area\": \"Number of graticules squares\",\n",
333 | " \"description\": \"\"}"
334 | ]
335 | },
336 | {
337 | "cell_type": "code",
338 | "execution_count": 6,
339 | "metadata": {},
340 | "outputs": [],
341 | "source": [
342 | "h5store(\"Gleadow.h5\", data, **metadata)"
343 | ]
344 | },
345 | {
346 | "cell_type": "markdown",
347 | "metadata": {},
348 | "source": [
349 | "## Miller Dataset"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": 7,
355 | "metadata": {},
356 | "outputs": [],
357 | "source": [
358 | "data = pd.DataFrame()"
359 | ]
360 | },
361 | {
362 | "cell_type": "code",
363 | "execution_count": 8,
364 | "metadata": {},
365 | "outputs": [],
366 | "source": [
367 | "data[\"Ns\"] = [31, 19, 56, 67, 88, 6, 18, 40, 36, 54, 35, 52, 51, 47, 27, 36, 64, 68, 61, 30]\n",
368 | "data[\"Ni\"] = [41, 22, 63, 71, 90, 7, 14, 41, 49, 79, 52, 76, 74, 66, 39, 44, 86, 90, 91, 41]\n",
369 | "data[\"A\"] = [40, 20, 60, 80, 90, 15, 20, 40, 40, 60, 40, 70, 49, 50, 36, 40, 50, 50, 60, 30]"
370 | ]
371 | },
372 | {
373 | "cell_type": "code",
374 | "execution_count": 9,
375 | "metadata": {},
376 | "outputs": [
377 | {
378 | "data": {
379 | "text/html": [
380 | "\n",
381 | "\n",
394 | "
\n",
395 | " \n",
396 | " \n",
397 | " | \n",
398 | " Ns | \n",
399 | " Ni | \n",
400 | " A | \n",
401 | "
\n",
402 | " \n",
403 | " \n",
404 | " \n",
405 | " 0 | \n",
406 | " 31 | \n",
407 | " 41 | \n",
408 | " 40 | \n",
409 | "
\n",
410 | " \n",
411 | " 1 | \n",
412 | " 19 | \n",
413 | " 22 | \n",
414 | " 20 | \n",
415 | "
\n",
416 | " \n",
417 | " 2 | \n",
418 | " 56 | \n",
419 | " 63 | \n",
420 | " 60 | \n",
421 | "
\n",
422 | " \n",
423 | " 3 | \n",
424 | " 67 | \n",
425 | " 71 | \n",
426 | " 80 | \n",
427 | "
\n",
428 | " \n",
429 | " 4 | \n",
430 | " 88 | \n",
431 | " 90 | \n",
432 | " 90 | \n",
433 | "
\n",
434 | " \n",
435 | " 5 | \n",
436 | " 6 | \n",
437 | " 7 | \n",
438 | " 15 | \n",
439 | "
\n",
440 | " \n",
441 | " 6 | \n",
442 | " 18 | \n",
443 | " 14 | \n",
444 | " 20 | \n",
445 | "
\n",
446 | " \n",
447 | " 7 | \n",
448 | " 40 | \n",
449 | " 41 | \n",
450 | " 40 | \n",
451 | "
\n",
452 | " \n",
453 | " 8 | \n",
454 | " 36 | \n",
455 | " 49 | \n",
456 | " 40 | \n",
457 | "
\n",
458 | " \n",
459 | " 9 | \n",
460 | " 54 | \n",
461 | " 79 | \n",
462 | " 60 | \n",
463 | "
\n",
464 | " \n",
465 | " 10 | \n",
466 | " 35 | \n",
467 | " 52 | \n",
468 | " 40 | \n",
469 | "
\n",
470 | " \n",
471 | " 11 | \n",
472 | " 52 | \n",
473 | " 76 | \n",
474 | " 70 | \n",
475 | "
\n",
476 | " \n",
477 | " 12 | \n",
478 | " 51 | \n",
479 | " 74 | \n",
480 | " 49 | \n",
481 | "
\n",
482 | " \n",
483 | " 13 | \n",
484 | " 47 | \n",
485 | " 66 | \n",
486 | " 50 | \n",
487 | "
\n",
488 | " \n",
489 | " 14 | \n",
490 | " 27 | \n",
491 | " 39 | \n",
492 | " 36 | \n",
493 | "
\n",
494 | " \n",
495 | " 15 | \n",
496 | " 36 | \n",
497 | " 44 | \n",
498 | " 40 | \n",
499 | "
\n",
500 | " \n",
501 | " 16 | \n",
502 | " 64 | \n",
503 | " 86 | \n",
504 | " 50 | \n",
505 | "
\n",
506 | " \n",
507 | " 17 | \n",
508 | " 68 | \n",
509 | " 90 | \n",
510 | " 50 | \n",
511 | "
\n",
512 | " \n",
513 | " 18 | \n",
514 | " 61 | \n",
515 | " 91 | \n",
516 | " 60 | \n",
517 | "
\n",
518 | " \n",
519 | " 19 | \n",
520 | " 30 | \n",
521 | " 41 | \n",
522 | " 30 | \n",
523 | "
\n",
524 | " \n",
525 | "
\n",
526 | "
"
527 | ],
528 | "text/plain": [
529 | " Ns Ni A\n",
530 | "0 31 41 40\n",
531 | "1 19 22 20\n",
532 | "2 56 63 60\n",
533 | "3 67 71 80\n",
534 | "4 88 90 90\n",
535 | "5 6 7 15\n",
536 | "6 18 14 20\n",
537 | "7 40 41 40\n",
538 | "8 36 49 40\n",
539 | "9 54 79 60\n",
540 | "10 35 52 40\n",
541 | "11 52 76 70\n",
542 | "12 51 74 49\n",
543 | "13 47 66 50\n",
544 | "14 27 39 36\n",
545 | "15 36 44 40\n",
546 | "16 64 86 50\n",
547 | "17 68 90 50\n",
548 | "18 61 91 60\n",
549 | "19 30 41 30"
550 | ]
551 | },
552 | "execution_count": 9,
553 | "metadata": {},
554 | "output_type": "execute_result"
555 | }
556 | ],
557 | "source": [
558 | "data"
559 | ]
560 | },
561 | {
562 | "cell_type": "code",
563 | "execution_count": 10,
564 | "metadata": {},
565 | "outputs": [],
566 | "source": [
567 | "description = \"\"\"Donald S. Miller, Kevin D. Crowley, Roy K. Dokka, Rex F. Galbraith, Bart J. Kowallis, Charles W. Naeser,\n",
568 | "Results of interlaboratory comparison of fission track ages for 1992 fission track workshop,\n",
569 | "Nuclear Tracks and Radiation Measurements,\n",
570 | "Volume 21, Issue 4,\n",
571 | "1993,\n",
572 | "Pages 565-573,\n",
573 | "ISSN 0969-8078,\n",
574 | "https://doi.org/10.1016/1359-0189(93)90197-H.\n",
575 | "\"\"\"\n",
576 | "\n",
577 | "metadata = {\"zeta\": 350., \n",
578 | " \"zeta error\": 10.0 / 350., \n",
579 | " \"nd\": 2936, \n",
580 | " \"rhod\": 1.304, \n",
581 | " \"unit area graticule\": 0.90,\n",
582 | " \"Ns\":\"Number of spontaneous tracks\",\n",
583 | " \"Ni\":\"Number of induced tracks in the detector\",\n",
584 | " \"Area\": \"Number of graticules squares\",\n",
585 | " \"description\": \"\"}"
586 | ]
587 | },
588 | {
589 | "cell_type": "code",
590 | "execution_count": 11,
591 | "metadata": {},
592 | "outputs": [],
593 | "source": [
594 | "h5store(\"Miller.h5\", data, **metadata)"
595 | ]
596 | }
597 | ],
598 | "metadata": {
599 | "kernelspec": {
600 | "display_name": "Python 3",
601 | "language": "python",
602 | "name": "python3"
603 | },
604 | "language_info": {
605 | "codemirror_mode": {
606 | "name": "ipython",
607 | "version": 3
608 | },
609 | "file_extension": ".py",
610 | "mimetype": "text/x-python",
611 | "name": "python",
612 | "nbconvert_exporter": "python",
613 | "pygments_lexer": "ipython3",
614 | "version": "3.8.3"
615 | }
616 | },
617 | "nbformat": 4,
618 | "nbformat_minor": 4
619 | }
620 |
--------------------------------------------------------------------------------
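Note on the dataset notebook above: each DataFrame is written to HDF5 together with its irradiation metadata (zeta, zeta error, nd, rhod, graticule unit area) through the h5store helper defined earlier in the notebook. As a minimal sketch only, assuming the helper follows the common pandas.HDFStore pattern of attaching metadata to the storer attributes (the actual helper in the notebook may differ), writing and reading the file back could look like this:

import pandas as pd

def h5store(filename, df, **kwargs):
    # Sketch (requires the PyTables backend): store the counts table under a
    # fixed key and attach the keyword metadata (zeta, nd, rhod, ...) to the
    # storer attributes.
    with pd.HDFStore(filename) as store:
        store.put("data", df)
        store.get_storer("data").attrs.metadata = kwargs

def h5load(filename):
    # Read back both the counts table and the attached metadata.
    with pd.HDFStore(filename) as store:
        df = store["data"]
        metadata = store.get_storer("data").attrs.metadata
    return df, metadata

# Example: reload the Gleadow dataset written above.
# counts, meta = h5load("Gleadow.h5")
# print(meta["zeta"], meta["rhod"], meta["nd"])

--------------------------------------------------------------------------------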
/docs/images/image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/docs/images/image1.png
--------------------------------------------------------------------------------
/docs/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/docs/images/logo.png
--------------------------------------------------------------------------------
/docs/readthedocs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = pyFTracks
8 | SOURCEDIR = src
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/readthedocs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=src
11 | set BUILDDIR=build
12 | set SPHINXPROJ=pyFTracks
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/readthedocs/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/docs/readthedocs/requirements.txt
--------------------------------------------------------------------------------
/docs/readthedocs/src/Installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 |
--------------------------------------------------------------------------------
/docs/readthedocs/src/UserGuide.rst:
--------------------------------------------------------------------------------
1 | User Guide
2 | ==========
3 |
4 |
--------------------------------------------------------------------------------
/docs/readthedocs/src/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | # import os
16 | # import sys
17 | # sys.path.insert(0, os.path.abspath('.'))
18 |
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 | project = 'pyFTracks'
23 | copyright = '2020, Romain Beucher'
24 | author = 'Romain Beucher'
25 |
26 | # The short X.Y version
27 | version = ''
28 | # The full version, including alpha/beta/rc tags
29 | release = '0.2.7'
30 |
31 |
32 | # -- General configuration ---------------------------------------------------
33 |
34 | # If your documentation needs a minimal Sphinx version, state it here.
35 | #
36 | # needs_sphinx = '1.0'
37 |
38 | # Add any Sphinx extension module names here, as strings. They can be
39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 | # ones.
41 | extensions = [
42 | 'sphinx.ext.autodoc',
43 | 'sphinx.ext.doctest',
44 | 'sphinx.ext.intersphinx',
45 | 'sphinx.ext.todo',
46 | 'sphinx.ext.coverage',
47 | 'sphinx.ext.mathjax',
48 | 'sphinx.ext.ifconfig',
49 | 'sphinx.ext.viewcode',
50 | 'sphinx.ext.githubpages',
51 | ]
52 |
53 | # Add any paths that contain templates here, relative to this directory.
54 | #templates_path = ['_templates']
55 |
56 | source_parsers = {
57 | '.md': 'recommonmark.parser.CommonMarkParser',
58 | }
59 |
60 | # The suffix(es) of source filenames.
61 | # You can specify multiple suffix as a list of string:
62 | #
63 | source_suffix = ['.rst']
64 |
65 | # The master toctree document.
66 | master_doc = 'index'
67 |
68 | # The language for content autogenerated by Sphinx. Refer to documentation
69 | # for a list of supported languages.
70 | #
71 | # This is also used if you do content translation via gettext catalogs.
72 | # Usually you set "language" from the command line for these cases.
73 | language = None
74 |
75 | # List of patterns, relative to source directory, that match files and
76 | # directories to ignore when looking for source files.
77 | # This pattern also affects html_static_path and html_extra_path .
78 | exclude_patterns = []
79 |
80 | # The name of the Pygments (syntax highlighting) style to use.
81 | pygments_style = 'sphinx'
82 |
83 |
84 | # -- Options for HTML output -------------------------------------------------
85 |
86 | # The theme to use for HTML and HTML Help pages. See the documentation for
87 | # a list of builtin themes.
88 | #
89 | import sphinx_rtd_theme
90 |
91 | html_theme = 'sphinx_rtd_theme'
92 |
93 | # Add any paths that contain custom themes here, relative to this directory.
94 | #html_theme_path = []
95 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
96 | html_logo = 'img/logo.png'
97 |
98 | # Theme options are theme-specific and customize the look and feel of a theme
99 | # further. For a list of options available for each theme, see the
100 | # documentation.
101 | html_theme_options = {
102 | 'logo_only': True,
103 | 'display_version': False,
104 | }
105 |
106 | # Add any paths that contain custom static files (such as style sheets) here,
107 | # relative to this directory. They are copied after the builtin static files,
108 | # so a file named "default.css" will overwrite the builtin "default.css".
109 | #html_static_path = ['_static']
110 |
111 | # Custom sidebar templates, must be a dictionary that maps document names
112 | # to template names.
113 | #
114 | # The default sidebars (for documents that don't match any pattern) are
115 | # defined by theme itself. Builtin themes are using these templates by
116 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
117 | # 'searchbox.html']``.
118 | #
119 | #html_sidebars = {}
120 |
121 | # -- Options for HTMLHelp output ---------------------------------------------
122 |
123 | # Output file base name for HTML help builder.
124 | htmlhelp_basename = 'pyFTracksdoc'
125 |
126 |
127 | # -- Options for LaTeX output ------------------------------------------------
128 |
129 | latex_elements = {
130 | # The paper size ('letterpaper' or 'a4paper').
131 | #
132 | # 'papersize': 'letterpaper',
133 |
134 | # The font size ('10pt', '11pt' or '12pt').
135 | #
136 | # 'pointsize': '10pt',
137 |
138 | # Additional stuff for the LaTeX preamble.
139 | #
140 | # 'preamble': '',
141 |
142 | # Latex figure (float) alignment
143 | #
144 | # 'figure_align': 'htbp',
145 | }
146 |
147 | # Grouping the document tree into LaTeX files. List of tuples
148 | # (source start file, target name, title,
149 | # author, documentclass [howto, manual, or own class]).
150 | latex_documents = [
151 | (master_doc, 'pyFTracks.tex', 'pyFTracks Documentation',
152 | 'Romain Beucher', 'manual'),
153 | ]
154 |
155 |
156 | # -- Options for manual page output ------------------------------------------
157 |
158 | # One entry per manual page. List of tuples
159 | # (source start file, name, description, authors, manual section).
160 | man_pages = [
161 | (master_doc, 'pyFTracks', 'pyFTracks Documentation',
162 | [author], 1)
163 | ]
164 |
165 |
166 | # -- Options for Texinfo output ----------------------------------------------
167 |
168 | # Grouping the document tree into Texinfo files. List of tuples
169 | # (source start file, target name, title, author,
170 | # dir menu entry, description, category)
171 | texinfo_documents = [
172 | (master_doc, 'pyFTracks', 'pyFTracks Documentation',
173 | author, 'pyFTracks', 'Fission Track modelling in Python',
174 | 'Miscellaneous'),
175 | ]
176 |
177 |
178 | # -- Options for Epub output -------------------------------------------------
179 |
180 | # Bibliographic Dublin Core info.
181 | epub_title = project
182 | epub_author = author
183 | epub_publisher = author
184 | epub_copyright = copyright
185 |
186 | # The unique identifier of the text. This can be a ISBN number
187 | # or the project homepage.
188 | #
189 | # epub_identifier = ''
190 |
191 | # A unique identification for the text.
192 | #
193 | # epub_uid = ''
194 |
195 | # A list of files that should not be packed into the epub file.
196 | epub_exclude_files = ['search.html']
197 |
198 |
199 | # -- Extension configuration -------------------------------------------------
200 |
201 | # -- Options for intersphinx extension ---------------------------------------
202 |
203 | # Example configuration for intersphinx: refer to the Python standard library.
204 | intersphinx_mapping = {'https://docs.python.org/': None}
205 |
206 | # -- Options for todo extension ----------------------------------------------
207 |
208 | # If true, `todo` and `todoList` produce output, else they produce nothing.
209 | todo_include_todos = True
210 |
--------------------------------------------------------------------------------
/docs/readthedocs/src/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/docs/readthedocs/src/img/logo.png
--------------------------------------------------------------------------------
/docs/readthedocs/src/index.rst:
--------------------------------------------------------------------------------
1 | pyFTracks: Fission Track Modelling with Python
2 | ==============================================
3 |
4 |
5 | .. toctree::
6 | :maxdepth: 2
7 |
8 | Installation
9 | User Guide
10 |
11 | Examples
12 |
--------------------------------------------------------------------------------
/joss/paper.bib:
--------------------------------------------------------------------------------
1 | @article{Galbraith1988,
2 | author = {Galbraith, R. F.},
3 | doi = {10.2307/1270081},
4 | issn = {00401706},
5 | journal = {Technometrics},
6 | month = {aug},
7 | number = {3},
8 | pages = {271},
9 | title = {{Graphical Display of Estimates Having Differing Standard Errors}},
10 | url = {https://www.jstor.org/stable/1270081?origin=crossref},
11 | volume = {30},
12 | year = {1988}
13 | }
14 |
15 | @article{Galbraith1990,
16 | author = {Galbraith, R.F.},
17 | doi = {10.1016/1359-0189(90)90036-W},
18 | issn = {13590189},
19 | journal = {International Journal of Radiation Applications and Instrumentation. Part D. Nuclear Tracks and Radiation Measurements},
20 | month = {jan},
21 | number = {3},
22 | pages = {207--214},
23 | title = {{The radial plot: Graphical assessment of spread in ages}},
24 | url = {https://linkinghub.elsevier.com/retrieve/pii/135901899090036W},
25 | volume = {17},
26 | year = {1990}
27 | }
28 |
29 |
30 | @article{Carlson1999,
31 | abstract = {Annealing rates for fission tracks in apatite vary markedly as a complex function of composition, based on an experimental study of 15 well-characterized, compositionally diverse apatites. Extensive annealing data were obtained in 69 experiments (durations of 1, 10, 100, and 1000 h at temperatures from 75 to 400 C) on each of four apatites, three with near end-member occupancy of the halogen site by F, Cl, and OH, plus the well-known apatite from Durango, Mexico. These results were supplemented by less-comprehensive annealing data from 12 experiments over the same range of time and temperature on each of the remaining 11 apatites. Measurements of initial fission-track length, a parameter of considerable importance to the derivation of time-temperature paths from fission-track data, reveal substantial variations from one apatite to another; initial lengths are best predicted from etch figures. Interlaboratory comparisons of data on annealing kinetics highlight discrepancies that appear to result largely from differences in the precision and accuracy of experimental temperatures. None of the factors previously proposed as the dominant compositional controls on annealing rates can account completely for annealing behavior over the full range of compositions studied. Nevertheless, relative rates of annealing among all apatites are highly systematic, which allows this data set to be used in its entirety to constrain multikinetic annealing models that predict fission-track lengths as a function of time and temperature.},
32 | author = {Carlson, W D and Donelick, R A and Ketcham, R A},
33 | issn = {0003004X},
34 | journal = {American Mineralogist},
35 | number = {9},
36 | pages = {1213--1223},
37 | pmid = {33143},
38 | publisher = {Mineral Soc America},
39 | title = {{Variability of apatite fission-track annealing kinetics: I. Experimental results}},
40 | url = {http://ammin.geoscienceworld.org/cgi/content/abstract/84/9/1213},
41 | volume = {84},
42 | year = {1999}
43 | }
44 |
45 | @article{Ketcham1999,
46 | author = {Ketcham, Richard and Donelick, Raymond and Carlson, William},
47 | year = {1999},
48 | month = {09},
49 | pages = {1235-1255},
50 | title = {Variability of apatite fission-track annealing kinetics; III, Extrapolation to geological time scales},
51 | volume = {84},
52 | journal = {American Mineralogist},
53 | doi = {10.2138/am-1999-0903}
54 | }
55 |
56 | @article{Ketcham2000,
57 | author = {Ketcham, Richard and Donelick, R.A. and Donelick, M.B.},
58 | year = {2000},
59 | month = {01},
60 | pages = {(electronic)},
61 | title = {AFTSolve: A program for multi-kinetic modeling of apatite fission-track data},
62 | volume = {2},
63 | journal = {Geological Materials Research}
64 | }
65 |
66 | @article{Ketcham2003,
67 | author = {Ketcham, Richard A},
68 | file = {::},
69 | pages = {817--829},
70 | title = {{Observations on the relationship between crystallographic orientation and biasing in apatite fission-track measurements}},
71 | volume = {88},
72 | year = {2003}
73 | }
74 |
75 | @article{Ketcham2005,
76 | abstract = {The systems discussed in this volume broadly share three features: parent isotopes, daughter products, and one or more time-dependent, temperature-sensitive processes by which daughter products are altered or lost. If these processes can be measured in the},
77 | author = {Ketcham, R. A.},
78 | doi = {10.2138/rmg.2005.58.11},
79 | file = {::},
80 | issn = {1529-6466},
81 | journal = {Reviews in Mineralogy and Geochemistry},
82 | mendeley-groups = {HELFRAG{\_}PAPER{\_}2},
83 | month = {jan},
84 | number = {1},
85 | pages = {275--314},
86 | title = {{Forward and Inverse Modeling of Low-Temperature Thermochronometry Data}},
87 | url = {http://rimg.geoscienceworld.org/cgi/content/full/58/1/275?ijkey=d0a048bc12552d0e9196dab003dfa0023adde3f8{\&}keytype2=tf{\_}ipsecsha papers2://publication/uuid/5017D79C-0DE6-496E-B8B9-647A8481E4A0 http://rimg.geoscienceworld.org/cgi/doi/10.2138/rmg.2005.58.11},
88 | volume = {58},
89 | year = {2005}
90 | }
91 |
92 | @article{Ehlers2005,
93 | abstract = {The programs have since been extended to include ( - )/ and 40 Ar/ 39 Ar ages, as well as FT ages. The programs now include modern diffusion data for all of the minerals commonly dated for , ranging from dating of to Ar dating of},
94 | author = {Ehlers, T. A. and Chaudhri, T. and Kumar, S. and Fuller, C. W. and Willett, S. D. and Ketcham, R. A. and Fu, F. Q.},
95 | doi = {10.2138/rmg.2005.58.22},
96 | issn = {1529-6466},
97 | journal = {Reviews in Mineralogy and Geochemistry},
98 | mendeley-groups = {HELFRAG{\_}PAPER{\_}1},
99 | month = {jan},
100 | number = {58},
101 | pages = {589--622},
102 | title = {{Computational tools for low-temperature thermochronometer interpretation}},
103 | url = {http://rimg.geoscienceworld.org/cgi/content/abstract/58/1/589 papers2://publication/uuid/97D57EF7-3CC7-429F-B3B1-EA8DD5A6EE97 http://rimg.geoscienceworld.org/cgi/doi/10.2138/rmg.2005.58.22},
104 | volume = {58},
105 | year = {2005}
106 | }
107 |
108 | @article{Ketcham2007,
109 | author = {Ketcham, R. A. and Carter, A. and Donelick, R. A. and Barbarand, J. and Hurford, A. J.},
110 | doi = {10.2138/am.2007.2280},
111 | file = {::},
112 | issn = {0003-004X},
113 | journal = {American Mineralogist},
114 | keywords = {anisotropy,annealing,apatite,fission-track,thermochronology},
115 | month = {may},
116 | number = {5-6},
117 | pages = {789--798},
118 | title = {{Improved measurement of fission-track annealing in apatite using c-axis projection}},
119 | url = {http://ammin.geoscienceworld.org/cgi/doi/10.2138/am.2007.2280},
120 | volume = {92},
121 | year = {2007}
122 | }
123 |
124 | @article{Gallagher2012,
125 | author = {Gallagher, Kerry},
126 | doi = {10.1029/2011JB008825},
127 | issn = {0148-0227},
128 | journal = {Journal of Geophysical Research},
129 | mendeley-groups = {HELFRAG{\_}PAPER{\_}1},
130 | month = {feb},
131 | number = {B2},
132 | pages = {B02408},
133 | title = {{Transdimensional inverse thermal history modeling for quantitative thermochronology}},
134 | url = {http://www.agu.org/pubs/crossref/2012/2011JB008825.shtml http://doi.wiley.com/10.1029/2011JB008825},
135 | volume = {117},
136 | year = {2012}
137 | }
138 |
--------------------------------------------------------------------------------
/joss/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'pyFTracks: Fission-Track modelling in Python'
3 | tags:
4 | - Python
5 | - geology
6 | - earth sciences
7 | - thermochronology
8 | - fission-track
9 | - geochronology
10 | - geochemistry
11 | - tectonics
12 | authors:
13 | - name: Romain Beucher
14 | orcid: 0000-0003-3891-5444
15 | affiliation: "1"
16 | - name: Roderick Brown
17 | orcid: 0000-0002-0763-3711
18 | affiliation: "2"
19 | - name: Louis Moresi
20 | orcid: 0000-0003-3685-174X
21 | affiliation: "1"
22 | - name: Fabian Kohlmann
23 | orcid:
24 | affiliation: "3"
25 |
26 | affiliations:
27 | - name: Research School of Earth Sciences, The Australian National University, Canberra, Australia
28 | index: 1
29 | - name: School of Geographical and Earth Sciences, The University of Glasgow, Glasgow, United Kingdom
30 | index: 2
31 | - name: Lithodat Pty Ltd, Melbourne, Australia
32 | index: 3
33 |
34 | date: 05 March 2020
35 | bibliography: paper.bib
36 | ---
37 |
38 | # Summary
39 |
40 | # Audience
41 |
42 | # Acknowledgments
43 |
44 | Romain Beucher is financially supported by AuScope as
45 | part of the Simulation Analysis Modelling platform (SAM).
46 |
47 | # References
48 |
--------------------------------------------------------------------------------
/pyFTracks/.gitignore:
--------------------------------------------------------------------------------
1 | *.c
2 | .ipynb_checkpoints
3 |
--------------------------------------------------------------------------------
/pyFTracks/__init__.py:
--------------------------------------------------------------------------------
1 | from .annealing import Ketcham1999, Ketcham2007
2 | from .structures import Grain, Sample
3 | from .viewer import Viewer
4 | from .age_calculations import calculate_central_age as central_age
5 | from .age_calculations import calculate_pooled_age as pooled_age
6 | from .age_calculations import calculate_ages as single_grain_ages
7 | from .age_calculations import chi_square as chi2_test
8 | from .thermal_history import ThermalHistory
9 |
--------------------------------------------------------------------------------
/pyFTracks/age_calculations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.stats import chi2
3 |
4 | def chi_square(Ns, Ni):
5 | r"""Return the p-value of the $\chi^2$ homogeneity test applied to the (Ns, Ni) grain counts."""
6 |
7 | NsNi = np.ndarray((len(Ns), 2))
8 | NsNi[:, 0] = Ns
9 | NsNi[:, 1] = Ni
10 |
11 | length = len(Ns)
12 | Ns = sum(Ns)
13 | Ni = sum(Ni)
14 |
15 | X2 = 0.
16 | for Nsj, Nij in NsNi:
17 | X2 += (Nsj*Ni - Nij*Ns)**2 / (Nsj + Nij)
18 |
19 | X2 *= 1.0/(Ns*Ni)
20 | rv = chi2(length - 1)
21 | return 1.0 - rv.cdf(X2)
22 |
23 | def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, Nd, sigma=0.15):
24 | """Function to calculate central age."""
25 |
26 | Ns = np.array(Ns)
27 | Ni = np.array(Ni)
28 |
29 | # We just replace 0 counts with a low value, the age will be rounded to
30 | # 2 decimals. That should take care of the zero count issue.
31 | Ns = np.where(Ns == 0, 1e-10, Ns) # Do this to account for 0 track counts
32 | Ni = np.where(Ni == 0, 1e-10, Ni) # Do this to account for 0 track counts
33 |
34 | # Calculate mj
35 | LAMBDA = 1.55125e-4
36 | G = 0.5
37 | m = Ns + Ni
38 | p = Ns / m
39 |
40 | theta = np.sum(Ns) / np.sum(m)
41 |
42 | for i in range(0, 30):
43 | w = m / (theta * (1 - theta) + (m - 1) * theta**2 * (1 - theta)**2 * sigma**2)
44 | sigma = sigma * np.sqrt(np.sum(w**2 * (p - theta)**2) / np.sum(w))
45 | theta = np.sum(w * p) / np.sum(w)
46 |
47 | t = (1.0 / LAMBDA) * np.log( 1.0 + G * LAMBDA * zeta * rhod * (theta) / (1.0 - theta))
48 | se = np.sqrt(1 / (theta**2 * (1.0 - theta)**2 * np.sum(w)) + 1.0 / Nd + (seZeta / zeta)**2) * t
49 |
50 | return {"Central": np.round(t, 2), "se": np.round(se, 2), "sigma": np.round(sigma, 2)}
51 |
52 | def calculate_pooled_age(Ns, Ni, zeta, seZeta, rhod, Nd):
53 |
54 | # We just replace 0 counts with a low value, the age will be rounded to
55 | # 2 decimals. That should take care of the zero count issue.
56 | Ns = np.where(Ns == 0, 1e-10, Ns) # Do this to account for 0 track counts
57 | Ni = np.where(Ni == 0, 1e-10, Ni) # Do this to account for 0 track counts
58 |
59 | Ns = np.sum(Ns)
60 | Ni = np.sum(Ni)
61 |
62 | LAMBDA = 1.55125e-4
63 | G = 0.5
64 | t = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)
65 | se = t * (1.0 / Ns + 1.0 / Ni + 1 / Nd + seZeta**2)**0.5
66 |
67 | return {"Pooled Age": np.round(t, 2), "se": np.round(se, 2)}
68 |
69 | def calculate_ages(Ns, Ni, zeta, seZeta, rhod, Nd):
70 |
71 | # We just replace 0 counts with a low value, the age will be rounded to
72 | # 2 decimals. That should take care of the zero count issue.
73 | Ns = np.where(Ns == 0, 1e-10, Ns) # Do this to account for 0 track counts
74 | Ni = np.where(Ni == 0, 1e-10, Ni) # Do this to account for 0 track counts
75 |
76 | Ns = np.array(Ns)
77 | Ni = np.array(Ni)
78 | Nd = np.array(Nd)
79 |
80 | def true_divide(val):
81 | with np.errstate(divide='ignore', invalid='ignore'):
82 | val = np.true_divide(1.0, val)
83 | val[val == np.inf] = 0
84 | return np.nan_to_num(val)
85 |
86 | # Calculate mj
87 | LAMBDA = 1.55125e-4
88 | G = 0.5
89 | t = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)
90 |
91 | se = (true_divide(Ns) + true_divide(Ni) + 1.0 / Nd + seZeta**2)**0.5
92 | se *= t
93 |
94 | return {"Age(s)": np.round(t, 2), "se(s)": np.round(se, 2)}
95 |
96 |
--------------------------------------------------------------------------------
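Usage note for age_calculations.py: the pooled age above follows the standard fission-track age equation t = (1 / LAMBDA) * ln(1 + G * LAMBDA * zeta * rhod * Ns / Ni), with LAMBDA the total 238U decay constant in 1/Myr and G = 0.5 the geometry factor. A minimal sketch of how these functions can be called, reusing the calibration values stored in the Gleadow notebook above (zeta = 380, relative zeta error = 5/380, rhod = 1.257, nd = 8188) and the first five grains of that dataset; the values are for illustration only:

from pyFTracks.age_calculations import (chi_square,
                                        calculate_pooled_age,
                                        calculate_central_age)

# First five grains of the Gleadow dataset (spontaneous and induced counts)
Ns = [0, 2, 18, 2, 10]
Ni = [11, 11, 28, 4, 78]

# Calibration values taken from the notebook metadata above
zeta, se_zeta, rhod, nd = 380.0, 5.0 / 380.0, 1.257, 8188

print(chi_square(Ns, Ni))                                      # p-value of the homogeneity test
print(calculate_pooled_age(Ns, Ni, zeta, se_zeta, rhod, nd))   # {"Pooled Age": ..., "se": ...}
print(calculate_central_age(Ns, Ni, zeta, se_zeta, rhod, nd))  # {"Central": ..., "se": ..., "sigma": ...}

--------------------------------------------------------------------------------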
/pyFTracks/annealing.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .utilities import draw_from_distrib, drawbinom
3 | from .viewer import Viewer
4 | import cython
5 | import numpy as np
6 | cimport numpy as np
7 | from libc.math cimport exp, pow, log
8 | from pyFTracks.structures import Sample
9 |
10 | _MIN_OBS_RCMOD = 0.13
11 |
12 |
13 | cdef struct annealModel:
14 | double c0, c1, c2, c3, a, b
15 |
16 | cdef correct_observational_bias(double rcmod):
17 | """
18 | Does the conversion from length to density for the Ketcham et al., 1999 model.
19 |
20 | The observational bias quantifies the relative probability of observation among different
21 | fission-track populations calculated by the model. Highly annealed populations are less
22 | likely to be detected and measured than less-annealed populations for two primary reasons:
23 | - Shorter tracks are less frequently impinged and thus etched
24 | - At advanced stages of annealing some proportion of tracks at high angles to the c-axis
25 | may be lost altogether, even though lower-angle tracks remain long
26 | Thus the number of detectable tracks in the more annealed population diminishes, at a rate
27 | disproportionate to the measured mean length (Ketcham 2003b). These two factors can be approximated
28 | in a general way by using an empirical function that relates measured fission-track length to
29 | fission-track density (e.g. Green 1998). The following is taken from Ketcham et al. 2000.
30 | """
31 | if (rcmod >= 0.765):
32 | return 1.600 * rcmod - 0.600
33 | # because very short fission tracks are undetectable, they should be eliminated from model results.
34 | # We assume a minimum detectable length of 2.18 µm, or a reduced length of 0.13,
35 | # the shortest track observed in over 38,000 measurements in the Carlson et al. (1999) data set.
36 | # (Ketcham, 2000)
37 | if (rcmod >= _MIN_OBS_RCMOD):
38 | return 9.205 * rcmod * rcmod - 9.157 * rcmod + 2.269
39 | return 0.0
40 |
41 |
42 | cdef calculate_reduced_stddev(double redLength, int doProject):
43 | """Calculates the reduced standard deviation of a track population length
44 | from the reduced mean length. Based on Carlson and Donelick"""
45 | if doProject:
46 | return(0.1081 - 0.1642 * redLength + 0.1052 * redLength * redLength)
47 | else:
48 | return(0.4572 - 0.8815 * redLength + 0.4947 * redLength * redLength)
49 |
50 |
51 | cdef calculate_mean_reduced_length_ketcham1999(double redLength, int usedCf):
52 | # Californium irradiation of apatite can be a useful technique for increasing the number
53 | # of confined tracks. It will however change the biasing of track detection.
54 | # If it is necessary to calculate the mean rather than c-axis-projected lengths, we
55 | # use the empirical function provided by Ketcham et al 1999.
56 | if usedCf:
57 | return 1.396 * redLength - 0.4017
58 | else:
59 | return -1.499 * redLength * redLength + 4.150 * redLength - 1.656
60 |
61 |
62 | cdef calculate_mean_reduced_length_ketcham2003(double redLength, int usedCf):
63 | # Californium irradiation of apatite can be a useful technique for increasing the number
64 | # of confined tracks. It will however change the biasing of track detection.
65 | # If it is necessary to calculate the mean rather than c-axis-projected lengths, we
66 | # use the empirical function provided by Ketcham et al 2003.
67 | if usedCf:
68 | return -0.4720 + 1.4701 * redLength
69 | else:
70 | return -1.2101 + 3.0864 * redLength - 0.8792 * redLength * redLength
71 |
72 |
73 | _seconds_in_megayears = 31556925974700
74 |
75 | class AnnealingModel():
76 |
77 | def __init__(self, kinetic_parameters: dict, use_projected_track: bool=False,
78 | use_Cf_irradiation: bool =False):
79 |
80 | self.use_projected_track = use_projected_track
81 | self.use_Cf_irradiation = use_Cf_irradiation
82 | self._kinetic_parameters = kinetic_parameters
83 |
84 | @property
85 | def history(self):
86 | return self._history
87 |
88 | @history.setter
89 | def history(self, value):
90 | self._history = value
91 |
92 | @property
93 | def kinetic_parameters(self):
94 | return self._kinetic_parameters
95 |
96 | @kinetic_parameters.setter
97 | def kinetic_parameters(self, value):
98 | self._kinetic_parameters = value
99 |
100 | @property
101 | def rmr0(self):
102 | kinetic_type = list(self.kinetic_parameters.keys())[0]
103 | kinetic_value = self.kinetic_parameters[kinetic_type]
104 | return self._kinetic_conversion[kinetic_type].__func__(kinetic_value)
105 |
106 | def _sum_populations(self, track_l0=16.1, nbins=200):
107 |
108 | cdef double init_length = track_l0
109 | cdef double[::1] time = np.ascontiguousarray(self.history.time)
110 | cdef double[::1] reduced_lengths = np.ascontiguousarray(self.reduced_lengths)
111 | cdef int first_node = self.first_node
112 |
113 | cdef double[::1] pdfAxis = np.zeros((nbins))
114 | cdef double[::1] cdf = np.zeros((nbins))
115 | cdef double[::1] pdf = np.zeros((nbins))
116 | cdef double min_length = 2.15
117 | cdef int project = self.use_projected_track
118 | cdef int usedCf = self.use_Cf_irradiation
119 | cdef int num_points_pdf = nbins
120 | cdef int numTTNodes = time.shape[0]
121 |
122 | cdef int i, j
123 | cdef double weight, rStDev, obsBias, calc, rmLen, z
124 | cdef double wt1, wt2
125 |
126 | cdef double SQRT2PI = 2.50662827463
127 | cdef double U238MYR = 1.55125e-4
128 |
129 | for i in range(num_points_pdf):
130 | pdf[i] = 0.
131 |
132 | for i in range(num_points_pdf):
133 | pdfAxis[i] = (i * 1.0 + 0.5) * 20.0 / num_points_pdf
134 |
135 | wt1 = exp(U238MYR * time[first_node]) / U238MYR
136 |
137 | for j in range(first_node, numTTNodes - 1):
138 |
139 | wt2 = exp(U238MYR * time[j+1]) / U238MYR
140 | weight = wt1 - wt2
141 | wt1 = wt2
142 |
143 | # Californium irradiation of apatite can be a useful technique for increasing the number
144 | # of confined tracks. It will however change the biasing of track detection.
145 | # If it is necessary to calculate the mean rather than c-axis-projected lengths, we
146 | # use the empirical function provided by Ketcham et al 1999.
147 | rmLen = calculate_mean_reduced_length_ketcham1999(reduced_lengths[j], usedCf)
148 |
149 | rStDev = calculate_reduced_stddev(rmLen, project)
150 | obsBias = correct_observational_bias(rmLen)
151 | calc = weight * obsBias / (rStDev * SQRT2PI)
152 |
153 | if rmLen > 0:
154 | for i in range(num_points_pdf):
155 | if pdfAxis[i] >= min_length:
156 | z = (rmLen - pdfAxis[i] / init_length) / rStDev
157 | if z <= 4.:
158 | pdf[i] += calc * exp(-(z*z) / 2.0)
159 |
160 | self.pdf_axis = np.array(pdfAxis)
161 | self.pdf = np.array(pdf)
162 | self.pdf /= self.pdf.sum()
163 | self.cdf = self.pdf.cumsum()
164 | self.MTL = np.sum(self.pdf_axis * self.pdf)
165 | self.STD = np.sqrt(np.sum(self.pdf_axis**2 * self.pdf) - self.MTL**2)
166 |
167 | return self.pdf_axis, self.pdf, self.MTL
168 |
169 | def calculate_age(self, track_l0=16.1, std_length_reduction=0.893):
170 | """ Predict the pooled fission-track age
171 |
172 | We assume that each time step of length dt will contribute dt to the
173 | total fission track age, modified by the amount of track density reduction of
174 | the population in that time step, relative to the age standard.
175 |
176 | The total age is the sum of all contributions
177 |
178 | std_length_reduction: Estimated fission track density reduction in the age standard.
179 | The density reduction in the age standard is calculated using its estimated
180 | track length reduction, using the assumption that density reduction is proportional to
181 | length reduction, and that spontaneous fission tracks are initially as long as induced
182 | tracks.
183 |
184 | If for a fission-track worker the Durango apatite has a measured present day spontaneous
185 | mean track length of 14.47 um and a mean induced track length of 16.21 um, then the
186 | estimated length reduction is 14.47/16.21 = 0.893
187 | """
188 |
189 | self.annealing_model()
190 | self._sum_populations(track_l0)
191 |
192 | cdef double[::1] time = np.ascontiguousarray(self.history.time * _seconds_in_megayears )
193 | cdef double[::1] reduced_lengths = np.ascontiguousarray(self.reduced_lengths)
194 | cdef int first_node = self.first_node
195 |
196 | cdef double cstd_length_reduction = std_length_reduction
197 |
198 | cdef double oldest_age
199 | cdef double ft_model_age
200 | cdef double reduced_density
201 |
202 | cdef int node
203 | cdef double midLength
204 | cdef long long secinmyr = _seconds_in_megayears
205 |
206 | cdef int numTTNodes = time.shape[0]
207 |
208 | reduced_density = 0.0
209 | ft_model_age = 0.0
210 | oldest_age = time[first_node] / secinmyr
211 |
212 | for node in range(numTTNodes - 2):
213 | # Take midpoint length as the mean of the endpoints. This conforms to
214 | # Willett (1992) and is also described in the Ketcham 2000 AFTSolve implementation.
215 | midLength = (reduced_lengths[node] + reduced_lengths[node+1]) / 2.0
216 | ft_model_age += correct_observational_bias(midLength) * (time[node] - time[node+1])
217 | reduced_density += correct_observational_bias(midLength)
218 |
219 | ft_model_age += correct_observational_bias(reduced_lengths[numTTNodes - 2]) * (time[node] - time[node+1])
220 | reduced_density += correct_observational_bias(reduced_lengths[numTTNodes - 2])
221 | reduced_density /= cstd_length_reduction * (numTTNodes-2)
222 |
223 | ft_model_age /= cstd_length_reduction * secinmyr
224 |
225 | self.oldest_age = oldest_age
226 | self.ft_model_age = ft_model_age
227 | self.reduced_density = reduced_density
228 |
229 | return self.oldest_age, self.ft_model_age, self.reduced_density
230 |
231 | solve = calculate_age
232 |
233 | def generate_synthetic_counts(self, Nc=30):
234 | """Generate Synthetic AFT data.
235 |
236 | Parameters:
237 | Nc : Number of crystals
238 |
239 | """
240 | rho = self.reduced_density
241 |
242 | # Probability in binomial distribution
243 | prob = rho / (1. + rho)
244 |
245 | # For Nc crystals, generate synthetic Ns and Ni
246 | # count data using binomial distribution, conditional
247 | # on total counts Ns + Ni, sampled randomly with
248 | # a maximum of 100.
249 |
250 | NsNi = np.random.randint(5, 100, Nc)
251 | Ns = np.array([drawbinom(I, prob) for I in NsNi])
252 | Ni = NsNi - Ns
253 | return Ns, Ni
254 |
255 | def generate_synthetic_lengths(self, ntl=100):
256 | tls = draw_from_distrib(self.pdf_axis, self.pdf, ntl)
257 | return tls
258 |
259 | def generate_synthetic_sample(self, counts=30, ntl=100):
260 | tls = self.generate_synthetic_lengths(ntl)
261 | Ns, Ni = self.generate_synthetic_counts(counts)
262 | A = np.random.randint(10, 100, Ns.size)
263 | data = {"Ns": Ns, "Ni": Ni, "A": A}
264 | sample = Sample(data)
265 | sample.track_lengths = tls
266 | sample.pooled_age = self.ft_model_age
267 | return sample
268 |
269 |
270 |
271 | class Ketcham1999(AnnealingModel):
272 |
273 | @staticmethod
274 | def convert_Dpar_to_rmr0(dpar):
275 | if dpar <= 1.75:
276 | return 0.84
277 | elif dpar >= 4.58:
278 | return 0.
279 | else:
280 | return 1.0 - np.exp(0.647 * (dpar - 1.75) - 1.834)
281 |
282 | @staticmethod
283 | def convert_Cl_pfu_to_rmr0(clpfu):
284 | value = np.abs(clpfu - 1.0)
285 | if value <= 0.130:
286 | return 0.0
287 | else:
288 | return 1.0 - np.exp(2.107 * (1.0 - value) - 1.834)
289 |
290 | @staticmethod
291 | def convert_Cl_weight_pct(clwpct):
292 | clwpct *= 0.2978
293 | return Ketcham1999.convert_Cl_pfu_to_rmr0(clwpct)
294 |
295 | @staticmethod
296 | def convert_OH_pfu_to_rmr0(ohpfu):
297 | value = np.abs(ohpfu - 1.0)
298 | return 0.84 * (1.0 - (1.0 - value)**4.5)
299 |
300 | _kinetic_conversion = {"ETCH_PIT_LENGTH": convert_Dpar_to_rmr0,
301 | "CL_PFU": convert_Cl_pfu_to_rmr0,
302 | "OH_PFU": convert_OH_pfu_to_rmr0,
303 | "RMR0": lambda x: x}
304 |
305 | def __init__(self, kinetic_parameters: dict, use_projected_track: bool =False,
306 | use_Cf_irradiation: bool =False):
307 |
308 | super(Ketcham1999, self).__init__(
309 | kinetic_parameters,
310 | use_projected_track,
311 | use_Cf_irradiation)
312 |
313 | def annealing_model(self):
314 |
315 | # Must be in seconds (do conversion)
316 | cdef double[::1] time = np.ascontiguousarray(self.history.time * _seconds_in_megayears)
317 | # Must be in Kelvin
318 | cdef double[::1] temperature = np.ascontiguousarray(self.history.temperature)
319 | cdef int numTTnodes = time.shape[0]
320 | cdef double[::1] reduced_lengths = np.zeros(time.shape[0] - 1)
321 | cdef double crmr0 = self.rmr0
322 | cdef int first_node = 0
323 |
324 | cdef int node, nodeB
325 | cdef double equivTime
326 | cdef double timeInt, x1, x2, x3
327 | cdef double equivTotAnnLen
328 | cdef double k
329 | cdef double calc
330 | cdef double tempCalc
331 | cdef double MIN_OBS_RCMOD = _MIN_OBS_RCMOD
332 |
333 | # Fanning Curvilinear Model lcMod FC, see Ketcham 1999, Table 5e
334 | # The preferred equation presented in Ketcham et al 1999 describes the apatite
335 | # B2 from the Carlson et al 1999 data set. The apatite, which is a chlor-hydroxy apatite from
336 | # Norway, showed the most resistance to annealing.
337 | cdef annealModel modKetch99 = annealModel(
338 | c0=-19.844,
339 | c1=0.38951,
340 | c2=-51.253,
341 | c3=-7.6423,
342 | a=-0.12327,
343 | b=-11.988)
344 |
345 | k = 1 - crmr0
346 |
347 | equivTotAnnLen = pow(MIN_OBS_RCMOD, 1.0 / k) * (1.0 - crmr0) + crmr0
348 |
349 | equivTime = 0.
350 | tempCalc = log(1.0 / ((temperature[numTTnodes - 2] + temperature[numTTnodes - 1]) / 2.0))
351 |
352 | for node in range(numTTnodes - 2, -1, -1):
353 | # We calculate the modeled reduced length (length normalized by
354 | # initial length of a fission track parallel to the c-axis (Donelick 1999))
355 | # after an isothermal annealing episode at a temperature T (Kelvin) of
356 | # duration t (seconds)
357 | timeInt = time[node] - time[node + 1] + equivTime
358 | x1 = (log(timeInt) - modKetch99.c2) / (tempCalc - modKetch99.c3)
359 | x2 = 1.0 + modKetch99.a * (modKetch99.c0 + modKetch99.c1 * x1)
360 |
361 | if x2 <= 0:
362 | reduced_lengths[node] = 0.
363 | else:
364 | reduced_lengths[node] = pow(x2, 1.0 / modKetch99.a)
365 | x3 = 1.0 - modKetch99.b * reduced_lengths[node]
366 | if x3 <= 0:
367 | reduced_lengths[node] = 0.0
368 | else:
369 | reduced_lengths[node] = pow(x3, 1.0 / modKetch99.b)
370 |
371 | if reduced_lengths[node] < equivTotAnnLen:
372 | reduced_lengths[node] = 0.
373 |
374 | # Check to see if we've reached the end of the length distribution
375 | # If so, we then do the kinetic conversion.
376 | if reduced_lengths[node] == 0.0 or node == 0:
377 | if node > 0:
378 | node += 1
379 | first_node = node
380 |
381 | for nodeB in range(first_node, numTTnodes - 1):
382 | if reduced_lengths[nodeB] < crmr0:
383 | reduced_lengths[nodeB] = 0.0
384 | first_node = nodeB
385 | else:
386 | # This is equation 8 from Ketcham et al, 1999
387 | # Apatites with the composition of B2 are very rare; B2 is
388 | # significantly more resistant than the most common variety, near
389 | # end member fluorapatite.
390 | # Ketcham 1999 showed that the reduced length of any apatite could
391 | # be related to the length of an apatite that is relatively more resistant
392 | # (hence use of B2)
393 | reduced_lengths[nodeB] = pow((reduced_lengths[nodeB] - crmr0) / (1.0 - crmr0), k)
394 | break
395 |
396 | # Update tiq for this time step
397 | if reduced_lengths[node] < 0.999:
398 | tempCalc = log(1.0 / ((temperature[node-1] + temperature[node]) / 2.0))
399 | equivTime = pow((1.0 - pow(reduced_lengths[node], modKetch99.b)) / modKetch99.b, modKetch99.a)
400 | equivTime = ((equivTime - 1.0) / modKetch99.a - modKetch99.c0) / modKetch99.c1
401 | equivTime = exp(equivTime * (tempCalc - modKetch99.c3) + modKetch99.c2)
402 |
403 | self.reduced_lengths = np.array(reduced_lengths)
404 | self.first_node = first_node
405 | return self.reduced_lengths, self.first_node
406 |
407 |
408 | class Ketcham2007(AnnealingModel):
409 |
410 | @staticmethod
411 | def convert_Dpar_to_rmr0(dpar, etchant="5.5HNO3"):
412 | """ Here depends on the etchant (5.5 or 5.0 HNO3)
413 | This is based on the relation between the fitted rmr0 values and
414 | the Dpar etched using a 5.5M etchant as published in
415 | Ketcham et al, 2007,Figure 6b
416 | We use the linear conversion defined in Ketcham et al 2007 to
417 | make sure that we are using 5.5M DPar"""
418 | if etchant == "5.0HNO3":
419 | dpar = 0.9231 * dpar + 0.2515
420 | if dpar <= 1.75:
421 | return 0.84
422 | elif dpar >= 4.58:
423 | return 0
424 | else:
425 | return 0.84 * ((4.58 - dpar) / 2.98)**0.21
426 |
427 | @staticmethod
428 | def convert_Cl_pfu_to_rmr0(clpfu):
429 | """ Relation between fitted rmr0 value from the fanning curvilinear model and
430 | Cl content is taken from Ketcham et al 2007 Figure 6a """
431 | value = np.abs(clpfu - 1.0)
432 | if value <= 0.130:
433 | return 0.0
434 | else:
435 | return 0.83 * ((value - 0.13) / 0.87)**0.23
436 |
437 | @staticmethod
438 | def convert_Cl_weight_pct(clwpct):
439 | # Convert %wt to APFU
440 | return Ketcham2007.convert_Cl_pfu_to_rmr0(clwpct * 0.2978)
441 |
442 |
443 | @staticmethod
444 | def convert_unit_paramA_to_rmr0(paramA):
445 | if paramA >= 9.51:
446 | return 0.0
447 | else:
448 | return 0.84 * ((9.509 - paramA) / 0.162)**0.175
449 |
450 | _kinetic_conversion = {"ETCH_PIT_LENGTH": convert_Dpar_to_rmr0,
451 | "CL_PFU": convert_Cl_pfu_to_rmr0,
452 | "RMR0": lambda x: x}
453 |
454 | def __init__(self, kinetic_parameters: bool, use_projected_track: bool =False,
455 | use_Cf_irradiation: bool=False):
456 |
457 | super(Ketcham2007, self).__init__(
458 | kinetic_parameters,
459 | use_projected_track,
460 | use_Cf_irradiation)
461 |
462 | def annealing_model(self):
463 | cdef double[::1] time = np.ascontiguousarray(self.history.time * _seconds_in_megayears)
464 | cdef double[::1] temperature = np.ascontiguousarray(self.history.temperature)
465 | cdef int numTTnodes = time.shape[0]
466 | cdef double[::1] reduced_lengths = np.zeros(time.shape[0] - 1)
467 | cdef double crmr0 = self.rmr0
468 | cdef int first_node = 0
469 |
470 | cdef int node, nodeB
471 | cdef double equivTime
472 | cdef double timeInt, x1, x2
473 | cdef double equivTotAnnLen
474 | cdef double k
475 | cdef double calc
476 | cdef double tempCalc
477 | cdef double MIN_OBS_RCMOD = _MIN_OBS_RCMOD
478 |
479 | cdef annealModel modKetch07 = annealModel(
480 | c0=0.39528,
481 | c1=0.01073,
482 | c2=-65.12969,
483 | c3=-7.91715,
484 | a=0.04672,
485 | b=0)
486 |
487 | k = 1.04 - crmr0
488 |
489 | equivTotAnnLen = pow(MIN_OBS_RCMOD, 1.0 / k) * (1.0 - crmr0) + crmr0
490 |
491 | equivTime = 0.
492 | tempCalc = log(1.0 / ((temperature[numTTnodes - 2] + temperature[numTTnodes - 1]) / 2.0))
493 |
494 | for node in range(numTTnodes - 2, -1, -1):
495 | timeInt = time[node] - time[node + 1] + equivTime
496 | x1 = (log(timeInt) - modKetch07.c2) / (tempCalc - modKetch07.c3)
497 | x2 = pow(modKetch07.c0 + modKetch07.c1 * x1, 1.0 / modKetch07.a) + 1.0
498 | reduced_lengths[node] = 1.0 / x2
499 |
500 | if reduced_lengths[node] < equivTotAnnLen:
501 | reduced_lengths[node] = 0.
502 |
503 | # Check to see if we've reached the end of the length distribution
504 | # If so, we then do the kinetic conversion.
505 | if reduced_lengths[node] == 0.0 or node == 0:
506 | if node > 0:
507 | node += 1
508 | first_node = node
509 |
510 | for nodeB in range(first_node, numTTnodes - 1):
511 | if reduced_lengths[nodeB] < crmr0:
512 | reduced_lengths[nodeB] = 0.0
513 | first_node = nodeB
514 | else:
515 | # This is equation 8 from Ketcham et al, 1999
516 | reduced_lengths[nodeB] = pow((reduced_lengths[nodeB] - crmr0) / (1.0 - crmr0), k)
517 | break
518 |
519 | # Update tiq for this time step
520 | if reduced_lengths[node] < 0.999:
521 | tempCalc = log(1.0 / ((temperature[node-1] + temperature[node]) / 2.0))
522 | equivTime = pow(1.0 / reduced_lengths[node] - 1.0, modKetch07.a)
523 | equivTime = (equivTime - modKetch07.c0) / modKetch07.c1
524 | equivTime = exp(equivTime * (tempCalc - modKetch07.c3) + modKetch07.c2)
525 |
526 | self.reduced_lengths = np.array(reduced_lengths)
527 | self.first_node = first_node
528 | return self.reduced_lengths, self.first_node
--------------------------------------------------------------------------------
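Usage note for annealing.pyx: both Ketcham1999 and Ketcham2007 take one kinetic parameter (for example a Dpar value under the "ETCH_PIT_LENGTH" key) and a thermal history with time in Myr and temperature in Kelvin; calculate_age() then runs the annealing model, builds the track-length distribution, and returns the oldest age, the model fission-track age and the reduced density. A minimal sketch, assuming ThermalHistory accepts (time, temperature) positionally as it is called in path_generators.py and that time is given as age before present, oldest first (the exact ordering convention is handled by ThermalHistory); the cooling path and Dpar value are made up for illustration:

import numpy as np
from pyFTracks import Ketcham2007, ThermalHistory

# Hypothetical linear cooling path: 100 Ma to present, 120 C down to 20 C.
time = np.linspace(100.0, 0.0, 51)                    # Myr, oldest first (assumed convention)
temperature = np.linspace(120.0, 20.0, 51) + 273.15   # converted to Kelvin

model = Ketcham2007(kinetic_parameters={"ETCH_PIT_LENGTH": 1.65})  # Dpar in microns (illustrative)
model.history = ThermalHistory(time, temperature)

oldest_age, ft_model_age, reduced_density = model.calculate_age()
print(ft_model_age, model.MTL)   # model age (Myr) and mean track length from _sum_populations

--------------------------------------------------------------------------------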
/pyFTracks/path_generators.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pyFTracks.thermal_history import ThermalHistory
3 | import scipy
4 |
5 | class MonteCarloPathGenerator(object):
6 |
7 | def __init__(self, time_range, temperature_range, npaths=1000, inbetween_points=2):
8 |
9 | time_range = np.array(time_range)
10 | temperature_range = np.array(temperature_range)
11 |
12 | # If time is not increasing, reverse arrays
13 | if not np.all(np.diff(time_range) > 0):
14 | time_range = time_range[::-1]
15 |
16 | if np.any(temperature_range < 273.):
17 | print("It looks like you have entered temperature in Celsius...Converting temperature to Kelvin")
18 | temperature_range = temperature_range + 273.15
19 |
20 | self.time_range = np.array(time_range)
21 | self.fact_time = self.time_range[-1]
22 | self.time_range = self.time_range / self.fact_time
23 |
24 | self.temperature_range = np.array(temperature_range)
25 | self.fact_temperature = np.diff(self.temperature_range)
26 | self.temperature_range = (self.temperature_range - 273.15) / self.fact_temperature
27 |
28 | self.inbetween_points = self.n = inbetween_points
29 | self.npaths = npaths
30 |
31 | self.constraints = []
32 |
33 | self.add_constraint({'time': (0., 0.), 'temperature': (0., 20.)})
34 | self.TTPaths = None
35 | self._annealing_model = None
36 | self.goodness_of_fit_values = None
37 |
38 | @property
39 | def annealing_model(self):
40 | return self._annealing_model
41 |
42 | @annealing_model.setter
43 | def annealing_model(self, value):
44 | self._annealing_model = value
45 |
46 | def add_constraint(self, constraint):
47 |
48 | def convert_time(time):
49 | time = np.array(time)
50 | if np.all(time < 0.):
51 | time = time[::-1]
52 | return time
53 |
54 | def convert_temperature(temperature):
55 | temperature = np.array(temperature)
56 | if np.any(temperature < 273):
57 | temperature = temperature + 273.15
58 | return temperature
59 |
60 | if isinstance(constraint, list):
61 | self.constraints += constraint
62 | for item in constraint:
63 | item["time"] = convert_time(item["time"])
64 | item["temperature"] = convert_temperature(item["temperature"])
65 | else:
66 | constraint["time"] = convert_time(constraint["time"])
67 | constraint["temperature"] = convert_temperature(constraint["temperature"])
68 | self.constraints.append(constraint)
69 | return self.constraints
70 |
71 | def clear_constraints(self):
72 | self.constraints = []
73 |
74 | def generate_paths(self):
75 |
76 | nconstraints = len(self.constraints)
77 | npoints = nconstraints * (1 + (2**self.n - 1))
78 |
79 | time = np.random.rand(self.npaths, npoints)
80 | time = (1.0 - time)
81 | # Final time is always present time
82 | time[:, -1] = 0.
83 |
84 | for index, constrain in enumerate(self.constraints):
85 | constrain_time = constrain['time'] / self.fact_time
86 | mask = ~np.any((time >= min(constrain_time)) & (time <= max(constrain_time)), axis=1)
87 | time[mask, index] = np.random.rand(np.count_nonzero(mask),) * (max(constrain_time) - min(constrain_time)) + min(constrain_time)
88 |
89 | time = np.sort(time, axis=1)
90 |
91 | temperature = np.random.rand(self.npaths, npoints)
92 |
93 | for index, constrain in enumerate(self.constraints):
94 | constrain_temp = (constrain['temperature'] - 273.15) / self.fact_temperature
95 | constrain_time = constrain['time'] / self.fact_time
96 | i, j = np.where((time >= min(constrain_time)) & (time <= max(constrain_time)))
97 | shape = i.shape[0]
98 | temperature[i, j] = np.random.rand(shape,) * (max(constrain_temp) - min(constrain_temp)) + min(constrain_temp)
99 |
100 | self.TTPaths = np.ndarray((self.npaths, npoints, 2))
101 | self.TTPaths[:, :, 0] = time * self.fact_time
102 | self.TTPaths[:, :, 1] = temperature * self.fact_temperature + 273.15
103 | return self.TTPaths
104 |
105 | def run(self, measured_lengths, measured_age, measured_age_error):
106 |
107 | if not self.annealing_model:
108 | raise ValueError("""Please provide an Annealing Model""")
109 |
110 | self.goodness_of_fit_values = []
111 |
112 | for path in self.TTPaths:
113 | time, temperature = path[:, 0], path[:, 1]
114 | history = ThermalHistory(time, temperature)
115 | self.annealing_model.history = history
116 | self.annealing_model.calculate_age()
117 | self.goodness_of_fit_values.append(self.merit_function(measured_lengths, measured_age, measured_age_error))
118 |
119 | # sort TTPaths
120 | self.goodness_of_fit_values = np.array(self.goodness_of_fit_values)
121 | self.TTPaths = self.TTPaths[np.argsort(self.goodness_of_fit_values)][::-1]
122 | self.goodness_of_fit_values = self.goodness_of_fit_values[np.argsort(self.goodness_of_fit_values)][::-1]
123 |
124 | return
125 |
126 | def merit_function(self, measured_lengths, age=None, age_error=None):
127 | # We first evaluate goodness of fit for the track length distribution using
128 | # a Kolmogorov-Smirnov test.
129 | # The test relies on 2 parameters:
130 | # 1) The maximum separation between 2 cumulative distribution functions which represent the
131 | # measured and modelled track length.
132 | # 2) The number of measurements
133 |
134 | # The result of the test is the probability that a set of samples taken randomly
135 | # from the known modelled distribution would have a greater maximum separation from it on
136 | # a cdf plot than is observed for the sample distribution being tested.
137 |
138 | # The number of tracks counted is the statistical constraint on how well-defined the fission
139 | # track length distribution is, we assume that the model distribution is completely known and
140 | # we test the measured distribution against it
141 |
142 | # A K-S probability of 0.05 means that, if N random samples were taken from the distribution
143 | # described by the calculation result, where N is the number of FT length actually measured, there would be a 5% chance that
144 | # the resulting distribution would have a greater maximum separation from the model on a cdf plot than
145 | # is observed between the data and the model.
146 | KS_test_lengths = scipy.stats.rv_discrete(values=(self.annealing_model.pdf_axis, self.annealing_model.pdf))
147 | KS_test_lengths = scipy.stats.kstest(measured_lengths, KS_test_lengths.cdf)[1]
148 |
149 | # Now do the age
150 | norm = scipy.stats.norm()
151 | value = (self.annealing_model.ft_model_age - age) / age_error
152 | KS_test_age = 1.0 - scipy.stats.kstest(np.array([value]), norm.cdf)[1]
153 | return min(KS_test_age, KS_test_lengths)
154 |
155 |
156 | def plot_paths(self, new=False):
157 |
158 | import matplotlib.pyplot as plt
159 | from matplotlib.collections import LineCollection
160 | from matplotlib.patches import Rectangle
161 | from matplotlib.collections import PatchCollection
162 | import matplotlib as mpl
163 |
164 | #Create a new colormap
165 | cmap = mpl.colors.ListedColormap(["pink", "green", "grey"])
166 | bounds = [0., 0.05, 0.5, 1.0]
167 | norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
168 |
169 | if self.goodness_of_fit_values is not None and not new:
170 | colors = cmap(norm(self.goodness_of_fit_values))
171 | else:
172 | colors = "grey"
173 |
174 | if new:
175 | self.generate_paths()
176 | fig, ax = plt.gcf(), plt.gca()
177 | ax.set_xlim(self.TTPaths[:, :, 0].max(), self.TTPaths[:, :, 0].min())
178 | ax.set_ylim(self.TTPaths[:, :, 1].max(), self.TTPaths[:, :, 1].min())
179 |
180 | lines = LineCollection(self.TTPaths, linestyle='solid', colors=colors)
181 | ax.add_collection(lines)
182 |
183 | patches = []
184 |
185 | for constrain in self.constraints:
186 | dx = abs(constrain["time"][1] - constrain["time"][0])
187 | dy = abs(constrain["temperature"][1] - constrain["temperature"][0])
188 | x = constrain["time"][0]
189 | y = constrain["temperature"][0]
190 | patches.append(Rectangle([x, y], dx, dy))
191 |
192 | rectangles = PatchCollection(patches, color="red", facecolor='none', zorder=20)
193 | ax.add_collection(rectangles)
194 |
195 | ax.set_title('Time Temperature Paths')
196 | ax.set_xlabel('Time in Myr')
197 |         ax.set_ylabel('Temperature in Kelvin')
198 | return ax
--------------------------------------------------------------------------------
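
As a side note on the merit function above: the length part of the fit is a one-sample Kolmogorov-Smirnov test of the measured lengths against the modelled discrete length distribution. Below is a minimal sketch of that comparison, assuming hypothetical pdf_axis/pdf arrays in place of the annealing model output:

import numpy as np
import scipy.stats

# Hypothetical modelled track-length distribution (axis in microns, probabilities sum to 1)
pdf_axis = np.arange(10, 17)
pdf = np.array([0.05, 0.10, 0.20, 0.30, 0.20, 0.10, 0.05])

# Hypothetical measured track lengths (microns)
measured_lengths = np.array([12.0, 13.0, 13.0, 14.0, 15.0, 13.0, 12.0, 14.0])

# Build a discrete distribution from the model and test the measurements against its CDF,
# mirroring the length part of merit_function
model = scipy.stats.rv_discrete(values=(pdf_axis, pdf))
ks_p_value = scipy.stats.kstest(measured_lengths, model.cdf)[1]
print(ks_p_value)
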
/pyFTracks/radialplot/FTradialplot.py:
--------------------------------------------------------------------------------
1 | from matplotlib.axes import Axes
2 | from matplotlib.projections import register_projection
3 | from matplotlib.patches import Arc
4 | from matplotlib import collections as mc
5 | import numpy as np
6 | import math
7 | import matplotlib.pyplot as plt
8 | from matplotlib.ticker import LinearLocator
9 | from matplotlib.ticker import MaxNLocator
10 | from .radialplot import ZAxis, Radialplot
11 | from mpl_toolkits.axes_grid1 import make_axes_locatable
12 | from mpl_toolkits.axes_grid1.inset_locator import inset_axes
13 | from .age_calculations import calculate_central_age, calculate_pooled_age, calculate_ages
14 |
15 | LAMBDA = 1.55125e-4
16 | G = 0.5
17 |
18 | class ZAxisFT(ZAxis):
19 |
20 | def _add_radial_axis(self):
21 | # Get min and max angle
22 |
23 | theta1 = self.ax._t2axis_angle(self.zlim[0])
24 | theta2 = self.ax._t2axis_angle(self.zlim[1])
25 |
26 | # The circle is always centered around 0.
27 | # Width and height are equals (circle)
28 | # Here the easiest is probably to use axis coordinates. The Arc
29 | # is always centered at (0.,0.) and
30 |
31 | height = width = 2.0 * self.radius
32 | arc_element = Arc(
33 | (0, 0.5), width, height, angle=0., theta1=theta1,
34 | theta2=theta2, linewidth=1, zorder=0, color="k",
35 | transform=self.ax.transAxes)
36 |
37 | self.ax.add_patch(arc_element)
38 |
39 | # Add ticks
40 | self.ticks()
41 | self.labels()
42 | self.set_zlabel("Age Estimates (Myr)")
43 | self.add_values_indicators()
44 |
45 | def _get_radial_ticks_z(self):
46 | # Let's build the ticks of the Age axis
47 | za = self.ticks_locator()
48 | zr = self.ax._t2z(np.array(za)) - self.ax.z0
49 | return za
50 |
51 | def labels(self):
52 | # text label
53 | ticks = self.ticks_locator()
54 | angles = np.array([self.ax._t2axis_angle(val) for val in ticks])
55 | x = 1.02 * self.radius * np.cos(np.deg2rad(angles))
56 | y = 1.02 * self.radius * np.sin(np.deg2rad(angles)) + 0.5
57 |
58 | for idx, val in enumerate(ticks):
59 | self.ax.text(x[idx], y[idx], str(val), transform=self.ax.transAxes)
60 |
61 | def ticks(self):
62 |
63 | ticks = self.ticks_locator()
64 | angles = np.array([self.ax._t2axis_angle(val) for val in ticks])
65 | starts = np.ndarray((len(angles), 2))
66 | ends = np.ndarray((len(angles), 2))
67 | starts[:,0] = self.radius * np.cos(np.deg2rad(angles))
68 | starts[:,1] = self.radius * np.sin(np.deg2rad(angles)) + 0.5
69 | ends[:,0] = 1.01 * self.radius * np.cos(np.deg2rad(angles))
70 | ends[:,1] = 1.01 * self.radius * np.sin(np.deg2rad(angles)) + 0.5
71 |
72 | segments = np.stack((starts, ends), axis=1)
73 | lc = mc.LineCollection(segments, colors='k', linewidths=1, transform=self.ax.transAxes)
74 | self.ax.add_collection(lc)
75 |
76 | def ticks_locator(self, ticks=None):
77 | if not ticks:
78 | ages = self.ax._z2t(self.ax.z)
79 |             start, end = int(np.rint(min(ages))), int(np.rint(max(ages)))
80 | loc = MaxNLocator()
81 | ticks = loc.tick_values(start, end)
82 | return ticks
83 |
84 | class FTRadialplot(Radialplot):
85 |
86 |     """A Radialplot for fission track counts
87 |
88 | Returns:
89 | FTRadialPlot: Radialplot
90 | """
91 |
92 | name = "fission_track_radialplot"
93 |
94 | def radialplot(self, Ns, Ni, zeta, zeta_err, rhod, rhod_err,
95 | Dpars=None, name="unknown",
96 | transform="logarithmic", **kwargs):
97 |
98 | self.Ns = np.array(Ns)
99 | self.Ni = np.array(Ni)
100 | Ns = self.Ns[(self.Ns > 0) & (self.Ni > 0)]
101 | Ni = self.Ni[(self.Ns > 0) & (self.Ni > 0)]
102 | self.Ns = Ns
103 | self.Ni = Ni
104 | # Zeta and Zeta err have units of 10e-6 cm2
105 | self.zeta = zeta
106 | self.zeta_err = zeta_err
107 | self.rhod = rhod
108 | self.rhod_err = rhod_err
109 | self.Dpars = Dpars
110 | self.name = name
111 | self.transform = transform
112 |
113 | # Prepare the plot Area
114 | # Left spine
115 | self.set_ylim(-8, 8)
116 | self.set_yticks([-2, -1, 0, 1, 2])
117 | self.spines["left"].set_bounds(-2, 2)
118 | self.yaxis.set_ticks_position('left')
119 |
120 | self.set_xlim()
121 | self.set_xticks()
122 |
123 | self.spines["top"].set_visible(False)
124 | self.spines["right"].set_visible(False)
125 |
126 | im=self.scatter(self.x, self.y, c=Dpars, cmap="YlOrRd", **kwargs)
127 |         if Dpars is not None:
128 | divider = make_axes_locatable(self)
129 | if self.transform == "logarithmic":
130 | divider = make_axes_locatable(self.taxis)
131 | cax = divider.new_vertical(size="5%", pad=0.8, pack_start=True)
132 | self.figure.add_axes(cax)
133 | self.figure.colorbar(im, cax=cax, orientation="horizontal", label=r'Dpars ($\mu$m)')
134 |
135 | self._add_sigma_lines()
136 | self._add_shaded_area()
137 | self._add_central_line()
138 | self._add_stats()
139 |
140 | self.zaxis = ZAxisFT(self)
141 | self.zaxis._add_radial_axis()
142 |
143 | # Apply some default labels:
144 | self.set_ylabel("Standardised estimate y")
145 |
146 | def _second_axis(self):
147 |
148 | def tick_function(x):
149 | with np.errstate(divide='ignore'):
150 | v = 1./ x
151 | return ["{0}%".format(int(val*100)) if val != np.inf else "" for val in v]
152 |
153 | twin_axis = self.twiny()
154 | twin_axis.set_xlim(self.get_xlim())
155 |
156 | loc = MaxNLocator(5)
157 | ticks = loc.tick_values(0, self.max_x)
158 | twin_axis.spines["bottom"].set_bounds(ticks[0], ticks[-1])
159 |
160 | twin_axis.xaxis.set_ticks_position("bottom")
161 | twin_axis.xaxis.set_label_position("bottom")
162 | twin_axis.tick_params(axis="x", direction="in", pad=-15)
163 | twin_axis.spines["bottom"].set_position(("axes", 0.))
164 | twin_axis.set_frame_on(True)
165 | twin_axis.patch.set_visible(False)
166 | for key, sp in twin_axis.spines.items():
167 | sp.set_visible(False)
168 | twin_axis.spines["bottom"].set_visible(True)
169 |
170 | twin_axis.set_xticks(ticks)
171 | twin_axis.set_xticklabels(tick_function(ticks))
172 | twin_axis.set_xlabel(r'$\sigma / t$', labelpad=-30)
173 |
174 | self.taxis = twin_axis
175 | return
176 |
177 | def set_xticks(self, ticks=None):
178 | if ticks:
179 | super(Radialplot, self).set_xticks(ticks)
180 | else:
181 | if self.transform == "linear":
182 | loc = MaxNLocator(5)
183 | ticks = loc.tick_values(0., self.max_x)
184 | ticks2 = loc.tick_values(min(self.sez), max(self.sez))
185 | ticks2 = ticks2[::-1]
186 | ticks2[-1] = min(self.sez)
187 | super(Radialplot, self).set_xticks(1.0 / ticks2)
188 | labels = [str(int(val)) for val in ticks2]
189 | self.xaxis.set_ticklabels(labels)
190 | self.spines["bottom"].set_bounds(0., 1. / ticks2[-1])
191 | self.set_xlabel(r'$\sigma$ (Myr)')
192 | elif self.transform == "logarithmic":
193 | loc = MaxNLocator(5)
194 | ticks = loc.tick_values(0., self.max_x)
195 | super(Radialplot, self).set_xticks(ticks)
196 | self.spines["bottom"].set_bounds(ticks[0], ticks[-1])
197 | self.set_xlabel(r'$t / \sigma$')
198 | self._second_axis()
199 | elif self.transform == "arcsine":
200 | loc = MaxNLocator(5)
201 | ticks = loc.tick_values(0., self.max_x)
202 | super(Radialplot, self).set_xticks(ticks)
203 | labels = [str(int(val**2/4.0)) for val in ticks]
204 | self.xaxis.set_ticklabels(labels)
205 | self.spines["bottom"].set_bounds(ticks[0], ticks[-1])
206 | self.set_xlabel("Ns + Ni")
207 |
208 | @property
209 | def z(self):
210 | """ Return transformed z-values"""
211 | if self.transform == "linear":
212 | return 1.0 / LAMBDA * np.log(1.0 + G * self.zeta * LAMBDA * self.rhod * (self.Ns / self.Ni))
213 |
214 | if self.transform == "logarithmic":
215 | return np.log(G * self.zeta * LAMBDA * self.rhod * (self.Ns / self.Ni))
216 |
217 | if self.transform == "arcsine":
218 | return np.arcsin(np.sqrt((self.Ns + 3.0/8.0) / (self.Ns + self.Ni + 3.0 / 4.0)))
219 |
220 | @property
221 | def sez(self):
222 | """Return standard errors"""
223 |
224 | if self.transform == "linear":
225 | return self.z * np.sqrt( 1.0 / self.Ns + 1.0 / self.Ni)
226 |
227 | if self.transform == "logarithmic":
228 | return np.sqrt(1.0 / self.Ns + 1.0 / self.Ni)
229 |
230 | if self.transform == "arcsine":
231 | return 1.0 / (2.0 * np.sqrt(self.Ns + self.Ni))
232 |
233 | @property
234 | def z0(self):
235 | """ Return central age"""
236 |
237 | if self.transform == "linear":
238 | return np.sum(self.z / self.sez**2) / np.sum(1 / self.sez**2)
239 |
240 | if self.transform == "logarithmic":
241 | totalNs = np.sum(self.Ns)
242 | totalNi = np.sum(self.Ni)
243 | return np.log(G * self.zeta * LAMBDA * self.rhod * (totalNs / totalNi))
244 |
245 | if self.transform == "arcsine":
246 | return np.arcsin(np.sqrt(np.sum(self.Ns) / np.sum(self.Ns + self.Ni)))
247 |
248 | def _z2t(self, z):
249 |
250 | if self.transform == "linear":
251 | t = z
252 | return t
253 | elif self.transform == "logarithmic":
254 | NsNi = np.exp(z) / (self.zeta * G * LAMBDA * self.rhod)
255 | elif self.transform == "arcsine":
256 | NsNi = np.sin(z)**2 / (1.0 - np.sin(z)**2)
257 |
258 | t = 1.0 / LAMBDA * np.log(1.0 + G * self.zeta * LAMBDA * self.rhod * (NsNi))
259 | return t
260 |
261 | def _t2z(self, t):
262 |
263 | if t == 0:
264 | return 0
265 |
266 | if self.transform == "linear":
267 | return t
268 | elif self.transform == "logarithmic":
269 | return np.log(np.exp(LAMBDA * t) - 1)
270 | elif self.transform == "arcsine":
271 | return np.arcsin(
272 | 1.0 / np.sqrt(
273 | 1.0 + LAMBDA * self.zeta * G * self.rhod / (np.exp(LAMBDA * t) - 1.0)
274 | )
275 | )
276 |
277 | def get_central_age(self):
278 | data = calculate_central_age(self.Ns, self.Ni, self.zeta, self.zeta_err, self.rhod, self.rhod_err)
279 | self.central_age = data["Central"]
280 | self.central_age_error = data["se"]
281 | return data
282 |
283 | def get_pooled_age(self):
284 | data = calculate_pooled_age(self.Ns, self.Ni, self.zeta, self.zeta_err, self.rhod, self.rhod_err)
285 | self.pooled_age = data["Pooled Age"]
286 | self.pooled_age_error = data["se"]
287 | return data
288 |
289 | def get_single_ages(self):
290 | data = calculate_ages(self.Ns, self.Ni, self.zeta, self.zeta_err, self.rhod, self.rhod_err)
291 | return data
292 |
293 | def _add_stats(self):
294 |
295 | self.get_central_age()
296 | self.get_pooled_age()
297 | data = self.get_single_ages()
298 |
299 | self.mean_age = np.mean(data["Age(s)"])
300 | self.mean_age_error = np.mean(data["se(s)"])
301 |
302 | text = "{name} (n={n}) \n".format(name=self.name, n=len(self.Ns))
303 | text += "Central Age = {central_age:5.2f} +/- {central_age_error:5.2f} (1$\sigma$) \n".format(
304 | central_age=self.central_age, central_age_error=self.central_age_error
305 | )
306 | text += "Pooled Age = {pooled_age:5.2f} +/- {pooled_age_error:5.2f} (1$\sigma$) \n".format(
307 | pooled_age=self.pooled_age, pooled_age_error=self.pooled_age_error
308 | )
309 | text += "Mean Age = {mean_age:5.2f} +/- {mean_age_error:5.2f} (1$\sigma$) \n".format(
310 | mean_age=self.mean_age, mean_age_error=self.mean_age_error
311 | )
312 | text += "Dispersion = {dispersion} % \n".format(dispersion=0.)
313 | text += "P($\chi^2$) = {chi2}".format(chi2=0.)
314 | self.text(0., 0.95, text,
315 | horizontalalignment="left", verticalalignment="top",
316 | transform=self.transAxes)
317 | return
318 |
319 | register_projection(FTRadialplot)
320 |
321 | def radialplot(Ns=None, Ni=None, zeta=None, zeta_err=0., rhod=None, rhod_err=0., file=None,
322 | Dpars=None, name="unknown", transform="logarithmic", **kwargs):
323 | """Plot Fission Track counts using a RadialPlot (Galbraith Plot)
324 |
325 | Args:
326 | Ns (list or numpy array, optional):
327 | Spontaneous counts.
328 | Defaults to None.
329 | Ni (list or numpy array, optional):
330 | Induced counts.
331 | Defaults to None.
332 | zeta (float, optional):
333 | Zeta calibration parameter.
334 | Defaults to None.
335 | zeta_err (float, optional):
336 | Uncertainty on Zeta calibration parameter.
337 | Defaults to 0.
338 | rhod (float, optional):
339 | Rhod calibration parameter.
340 | Defaults to None.
341 | rhod_err (float, optional):
342 | Uncertainty on Rhod calibration parameter.
343 |             Defaults to 0.
344 | file (string, optional):
345 | Data File, for now pyRadialPlot only accepts
346 | file format similar to RadialPlotter.
347 | Defaults to None.
348 | Dpars (list or numpy array or float, optional):
349 | Dpars values associated with the grain counts.
350 | Defaults to None.
351 | transform (str, optional):
352 | Transformation used.
353 | Options are "linear", "logarithmic", "arcsine".
354 | Defaults to "logarithmic".
355 | kwargs: Matplotlib additional parameters.
356 |
357 | Returns:
358 | matplotlib.Axes:
359 | A Matplotlib Axes object.
360 | """
361 |
362 | fig = plt.figure(figsize=(6,6))
363 | if file:
364 | from .utilities import read_radialplotter_file
365 | data = read_radialplotter_file(file)
366 | Ns = data["Ns"]
367 | Ni = data["Ni"]
368 | zeta = data["zeta"]
369 | zeta_err = data["zeta_err"]
370 | rhod = data["rhod"]
371 | rhod_err = data["rhod_err"]
372 | if Dpars:
373 | Dpars = data["dpars"]
374 |
375 |     if Dpars is None and "color" not in kwargs:
376 | kwargs["color"] = "black"
377 |
378 | ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection="fission_track_radialplot")
379 | ax.radialplot(Ns, Ni, zeta, zeta_err, rhod, rhod_err, Dpars, name=name,
380 | transform=transform, **kwargs)
381 | return ax
382 |
--------------------------------------------------------------------------------
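
A minimal usage sketch for the radialplot helper above. All numbers are made up; zeta and rhod are illustrative placeholders in the pre-scaled units this module assumes:

import matplotlib.pyplot as plt
from pyFTracks.radialplot import radialplot

# Hypothetical spontaneous and induced track counts for five grains
Ns = [12, 25, 7, 30, 18]
Ni = [40, 60, 22, 75, 50]

ax = radialplot(Ns=Ns, Ni=Ni, zeta=350., zeta_err=10., rhod=1.5, rhod_err=0.05,
                name="example sample", transform="logarithmic")
plt.show()
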
/pyFTracks/radialplot/__init__.py:
--------------------------------------------------------------------------------
1 | from .radialplot import general_radial
2 | from .FTradialplot import radialplot
3 | from .utilities import read_radialplotter_file
4 |
--------------------------------------------------------------------------------
/pyFTracks/radialplot/age_calculations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.stats import chi2
3 |
4 | def chi_square(Ns, Ni):
5 |     """ Return the p-value associated with the $\chi^2_{\text{stat}}$ test on the Ns, Ni counts"""
6 |
7 | NsNi = np.ndarray((len(Ns), 2))
8 | NsNi[:, 0] = Ns
9 | NsNi[:, 1] = Ni
10 |
11 | length = len(Ns)
12 | Ns = sum(Ns)
13 | Ni = sum(Ni)
14 |
15 | X2 = 0.
16 | for Nsj, Nij in NsNi:
17 | X2 += (Nsj*Ni - Nij*Ns)**2 / (Nsj + Nij)
18 |
19 | X2 *= 1.0/(Ns*Ni)
20 | rv = chi2(length - 1)
21 | return 1.0 - rv.cdf(X2)
22 |
23 | def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, rhod_err, sigma=0.15):
24 | """Function to calculate central age."""
25 |
26 | Ns = np.array(Ns)
27 | Ni = np.array(Ni)
28 |
29 | # Calculate mj
30 | LAMBDA = 1.55125e-4
31 | G = 0.5
32 | m = Ns + Ni
33 | p = Ns / m
34 |
35 | theta = np.sum(Ns) / np.sum(m)
36 |
37 | for i in range(0, 30):
38 | w = m / (theta * (1 - theta) + (m - 1) * theta**2 * (1 - theta)**2 * sigma**2)
39 | sigma = sigma * np.sqrt(np.sum(w**2 * (p - theta)**2) / np.sum(w))
40 | theta = np.sum(w * p) / np.sum(w)
41 |
42 | t = (1.0 / LAMBDA) * np.log( 1.0 + G * LAMBDA * zeta * rhod * (theta) / (1.0 - theta))
43 | se = t * (1 / (theta**2 * (1.0 - theta)**2 * np.sum(w)) + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
44 |
45 | return {"Central": t, "se": se, "sigma": sigma}
46 |
47 | def calculate_pooled_age(Ns, Ni, zeta, seZeta, rhod, rhod_err):
48 |
49 | Ns = np.sum(Ns)
50 | Ni = np.sum(Ni)
51 |
52 | LAMBDA = 1.55125e-4
53 | G = 0.5
54 | t = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)
55 | se = t * (1.0 / Ns + 1.0 / Ni + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
56 |
57 | return {"Pooled Age": t, "se": se}
58 |
59 | def calculate_ages(Ns, Ni, zeta, seZeta, rhod, rhod_err):
60 |
61 | Ns = np.array(Ns)
62 | Ni = np.array(Ni)
63 |
64 | # Calculate mj
65 | LAMBDA = 1.55125e-4
66 | G = 0.5
67 | t = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)
68 | se = t * (1.0 / Ns + 1.0 / Ni + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
69 |
70 | return {"Age(s)": t, "se(s)": se}
71 |
72 |
--------------------------------------------------------------------------------
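
A quick sketch exercising the helpers above with made-up counts; zeta and rhod values are placeholders consistent with the units used elsewhere in the package:

from pyFTracks.radialplot.age_calculations import (
    calculate_central_age, calculate_pooled_age, calculate_ages, chi_square)

Ns = [12, 25, 7, 30, 18]   # hypothetical spontaneous counts
Ni = [40, 60, 22, 75, 50]  # hypothetical induced counts

central = calculate_central_age(Ns, Ni, zeta=350., seZeta=10., rhod=1.5, rhod_err=0.05)
pooled = calculate_pooled_age(Ns, Ni, zeta=350., seZeta=10., rhod=1.5, rhod_err=0.05)
single = calculate_ages(Ns, Ni, zeta=350., seZeta=10., rhod=1.5, rhod_err=0.05)
p_chi2 = chi_square(Ns, Ni)

print(central["Central"], central["se"])
print(pooled["Pooled Age"], pooled["se"])
print(single["Age(s)"])
print(p_chi2)
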
/pyFTracks/radialplot/radialplot.py:
--------------------------------------------------------------------------------
1 | from matplotlib.axes import Axes
2 | from matplotlib.projections import register_projection
3 | from matplotlib.patches import Arc
4 | from matplotlib import collections as mc
5 | import numpy as np
6 | import math
7 | import matplotlib.pyplot as plt
8 | from matplotlib.ticker import LinearLocator
9 | from matplotlib.ticker import MaxNLocator, AutoLocator
10 | from matplotlib.patches import Polygon
11 |
12 | class ZAxis(object):
13 |
14 | def __init__(self, ax):
15 | self.ax = ax
16 | self.radius = 0.9
17 |
18 | @property
19 | def zlim(self):
20 | ticks = self.ticks_locator()
21 | return (min(ticks), max(ticks))
22 |
23 | def _add_radial_axis(self):
24 | # Get min and max angle
25 |
26 | theta1 = self.ax._t2axis_angle(self.zlim[0])
27 | theta2 = self.ax._t2axis_angle(self.zlim[1])
28 |
29 | # The circle is always centered around 0.
30 | # Width and height are equals (circle)
31 | # Here the easiest is probably to use axis coordinates. The Arc
32 | # is always centered at (0.,0.) and
33 |
34 | height = width = 2.0 * self.radius
35 | arc_element = Arc(
36 | (0, 0.5), width, height, angle=0., theta1=theta1,
37 | theta2=theta2, linewidth=1, zorder=0, color="k",
38 | transform=self.ax.transAxes)
39 |
40 | self.ax.add_patch(arc_element)
41 |
42 | # Add ticks
43 | self.ticks()
44 | self.labels()
45 |         self.set_zlabel("Estimates (a.u.)")
46 | self.add_values_indicators()
47 |
48 | def _get_radial_ticks_z(self):
49 | # Let's build the ticks of the Age axis
50 | za = self.ticks_locator()
51 | zr = self.ax._t2z(np.array(za)) - self.ax.z0
52 | return za
53 |
54 | def ticks_locator(self, ticks=None):
55 | if not ticks:
56 | ages = self.ax._z2t(self.ax.z)
57 | start, end = np.min(ages), np.max(ages)
58 | loc = MaxNLocator()
59 | ticks = loc.tick_values(start, end)
60 | return ticks
61 |
62 | def set_zlabel(self, label):
63 | self.ax.text(1.05, 0.5, label, rotation=-90,
64 | horizontalalignment="center", verticalalignment="center",
65 | transform=self.ax.transAxes)
66 | return
67 |
68 | def labels(self):
69 | # text label
70 | ticks = self.ticks_locator()
71 | angles = np.array([self.ax._t2axis_angle(val) for val in ticks])
72 | x = 1.02 * self.radius * np.cos(np.deg2rad(angles))
73 | y = 1.02 * self.radius * np.sin(np.deg2rad(angles)) + 0.5
74 |
75 | for idx, val in enumerate(ticks):
76 | self.ax.text(x[idx], y[idx], "{0:5.1f}".format(val), transform=self.ax.transAxes)
77 |
78 | def ticks(self):
79 |
80 | ticks = self.ticks_locator()
81 | angles = np.array([self.ax._t2axis_angle(val) for val in ticks])
82 | starts = np.ndarray((len(angles), 2))
83 | ends = np.ndarray((len(angles), 2))
84 | starts[:,0] = self.radius * np.cos(np.deg2rad(angles))
85 | starts[:,1] = self.radius * np.sin(np.deg2rad(angles)) + 0.5
86 | ends[:,0] = 1.01 * self.radius * np.cos(np.deg2rad(angles))
87 | ends[:,1] = 1.01 * self.radius * np.sin(np.deg2rad(angles)) + 0.5
88 |
89 | segments = np.stack((starts, ends), axis=1)
90 | lc = mc.LineCollection(segments, colors='k', linewidths=1, transform=self.ax.transAxes)
91 | self.ax.add_collection(lc)
92 |
93 | def add_values_indicators(self):
94 | coords = np.ndarray((self.ax.x.size, 2))
95 | coords[:,0] = self.ax.x
96 | coords[:,1] = self.ax.y
97 | axis_to_data = self.ax.transAxes + self.ax.transData.inverted()
98 | data_to_axis = axis_to_data.inverted()
99 | coords = data_to_axis.transform(coords)
100 | angles = np.arctan((coords[:,1] - 0.5) / coords[:,0])
101 | starts = np.ndarray((len(angles), 2))
102 | ends = np.ndarray((len(angles), 2))
103 |
104 | starts[:,0] = (self.radius - 0.02) * np.cos(angles)
105 | starts[:,1] = (self.radius - 0.02) * np.sin(angles) + 0.5
106 | ends[:,0] = (self.radius - 0.01) * np.cos(angles)
107 | ends[:,1] = (self.radius - 0.01) * np.sin(angles) + 0.5
108 |
109 | segments = np.stack((starts, ends), axis=1)
110 | lc = mc.LineCollection(segments, colors='k', linewidths=2, transform=self.ax.transAxes)
111 | self.ax.add_collection(lc)
112 |
113 |
114 | class Radialplot(Axes):
115 |
116 | name = "radialplot"
117 |
118 | @property
119 | def x(self):
120 | return 1.0 / self.sez
121 |
122 | @property
123 | def y(self):
124 | return (self.z - self.z0) / self.sez
125 |
126 | @property
127 | def max_x(self):
128 | return np.max(self.x)
129 |
130 | @property
131 | def min_x(self):
132 | return np.min(self.x)
133 |
134 | @property
135 | def max_y(self):
136 | return np.max(self.y)
137 |
138 | @property
139 | def min_y(self):
140 | return np.min(self.y)
141 |
142 | def set_xlim(self, left=None, right=None, **kwargs):
143 | if left is not None and right is not None:
144 | super(Radialplot, self).set_xlim(left=left, right=1.25 * right, **kwargs)
145 | else:
146 | super(Radialplot, self).set_xlim(left=0, right=1.25 * self.max_x, **kwargs)
147 |
148 | def set_xticks(self, ticks=None):
149 | if ticks:
150 | super(Radialplot, self).set_xticks(ticks)
151 | else:
152 | if self.transform == "linear":
153 | loc = AutoLocator()
154 | ticks = loc.tick_values(0., self.max_x)
155 | ticks2 = loc.tick_values(min(self.sez), max(self.sez))
156 | ticks2 = ticks2[::-1]
157 | ticks2[-1] = min(self.sez)
158 | super(Radialplot, self).set_xticks(1.0 / ticks2)
159 |                 labels = ["{0:5.1f}".format(val) for val in ticks2]
160 | self.xaxis.set_ticklabels(labels)
161 | self.spines["bottom"].set_bounds(0., 1. / ticks2[-1])
162 | self.set_xlabel(r'$\sigma$')
163 | else:
164 | loc = MaxNLocator(5)
165 | ticks = loc.tick_values(0., self.max_x)
166 | super(Radialplot, self).set_xticks(ticks)
167 | self.spines["bottom"].set_bounds(ticks[0], ticks[-1])
168 |
169 | def _rz2xy(self, r, z):
170 | # Calculate the coordinates of a point given by a radial distance
171 | # and a z-value (i.e. a slope)
172 | slope = (z - self.z0)
173 | x = 1 / np.sqrt(1 / r**2 + slope**2 / r**2)
174 | y = slope * x
175 | return x, y
176 |
177 | def radialplot(self, estimates, standard_errors, name="unknown", transform="linear", **kwargs):
178 | self._z = np.array(estimates)
179 | self._sez = np.array(standard_errors)
180 | self.name = name
181 | self.transform = transform
182 |
183 | # Prepare the plot Area
184 | # Left spine
185 | self.set_ylim(-8, 8)
186 | self.set_yticks([-2, -1, 0, 1, 2])
187 | self.spines["left"].set_bounds(-2, 2)
188 | self.yaxis.set_ticks_position('left')
189 |
190 | self.set_xlim()
191 | self.set_xticks()
192 |
193 | self.spines["top"].set_visible(False)
194 | self.spines["right"].set_visible(False)
195 | im = self.scatter(self.x, self.y, **kwargs)
196 | self._add_sigma_lines()
197 | self._add_shaded_area()
198 | self._add_central_line()
199 | self._add_stats()
200 |
201 | self.zaxis = ZAxis(self)
202 | self.zaxis._add_radial_axis()
203 |
204 | # Apply some default labels:
205 | self.set_ylabel("Standardised estimate y")
206 |
207 | @property
208 | def z(self):
209 | if self.transform == "linear":
210 | return self._z
211 | if self.transform == "logarithmic":
212 | return np.log(self._z)
213 | if self.transform =="sqrt":
214 | return np.sqrt(self._z)
215 | else:
216 | raise NotImplementedError("""This transformation is not implemented""")
217 |
218 | @property
219 | def sez(self):
220 | if self.transform == "linear":
221 | return self._sez
222 | elif self.transform == "logarithmic":
223 | return self._sez / self._z
224 | elif self.transform == "sqrt":
225 | return 0.5 * self._sez / self.z
226 | else:
227 | raise NotImplementedError("""This transformation is not implemented""")
228 |
229 | @property
230 | def z0(self):
231 | return np.mean(self.z)
232 |
233 | def _z2t(self, z):
234 | if self.transform == "linear":
235 | return z
236 | elif self.transform == "logarithmic":
237 | return np.exp(z)
238 | elif self.transform == "sqrt":
239 | return z**2
240 |
241 | def _t2z(self, t):
242 | if self.transform == "linear":
243 | return t
244 | elif self.transform == "logarithmic":
245 | return np.log(t)
246 | elif self.transform == "sqrt":
247 | return np.sqrt(t)
248 |
249 | def _t2axis_angle(self, t):
250 | axis_to_data = self.transAxes + self.transData.inverted()
251 | data_to_axis = axis_to_data.inverted()
252 | x, y = self._rz2xy(1.0, self._t2z(t))
253 | x, y = data_to_axis.transform((x, y))
254 | y -= 0.5
255 | return np.rad2deg(np.arctan(y / x))
256 |
257 | def plot_line(self, angle, origin=(0.,0.), **kwargs):
258 | axis_to_data = self.transAxes + self.transData.inverted()
259 | data_to_axis = axis_to_data.inverted()
260 | x1, y1 = data_to_axis.transform(origin)
261 | x2 = 0.89 * np.cos(np.deg2rad(angle))
262 | y2 = 0.89 * np.sin(np.deg2rad(angle)) + y1
263 | self.plot((x1, x2), (y1, y2), transform=self.transAxes, **kwargs)
264 | return
265 |
266 | def _add_sigma_lines(self):
267 | self.plot_line(0., (0., 2.0), color="k", zorder=1)
268 | self.plot_line(0., (0., -2.0), color="k", zorder=1)
269 | return
270 |
271 | def _add_shaded_area(self):
272 | axis_to_data = self.transAxes + self.transData.inverted()
273 | x, y = axis_to_data.transform((0.89, 0.))
274 | coords = np.ndarray((4,2))
275 | coords[0,:] = np.array([0., 2.0])
276 | coords[1,:] = np.array([x, 2.0])
277 | coords[2,:] = np.array([x, -2.0])
278 | coords[3,:] = np.array([0., -2.0])
279 | p = Polygon(coords, closed=True, color="silver", zorder=0)
280 | self.add_patch(p)
281 |
282 | def _add_central_line(self):
283 | self.plot_line(0., (0., 0.), linestyle="--", color="k", zorder=1)
284 |
285 | def _add_stats(self):
286 |
287 | text = "{name} (n={n}) \n".format(name=self.name, n=len(self._z))
288 | text += "Central value = {central_value:5.2f} +/- {central_value_error:5.2f} (1$\sigma$) \n".format(
289 | central_value=self.z0, central_value_error=0.
290 | )
291 | text += "Dispersion = {dispersion} % \n".format(dispersion=0.)
292 | text += "P($\chi^2$) = {chi2}".format(chi2=0.)
293 | self.text(0., 0.95, text,
294 | horizontalalignment="left", verticalalignment="top",
295 | transform=self.transAxes)
296 | return
297 |
298 | register_projection(Radialplot)
299 |
300 | def general_radial(file=None, estimates=None, standard_errors=None, transform="linear", **kwargs):
301 |
302 | fig = plt.figure(figsize=(6,6))
303 |
304 | if not "color" in kwargs.keys():
305 | kwargs["color"] = "black"
306 |
307 | if file:
308 | from .utilities import read_radialplotter_file
309 | data = read_radialplotter_file(file)
310 | estimates = data["Estimates"]
311 | standard_errors = data["Standard Errors"]
312 |
313 | ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection="radialplot")
314 | ax.radialplot(estimates, standard_errors, transform=transform, **kwargs)
315 | return ax
316 |
--------------------------------------------------------------------------------
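
The generic Radialplot above works on any set of estimates and standard errors; a minimal sketch with arbitrary values:

import numpy as np
import matplotlib.pyplot as plt
from pyFTracks.radialplot import general_radial

# Arbitrary estimates (e.g. ages in Myr) and their standard errors
estimates = np.array([75., 82., 90., 68., 101., 85.])
standard_errors = np.array([8., 10., 12., 7., 15., 9.])

ax = general_radial(estimates=estimates, standard_errors=standard_errors,
                    transform="linear")
plt.show()
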
/pyFTracks/radialplot/utilities.py:
--------------------------------------------------------------------------------
1 |
2 | import csv
3 |
4 | def fission_track_parser(filename):
5 |     """ Parser for P. Vermeesch RadialPlotter csv files.
6 | 
7 |     Returns a dictionary with Ns, Ni, zeta, zeta_err, rhod, rhod_err and dpars."""
8 |
9 | with open(filename, "r") as f:
10 | file = csv.reader(f)
11 | name = next(file)
12 | line = next(file)
13 | zeta, zeta_err = float(line[0]), float(line[1])
14 | line = next(file)
15 | rhod, rhod_err = float(line[0]), int(float(line[1]))
16 |
17 | Ns = []
18 | Ni = []
19 | dpars = []
20 |
21 | for line in file:
22 | Ns.append(int(line[0]))
23 | Ni.append(int(line[1]))
24 | if len(line) > 2:
25 | dpars.append(float(line[2]))
26 |
27 | return {"Ns": Ns,
28 | "Ni": Ni,
29 | "zeta": zeta,
30 | "zeta_err": zeta_err,
31 | "rhod": rhod,
32 | "rhod_err": rhod_err,
33 | "dpars": dpars}
34 |
35 | def read_radialplotter_file(filename):
36 |     """ Parser for P. Vermeesch RadialPlotter csv files"""
37 |
38 | with open(filename, "r") as f:
39 | file = csv.reader(f)
40 | name = next(file)
41 |
42 | if name[1] == "F":
43 | return fission_track_parser(filename)
44 | else:
45 | return generic_parser(filename)
46 |
47 | def generic_parser(filename):
48 |     """ Parser for P. Vermeesch RadialPlotter csv files (generic estimate / standard error data)"""
49 |
50 | with open(filename, "r") as f:
51 | file = csv.reader(f)
52 | name = next(file)
53 |
54 | estimates = []
55 | standard_errors = []
56 |
57 | for line in file:
58 | estimates.append(float(line[0]))
59 | standard_errors.append(float(line[1]))
60 |
61 | return {"Estimates": estimates,
62 | "Standard Errors": standard_errors}
63 |
--------------------------------------------------------------------------------
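
The file layout expected by read_radialplotter_file can be inferred from the parsers above; the snippet below writes a hypothetical fission-track file in that layout and reads it back (the real RadialPlotter export may differ in details):

from pyFTracks.radialplot.utilities import read_radialplotter_file

# Row 1: sample name and an "F" flag, row 2: zeta and its error,
# row 3: rhod and its error, then one Ns, Ni (and optional Dpar) row per grain.
with open("example_ft.csv", "w") as f:
    f.write("example sample,F\n")
    f.write("350.0,10.0\n")
    f.write("1.5,0.0\n")
    f.write("12,40,1.8\n")
    f.write("25,60,2.1\n")
    f.write("7,22,1.7\n")

data = read_radialplotter_file("example_ft.csv")
print(data["Ns"], data["Ni"], data["zeta"], data["rhod"], data["dpars"])
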
/pyFTracks/ressources/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.pyc
--------------------------------------------------------------------------------
/pyFTracks/ressources/Gleadow.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/pyFTracks/ressources/Gleadow.h5
--------------------------------------------------------------------------------
/pyFTracks/ressources/Miller.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/pyFTracks/ressources/Miller.h5
--------------------------------------------------------------------------------
/pyFTracks/ressources/__init__.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pathlib import Path
3 |
4 | #Miller1995 = pd.read_hdf((Path(__file__).parent / "Miller1995.h5"), "data")
5 | #Gleadow = pd.read_hdf((Path(__file__).parent / "Gleadow.h5"), "data")
6 |
7 | from pyFTracks import Sample
8 |
9 | Miller = Sample().read_from_hdf5(Path(__file__).parent / "Miller.h5")
10 | Gleadow = Sample().read_from_hdf5(Path(__file__).parent / "Gleadow.h5")
11 |
--------------------------------------------------------------------------------
/pyFTracks/ressources/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/underworldcode/pyFTracks/6050a4327616ebca7ab932b609b25c7c4e6a62f8/pyFTracks/ressources/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/pyFTracks/structures.py:
--------------------------------------------------------------------------------
1 | from itertools import count
2 | import numpy as np
3 | from collections import OrderedDict
4 | from pandas import DataFrame, Series
5 | from .radialplot import radialplot
6 | import pandas as pd
7 | from .utilities import read_mtx_file, h5load, h5store
8 | from .age_calculations import calculate_ages
9 | from .age_calculations import calculate_pooled_age
10 | from .age_calculations import calculate_central_age
11 | from .age_calculations import chi_square
12 |
13 | unprojected_coefs = {"ETCH_PIT_LENGTH": {"m": 0.283, "b": 15.63},
14 | "CL_PFU": {"m": 0.544, "b": 16.18},
15 | "OH_PFU": {"m": 0.0, "b": 16.18},
16 | "CL_WT_PCT": {"m": 0.13824, "b": 16.288}}
17 |
18 | projected_coefs = {"ETCH_PIT_LENGTH": {"m": 0.205, "b": 16.10},
19 | "CL_PFU": {"m": 0.407, "b": 16.49},
20 | "OH_PFU": {"m": 0.000, "b": 16.57},
21 | "CL_WT_PCT": {"m": 0.17317, "b": 16.495}}
22 |
23 |
24 | class Grain(Series):
25 |
26 | _metadata = ['_track_lengths', 'track_lengths']
27 |
28 | def __init__(self, *args, **kwargs):
29 | Series.__init__(self, *args, **kwargs)
30 |
31 | @property
32 | def _constructor(self):
33 | return Grain
34 |
35 | @property
36 | def _constructor_expanddim(self):
37 | return Sample
38 |
39 | @property
40 | def zeta(self):
41 | return self._zeta
42 |
43 | @zeta.setter
44 | def zeta(self, value):
45 | self._zeta = value
46 |
47 |
48 | class Sample(DataFrame):
49 |
50 | # Let pandas know what properties are added
51 | _metadata = ['name', 'zeta', 'zeta_error',
52 | 'pooled_age', 'pooled_age_se', 'central_age',
53 |                  'central_age_se',
54 | 'rhod', 'nd', 'depth', 'elevation',
55 | 'stratigraphic_age', 'stratigraphic_age_name',
56 | 'unit_area_graticule', 'description',
57 | 'deposition_temperature', 'present_day_temperature', 'id',
58 | '_track_lengths', 'track_lengths']
59 |
60 | def __init__(self, data=None, central_age=None, pooled_age=None, zeta=None, zeta_error=None, rhod=None, nd=None, name: str=None,
61 | elevation=None, depth=None, stratigraphic_age=None, stratigraphic_age_name:str=None,
62 | description=None, deposition_temperature=None,
63 | present_day_temperature=None, *args, **kwargs):
64 |
65 | self.name = name
66 | self.depth = depth
67 | self.elevation = elevation
68 | self.stratigraphic_age = stratigraphic_age
69 | self.stratigraphic_age_name = stratigraphic_age_name
70 | self.deposition_temperature = deposition_temperature
71 | self.present_day_temperature = present_day_temperature
72 | self.description = description
73 | self.zeta = zeta
74 | self.zeta_error = zeta_error
75 | self.rhod = rhod
76 | self.nd = nd
77 | self.central_age = central_age
78 | self.pooled_age = pooled_age
79 |
80 |
81 | if isinstance(data, DataFrame):
82 | data = data.to_dict()
83 |
84 | super(Sample, self).__init__(columns=["Ns", "Ni", "A"], data=data, *args, **kwargs)
85 | self._track_lengths = None
86 |
87 | @property
88 | def _constructor(self):
89 |         """This is the key to letting Pandas know how to keep
90 |         derived objects of the same type as `Sample`. Returning
91 |         the name of the class should be enough. However, in
92 |         some cases, `__finalize__` is not called and the metadata is
93 |         not carried over. We fix that by constructing a callable
94 |         that makes sure to call `__finalize__` every time.
95 |
96 | see: https://stackoverflow.com/questions/47466255/subclassing-a-pandas-dataframe-updates
97 | """
98 | def _c(*args, **kwargs):
99 | return Sample(*args, **kwargs).__finalize__(self)
100 | return _c
101 |
102 | @property
103 | def _constructor_sliced(self):
104 | return Grain
105 |
106 | def read_from_hdf5(self, filename):
107 | with pd.HDFStore(filename) as store:
108 | data, metadata = h5load(store)
109 | for val in self._metadata:
110 | try:
111 | setattr(self, val, metadata.pop(val))
112 | except:
113 | pass
114 |
115 | try:
116 | self.calculate_ages()
117 | self.calculate_ratios()
118 | except:
119 | pass
120 |
121 |
122 | if not self.central_age:
123 | try:
124 | self.calculate_central_age()
125 | except:
126 | pass
127 |
128 | if not self.pooled_age:
129 | try:
130 | self.calculate_pooled_age()
131 | except:
132 | pass
133 |
134 | super(Sample, self).__init__(data=data)
135 | return self
136 |
137 | def calculate_ratios(self):
138 | if not hasattr(self, "Ns"):
139 | raise ValueError("Cannot find Ns counts")
140 | if not hasattr(self, "Ni"):
141 |             raise ValueError("Cannot find Ni counts")
142 |
143 | self["Ns/Ni"] = self.Ns / self.Ni
144 | if not hasattr(self, "unit_area_graticule"):
145 | self.unit_area_graticule = 1.0
146 | if not hasattr(self, "A"):
147 | self.A = 1
148 | self["RhoS"] = self.Ns / (self.A * self.unit_area_graticule)
149 | self["RhoI"] = self.Ni / (self.A * self.unit_area_graticule)
150 | return self
151 |
152 | def read_from_radialplotter(self, filename):
153 | from pyRadialPlot import read_radialplotter_file
154 | data = read_radialplotter_file(filename)
155 |
156 | self.__init__({"Ns": data["Ns"], "Ni": data["Ni"]})
157 | self.zeta = data["zeta"]
158 | self.rhod = data["rhod"]
159 | self._calculate_statistics()
160 |
161 | def calculate_l0_from_Dpars(self, projected=True):
162 | if not hasattr(self, "Dpars"):
163 | raise ValueError("Cannot find Dpars column")
164 | if projected:
165 | m = projected_coefs["ETCH_PIT_LENGTH"]["m"]
166 | b = projected_coefs["ETCH_PIT_LENGTH"]["b"]
167 | else:
168 | m = unprojected_coefs["ETCH_PIT_LENGTH"]["m"]
169 | b = unprojected_coefs["ETCH_PIT_LENGTH"]["b"]
170 | self["l0"] = m * self.Dpars + b
171 |
172 | def calculate_ages(self):
173 | required = ["Ns", "Ni", "zeta", "zeta_error", "rhod", "nd"]
174 | for arg in required:
175 |             if getattr(self, arg, None) is None:
176 | raise ValueError("""Cannot find {0}""".format(arg))
177 |
178 | data = calculate_ages(
179 | self.Ns, self.Ni, self.zeta,
180 | self.zeta_error, self.rhod, self.nd)
181 | self["Ages"] = data["Age(s)"]
182 | self["Ages Errors"] = data["se(s)"]
183 | return {"Ages": list(self["Ages"]),
184 | "Ages Errors": list(self["Ages Errors"])}
185 |
186 | @property
187 | def track_lengths(self):
188 | return self._track_lengths
189 |
190 | @track_lengths.setter
191 | def track_lengths(self, values):
192 | self._track_lengths = values
193 |
194 | def calculate_pooled_age(self):
195 | required = ["Ns", "Ni", "zeta", "zeta_error", "rhod", "nd"]
196 | for arg in required:
197 |             if getattr(self, arg, None) is None:
198 | raise ValueError("""Cannot find {0}""".format(arg))
199 | data = calculate_pooled_age(
200 | self.Ns, self.Ni, self.zeta,
201 | self.zeta_error, self.rhod, self.nd)
202 | self.pooled_age = data["Pooled Age"]
203 | self.pooled_age_se = data["se"]
204 | return {"Pooled Age": self.pooled_age,
205 | "se": self.pooled_age_se}
206 |
207 | def calculate_central_age(self):
208 | required = ["Ns", "Ni", "zeta", "zeta_error", "rhod", "nd"]
209 | for arg in required:
210 |             if getattr(self, arg, None) is None:
211 | raise ValueError("""Cannot find {0}""".format(arg))
212 | data = calculate_central_age(
213 | self.Ns, self.Ni, self.zeta,
214 | self.zeta_error, self.rhod, self.nd
215 | )
216 | self.central_age = data["Central"]
217 | self.central_age_se = data["se"]
218 | self.central_age_sigma = data["sigma"]
219 | return {"Central": self.central_age,
220 | "se": self.central_age_se,
221 | "sigma": self.central_age_sigma}
222 |
223 | def calculate_chi_square(self):
224 | self.chi2 = chi_square(self.Ns, self.Ni)
225 | return self.chi2
226 |
227 | def _repr_html_(self):
228 | """_repr_html_
229 |
230 | HTML table describing the Sample.
231 | For integration with Jupyter notebook.
232 | """
233 | params = OrderedDict()
234 | params["Name"] = self.name
235 | params["Description"] = self.description
236 | params["Depth"] = self.depth
237 | params["Elevation"] = self.elevation
238 | params["Stratigraphic Age Range Upper/Lower"] = self.stratigraphic_age
239 | params["Stratigraphic Age Name"] = self.stratigraphic_age_name
240 | params["Deposition Temperature"] = self.deposition_temperature
241 | params["Present Day Temperature"] = self.present_day_temperature
242 | params["Total Ns"] = sum(self.Ns)
243 | params["Total Ni"] = sum(self.Ni)
244 | params["rhoD"] = self.rhod
245 | params["nd"] = self.nd
246 | params["Zeta"] = f"{self.zeta} ({self.zeta_error})"
247 |
248 |         # html = "Metadata <br>"
249 | html = ""
250 |
251 | for key, val in params.items():
252 | if not val: val = ""
253 |             html += "{0}: {1} <br>".format(key, val)
254 |
255 | return html + DataFrame._repr_html_(self)
256 |
257 | def apply_forward_model(self, fwd_model, name):
258 | self.kinetic_parameter_type = "ETCH_PIT_LENGTH"
259 | def func1(row):
260 | _, ft_age, reduced_density = fwd_model.solve(
261 | row["l0"],
262 | self.kinetic_parameter_type,
263 | row["Dpars"])
264 | return pd.Series({"ft_age": ft_age, "reduced_density": reduced_density})
265 | df = self.apply(func1, axis=1)
266 | self[name] = df["ft_age"]
267 |
268 | def save(self, filename):
269 | data = pd.DataFrame()
270 | data["Ns"] = self.Ns
271 | data["Ni"] = self.Ni
272 | data["A"] = self.A
273 | metadata = {}
274 | for val in self._metadata:
275 | if not val.startswith("_"):
276 | try:
277 | metadata[val] = getattr(self, val)
278 | except:
279 | pass
280 | h5store(filename, data, **metadata)
281 |
282 |
283 | save_to_hdf = save
284 |
285 | def radialplot(self, transform="logarithmic"):
286 | return radialplot(Ns=self.Ns, Ni=self.Ni, zeta=self.zeta, zeta_err=self.zeta_error,
287 | rhod=self.rhod, transform=transform)
288 |
289 |
290 |
291 |
292 |
--------------------------------------------------------------------------------
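
A minimal sketch of how the Sample container above might be populated and queried; counts and calibration values are made up:

from pyFTracks import Sample

sample = Sample(data={"Ns": [12, 25, 7, 30, 18],
                      "Ni": [40, 60, 22, 75, 50],
                      "A": [50, 50, 50, 50, 50]},
                name="example", zeta=350., zeta_error=10.,
                rhod=1.5, nd=2000)

sample.calculate_ratios()        # adds Ns/Ni, RhoS and RhoI columns
sample.calculate_ages()          # adds per-grain Ages and Ages Errors columns
sample.calculate_pooled_age()    # sets sample.pooled_age / pooled_age_se
sample.calculate_central_age()   # sets sample.central_age / central_age_se
print(sample.pooled_age, sample.central_age)
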
/pyFTracks/thermal_history.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cython
3 | import numpy as np
4 | cimport numpy as np
5 | from libc.math cimport fabs
6 |
7 |
8 | cdef calculate_annealing_temperature(double abs_gradient):
9 | """ Calculate the annealing temperature based on absolute temperature gradient
10 | The total annealing temperature (TA) for F-apatite
11 | for a given heating or cooling rate (R) is given by the equation:
12 |
13 | Ta = 377.67 * R**0.019837
14 | """
15 | return 377.67 * abs_gradient**0.019837
16 |
17 |
18 | class ThermalHistory(object):
19 | """Class defining a thermal history"""
20 |
21 | def __init__(self, time, temperature, name="unknown"):
22 | """
23 | time: list of time points in Myr.
24 | temperature: list of temperature points in deg Kelvin.
25 | name: a name for the thermal-history.
26 |
27 | """
28 |
29 | time = np.array(time)
30 | temperature = np.array(temperature)
31 |
32 | # If time is not increasing, reverse arrays
33 | if not np.all(np.diff(time) > 0):
34 | time = time[::-1]
35 | temperature = temperature[::-1]
36 |
37 | if np.any(temperature < 273.):
38 | print("It looks like you have entered temperature in Celsius...Converting temperature to Kelvin")
39 | temperature += 273.15
40 |
41 | self.name = name
42 | self.input_time = time
43 | self.input_temperature = temperature
44 |
45 | self.time = self.input_time
46 | self.temperature = self.input_temperature
47 |
48 | self.maxT = max(temperature)
49 | self.minT = min(temperature)
50 | self.totaltime = max(time) - min(time)
51 | self.dTdt = np.diff(self.temperature) / np.diff(self.time)
52 | self.get_isothermal_intervals()
53 |
54 | def get_isothermal_intervals(self, max_temperature_per_step=8.0,
55 | max_temperature_step_near_ta=3.5):
56 |
57 | """
58 | Interpolate Time Temperature path
59 | Takes the time-temperature path specification and subdivides it for
60 | calculation in isothermal intervals.
61 |
62 | Reference:
63 |
64 | Ketcham, R. A. (2005). Forward and Inverse Modeling of Low-Temperature
65 | Thermochronometry Data. Reviews in Mineralogy and Geochemistry, 58(1),
66 | 275–314. doi:10.2138/rmg.2005.58.11
67 |
68 |         It is calibrated to achieve 0.5% accuracy for end-member F-apatite by
69 |         enforcing a maximum temperature step of 3.5 degrees C when the model temperature
70 |         is within 10 C of the total annealing temperature. Away from this cutoff the
71 |         maximum temperature step allowed is 8 C. If the overall model time steps are
72 |         too large, these requirements may not be met.
73 |
74 | Quoted text:
75 |
76 | "The more segments a time-temperature path is subdivided into, the more accurate
77 | the numerical solution will be. Conversely, an excessive number of time steps
78 | will slow computation down unnecessarily. The optimal time step size to achieve a desired
79 | solution accuracy was examined in detail by Issler (1996b), who demonstrated that time
80 | steps should be smaller as the total annealing temperature of apatite is approached.
81 | For the Ketcham et al. (1999) annealing model for F-apatite, Ketcham et al. (2000) found that 0.5%
82 | precision is assured if there is no step with greater than a 3.5 ºC change within 10 ºC of
83 | the F-apatite total annealing temperature."""
84 |
85 | cdef double[::1] time = np.ascontiguousarray(self.input_time)
86 | cdef double[::1] temperature = np.ascontiguousarray(self.input_temperature)
87 | cdef double[::1] new_time = np.ndarray((200))
88 | cdef double[::1] new_temperature = np.ndarray((200))
89 | cdef double cmax_temp_per_step = max_temperature_per_step
90 | cdef double cmax_temp_step_near_ta = max_temperature_step_near_ta
91 | cdef int npoints = time.shape[0]
92 |
93 | cdef double default_timestep
94 | cdef double alternative_timestep = 0.0
95 | cdef double gradient, abs_gradient
96 | cdef double temperature_interval
97 | cdef double end_temperature
98 | cdef double fact
99 | cdef double temp_per_step
100 | cdef double current_default_timestep
101 | cdef double Ta_near
102 | cdef double max_temperature
103 | cdef double timestep
104 | cdef double time_interval
105 |
106 | cdef int segments
107 | cdef int new_npoints = 1
108 |
109 | new_temperature[0] = temperature[npoints - 1]
110 | new_time[0] = time[npoints - 1]
111 |
112 | default_timestep = time[npoints - 1] * 1.0 / 100
113 |
114 | for seg in range(npoints - 1, 0, -1):
115 | temperature_interval = temperature[seg] - temperature[seg - 1]
116 | time_interval = time[seg] - time[seg - 1]
117 | gradient = temperature_interval / time_interval
118 | abs_gradient = fabs(gradient)
119 | end_temperature = temperature[seg-1]
120 | fact = 0
121 | if gradient < 0:
122 | fact = -1
123 |
124 | temp_per_step = abs_gradient * default_timestep
125 |
126 | if temp_per_step <= cmax_temp_per_step:
127 | current_default_timestep = default_timestep
128 | else:
129 | current_default_timestep = cmax_temp_per_step / abs_gradient
130 |
131 | if abs_gradient < 0.1:
132 | Ta_near = 1000.
133 | else:
134 | Ta_near = calculate_annealing_temperature(abs_gradient) + 10.
135 | alternative_timestep = cmax_temp_step_near_ta / abs_gradient
136 |
137 | while new_time[new_npoints - 1] > time[seg-1]:
138 |
139 | max_temperature = new_temperature[new_npoints - 1] + default_timestep * gradient * fact
140 | if gradient < 0. and max_temperature > end_temperature:
141 | max_temperature = end_temperature
142 |
143 | timestep = current_default_timestep
144 |
145 | if max_temperature > Ta_near:
146 | if alternative_timestep < default_timestep:
147 | timestep = alternative_timestep
148 |
149 | if (timestep + 0.001) > (new_time[new_npoints - 1] - time[seg - 1]):
150 | new_time[new_npoints] = time[seg - 1]
151 | new_temperature[new_npoints] = end_temperature
152 | else:
153 | new_time[new_npoints] = new_time[new_npoints - 1] - timestep
154 | new_temperature[new_npoints] = new_temperature[new_npoints - 1] - gradient * timestep
155 |
156 | new_npoints += 1
157 |
158 | self.time = np.array(new_time)[:new_npoints]
159 | self.temperature = np.array(new_temperature)[:new_npoints]
160 | return self.time, self.temperature
161 |
162 |
163 | # Some useful thermal histories
164 | WOLF1 = ThermalHistory(
165 | name="wolf1",
166 | time=[0., 43., 44., 100.],
167 | temperature=[283.15, 283.15, 403.15, 403.15]
168 | )
169 |
170 | WOLF2 = ThermalHistory(
171 | name="wolf2",
172 | time=[0., 100.],
173 | temperature=[283.15, 403.15]
174 | )
175 |
176 | WOLF3 = ThermalHistory(
177 | name="wolf3",
178 | time=[0., 19., 19.5, 100.],
179 | temperature=[283.15, 283.15, 333.15, 333.15]
180 | )
181 |
182 | WOLF4 = ThermalHistory(
183 | name="wolf4",
184 | time=[0., 24., 76., 100.],
185 | temperature=[283.15, 333.15, 333.15, 373.15]
186 | )
187 |
188 | WOLF5 = ThermalHistory(
189 | name="wolf5",
190 | time=[0., 5., 100.],
191 | temperature=[283.15, 373.15, 291.15]
192 | )
193 |
194 |
195 | FLAXMANS1 = ThermalHistory(
196 | name="Flaxmans1",
197 | #time=[109.73154362416108, 95.97315436241611, 65.10067114093958, 42.95302013422818, 27.069351230425042, 0.223713646532417],
198 | #temperature=[10.472716661803325, 50.21343115594648, 90.20426028596441, 104.6346242027596, 124.63170619867442, 125.47709366793116]
199 | time=[0., 27, 43, 65, 96, 110],
200 | temperature=[398.15, 399.15, 378.15, 363.15, 323.15, 283.15]
201 | )
202 |
203 | VROLIJ = ThermalHistory(
204 | name="Vrolij",
205 | #time=[112.84098861592778, 108.92457659225633, 101.04350962294087, 95.96509833052357, 4.910255922414279, -0.196743768208961],
206 | #temperature=[10.036248368327097, 14.455174285000524, 14.971122078085369, 18.945174136102615, 11.984858737246478, 11.027412104738094]
207 | time=[0., 5., 96., 101, 109, 113],
208 | temperature=[284.15, 285.15, 292.15, 288.15, 287.15, 283.15]
209 | )
210 |
--------------------------------------------------------------------------------
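
A short sketch showing how a ThermalHistory is built and resampled into isothermal intervals, in the same conventions as the WOLF histories above (time in Myr before present, temperature in Kelvin):

from pyFTracks.thermal_history import ThermalHistory

# Linear cooling from 120 C at 100 Ma to 10 C at present (values given in Kelvin)
history = ThermalHistory(time=[0., 100.],
                         temperature=[283.15, 393.15],
                         name="linear_cooling")

# Subdivide the path into isothermal steps small enough for the annealing calculation
time_steps, temperature_steps = history.get_isothermal_intervals()
print(time_steps.size, temperature_steps.size)
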
/pyFTracks/utilities.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from scipy import stats
4 | import random
5 | import sys
6 |
7 | def drawbinom(I, prob):
8 | # Look at scipy.stats.binom...option binom.rvs
9 | """Random draw from binomial distribution
10 |
11 | Utility function:
12 | Draw from a binomial distribution:
13 | Only return if the draw is different than 0
14 | """
15 | Ns = 0
16 | while Ns == 0:
17 | A = np.random.RandomState()
18 | Ns = A.binomial(I, prob)
19 | return Ns
20 |
21 | def create_distribution(xk, pk, name="TLD"):
22 | return stats.rv_discrete(name=name, values=(xk, pk))
23 |
24 | def draw_from_distrib(vals, pdf, size=1):
25 | """Random Draw from given distribution
26 | """
27 | vals = np.array(vals)
28 | distrib = stats.rv_discrete(values=(range(len(vals)), pdf))
29 | return vals[distrib.rvs(size=size)]
30 |
31 | def AdjustTTHistory(time, temp):
32 | """Calculate adjusted thermal history
33 |
34 | Useful when one needs to calculate thermal history
35 | in a borehole when some of the sample reaches
36 | surface temperature
37 | """
38 |
39 | def zerointersect(pt1, pt2):
40 | x1, y1 = pt1
41 | x2, y2 = pt2
42 |
43 | xmax = max(x1, x2)
44 | xmin = min(x1, x2)
45 |
46 | x = np.array([[x1, 1.0], [x2, 1.0]])
47 | y = np.array([y1, y2])
48 | A, B = np.linalg.solve(x, y)
49 | X, Y = -B/A, 0.0
50 | if(X > xmin and X < xmax):
51 | return X, Y
52 | else:
53 | return None, None
54 |
55 |     if len(time) != len(temp):
56 |         raise ValueError("time and temp must have the same length")
57 |
58 | TT = [[time[i], temp[i]] for i in range(0, len(time))]
59 |
60 | newTT = []
61 | for i in range(0, len(TT)-1):
62 | pt1 = TT[i]
63 | pt2 = TT[i+1]
64 | X, Y = zerointersect(pt1, pt2)
65 | if(X is not None):
66 | newTT.append([X, Y])
67 |
68 | TT.extend(newTT)
69 | newTT = []
70 | for elem in TT:
71 | if(elem[1] >= 0.0):
72 | newTT.append(elem)
73 |
74 | newTT.sort()
75 | newTT.reverse()
76 | time = [elem[0] for elem in newTT]
77 | temp = [elem[1] for elem in newTT]
78 | return time, temp
79 |
80 | def read_mtx_file(filename):
81 | """ MTX (Madtrax) file parser """
82 |
83 | # Check that the extension is actually mtx
84 | if filename.split(".")[-1] != "mtx":
85 |         raise ValueError("Specify an mtx file")
86 |
87 | lines = open(filename, "r").read().splitlines()
88 | lines = (line.strip() for line in lines)
89 |
90 | data = {}
91 |
92 | # First Line is the name
93 | (data["name"]) = next(lines).split(".")[0]
94 |
95 | # Skip Second line (not sure what that is)
96 | next(lines)
97 |
98 | # Third line contains count numbers and zeta information
99 | # nconstraints is the number of boxes defined to constraint
100 | # thermal history
101 |
102 | line2 = next(lines).split()
103 | nconstraints, ntl, ncounts = (int(val) for val in line2[:3])
104 | (data["zeta"],
105 | data["rhod"],
106 | data["nd"]) = (float(val) for val in line2[3:])
107 |
108 | # Skip the constraints
109 | for i in range(nconstraints):
110 | next(lines)
111 |
112 | # After the constraints we find:
113 | # - The Age and associated error
114 | # - The Mean track length and associated error
115 | # - The standard deviation and associated error
116 |
117 | (data["FTage"],
118 | data["FTageE"]) = (float(val) for val in next(lines).split())
119 | (data["MTL"],
120 | data["MTLE"]) = (float(val) for val in next(lines).split())
121 | (data["STDEV"],
122 | data["STDEVE"]) = (float(val) for val in next(lines).split())
123 |
124 | # After we find the counts Ns and Ni
125 | data["Ns"] = []
126 | data["Ni"] = []
127 |
128 | for row in range(ncounts):
129 | Ns, Ni = (int(val) for val in next(lines).split())
130 | data["Ns"].append(Ns)
131 | data["Ni"].append(Ni)
132 |
133 | # Finally the track lengths
134 | data["TL"] = []
135 | for row in range(ntl):
136 | data["TL"].append(float(next(lines)))
137 |
138 | return data
139 |
140 | def h5store(filename, df, **kwargs):
141 | store = pd.HDFStore(filename)
142 | store.put('mydata', df)
143 | store.get_storer('mydata').attrs.metadata = kwargs
144 | store.close()
145 |
146 | def h5load(store):
147 | data = store['mydata']
148 | metadata = store.get_storer('mydata').attrs.metadata
149 | return data, metadata
150 |
--------------------------------------------------------------------------------
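
A small sketch exercising the sampling helpers above with an arbitrary discrete length distribution; all values are illustrative:

from pyFTracks.utilities import drawbinom, draw_from_distrib

# Draw a non-zero count from a binomial distribution (100 trials, p = 0.25)
Ns = drawbinom(100, 0.25)

# Draw five lengths from an arbitrary discrete distribution (values, probabilities)
vals = [12., 13., 14., 15., 16.]
pdf = [0.1, 0.2, 0.4, 0.2, 0.1]
lengths = draw_from_distrib(vals, pdf, size=5)
print(Ns, lengths)
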
/pyFTracks/viewer.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | from matplotlib.widgets import Button
3 | import numpy as np
4 | from scipy.spatial.distance import cdist
5 | from matplotlib.backend_bases import MouseButton
6 | from .thermal_history import ThermalHistory
7 |
8 |
9 | class Cursor(object):
10 | def __init__(self, ax):
11 | self.ax = ax
12 | # text location in axes coords
13 | self.txt = ax.text(0.05, 0.95, '', transform=ax.transAxes)
14 |
15 | def mouse_move(self, event):
16 | if not event.inaxes:
17 | return
18 | x, y = event.xdata, event.ydata
19 | self.txt.set_text('Time=%1.2f Myr, Temp=%1.2f C' % (x, y))
20 | self.ax.figure.canvas.draw_idle()
21 |
22 | class Viewer(object):
23 |
24 | def __init__(self,history=None,
25 | annealing_model=None,
26 | sample=None,
27 | present_temperature=293.15):
28 |
29 | if history:
30 | self.history = history
31 | self.time = np.array(self.history.input_time)
32 | self.temperature = np.array(self.history.input_temperature)
33 | self.present_temperature = self.temperature[-1]
34 | else:
35 | self.present_temperature = present_temperature
36 | self.time = np.array([0.])
37 | self.temperature = np.array([self.present_temperature])
38 | self.history = ThermalHistory(self.time, self.temperature)
39 |
40 | self.annealing_model = annealing_model
41 | self.original_time = np.copy(self.time)
42 | self.original_temperature = np.copy(self.temperature)
43 | self.sample = sample
44 |
45 | self.pind = None # active point
46 | self.epsilon = 1
47 |
48 | self.init_plot()
49 |
50 | def init_plot(self):
51 |
52 | self.fig, (self.ax1, self.ax2) = plt.subplots(1, 2, figsize=(9.0, 8.0))
53 | self.cursor = Cursor(self.ax1)
54 |
55 | self.ax1.plot(
56 | self.original_time,
57 | self.original_temperature,
58 | 'k--', label='original')
59 |
60 | self.l, = self.ax1.plot(
61 | self.time,
62 | self.temperature,
63 | color='k', linestyle="-",
64 | marker='o', markersize=8)
65 |
66 | self.ax1.set_yscale('linear')
67 | self.ax1.set_title("Thermal History")
68 | if np.max(self.time) > 0.0:
69 | self.ax1.set_xlim(np.max(self.time), 0.)
70 | self.ax1.set_ylim(np.max(self.temperature) + 50, 273.15)
71 | else:
72 | self.ax1.set_xlim(100, 0.)
73 | self.ax1.set_ylim(500., 273.15)
74 | self.ax1.set_xlabel('Time (Myr)')
75 | self.ax1.set_ylabel('Temperature (C)')
76 | self.ax1.grid(True)
77 | self.ax1.yaxis.grid(True, which='minor', linestyle='--')
78 | self.ax1.legend(loc=4, prop={'size': 10})
79 |
80 | if self.annealing_model:
81 | self.annealing_model.history = self.history
82 | self.annealing_model.calculate_age()
83 | self.m2, = self.ax2.plot(self.annealing_model.pdf_axis, self.annealing_model.pdf, color="r")
84 | age_label = f"{self.annealing_model.ft_model_age:5.2f}"
85 | MTL_label = f"{self.annealing_model.MTL:5.2f}"
86 | else:
87 | self.ax2.plot()
88 | age_label = 0.0
89 | MTL_label = 0.0
90 |
91 | obs_age_label = ""
92 | if self.sample is not None and self.sample.pooled_age:
93 | obs_age_label = f"{self.sample.pooled_age:5.2f}"
94 |
95 | obs_MTL_label = ""
96 | if self.sample and self.sample.track_lengths is not None:
97 | obs_MTL_label = f"{self.sample.track_lengths.mean():5.2f}"
98 |
99 | self.age_label = self.ax2.text(0.05, 0.95, f"AFT age:{age_label} Myr (obs: {obs_age_label})",
100 | horizontalalignment='left', verticalalignment='center',
101 | transform=self.ax2.transAxes)
102 | self.MTL_label = self.ax2.text(0.05, 0.90, f"MTL:{MTL_label} $\mu$m (obs: {obs_MTL_label} $\mu$m)",
103 | horizontalalignment='left', verticalalignment='center',
104 | transform=self.ax2.transAxes)
105 | self.ax2.set_title("Fission Track prediction")
106 | self.ax2.set_ylim(0., 0.05)
107 | self.ax3 = self.ax2.twinx()
108 | self.ax3.set_ylim(0., 40)
109 |
110 | if self.sample is not None and self.sample.track_lengths is not None:
111 | self.ax3.hist(self.sample.track_lengths, bins=range(0, 21), density=False, alpha=0.5)
112 |
113 | self.axres = plt.axes([0.84, 0.05, 0.12, 0.02])
114 | self.bres = Button(self.axres, 'Reset')
115 | self.bres.on_clicked(self.reset)
116 |
117 | self.fig.canvas.mpl_connect('button_press_event',
118 | self.on_press)
119 | self.fig.canvas.mpl_connect('button_release_event',
120 | self.on_release)
121 | self.fig.canvas.mpl_connect('motion_notify_event',
122 | self.on_motion)
123 | self.fig.canvas.mpl_connect('motion_notify_event',
124 | self.cursor.mouse_move)
125 |
126 | def update_plot(self):
127 | self.l.set_ydata(self.temperature)
128 | self.l.set_xdata(self.time)
129 |
130 | if self.history and self.annealing_model:
131 | self.m2.set_ydata(self.annealing_model.pdf)
132 | age_label = f"{self.annealing_model.ft_model_age:5.2f}"
133 | MTL_label = f"{self.annealing_model.MTL:5.2f}"
134 | else:
135 | self.ax2.plot()
136 | age_label = 0.0
137 | MTL_label = 0.0
138 |
139 | obs_age_label = ""
140 | if self.sample is not None and self.sample.pooled_age:
141 | obs_age_label = f"{self.sample.pooled_age:5.2f}"
142 |
143 | obs_MTL_label = ""
144 |         if self.sample is not None and self.sample.track_lengths is not None:
145 | obs_MTL_label = f"{self.sample.track_lengths.mean():5.2f}"
146 |
147 | self.age_label.set_text(f"AFT age:{age_label} Myr (obs: {obs_age_label})")
148 |         self.MTL_label.set_text(rf"MTL:{MTL_label} $\mu$m (obs: {obs_MTL_label} $\mu$m)")
149 | self.fig.canvas.draw_idle()
150 |
151 | def reset(self, event):
152 | self.temperature = np.copy(self.original_temperature)
153 | self.time = np.copy(self.original_time)
154 | if self.annealing_model:
155 | self.annealing_model.pdf *= 0.
156 | self.refresh_data()
157 | self.update_plot()
158 |
159 | def on_press(self, event):
160 | if event.inaxes is None:
161 | return
162 | if event.button == MouseButton.LEFT:
163 | d, self.pind = self.find_closest_point(event)
164 | if d[self.pind] > self.epsilon:
165 | self.add_point(event)
166 | if event.button == MouseButton.RIGHT:
167 | d, self.pind = self.find_closest_point(event)
168 | if d[self.pind] >= self.epsilon:
169 | self.pind = None
170 | self.delete_point()
171 | self.refresh_data()
172 | self.update_plot()
173 |
174 | def on_release(self, event):
175 | if event.button != 1:
176 | return
177 | self.pind = None
178 | self.refresh_data()
179 | self.update_plot()
180 |
181 | def on_motion(self, event):
182 | if self.pind is None:
183 | return
184 | if event.inaxes is None:
185 | return
186 | if event.button != 1:
187 | return
188 |
189 |         if self.pind == 0:  # first (lowest-time) point: only its temperature can be dragged
190 | self.temperature[self.pind] = event.ydata
191 |
192 | if self.pind > 0:
193 | if self.pind < self.time.shape[0] - 1:
194 | if (event.xdata < self.time[self.pind + 1] and
195 | event.xdata > self.time[self.pind - 1]):
196 | self.temperature[self.pind] = event.ydata
197 | self.time[self.pind] = event.xdata
198 | else:
199 | if event.xdata > self.time[self.pind - 1]:
200 | self.temperature[self.pind] = event.ydata
201 | self.time[self.pind] = event.xdata
202 |
203 | self.update_plot()
204 |
205 | def add_point(self, event):
206 | self.time = np.insert(self.time, 0, event.xdata)
207 | self.temperature = np.insert(self.temperature, 0, event.ydata)
208 |         indices = np.argsort(self.time)  # re-order both arrays together so time/temperature stay paired
209 | self.time = np.sort(self.time)
210 | self.temperature = self.temperature[indices]
211 |
212 | def find_closest_point(self, event):
213 | d = np.abs(self.time - event.xdata)
214 | ind = d.argmin()
215 | return d, ind
216 |
217 | def refresh_data(self):
218 | if not self.annealing_model:
219 | return
220 |
221 | self.annealing_model.history.input_time = np.copy(self.time)
222 | self.annealing_model.history.input_temperature = np.copy(self.temperature)
223 | if self.time.shape[0] > 1:
224 | self.annealing_model.history.get_isothermal_intervals()
225 | self.annealing_model.calculate_age()
226 |
227 | def delete_point(self):
228 |         if not self.pind:  # no active point, or the first point (index 0), which is never deleted
229 | return
230 | self.time = np.delete(self.time, self.pind)
231 | self.temperature = np.delete(self.temperature, self.pind)
232 |
233 | def show(self):
234 | self.fig.show()
235 | return self
236 |
237 |
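238 | # Usage sketch (illustrative only; nothing in the package calls this): the
239 | # Viewer couples an editable thermal history to an annealing model. The names
240 | # below mirror the constructor above and the objects used in the test suite,
241 | # and an interactive matplotlib backend is assumed.
242 | #
243 | #   from pyFTracks.annealing import Ketcham1999
244 | #   from pyFTracks.thermal_history import WOLF1
245 | #   model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
246 | #   Viewer(history=WOLF1, annealing_model=model).show()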
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Cython>=0.29.14
2 | matplotlib>=3.1.1
3 | numpy
4 | jupyter
5 | scipy>=1.3.2
6 | pandas>=0.25.3
7 | tables>=3.6.1
8 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | # pyFTracks supports Python 3 only and ships compiled Cython extensions
3 | # (annealing, thermal_history), so its wheels are platform- and
4 | # version-specific rather than universal.
5 | universal=0
6 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, Extension
2 | from os import path
3 |
4 | MAJOR = 0
5 | MINOR = 2
6 | MICRO = 8
7 | ISRELEASED = False
8 | VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
9 |
10 | class get_numpy_include(object):
11 | """Returns Numpy's include path with lazy import"""
12 |
13 | def __str__(self):
14 | import numpy
15 | return numpy.get_include()
16 |
17 |
18 | # Get the long description from the README file
19 | here = path.abspath(path.dirname(__file__))
20 | with open(path.join(here, 'README.rst')) as f:
21 | long_description = f.read()
22 |
23 | extensions = [Extension("pyFTracks.annealing", ["pyFTracks/annealing.pyx"]),
24 | Extension("pyFTracks.thermal_history", ["pyFTracks/thermal_history.pyx"])]
25 |
26 | with open('requirements.txt') as f:
27 | requirements = f.read().splitlines()
28 |
29 | setup(
30 | name='pyFTracks',
31 | setup_requires=[
32 | 'setuptools>=18.0',
33 | 'numpy',
34 | 'cython'
35 | ],
36 | version=VERSION,
37 | description='Fission Track Modelling and Analysis with Python',
38 | ext_modules=extensions,
39 | include_package_data=True,
40 | include_dirs=[get_numpy_include()],
41 | long_description=long_description,
42 | url='https://github.com/rbeucher/pyFTracks.git',
43 | author='Romain Beucher',
44 | author_email='romain.beucher@unimelb.edu.au',
45 | classifiers=[
46 | 'Development Status :: 2 - Pre-Alpha',
47 |
48 | 'Intended Audience :: Science/Research',
49 |         'Topic :: Scientific/Engineering',
50 |
51 | 'License :: OSI Approved :: MIT License',
52 |
53 |         # The code uses f-strings (e.g. in viewer.py), so Python 3.6 is the minimum supported version.
54 |         'Programming Language :: Python :: 3 :: Only',
55 |         'Programming Language :: Python :: 3.6',
56 |         'Programming Language :: Python :: 3.7',
57 |         'Programming Language :: Python :: 3.8',
58 |
59 | ],
60 |     packages=["pyFTracks", "pyFTracks.ressources", "pyFTracks.radialplot"],
61 | keywords='geology thermochronology fission-tracks',
62 | install_requires=requirements,
63 |
64 | )
65 |
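66 | # Build note (informational sketch): with setuptools >= 18.0 and Cython
67 | # installed, the .pyx sources listed in `extensions` are cythonized
68 | # automatically during the build, so a local install is simply, for example:
69 | #   pip install -e .
70 | #   # or: python setup.py build_ext --inplace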
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # The tests module is not meant to demonstrate testing best practices. It is
2 | # excluded from the installed distribution because setup.py lists the
3 | # pyFTracks packages explicitly and does not include "tests".
4 |
--------------------------------------------------------------------------------
/tests/test_simple.py:
--------------------------------------------------------------------------------
1 | import pyFTracks as FT
2 | import pytest
3 | import numpy as np
4 |
5 |
6 | def test_thermal_history():
7 | thermal_history = FT.ThermalHistory(name="My Thermal History",
8 | time=[0., 43., 44., 100.],
9 | temperature=[283., 283., 403., 403.])
10 | assert(np.all(thermal_history.input_time == np.array([0.0, 43.0, 44.0, 100.0])))
11 | assert(np.all(thermal_history.input_temperature == np.array([283.0, 283.0, 403.0, 403.0])))
12 |
13 | def test_get_isothermal_interval():
14 | thermal_history = FT.ThermalHistory(name="My Thermal History",
15 | time=[0., 43., 44., 100.],
16 | temperature=[283., 283., 403., 403.])
17 | assert(np.all(np.isclose(thermal_history.time, np.array([
18 | 100., 99., 98., 97.,
19 | 96., 95., 94., 93.,
20 | 92., 91., 90., 89.,
21 | 88., 87., 86., 85.,
22 | 84., 83., 82., 81.,
23 | 80., 79., 78., 77.,
24 | 76., 75., 74., 73.,
25 | 72., 71., 70., 69.,
26 | 68., 67., 66., 65.,
27 | 64., 63., 62., 61.,
28 | 60., 59., 58., 57.,
29 | 56., 55., 54., 53.,
30 | 52., 51., 50., 49.,
31 | 48., 47., 46., 45.,
32 | 44., 43.93333333, 43.86666667, 43.8,
33 | 43.73333333, 43.66666667, 43.6, 43.53333333,
34 | 43.46666667, 43.4, 43.33333333, 43.26666667,
35 | 43.2, 43.13333333, 43.06666667, 43.,
36 | 42., 41., 40., 39.,
37 | 38., 37., 36., 35.,
38 | 34., 33., 32., 31.,
39 | 30., 29., 28., 27.,
40 | 26., 25., 24., 23.,
41 | 22., 21., 20., 19.,
42 | 18., 17., 16., 15.,
43 | 14., 13., 12., 11.,
44 | 10., 9., 8., 7.,
45 | 6., 5., 4., 3.,
46 | 2., 1., 0.]))))
47 |
48 | def test_isothermal_max_temperature_wolfs():
49 | from pyFTracks.thermal_history import WOLF1, WOLF2, WOLF3, WOLF4, WOLF5
50 | thermal_histories = [WOLF1, WOLF2, WOLF3, WOLF4, WOLF5]
51 | for thermal_history in thermal_histories:
52 | assert(np.diff(thermal_history.temperature).max() <= 8.0)
53 |
54 | def test_isothermal_max_temperature_wolfs2():
55 | from pyFTracks.thermal_history import WOLF1, WOLF2, WOLF3, WOLF4, WOLF5
56 | thermal_histories = [WOLF1, WOLF2, WOLF3, WOLF4, WOLF5]
57 | for thermal_history in thermal_histories:
58 | thermal_history.get_isothermal_intervals(max_temperature_per_step=3.0)
59 | assert(np.diff(thermal_history.temperature).max() <= 3.0)
60 |
61 | def test_import_sample_datasets():
62 |     from pyFTracks.ressources import Miller, Gleadow  # noqa: F401  (smoke test: importing the bundled datasets must not raise)
63 |
64 | def test_calculate_central_age():
65 | Ns = [31, 19, 56, 67, 88, 6, 18, 40, 36, 54, 35, 52, 51, 47, 27, 36, 64, 68, 61, 30]
66 | Ni = [41, 22, 63, 71, 90, 7, 14, 41, 49, 79, 52, 76, 74, 66, 39, 44, 86, 90, 91, 41]
67 | zeta = 350. # megayears * 1e6 * u.cm2
68 | zeta_err = 10. / 350.
69 | rhod = 1.304 # 1e6 * u.cm**-2
70 | rhod_err = 0.
71 | Nd = 2936
72 | data = FT.central_age(Ns, Ni, zeta, zeta_err, rhod, Nd)
73 | assert(data["Central"] == pytest.approx(175.5672, rel=1e-2))
74 | assert(data["se"] == pytest.approx(8.5101, rel=1e-2))
75 |
76 | def test_calculate_pooled_age():
77 | Ns = [31, 19, 56, 67, 88, 6, 18, 40, 36, 54, 35, 52, 51, 47, 27, 36, 64, 68, 61, 30]
78 | Ni = [41, 22, 63, 71, 90, 7, 14, 41, 49, 79, 52, 76, 74, 66, 39, 44, 86, 90, 91, 41]
79 | zeta = 350. # megayears * 1e6 * u.cm2
80 | zeta_err = 10. / 350.
81 | rhod = 1.304 # 1e6 * u.cm**-2
82 | rhod_err = 0.
83 | Nd = 2936
84 | data = FT.pooled_age(Ns, Ni, zeta, zeta_err, rhod, Nd)
85 | assert(data["Pooled Age"] == pytest.approx(175.5672, rel=1e-2))
86 | assert(data["se"] == pytest.approx(9.8784, rel=1e-2))
87 |
88 | def test_calculate_single_grain_ages():
89 | Ns = [31, 19, 56, 67, 88, 6, 18, 40, 36, 54, 35, 52, 51, 47, 27, 36, 64, 68, 61, 30]
90 | Ni = [41, 22, 63, 71, 90, 7, 14, 41, 49, 79, 52, 76, 74, 66, 39, 44, 86, 90, 91, 41]
91 | zeta = 350. # megayears * 1e6 * u.cm2
92 | zeta_err = 10. / 350.
93 | rhod = 1.304 # 1e6 * u.cm**-2
94 | rhod_err = 0.
95 | Nd = 2936
96 | data = FT.single_grain_ages(Ns, Ni, zeta, zeta_err, rhod, Nd)
97 | actual_values = np.array([170.27277726, 194.12922188, 199.71847392, 211.82501094,
98 | 219.35417906, 192.69120201, 286.91906312, 218.87597033,
99 | 165.51402406, 154.12751783, 151.7948727 , 154.27595627,
100 | 155.38512454, 160.49155828, 156.07978109, 184.05633985,
101 | 167.62488355, 170.15231471, 151.18251036, 164.84973261])
102 | actual_errors = np.array([ 40.93820562, 61.15626671, 37.30358567, 36.79102639,
103 | 33.72043367, 107.40365187, 102.70782874, 49.20922289,
104 | 36.7660512 , 27.71454167, 33.5872093 , 28.25634367,
105 | 28.7686159 , 31.11427488, 39.43447289, 41.83466447,
106 | 28.25353735, 27.94537164, 25.54017193, 40.0012906 ])
107 |     np.testing.assert_allclose(data["Age(s)"], actual_values, rtol=0.01)  # raises on mismatch
108 |     np.testing.assert_allclose(data["se(s)"], actual_errors, rtol=0.01)
109 |
110 | def test_chi2_test():
111 | Ns = [31, 19, 56, 67, 88, 6, 18, 40, 36, 54, 35, 52, 51, 47, 27, 36, 64, 68, 61, 30]
112 | Ni = [41, 22, 63, 71, 90, 7, 14, 41, 49, 79, 52, 76, 74, 66, 39, 44, 86, 90, 91, 41]
113 | chi2 = FT.chi2_test(Ns, Ni)
114 | assert chi2 == pytest.approx(0.9292, rel=1e-3)
115 |
116 | def test_ketcham_1999_Dpar_to_rmr0():
117 | from pyFTracks.annealing import Ketcham1999
118 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
119 | assert model.convert_Dpar_to_rmr0(1.0) == pytest.approx(0.84)
120 | assert model.convert_Dpar_to_rmr0(1.74) == pytest.approx(0.84)
121 | assert model.convert_Dpar_to_rmr0(5.0) == pytest.approx(0.)
122 | assert model.convert_Dpar_to_rmr0(2.1) == pytest.approx(0.79962206086744075)
123 |
124 | def test_ketcham_1999_clapfu_to_rmr0():
125 | from pyFTracks.annealing import Ketcham1999
126 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
127 | assert(model.convert_Cl_pfu_to_rmr0(1.0) == pytest.approx(0.))
128 | assert(model.convert_Cl_pfu_to_rmr0(0.7) == pytest.approx(0.30169548259180623))
129 | assert(model.convert_Cl_pfu_to_rmr0(0.4) == pytest.approx(0.6288689335789452))
130 |
131 | def test_ohapfu_to_rmr0():
132 | from pyFTracks.annealing import Ketcham1999
133 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
134 | assert(model.convert_OH_pfu_to_rmr0(0.9) == pytest.approx(0.3171578660452086))
135 | assert(model.convert_OH_pfu_to_rmr0(0.7) == pytest.approx(0.6712590592085016))
136 | assert(model.convert_OH_pfu_to_rmr0(0.4) == pytest.approx(0.8263996762391478))
137 |
138 | def test_wolf1_ketcham_1999():
139 | from pyFTracks.annealing import Ketcham1999
140 | from pyFTracks.thermal_history import WOLF1
141 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
142 | model.history = WOLF1
143 | old, model_age, reduced = model.calculate_age(16.1)
144 | assert(old == pytest.approx(44.0, abs=0.1))
145 | assert(model_age == pytest.approx(44.9, abs=0.1))
146 |
147 | def test_wolf2_ketcham_1999():
148 | from pyFTracks.annealing import Ketcham1999
149 | from pyFTracks.thermal_history import WOLF2
150 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
151 | model.history = WOLF2
152 | old, model_age, reduced = model.calculate_age(16.1)
153 | assert(old == pytest.approx(79.5, abs=0.1))
154 | assert(model_age == pytest.approx(66.5, abs=0.1))
155 |
156 | def test_wolf3_ketcham_1999():
157 | from pyFTracks.annealing import Ketcham1999
158 | from pyFTracks.thermal_history import WOLF3
159 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
160 | model.history = WOLF3
161 | old, model_age, _ = model.calculate_age(16.1)
162 | assert(old == pytest.approx(100., abs=0.1))
163 | assert(model_age == pytest.approx(87.9, abs=0.1))
164 |
165 | def test_wolf4_ketcham_1999():
166 | from pyFTracks.annealing import Ketcham1999
167 | from pyFTracks.thermal_history import WOLF4
168 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
169 | model.history = WOLF4
170 | old, model_age, _ = model.calculate_age(16.1)
171 | assert(old == pytest.approx(100., abs=0.1))
172 | assert(model_age == pytest.approx(85.8, abs=0.1))
173 |
174 | def test_wolf5_ketcham_1999():
175 | from pyFTracks.annealing import Ketcham1999
176 | from pyFTracks.thermal_history import WOLF5
177 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
178 | model.history = WOLF5
179 | old, model_age, _ = model.calculate_age(16.1)
180 | assert(old == pytest.approx(100., abs=0.1))
181 | assert(model_age == pytest.approx(26.0, abs=0.1))
182 |
183 | def test_vrolij_ketcham_1999():
184 | from pyFTracks.annealing import Ketcham1999
185 | from pyFTracks.thermal_history import VROLIJ
186 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
187 | model.history = VROLIJ
188 | old, model_age, reduced = model.calculate_age(16.1)
189 | assert(old == pytest.approx(113., abs=0.1))
190 | assert(model_age == pytest.approx(113.0, abs=0.1))
191 |
192 | def test_flaxmans_ketcham_1999():
193 | from pyFTracks.annealing import Ketcham1999
194 | from pyFTracks.thermal_history import FLAXMANS1
195 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
196 | model.history = FLAXMANS1
197 | old, model_age, _ = model.calculate_age(16.1)
198 | assert(old == pytest.approx(0.05, abs=0.5))
199 | assert(model_age == pytest.approx(0.04, abs=0.5))
200 |
201 | def test_wolf1_ketcham_2007():
202 | from pyFTracks.annealing import Ketcham2007
203 | from pyFTracks.thermal_history import WOLF1
204 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
205 | model.history = WOLF1
206 | old, model_age, _ = model.calculate_age(16.1)
207 | assert(old == pytest.approx(44.0, abs=0.5))
208 | assert(model_age == pytest.approx(44.7, abs=0.5))
209 |
210 | def test_wolf2_ketcham_2007():
211 | from pyFTracks.annealing import Ketcham2007
212 | from pyFTracks.thermal_history import WOLF2
213 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
214 | model.history = WOLF2
215 | old, model_age, _ = model.calculate_age(16.1)
216 | assert(old == pytest.approx(76.0, abs=0.5))
217 | assert(model_age == pytest.approx(61.9, abs=0.5))
218 |
219 | def test_wolf3_ketcham_2007():
220 | from pyFTracks.annealing import Ketcham2007
221 | from pyFTracks.thermal_history import WOLF3
222 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
223 | model.history = WOLF3
224 | old, model_age, _ = model.calculate_age(16.1)
225 | assert(old == pytest.approx(100.0, abs=0.5))
226 | assert(model_age == pytest.approx(84.8, abs=0.5))
227 |
228 | def test_wolf4_ketcham_2007():
229 | from pyFTracks.annealing import Ketcham2007
230 | from pyFTracks.thermal_history import WOLF4
231 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
232 | model.history = WOLF4
233 | old, model_age, reduced = model.calculate_age(16.1)
234 | assert(old == pytest.approx(100.0, abs=0.5))
235 | assert(model_age == pytest.approx(81.2, abs=0.5))
236 |
237 | def test_wolf5_ketcham_2007():
238 | from pyFTracks.annealing import Ketcham2007
239 | from pyFTracks.thermal_history import WOLF5
240 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
241 | model.history = WOLF5
242 | old, model_age, _ = model.calculate_age(16.1)
243 | assert(old == pytest.approx(19.5, abs=0.5))
244 | assert(model_age == pytest.approx(7.47, abs=0.5))
245 |
246 | def test_vrolij_ketcham_2007():
247 | from pyFTracks.annealing import Ketcham2007
248 | from pyFTracks.thermal_history import VROLIJ
249 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
250 | model.history = VROLIJ
251 | old, model_age, _ = model.calculate_age(16.1)
252 | assert(old == pytest.approx(113., abs=0.5))
253 | assert(model_age == pytest.approx(112.0, abs=0.5))
254 |
255 | def test_flaxmans_ketcham_2007():
256 | from pyFTracks.annealing import Ketcham2007
257 | from pyFTracks.thermal_history import FLAXMANS1
258 | model = Ketcham2007({"ETCH_PIT_LENGTH": 1.65})
259 | model.history = FLAXMANS1
260 | old, model_age, _ = model.calculate_age(16.1)
261 | assert(old == pytest.approx(0.05, abs=0.5))
262 | assert(model_age == pytest.approx(0.03, abs=0.5))
263 |
264 | #def test_miller_sample():
265 | # from pyFTracks.ressources import Miller
266 | # assert Miller.central_age == pytest.approx(175.5672, rel=0.001)
267 | # assert Miller.central_age_se == pytest.approx(8.51013)
268 | # assert Miller.central_age_sigma == pytest.approx(5.1978e-5, rel=1e-7)
269 |
270 |
271 | def test_generate_synthetic_sample_wolf1():
272 | from pyFTracks.annealing import Ketcham1999
273 | from pyFTracks.thermal_history import WOLF1
274 | model = Ketcham1999({"ETCH_PIT_LENGTH": 1.65})
275 | model.history = WOLF1
276 | model.calculate_age()
277 | sample = model.generate_synthetic_sample()
278 | sample.save("WOLF1.h5")
279 | assert isinstance(sample, FT.Sample)
280 |
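281 | # Cross-check sketch (not part of the suite): the pooled age asserted above is
282 | # reproduced by the standard zeta-calibration age equation, assuming a geometry
283 | # factor of 0.5 and a 238U decay constant of 1.55125e-10 /yr (constants assumed
284 | # here, not read from the library):
285 | #
286 | #   lam, g, zeta, rhod = 1.55125e-10, 0.5, 350., 1.304e6
287 | #   ratio = sum(Ns) / sum(Ni)                        # 886 / 1136
288 | #   age = np.log(1. + lam * g * zeta * rhod * ratio) / lam
289 | #   # age / 1e6 ~= 175.57 Myr, matching the pytest.approx value above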
--------------------------------------------------------------------------------