├── proxysiphon
│   ├── tests
│   │   ├── __init__.py
│   │   ├── test_records.py
│   │   └── test_proxychimp.py
│   ├── lmr_hdf5
│   │   ├── foram_seasons.nc
│   │   └── __init__.py
│   ├── __init__.py
│   ├── records.py
│   ├── agemodel.py
│   ├── proxychimp.py
│   └── lgm.py
├── README.md
├── ci
│   └── requirements-py37.yml
├── .gitignore
├── setup.py
├── .travis.yml
├── NEWS.md
└── LICENSE
/proxysiphon/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/proxysiphon/lmr_hdf5/foram_seasons.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brews/proxysiphon/master/proxysiphon/lmr_hdf5/foram_seasons.nc
--------------------------------------------------------------------------------
/proxysiphon/__init__.py:
--------------------------------------------------------------------------------
1 | from proxysiphon.records import read_ncdc, read_lgm, read_petm
2 | from proxysiphon.agemodel import get_deltar_online, fit_agedepthmodel, date_proxy
3 | from proxysiphon.lmr_hdf5 import nc2lmrh5, nc2lmrdf
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # proxysiphon
2 |
3 | [![Build Status](https://travis-ci.org/brews/proxysiphon.svg?branch=master)](https://travis-ci.org/brews/proxysiphon)
4 |
5 | Internal lab tool to parse and clean marine sediment proxy data.
6 |
7 | This project is under heavy development.
8 |
9 | Requires Python >= 3.7.
10 |
--------------------------------------------------------------------------------
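For orientation, a minimal usage sketch of the reader functions exposed in proxysiphon/__init__.py; the file path here is hypothetical, and `read_lgm()` expects a NOAA/NCDC-formatted proxy text file:

    import proxysiphon

    # Hypothetical path to a NOAA/NCDC-formatted marine sediment proxy file.
    record = proxysiphon.read_lgm('data/my_core.txt')

    print(record.site_information.site_name)  # parsed "Site Information" section
    print(record.data.df.head())              # proxy measurements as a pandas.DataFrame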
/ci/requirements-py37.yml:
--------------------------------------------------------------------------------
1 | name: test_env
2 | channels:
3 | - conda-forge
4 | - sbmalev
5 | - defaults
6 | dependencies:
7 | - attrs
8 | - cartopy
9 | - coverage
10 | - docutils
11 | - matplotlib>=2.0
12 | - numpy
13 | - pandas
14 | - chardet
15 | - carbonferret
16 | - erebusfall
17 | - netCDF4
18 | - pytest
19 | - python=3.7
20 | - scipy>=1.0
21 | - unidecode
22 | - xarray
23 | - pytables
24 | - shapely
25 |
--------------------------------------------------------------------------------
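The same environment can be built locally with `conda env create -f ci/requirements-py37.yml` and then activated as `test_env` (the `name:` field above); this mirrors the install step in .travis.yml below.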
/proxysiphon/tests/test_records.py:
--------------------------------------------------------------------------------
1 | from proxysiphon import records
2 |
3 |
4 | def test_publication_to_citationstr():
5 | pub = records.Publication(authors='White, Tom; New, White',
6 | published_date_or_year=1986,
7 | published_title='Article title',
8 | journal_name='Cool Journal',
9 | volume=12, issue=3, pages=173,
10 | doi='sfdjla/vcxl.3')
11 | goal = "White, Tom; New, White (1986): Article title. Cool Journal, 12, 3, 173, doi:sfdjla/vcxl.3"
12 | assert pub.to_citationstr() == goal
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | bin/
14 | build/
15 | develop-eggs/
16 | dist/
17 | eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # Installer logs
28 | pip-log.txt
29 | pip-delete-this-directory.txt
30 |
31 | # Unit test / coverage reports
32 | htmlcov/
33 | .tox/
34 | .coverage
35 | .cache
36 | nosetests.xml
37 | coverage.xml
38 |
39 | # Translations
40 | *.mo
41 |
42 | # Mr Developer
43 | .mr.developer.cfg
44 | .project
45 | .pydevproject
46 |
47 | # Rope
48 | .ropeproject
49 |
50 | # Django stuff:
51 | *.log
52 | *.pot
53 |
54 | # Sphinx documentation
55 | docs/_build/
56 |
57 | #Ignore figures
58 | *.jpg
59 | *.pdf
60 | *.png
61 |
62 | #Data Files
63 | *.npy
64 | *.h5
65 |
66 | #Ipython stuff
67 | *.ipynb
68 | matplotlibrc
69 |
70 | #pyCharm stuff
71 | .idea/*
72 |
73 | #scratch files
74 | scratch/*
75 | tests/data/*
76 |
77 | #Video Files
78 | *.gif
79 | *.avi
80 | *.mp4
81 | Figs/*
82 | Archive/*
83 | *.nc
84 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 |
4 | setup(
5 | name='proxysiphon',
6 | version='0.0.1b1',
7 | description='Internal lab tool to parse and clean marine sediment proxy data.',
8 | license='GPLv3',
9 |
10 | author='S. Brewster Malevich',
11 | author_email='malevich@email.arizona.edu',
12 | url='https://github.com/brews/proxysiphon',
13 | classifiers=[
14 | 'Development Status :: 4 - Beta',
15 |
16 | 'Intended Audience :: Developers',
17 | 'Intended Audience :: Science/Research',
18 | 'Topic :: Scientific/Engineering',
19 |
20 | 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
21 |
22 | 'Programming Language :: Python :: 3',
23 | ],
24 | keywords='marine paleoclimate',
25 |
26 | packages=find_packages(exclude=['docs']),
27 |
28 | install_requires=['numpy', 'scipy', 'pandas', 'chardet', 'carbonferret',
29 | 'erebusfall', 'netCDF4', 'unidecode', 'xarray', 'tables',
30 | 'shapely'],
31 | extras_require={
32 | 'plots': ['matplotlib>=3.0.0', 'cartopy'],
33 | 'agemodel': ['snakebacon']
34 | },
35 | tests_require=['pytest'],
36 | package_data={'proxysiphon': ['tests/*.txt', 'lmr_hdf5/*.nc']},
37 | )
38 |
--------------------------------------------------------------------------------
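A note on the `extras_require` groups above: the plotting and age-model dependencies install only on request, e.g. `pip install ".[plots,agemodel]"` from the repository root. This is standard setuptools behavior rather than anything specific to proxysiphon.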
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | dist: bionic
3 | notifications:
4 | email: false
5 | matrix:
6 | fast_finish: true
7 | include:
8 | - python: 3.7
9 | env:
10 | - CONDA_ENV=py37
11 | - JOB_OS=Linux
12 | - os: osx
13 | language: generic
14 | env:
15 | - CONDA_ENV=py37
16 | - JOB_OS=MacOSX
17 | before_install:
18 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then wget https://repo.continuum.io/miniconda/Miniconda2-latest-$JOB_OS-x86_64.sh
19 | -O miniconda.sh; else wget https://repo.continuum.io/miniconda/Miniconda3-latest-$JOB_OS-x86_64.sh
20 | -O miniconda.sh; fi
21 | - bash miniconda.sh -b -p $HOME/miniconda
22 | - export PATH="$HOME/miniconda/bin:$PATH"
23 | - hash -r
24 | - conda config --set always_yes yes --set changeps1 no
25 | - conda update -q conda
26 | - conda info -a
27 | install:
28 | - conda env create -q -f ci/requirements-$CONDA_ENV.yml
29 | - source activate test_env
30 | - pip install .
31 | script:
32 | - pytest --pyargs proxysiphon
33 | deploy:
34 | provider: pypi
35 | distributions: sdist bdist_wheel
36 | on:
37 | tags: true
38 | user: __token__
39 | password:
40 | secure: YK2vXCGESyFNvLS7CzcXyEP+JYQbhz73nvb2kewg/4DKdTedYIqm381SM3CfCQpOWchtY4CTz32eVS12eqyNvovVvisVo6oD4UA9I52iEC2Kq6Ux7Bb+ahKeu1qYk1AlKYKZAIWB4QOvCGs82nS1rlsGL4J1EXGtvoYKIb3ahTjy2zIVV1rSbbdZqWMAMCRzmr1dNqf4U8ohgkD7cSVCjGKsFZ8msLCgiOd7WU0JUvkjzh0t0X02FL/R7wi0Uq7DZId1wKJBpwCCNz18bPYxRQr9MGsqMJOuX+F2VnOSWx6ZlY2TMcqbf+Gd7l/RyO9tmwVvaMbqFNUSL9B2cjkGGtd+06rXY6sb6yTNwux6ECE6bg1Nr0p1lTlNK1fxTt+4+Z9JGYxkqt8mYdJ+4S1WvBFhku5RSr7kBfke2GJ8R+Wj06l4oHzWXNxRH/VPnusocFw+9sAC5We1ixbW/MOXtdFOVvTIUvGwwKIDXrhM2J3fGuiu48QwMgb2kxsMimd6hKCBfFyQ6tDYEdc1YpjdvA9Nwh8apKfZFI+NzAaS14zlTDUDZaiJBM99KXZVJyN7lMAH26InViitQzfIb3Ua+eyBnPgcekWSTtgquAoe9W/nFvGsjb/OlVKJ3SuWd5HNW4xIYGPusnhqnJDLVWTf0CFqe0LSt9x5RRY8rdSCV8I=
41 | skip_existing: true
42 |
--------------------------------------------------------------------------------
/NEWS.md:
--------------------------------------------------------------------------------
1 | # proxysiphon v0.0.1b1
2 |
3 | * Fix bad C14 dates and errors in chronology section of output proxy netCDF files (Issue #12).
4 |
5 |
6 | # proxysiphon v0.0.1a5
7 |
8 | * `nc2lmrh5()` and `nc2lmrdf()` should now trim based on depth cutoffs of `LgmRecord` objects. These are optional floats at `self.chronology_information.cut_shallow` and `self.chronology_information.cut_deep`.
9 |
10 |
11 | # proxysiphon v0.0.1a4
12 |
13 | * Add `LgmRecord.chronology_depth_range()` to get the depth range of chronology determinants (Issue #11).
14 | * `LgmRecord.slice_datadepths()` now slices `data.age_ensemble` and `data.age_median` in addition to the general dataframe.
15 | * `LgmRecord.to_netcdf()` will now add attributes to the chronology section of the output netCDF file if the LgmRecord object `chronology_information` has `cut_shallow` or `cut_deep` attributes.
16 | * `LgmRecord.plot_agedepth()` now automatically plots `self.chronology_information.cut_shallow` and `self.chronology_information.cut_deep` attributes as vertical lines, if the cutoff attributes are available.
17 |
18 |
19 | # proxysiphon v0.0.1a3
20 |
21 | * Add method `RedateMixin.swapin_custom_deltar()` to handle swapping new carbon reservoir values into proxy record chronology information.
22 | * `QcPlotMixin.plot_agedepth()` now returns a nearly empty axis if the proxy record has no data to plot.
23 | * Add 'plot_agedepth_kws' argument to `to_qcpdf()`.
24 | * `plot_agedepth` now accepts an optional iterable of depths via 'depthlines'. It then plots vertical lines at those depths. This is used to indicate cut-off points for a proxy record.
25 |
26 |
27 | # proxysiphon v0.0.1a2
28 |
29 | * `cartopy` and `snakebacon` have been made recommended dependencies.
30 | * New method `records.Publication.to_citationstr()` to get quick and dirty bibliography strings.
31 | * Add `PetmRecord` and `LgmRecord`, specialized subclasses of `NcdcRecord`. These are created with `read_lgm()` and `read_petm()`.
32 | * `LgmRecord` instances now have several methods for age modeling with 14C.
33 | * `LgmRecord` and `PetmRecord` records now have a `.to_netcdf()` method.
34 | * `proxysiphon.qcreport` submodule has been refactored into `LgmRecord` and `PetmRecord` methods. This should make it easier to do quick plots (e.g. with `.plot_agedepth()`) and output more comprehensive PDF quality-control reports with `.to_qcpdf()`.
35 | * Add `nc2lmrh5`, a function to read output proxy NetCDF files and convert them to LMR-style HDF5 proxy files.
36 |
37 |
38 | # proxysiphon v0.0.1a1
39 |
40 | * Refactoring to use Python 3.7 `dataclasses` to simplify code.
41 | * New `Guts.yank_variables()` method and `NcdcRecord.Variables` class for the "Variables" file section.
42 | * `NcdcRecord.original_source_url` is now an optional string with a default value of None.
43 | * `DataCollection` `first_year` & `last_year` are now floats, not ints.
44 | * More flexible record creation with keywords.
45 | * `NcdcRecord` `description` and `original_source_url` attributes refactored to simple string.
46 | * Removed `contribution_date` attribute from `NcdcRecord`. This was unused in our analysis.
47 | * `proxysiphon.agemodel.fit_agedepthmodel` now also returns snakebacon agemodel kwargs.
48 | * Grab new fields from proxy text files ("Description" and "Original_Source_URL").
49 | * Now pull more fields from the "Publication" section, including support for multiple publications.
50 | * `Guts.pull_section()` now returns a list of lists to deal with sections that appear multiple times (e.g. "Publication").
51 | * Fix missing dependency in setup.py.
52 |
53 |
54 | # proxysiphon v0.0.1a0
55 |
56 | * Initial release.
--------------------------------------------------------------------------------
/proxysiphon/records.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from chardet import detect as chdetect
3 | from pandas import DataFrame
4 | from proxysiphon.proxychimp import Guts
5 | import proxysiphon.lgm as lgm
6 |
7 |
8 | def read_ncdc(filepath_or_buffer, encoding=None):
9 | """Read NOAA NCDC txt file
10 |
11 | Parameters
12 | ----------
13 | filepath_or_buffer
14 | encoding : str or None, optional
15 | File encoding. Default is None which attempts to guess the encoding with
16 | `chardet.detect`.
17 |
18 | Returns
19 | -------
20 | out : NcdcRecord
21 | """
22 | with open(filepath_or_buffer, 'rb') as fl:
23 | flbytes = fl.read()
24 | if encoding is None:
25 | encoding = chdetect(flbytes)['encoding']
26 | g = Guts(flbytes.decode(encoding))
27 | return g.to_ncdcrecord()
28 |
29 |
30 | def read_lgm(filepath_or_buffer, encoding=None):
31 | """Read NOAA NCDC txt file for LGM proxies
32 |
33 | Parameters
34 | ----------
35 | filepath_or_buffer
36 | encoding : str or None, optional
37 | File encoding. Default is None which attempts to guess the encoding with
38 | `chardet.detect`.
39 |
40 | Returns
41 | -------
42 |     out : LgmRecord
43 | """
44 | out = read_ncdc(filepath_or_buffer, encoding=encoding)
45 | return LgmRecord(**out.__dict__)
46 |
47 |
48 | def read_petm(filepath_or_buffer, encoding=None):
49 | """Read NOAA NCDC txt file for PETM proxies
50 |
51 | Parameters
52 | ----------
53 | filepath_or_buffer
54 | encoding : str or None, optional
55 | File encoding. Default is None which attempts to guess the encoding with
56 | `chardet.detect`.
57 |
58 | Returns
59 | -------
60 |     out : PetmRecord
61 | """
62 | out = read_ncdc(filepath_or_buffer, encoding=encoding)
63 | return PetmRecord(**out.__dict__)
64 |
65 |
66 | @dataclass
67 | class SiteInformation:
68 | """Proxy site information"""
69 | site_name: str = None
70 | location: str = None
71 | country: str = None
72 | northernmost_latitude: float = None
73 | southernmost_latitude: float = None
74 | easternmost_longitude: float = None
75 | westernmost_longitude: float = None
76 | elevation: float = None
77 |
78 |
79 | @dataclass
80 | class DataCollection:
81 | """Proxy site data collection information"""
82 | collection_name: str = None
83 | first_year: float = None
84 | last_year: float = None
85 | time_unit: str = None
86 | core_length: str = None
87 | notes: str = None
88 | collection_year: int = None
89 |
90 |
91 | @dataclass
92 | class VariableInfo:
93 | """Proxy site Data Variable information"""
94 | what: str
95 | material: str
96 | error: str
97 | units: str
98 | seasonality: str
99 | archive: str
100 | detail: str
101 | method: str
102 | datatype: str
103 |
104 |
105 | @dataclass
106 | class ChronologyInformation:
107 | """Proxy site chronology information"""
108 | df: DataFrame = field(default_factory=DataFrame)
109 |
110 |
111 | @dataclass
112 | class Data:
113 | """Proxy site data variables"""
114 | df: DataFrame = field(default_factory=DataFrame)
115 |
116 |
117 | @dataclass
118 | class Publication:
119 | """Proxy site publication"""
120 | authors: str = None
121 | published_date_or_year: int = None
122 | published_title: str = None
123 | journal_name: str = None
124 | volume: str = None
125 | edition: str = None
126 | issue: str = None
127 | pages: str = None
128 | report_number: str = None
129 | doi: str = None
130 | online_resource: str = None
131 | full_citation: str = None
132 | abstract: str = None
133 |
134 | def to_citationstr(self):
135 | """Citation str of publication"""
136 | if self.full_citation is not None:
137 | return str(self.full_citation)
138 |
139 | out = '{authors} ({year}): {title}.'.format(authors=self.authors,
140 | year=self.published_date_or_year,
141 | title=self.published_title)
142 | # This is a bit lazy...
143 | if self.journal_name is not None:
144 | out += ' {},'.format(self.journal_name)
145 | if self.edition is not None:
146 | out += ' {},'.format(self.edition)
147 | if self.volume is not None:
148 | out += ' {},'.format(self.volume)
149 | if self.issue is not None:
150 | out += ' {},'.format(self.issue)
151 | if self.pages is not None:
152 | out += ' {},'.format(self.pages)
153 | if self.report_number is not None:
154 | out += ' {},'.format(self.report_number)
155 | if self.doi is not None:
156 | out += ' doi:{}'.format(self.doi)
157 | if self.online_resource is not None:
158 | out += ' {}'.format(self.online_resource)
159 |
160 | return out
161 |
162 |
163 | @dataclass
164 | class NcdcRecord:
165 | """Proxy site NCDC record"""
166 | chronology_information: ChronologyInformation = None
167 | data: Data = None
168 | data_collection: DataCollection = None
169 | description: str = None
170 | original_source_url: str = None
171 | publication: list = field(default_factory=list)
172 | site_information: SiteInformation = None
173 | variables: dict = field(default_factory=dict)
174 |
175 |
176 | class LgmRecord(lgm.QcPlotMixin, lgm.RedateMixin, lgm.NetcdfMixin, NcdcRecord):
177 | """Proxy site LGM NCDC record"""
178 | pass
179 |
180 |
181 | class PetmRecord(LgmRecord):
182 | """PETM proxy site NCDC record"""
183 | pass
184 |
--------------------------------------------------------------------------------
/proxysiphon/agemodel.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import scipy.stats as stats
6 | import carbonferret as cf
7 |
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def remove_outliers(df, col_name, iqr_min=10):
13 | """Remove outliers from dataframe copy based on col_name"""
14 | out = df.copy()
15 | iqr = stats.iqr(df.loc[:, col_name])
16 | if iqr < iqr_min:
17 | return out
18 | upper = np.percentile(df.loc[:, col_name], 75)
19 | lower = np.percentile(df.loc[:, col_name], 25)
20 | out = out[out.loc[:, col_name] < (upper + 1.5 * iqr)]
21 | out = out[out.loc[:, col_name] > (lower - 1.5 * iqr)]
22 | return out
23 |
24 |
25 | def get_deltar_online(latlon, max_distance=3000):
26 | """Use carbonferret to grab an estimate ΔR from internet"""
27 | max_distance = int(max_distance)
28 | nearby = cf.find_near(lat=latlon[0], lon=latlon[1], n=10)
29 |
30 | nearby = nearby[nearby['distance (km)'] <= max_distance]
31 | # nearby = remove_outliers(nearby, 'DeltaR')
32 | # nearby = remove_outliers(nearby, 'DeltaRErr')
33 | log.debug('ΔR and ΔRσ from {} samples'.format(len(nearby)))
34 |
35 | w = 1/len(nearby)
36 | deltar_mean = nearby['DeltaR'].mean()
37 | # Use pooled or combined variance of carbonferret deltaR distributions.
38 | var = (w * ((nearby['DeltaR'] - deltar_mean)**2 + nearby['DeltaRErr']**2)).sum()
39 | sigma = np.sqrt(var)
40 | return tuple([deltar_mean, sigma])
41 |
42 |
43 | def fit_agedepthmodel(chron, pdata, deltar=None, deltar_error=None, minyr=None, mcmc_kws=None):
44 | log.debug('Fitting new age model.')
45 |
46 | try:
47 | import snakebacon as sb
48 | except ModuleNotFoundError:
49 | raise ModuleNotFoundError('snakebacon needs to be installed for age models')
50 |
51 | if minyr is None:
52 | minyr = -1000
53 |
54 | chron = pd.DataFrame(chron).copy()
55 | pdata = pd.DataFrame(pdata).copy()
56 |
57 | chron['depth'] = chron.loc[:, ('depth_top', 'depth_bottom')].mean(axis=1)
58 | chron['labid'] = chron.Labcode
59 | chron['age'] = chron.loc[:, '14C_date']
60 | chron['error'] = chron.loc[:, '14C_1s_err']
61 |
62 | chron['cc'] = 2 # This is marine calibration curve, use for all depths.
63 |
64 | chron.sort_values('depth', inplace=True)
65 |
66 | # Cleanup dataframe so works with snakebacon.
67 | if deltar is not None:
68 | chron['delta_R'] = deltar
69 | if deltar_error is not None:
70 | chron['delta_R_1s_err'] = deltar_error
71 |
72 | if chron.other_date.notnull().any():
73 |         # Check that a row can't have both 14C_date and other_date at the same depth. Same for the 1s errors.
74 | assert ~chron.loc[:, ['14C_date', 'other_date']].notnull().all(axis=1).any()
75 | assert ~chron.loc[:, ['14C_1s_err', 'other_1s_err']].notnull().all(axis=1).any()
76 | other_msk = ~chron['14C_date'].notnull()
77 | # `other_dates` should not have delta_R values
78 | chron.loc[other_msk, 'delta_R'] = 0
79 | chron.loc[other_msk, 'delta_R_1s_err'] = 0
80 | # Move `other_dates` and `errors` to chron.age and chron.error.
81 | chron.loc[other_msk, 'age'] = chron.loc[other_msk, 'other_date']
82 | chron.loc[other_msk, 'error'] = chron.loc[other_msk, 'other_1s_err']
83 | chron.loc[other_msk, 'cc'] = 0 # Using constant calibration curve for non-14C dates
84 |         # Drop rows with `other_date` but no `other_1s_err`.
85 | chron.drop(chron[other_msk & chron['other_date'].notnull() & ~chron['other_1s_err'].notnull()].index,
86 | inplace=True)
87 |
88 | # Have any NaNs in age, depth or error?
89 | assert chron.loc[:, ['age', 'depth', 'error']].notnull().all().all()
90 |
91 | coredates = sb.ChronRecord(chron)
92 |
93 | d_min = np.min([x.min() for x in [chron.depth, pdata.depth]])
94 | d_max = np.max([x.max() for x in [chron.depth, pdata.depth]])
95 | sug_acc_mean = sb.suggest_accumulation_rate(coredates)
96 | # n_segs = np.ceil((d_max - d_min) / 5) # Num segments in mcmc, ~ 5cm, rounded up.
97 |
98 | # TODO(brews): Check whether need sqrt error here.
99 | guesses = np.random.randn(2) * coredates.error[:2] + coredates.age[:2]
100 | guesses[guesses < minyr] = minyr # Line #70 of Bacon.R warns that otherwise twalk MCMC will not run.
101 |
102 | # if n_segs > 200 or n_segs < 5:
103 | # n_segs = np.ceil((d_max - d_min) / 10)
104 | # assert (n_segs < 500) and (n_segs > 5) # Sanity check for extremely long or short MCMC runs.
105 |
106 | mcmc_params = dict(depth_min=d_min, depth_max=d_max,
107 | cc=chron.cc.values,
108 | d_r=chron.delta_R.values,
109 | d_std=chron.delta_R_1s_err.values,
110 | t_a=[3], t_b=[4], k=50, # n_segs,
111 | minyr=minyr, maxyr=50000,
112 | th01=guesses[0], th02=guesses[1],
113 | acc_mean=sug_acc_mean, acc_shape=1.5,
114 | mem_strength=4, mem_mean=0.7)
115 |
116 | if mcmc_kws is not None:
117 | mcmc_params.update(mcmc_kws)
118 |
119 | log.debug('MCMC parameters: {}'.format(mcmc_params))
120 |
121 | agemodel = sb.AgeDepthModel(coredates, mcmc_kws=mcmc_params)
122 | log.debug('Age model fit done')
123 | return agemodel, coredates, mcmc_params
124 |
125 |
126 | def date_proxy(admodel, pdata, nsims):
127 | try:
128 | import snakebacon as sb
129 | except ModuleNotFoundError:
130 | raise ModuleNotFoundError('snakebacon needs to be installed for age models')
131 |
132 | pdata = pd.DataFrame(pdata).copy()
133 | if 'age' in pdata.columns:
134 | # Rename to avoid error with sb.ProxyRecord
135 | log.debug('Renaming age column in proxy data')
136 | cols = list(pdata.columns.values)
137 | cols[cols.index('age')] = 'original_age'
138 | pdata.columns = cols
139 |
140 | orig_pdata = sb.ProxyRecord(pdata)
141 | pdata_median = admodel.date(orig_pdata, 'median').to_pandas()
142 | pdata_ensemble = admodel.date(orig_pdata, 'ensemble', nsims).to_pandas()
143 | pdata_median['age'] = pdata_median['age'].round()
144 | pdata_ensemble['age'] = pdata_ensemble['age'].round()
145 | return pdata_median, pdata_ensemble
146 |
--------------------------------------------------------------------------------
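The pooled-variance step in `get_deltar_online()` above combines the scatter between nearby ΔR estimates with their individual reported errors. A small self-contained sketch of that arithmetic with made-up numbers, skipping the carbonferret lookup (which needs network access):

    import numpy as np
    import pandas as pd

    # Toy stand-in for the nearby-points table carbonferret.find_near() would return.
    nearby = pd.DataFrame({'DeltaR': [100.0, 140.0], 'DeltaRErr': [30.0, 40.0]})

    w = 1 / len(nearby)                    # equal weight per sample
    deltar_mean = nearby['DeltaR'].mean()  # 120.0
    var = (w * ((nearby['DeltaR'] - deltar_mean) ** 2
                + nearby['DeltaRErr'] ** 2)).sum()
    sigma = np.sqrt(var)                   # sqrt(0.5*(400 + 900) + 0.5*(400 + 1600)) ~= 40.6
    print(deltar_mean, sigma)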
/proxysiphon/tests/test_proxychimp.py:
--------------------------------------------------------------------------------
1 | from tempfile import NamedTemporaryFile
2 | import pytest
3 | import pandas as pd
4 |
5 | from proxysiphon import proxychimp
6 |
7 |
8 | datapayload = ['#------------------------',
9 | '# Contribution_Date',
10 | '#',
11 | '#------------------------',
12 | '# Title',
13 | '#',
14 | '#------------------------',
15 | '# Chronology_Information',
16 | '#',
17 | '# Labcode\tdepth_top\tdepth_bottom\tmat_dated\t14C_date\t14C_1s_err\tdelta_R\tdelta_R_1s_err\tother_date\tother_1s_err\tother_type\t',
18 | '# 152757 5 6 G. ruber or mixed planktonic 475 30 188 73 -999 -999 -999 ',
19 | '#',
20 | '#------------------------',
21 | '# Data',
22 | '# Data lines follow (have no #)',
23 | '# Missing Value: -999',
24 | 'depth\tage\tbacon',
25 | '1\t2\t3',
26 | '4\t5\t6']
27 |
28 |
29 | @pytest.fixture(scope='module')
30 | def chron_nodeltaR_nodata_guts():
31 | payload = ['#------------------------',
32 | '# Contribution_Date',
33 | '#',
34 | '#------------------------',
35 | '# Title',
36 | '#',
37 | '#------------------------',
38 | '# Chronology_Information',
39 | '#',
40 | '# Labcode\tdepth_top\tdepth_bottom\tmat_dated\t14C_date\t14C_1s_err\tdelta_R\tdelta_R_1s_err\tother_date\tother_1s_err\tother_type\t',
41 | '# 152757 5 6 G. ruber or mixed planktonic -999 -999 -999 -999 -999 -999 -999 ',
42 | '#',
43 | '#------------------------',
44 | '# Data',
45 | '# Data lines follow (have no #)',
46 | '# Missing Value: -999',
47 | 'depth\tage\tbacon']
48 | with NamedTemporaryFile('wb') as tf:
49 | tf.write('\n'.join(payload).encode('utf-8'))
50 | tf.flush()
51 | g = proxychimp.Guts(tf.name)
52 | return g
53 |
54 |
55 | @pytest.fixture(scope='module')
56 | def chron_guts():
57 | payload = datapayload
58 | with NamedTemporaryFile('wb') as tf:
59 | tf.write('\n'.join(payload).encode('utf-8'))
60 | tf.flush()
61 | g = proxychimp.Guts(tf.name)
62 | return g
63 |
64 |
65 | @pytest.fixture(scope='module')
66 | def dumb_guts():
67 | payload = ['#------------------------',
68 | '# NOTE: Please cite original publication, online resource and date accessed when using this data.',
69 | '# If there is no publication information, please cite Investigator,', '#',
70 | '# Description/Documentation lines begin with #', '# Data lines have no #', '#',
71 | '# Online_Resource: http://www.ncdc.noaa.gov/paleo/study/',
72 | '# Online_Resource: http://www1.ncdc.noaa.gov/pub/data/paleo/', '#',
73 | '# Original_Source_URL: https://www.ncdc.noaa.gov/paleo-search/study/2622',
74 | '#------------------------',
75 | '# Contribution_Date',
76 | '#',
77 | '#------------------------',
78 | '# Title',
79 | '#',
80 | '#------------------------',
81 | '# Data_Collection', '# Collection_Name: P178-15P',
82 | '# First_Year: 39485', '# Last_Year: -18',
83 | '# Time_Unit: cal yr BP', '# Core_Length: ',
84 | '# Notes: mg_red', '# Collection_Year: 1923',
85 | '#------------------------',
86 | '# Site Information', '# Site_Name: P178-15P',
87 | '# Location: Arabian Sea', '# Country: ',
88 | '# Northernmost_Latitude: 11.955', '# Southernmost_Latitude: 11.955',
89 | '# Easternmost_Longitude: 44.3', '# Westernmost_Longitude: 44.3',
90 | '# Elevation: -869',
91 | '#------------------------',
92 | '# Description and Notes',
93 | '# Description: d18O sacc from 2003 paper, mg/ca sacc from 2002 paper, alkeno',
94 | '#------------------------',
95 | '# Publication',
96 | '# Authors: Tierney, Jessica E.; Pausata, Francesco S. R.; deMenocal, Peter B.',
97 | '# Published_Date_or_Year: 2016',
98 | '# Published_Title: Deglacial Indian monsoon failure and North Atlantic stadials linked by Indian Ocean surface cooling',
99 | '# Journal_Name: Nature Geoscience', '# Volume: 9',
100 | '# Edition: ', '# Issue: ', '# Pages: 46-50',
101 | '# Report Number: ', '# DOI: 10.1038/ngeo2603',
102 | '# Online_Resource: ', '# Full_Citation: ', '# Abstract:',
103 | '#------------------------',
104 | '# Chronology_Information',
105 | '#',
106 | '# Labcode\tdepth_top\tdepth_bottom\tmat_dated\t14C_date\t14C_1s_err\tdelta_R\tdelta_R_1s_err\tother_date\tother_1s_err\tother_type\t',
107 | '#',
108 | '#---------------------------------------',
109 | '# Variables',
110 | '# Data variables follow that are preceded by "##" in columns one and two.',
111 | '# Variables list, one per line, shortname-tab-longname components (9 components: what, material, error, units, seasonality, archive, detail, method, C or N for Character or Numeric data)',
112 | '## depth ,,,cm,,,,,',
113 | '## age ,,,cal yr BP,,,,,',
114 | '## bacon ,,,index,,,,,',
115 | '#------------------------',
116 | '# Data',
117 | '# Data lines follow (have no #)',
118 | '# Missing Value: -999',
119 | 'depth\tage\tbacon',
120 | '1\t2\t3',
121 | '4\t5\t6']
122 | with NamedTemporaryFile('wb') as tf:
123 | tf.write('\n'.join(payload).encode('utf-8'))
124 | tf.flush()
125 | g = proxychimp.Guts(tf.name)
126 | return g
127 |
128 |
129 | def test_find_values():
130 | lines = ['apple: 1\n', 'bee: 1:2\n', 'charlie: 1.5\n', 'dingo-2\t\n', 'echo: ']
131 | assert proxychimp.find_values(lines, 'NotAKey') is None
132 | assert proxychimp.find_values(lines, 'apple') == '1'
133 | assert proxychimp.find_values(lines, 'bee') == '1:2'
134 | assert proxychimp.find_values(lines, 'charlie', fun=float) == 1.5
135 | assert proxychimp.find_values(lines, 'bee', sep='-', fun=int) is None
136 | assert proxychimp.find_values(lines, 'dingo', sep='-', fun=int) == 2
137 | assert proxychimp.find_values(lines, 'echo') is None
138 |
139 |
140 | def test_str_guts__init_():
141 | filestr = '\n'.join(datapayload)
142 | g = proxychimp.Guts(filestr)
143 | goal_dict = {'Labcode': [152757], 'depth_top': [5], 'depth_bottom': [6],
144 | 'mat_dated': ['G. ruber or mixed planktonic'],
145 | '14C_date': [475],'14C_1s_err': [30],
146 | 'delta_R': [188], 'delta_R_1s_err': [73],
147 | 'other_date': [pd.np.nan], 'other_1s_err': [pd.np.nan],
148 | 'other_type': [pd.np.nan]}
149 | goal = pd.DataFrame(goal_dict)
150 | df = g.yank_chron_df()
151 | for k in goal_dict.keys():
152 | pd.testing.assert_series_equal(goal[k], df[k])
153 |
154 |
155 | def test__divide_portions(dumb_guts):
156 | goal_data = ['depth\tage\tbacon', '1\t2\t3', '4\t5\t6']
157 | goal_description_ends = ('#------------------------', '# Missing Value: -999')
158 | goal_beginline = 70
159 | assert goal_data == dumb_guts.data
160 | assert goal_description_ends[0] == dumb_guts.description[0]
161 | assert goal_description_ends[-1] == dumb_guts.description[-1]
162 | assert goal_beginline == dumb_guts.data_beginline
163 |
164 |
165 | def test__write_sectionindex(dumb_guts):
166 | goal_index = {'Chronology_Information': [(55, 59)],
167 | 'Contribution_Date': [(12, 14)],
168 | 'Data': [(67, 70)],
169 | 'Title': [(15, 17)]}
170 | assert 10 == len(dumb_guts.sectionindex.keys())
171 | for k, goal in goal_index.items():
172 | assert goal == dumb_guts.sectionindex[k]
173 |
174 |
175 | def test_pull_section(dumb_guts):
176 | goal = ['# Data', '# Data lines follow (have no #)',
177 | '# Missing Value: -999']
178 | assert dumb_guts.pull_section('Data')[0] == goal
179 |
180 |
181 | def test_pull_section_badname(dumb_guts):
182 | with pytest.raises(KeyError):
183 | dumb_guts.pull_section('data')
184 |
185 | def test_available_sections(dumb_guts):
186 | goal = ['Contribution_Date', 'Title', 'Data_Collection',
187 | 'Site Information', 'Description and Notes', 'Publication',
188 | 'Chronology_Information', 'Variables', 'Data']
189 | # Skip first section as it is very long:
190 | assert goal == dumb_guts.available_sections()[1:]
191 |
192 |
193 | def test_guess_missingvalues(dumb_guts):
194 | goal = '-999'
195 | assert goal == dumb_guts.guess_missingvalues()
196 |
197 |
198 | def test_yank_data_df(dumb_guts):
199 | goal_dict = {'depth': [1, 4], 'age': [2, 5], 'bacon': [3, 6]}
200 | goal = pd.DataFrame(goal_dict)
201 | df = dumb_guts.yank_data_df()
202 | for k in goal_dict.keys():
203 | pd.testing.assert_series_equal(goal[k], df[k])
204 |
205 |
206 | def test_yank_chron_df(chron_guts):
207 | goal_dict = {'Labcode': [152757], 'depth_top': [5], 'depth_bottom': [6],
208 | 'mat_dated': ['G. ruber or mixed planktonic'],
209 | '14C_date': [475],'14C_1s_err': [30],
210 | 'delta_R': [188], 'delta_R_1s_err': [73],
211 | 'other_date': [pd.np.nan], 'other_1s_err': [pd.np.nan],
212 | 'other_type': [pd.np.nan]}
213 | goal = pd.DataFrame(goal_dict)
214 | df = chron_guts.yank_chron_df()
215 | for k in goal_dict.keys():
216 | pd.testing.assert_series_equal(goal[k], df[k])
217 |
218 |
219 | def test_yank_publication(dumb_guts):
220 | v1 = dumb_guts.yank_publication()[0]
221 | assert 'Tierney, Jessica E.; Pausata, Francesco S. R.; deMenocal, Peter B.' == v1['authors']
222 | assert '10.1038/ngeo2603' == v1['doi']
223 |
224 |
225 | def test_yank_site_information(dumb_guts):
226 | v = dumb_guts.yank_site_information()
227 | assert 'P178-15P' == v['site_name']
228 | assert 'Arabian Sea' == v['location']
229 | assert v['country'] is None
230 | assert 11.955 == v['northernmost_latitude']
231 | assert 11.955 == v['southernmost_latitude']
232 | assert 44.3 == v['easternmost_longitude']
233 | assert 44.3 == v['westernmost_longitude']
234 | assert -869 == v['elevation']
235 |
236 |
237 | def test_yank_data_collection(dumb_guts):
238 | v = dumb_guts.yank_data_collection()
239 | assert 'P178-15P' == v['collection_name']
240 | assert 39485 == v['first_year']
241 | assert -18 == v['last_year']
242 | assert 'cal yr BP' == v['time_unit']
243 | assert v['core_length'] is None
244 | assert 'mg_red' == v['notes']
245 | assert 1923 == v['collection_year']
246 |
247 |
248 | def test_pull_variables(dumb_guts):
249 | v = dumb_guts.yank_variables()
250 | assert 'cm' == v['depth'][3]
251 |
252 |
253 | def test_yank_description_and_notes(dumb_guts):
254 | v = dumb_guts.yank_description_and_notes()
255 | assert 'd18O sacc from 2003 paper, mg/ca sacc from 2002 paper, alkeno' == v
256 |
257 |
258 | def test_yank_original_source_url(dumb_guts):
259 | v = dumb_guts.yank_original_source_url()
260 | assert 'https://www.ncdc.noaa.gov/paleo-search/study/2622' == v
261 |
262 |
263 | def test_has_chron(dumb_guts, chron_guts):
264 | assert dumb_guts.has_chron() is False
265 | assert chron_guts.has_chron() is True
266 |
267 |
268 | def test_has_deltar(dumb_guts, chron_guts, chron_nodeltaR_nodata_guts):
269 | assert chron_nodeltaR_nodata_guts.has_deltar() is False
270 | assert dumb_guts.has_deltar() is False
271 | assert chron_guts.has_deltar() is True
272 |
273 |
274 | def test_has_datacolumn(dumb_guts, chron_nodeltaR_nodata_guts):
275 | assert chron_nodeltaR_nodata_guts.has_datacolumn('boogers') is False
276 | assert dumb_guts.has_datacolumn('depth') is True
277 |
--------------------------------------------------------------------------------
/proxysiphon/proxychimp.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import collections
3 | import datetime
4 | from io import BytesIO
5 |
6 | import pandas as pd
7 | from chardet import detect as chdetect
8 | import proxysiphon.records as records
9 |
10 |
11 | DIVIDER = '#------------------------'
12 | DATALINE_TRIGGER = '# Data lines follow (have no #)'
13 | HEADINGS = ['Contribution_Date', 'Title', 'Investigators',
14 | 'NOTE: Please cite original publication, online resource and date accessed when using this data.',
15 | 'Description and Notes', 'Publication', 'Funding_Agency',
16 | 'Site Information', 'Data_Collection', 'Species',
17 | 'Chronology_Information', 'Variables', 'Data']
18 | CHRON_HEADER = '# Labcode\tdepth_top\tdepth_bottom\tmat_dated\t14C_date\t14C_1s_err\tdelta_R\tdelta_R_1s_err\tother_date\tother_1s_err\tother_type\t'
19 | MISSINGVALUE_LABEL = '# Missing Value: '
20 |
21 |
22 | log = logging.getLogger(__name__)
23 |
24 |
25 | def grab_latlon(g):
26 | """Grab latitude from proxychimp.Guts"""
27 | victim = g.pull_section('Site Information')[0]
28 | fields = dict(Northernmost_Latitude=None,
29 | Southernmost_Latitude=None,
30 | Easternmost_Longitude=None,
31 | Westernmost_Longitude=None)
32 | for ln in victim:
33 | if any([x in ln for x in fields.keys()]):
34 | ln_split = ln.split(':')
35 | v = float(ln_split[-1])
36 | k = ln_split[0][1:].lstrip().rstrip()
37 | assert k in fields.keys()
38 | fields[k] = v
39 | return fields['Northernmost_Latitude'], fields['Westernmost_Longitude']
40 |
41 |
42 | def grab_elevation(g):
43 | """Grab site elevation from proxychimp.Guts"""
44 | victim = g.pull_section('Site Information')[0]
45 | elevation = None
46 | for ln in victim:
47 | if 'Elevation:' in ln:
48 | ln_split = ln.split(':')
49 | elevation = float(ln_split[-1])
50 | return elevation
51 |
52 |
53 | def grab_collection_year(g):
54 | """Get collection year from Data_Collection section of proxychimp.Guts"""
55 | victim = g.pull_section('Data_Collection')[0]
56 | yr = None
57 | for ln in victim:
58 | if 'Collection_Year' in ln:
59 | ln_split = ln.split(':')
60 | yr = int(ln_split[-1])
61 | return yr
62 |
63 |
64 | def grab_publication_year(g):
65 | """Get publication year from proxychimp.Guts"""
66 | victim = g.pull_section('Publication')
67 | yr = []
68 | for p in victim:
69 | for ln in p:
70 | if 'Published_Date_or_Year' in ln:
71 | ln_split = ln.split(':')
72 | yr.append(int(ln_split[-1]))
73 | return yr
74 |
75 |
76 | def grab_contribution_date(g):
77 | """Get contribution date from proxychimp.Guts"""
78 | victim = g.pull_section('Contribution_Date')[0]
79 | d = None
80 | for ln in victim:
81 | if 'Date' in ln:
82 | ln_split = ln.split(':')
83 | d_str = ln_split[-1].split('-')
84 | assert len(d_str) == 3
85 | d = datetime.date(int(d_str[0]), int(d_str[1]), int(d_str[2]))
86 | return d
87 |
88 |
89 | def find_values(x, k, sep=':', fun=None):
90 | """Find values in a list of strings containing [k][sep][values]
91 |
92 | Parameters
93 | ----------
94 | x : iterable
95 | Iterable of strings representing lines in a file.
96 | k : str
97 |         A key or pattern to search x for.
98 | sep : str
99 | A separator that divides the `k` pattern from the target value.
100 | fun : function-like, optional
101 | Function used to format target value before returning.
102 |
103 | Returns
104 | -------
105 | The target value is returned after removing excess whitespace from left
106 | and right of the value. If the key or target pattern is not found,
107 | then None is returned. If an empty string, or whitespace is the found
108 | value, then None is returned.
109 | """
110 | out = None
111 | for l in x:
112 | if k in l and sep in l:
113 | val = l.split(sep, maxsplit=1)[1:][0].lstrip().rstrip()
114 | if val != '':
115 | out = val
116 | if fun is not None and out is not None:
117 | out = fun(out)
118 | return out
119 |
120 |
121 | class Guts:
122 | """Ghetto and error-tolerant way to parse sections of NCDC proxy text files
123 | """
124 |
125 | def __init__(self, str_or_path):
126 | self._section_headings = ['# ' + s for s in HEADINGS]
127 |
128 | self.path = None
129 | self.encodingguess = None
130 | self.filestr = None
131 | try:
132 | with open(str_or_path, 'rb') as fl:
133 | flbytes = fl.read()
134 | self.path = str(str_or_path)
135 | self.encodingguess = chdetect(flbytes)
136 | # TODO(brews): Not sure we need ~both~ filestr, and lines + self.data + self.description.
137 | self.filestr = flbytes.decode(self.encodingguess['encoding'])
138 | except (FileNotFoundError, OSError):
139 | self.filestr = str(str_or_path)
140 | lines = self.filestr.splitlines()
141 |
142 | self.data = []
143 | self.description = []
144 | self.data_beginline = None
145 | self._divide_portions(lines)
146 |
147 | self.sectionindex = None
148 | self._write_sectionindex()
149 |
150 | def _divide_portions(self, lines):
151 | """Divide guts into 'description' and 'data' sections
152 | """
153 | prev_description = True
154 | dataline_flag = False
155 | for n, ln in enumerate(lines):
156 | if ln[0] == '#' or dataline_flag is False:
157 | self.description.append(ln)
158 | if DATALINE_TRIGGER in ln:
159 | dataline_flag = True
160 | log.debug('Found dataline flag on line {0}'.format(n))
161 | elif dataline_flag:
162 | self.data.append(ln)
163 | if prev_description is True:
164 | self.data_beginline = n
165 | log.debug('Data portion begins on line {0}'.format(n))
166 | prev_description = False
167 |
168 | def _write_sectionindex(self):
169 | """Populate the section index"""
170 | all_keys = []
171 | all_start = []
172 | all_stop = []
173 | prev_divider = False
174 |
175 | for idx, ln in enumerate(self.description): # Lines after 6-line title.
176 | if prev_divider is True and any([h == ln.rstrip() for h in self._section_headings]):
177 | key = ln[1:].rstrip().lstrip()
178 |
179 | # If already have start idx for other section, append end idx
180 | # for that section
181 | if len(all_start) > 0:
182 | all_stop.append(idx - 1)
183 |
184 | all_keys.append(key)
185 | all_start.append(idx)
186 | prev_divider = False
187 | if DIVIDER in ln:
188 | prev_divider = True
189 | all_stop.append(self.data_beginline)
190 |
191 | section_map = collections.defaultdict(list)
192 | for (k, start, stop) in zip(all_keys, all_start, all_stop):
193 | section_map[k].append((start, stop))
194 | section_map.default_factory = None
195 | self.sectionindex = section_map
196 |
197 | def pull_section(self, section):
198 | """Grab a list of list of line strings from the file description for each 'section'"""
199 | try:
200 | out = [self.description[slice(*idx)] for idx in self.sectionindex[section]]
201 | except KeyError:
202 | raise KeyError('section key "{}" not found'.format(section))
203 | return out
204 |
205 | def available_sections(self):
206 | """Get a list of available sections in the file"""
207 | return list(self.sectionindex.keys())
208 |
209 | def yank_data_df(self):
210 | """Get 'data' information as dataframe"""
211 | lines = [x.rstrip() for x in self.data]
212 | data_bytes = '\n'.join(lines).encode('utf-8')
213 | missingvalues = self.guess_missingvalues()
214 | df = pd.read_csv(BytesIO(data_bytes), sep='\t', na_values=missingvalues)
215 | return df
216 |
217 | def yank_chron_df(self, section_name='Chronology_Information', missingvalues=None):
218 | """Get chronology information as pandas.DataFrame"""
219 | if missingvalues is None:
220 | missingvalues = [-999, 'NaN']
221 | section = self.pull_section(section_name)[0]
222 | start_idx = section.index(CHRON_HEADER)
223 | g_chrond = section[start_idx:]
224 | g_chrond_cleaned = [x[2:].rstrip() for x in g_chrond] # Removes the '# ' and ending white space.
225 | data_bytes = '\n'.join(g_chrond_cleaned).encode('utf-8')
226 | df = pd.read_csv(BytesIO(data_bytes), sep='\t', na_values=missingvalues)
227 | return df
228 |
229 | def guess_missingvalues(self):
230 | """Guess data section missing values"""
231 | section = self.pull_section('Data')[0]
232 | out = None
233 | for ln in section:
234 | if MISSINGVALUE_LABEL in ln:
235 | out = ln.split(MISSINGVALUE_LABEL)[1]
236 | log.debug('Guessed missing value(s): {}'.format(out))
237 | return out
238 |
239 | def has_data(self):
240 | """Check if has populated data information"""
241 | try:
242 | d = self.yank_data_df()
243 | except KeyError:
244 | return False
245 | if len(d) > 0:
246 | result = True
247 | else:
248 | result = False
249 | return result
250 |
251 | def has_chron(self):
252 | """Check if has populated chronology information"""
253 | try:
254 | chron = self.yank_chron_df()
255 | except KeyError:
256 | return False
257 | if len(chron) > 0:
258 | result = True
259 | else:
260 | result = False
261 | return result
262 |
263 | def has_deltar(self):
264 | """Check if has populated delta R chronology information"""
265 | try:
266 | chron = self.yank_chron_df()
267 | except KeyError:
268 | return False
269 | if any(chron.delta_R.notnull()):
270 | result = True
271 | else:
272 | result = False
273 | return result
274 |
275 | def has_deltar_error(self):
276 | """Check if has populated delta R error chronology information"""
277 | try:
278 | chron = self.yank_chron_df()
279 | except KeyError:
280 | return False
281 | if any(chron.delta_R_1s_err.notnull()):
282 | result = True
283 | else:
284 | result = False
285 | return result
286 |
287 | def has_datacolumn(self, name):
288 | """Check if name is in data section columns"""
289 | try:
290 | data = self.yank_data_df()
291 | except KeyError:
292 | return False
293 | if name in data.columns:
294 | result = True
295 | else:
296 | result = False
297 | return result
298 |
299 | def yank_original_source_url(self):
300 | """Get string of original source URL
301 |
302 | Returns
303 | -------
304 | out : str or None
305 |
306 | Raises
307 | ------
308 | AssertionError
309 | If more than one section is found in the file data.
310 | """
311 | target_section = 'NOTE: Please cite original publication, online ' \
312 | 'resource and date accessed when using this data.'
313 | target_key = 'Original_Source_URL:'
314 |
315 | sections = self.pull_section(target_section)
316 | assert len(sections) < 2, 'More than one section found'
317 | section = sections[0]
318 |
319 | try:
320 | out = find_values(section, target_key, fun=str)
321 | except AttributeError:
322 | # target key not found in string
323 | out = None
324 |
325 | return out
326 |
327 | def yank_data_collection(self):
328 | """Get data collection information
329 |
330 | Returns
331 | -------
332 | out : dict or None
333 |
334 | Raises
335 | ------
336 | AssertionError
337 | If more than one section is found in the file data.
338 | """
339 | target_section = 'Data_Collection'
340 | # List of tuples, tuples give (dict_key, source_key, type_fun)
341 | target_keys = [('collection_name', 'Collection_Name:', str),
342 | ('first_year', 'First_Year:', float),
343 | ('last_year', 'Last_Year:', float),
344 | ('time_unit', 'Time_Unit:', str),
345 | ('core_length', 'Core_Length:', str),
346 | ('notes', 'Notes:', str),
347 | ('collection_year', 'Collection_Year:', int)]
348 | out = {k[0]: None for k in target_keys}
349 |
350 | sections = self.pull_section(target_section)
351 | assert len(sections) < 2, 'More than one section found'
352 | section = sections[0]
353 |
354 | for dict_key, source_key, type_fun in target_keys:
355 | out[dict_key] = find_values(section, source_key, fun=type_fun)
356 |
357 | return out
358 |
359 | def yank_description_and_notes(self):
360 | """Get string of description and notes
361 |
362 | Returns
363 | -------
364 | out : str or None
365 |
366 | Raises
367 | ------
368 | AssertionError
369 | If more than one section is found in the file data.
370 | """
371 | target_section = 'Description and Notes'
372 | target_key = 'Description:'
373 |
374 | sections = self.pull_section(target_section)
375 | assert len(sections) < 2, 'More than one section found'
376 | section = sections[0]
377 |
378 | try:
379 | out = find_values(section, target_key, fun=str)
380 | except AttributeError:
381 | # target key not found in string
382 | out = None
383 |
384 | return out
385 |
386 | def yank_publication(self):
387 | """Get list of publication information
388 |
389 | Returns
390 | -------
391 | out : list[dict]
392 | """
393 | target_section = 'Publication'
394 | # List of tuples, tuples give (dict_key, source_key, type_fun)
395 | target_keys = [('authors', '# Authors:', str),
396 | ('published_date_or_year', '# Published_Date_or_Year:', int),
397 | ('published_title', '# Published_Title:', str),
398 | ('journal_name', '# Journal_Name:', str),
399 | ('volume', '# Volume:', str),
400 | ('edition', '# Edition:', str),
401 | ('issue', '# Issue:', str),
402 | ('pages', '# Pages:', str),
403 | ('report_number', '# Report Number:', str),
404 | ('doi', '# DOI:', str),
405 | ('online_resource', '# Online_Resource:', str),
406 | ('full_citation', '# Full_Citation:', str),
407 | ('abstract', '# Abstract:', str)]
408 | dict_template = {k[0]: None for k in target_keys}
409 |
410 | out = []
411 | sections = self.pull_section(target_section)
412 |
413 | for section in sections:
414 |
415 | this_pub = dict_template.copy()
416 | for dict_key, source_key, type_fun in target_keys:
417 | this_pub[dict_key] = find_values(section, source_key, fun=type_fun)
418 | out.append(this_pub)
419 |
420 | return out
421 |
422 | def yank_variables(self):
423 | """Get variable section information
424 |
425 | Returns
426 | -------
427 | out : dict
428 |
429 | Raises
430 | ------
431 | AssertionError
432 | If more than one section is found in the file data.
433 | """
434 | target_section = 'Variables'
435 | sections = self.pull_section(target_section)
436 | assert len(sections) < 2, 'More than one section found'
437 | section = sections[0]
438 |
439 | out = {}
440 | for ln in section:
441 |
442 | # Skip line if not data variables line.
443 | if ln[:3] != '## ':
444 | continue
445 |
446 | var_name, components_group = ln[3:].split('\t')
447 | out[var_name] = tuple(components_group.split(','))
448 |
449 | return out
450 |
451 | def yank_site_information(self):
452 | """Get site information
453 |
454 | Returns
455 | -------
456 | out : dict
457 |
458 | Raises
459 | ------
460 | AssertionError
461 | If more than one section is found in the file data.
462 | """
463 | target_section = 'Site Information'
464 | # List of tuples, tuples give (dict_key, source_key, type_fun)
465 | target_keys = [('site_name', '# Site_Name:', str),
466 | ('location', '# Location:', str),
467 | ('country', '# Country:', str),
468 | ('northernmost_latitude', '# Northernmost_Latitude:', float),
469 | ('southernmost_latitude', '# Southernmost_Latitude:', float),
470 | ('easternmost_longitude', '# Easternmost_Longitude:', float),
471 | ('westernmost_longitude', '# Westernmost_Longitude:', float),
472 | ('elevation', '# Elevation:', float)]
473 | out = {k[0]: None for k in target_keys}
474 |
475 | sections = self.pull_section(target_section)
476 | assert len(sections) < 2, 'More than one section found'
477 | section = sections[0]
478 |
479 | for dict_key, source_key, type_fun in target_keys:
480 | out[dict_key] = find_values(section, source_key, fun=type_fun)
481 |
482 | return out
483 |
484 | def to_ncdcrecord(self):
485 | """to NcdcRecord instance"""
486 | chron = records.ChronologyInformation(df=self.yank_chron_df())
487 | d = records.Data(df=self.yank_data_df())
488 | d_collection = records.DataCollection(**self.yank_data_collection())
489 | description = self.yank_description_and_notes()
490 | orig_url = self.yank_original_source_url()
491 | pubs = [records.Publication(**p) for p in self.yank_publication()]
492 | site_info = records.SiteInformation(**self.yank_site_information())
493 |
494 | varis = {}
495 | file_vars = self.yank_variables()
496 | for k, v in file_vars.items():
497 | varis[k] = records.VariableInfo(*v)
498 |
499 | out = records.NcdcRecord(chronology_information=chron,
500 | data=d,
501 | data_collection=d_collection,
502 | description=description,
503 | original_source_url=orig_url,
504 | publication=pubs,
505 | site_information=site_info,
506 | variables=varis)
507 | return out
508 |
--------------------------------------------------------------------------------
/proxysiphon/lmr_hdf5/__init__.py:
--------------------------------------------------------------------------------
1 | """Read proxies netCDF5 file and write proxies HDF5 file for LMR Data Assimilation workflow
2 | """
3 |
4 |
5 | __all__ = ['nc2lmrh5', 'nc2lmrdf']
6 |
7 |
8 | import os
9 | import logging
10 | import numpy as np
11 | import pandas as pd
12 | import xarray as xr
13 | import netCDF4
14 | import shapely.affinity
15 | import shapely.geometry
16 |
17 | import erebusfall as ef
18 |
19 |
20 | log = logging.getLogger(__name__)
21 |
22 |
23 | class DistanceThresholdError(Exception):
24 | """Raised when the distance between two points is further than a threshold
25 |
26 | Parameters
27 | ----------
28 | target_distance : int or float
29 | The distance between two target points (km).
30 | distance_threshold : int or float
31 | The distance threshold.
32 |
33 | """
34 |
35 | def __init__(self, target_distance, distance_threshold):
36 | self.target_distance = target_distance
37 | self.distance_threshold = distance_threshold
38 |
39 |
40 | def chord_distance(latlon1, latlon2):
41 | """Chordal distance between two sequences of (lat, lon) points
42 |
43 | Parameters
44 | ----------
45 | latlon1 : sequence of tuples
46 | (latitude, longitude) for one set of points.
47 | latlon2 : sequence of tuples
48 | A sequence of (latitude, longitude) for another set of points.
49 |
50 | Returns
51 | -------
52 | dists : 2d array
53 | An mxn array of Earth chordal distances [1]_ (km) between points in
54 | latlon1 and latlon2.
55 |
56 | References
57 | ----------
58 | .. [1] https://en.wikipedia.org/wiki/Chord_(geometry)
59 |
60 | """
61 | earth_radius = 6378.137 # in km
62 |
63 | latlon1 = np.atleast_2d(latlon1)
64 | latlon2 = np.atleast_2d(latlon2)
65 |
66 | n = latlon1.shape[0]
67 | m = latlon2.shape[0]
68 |
69 | paired = np.hstack((np.kron(latlon1, np.ones((m, 1))),
70 | np.kron(np.ones((n, 1)), latlon2)))
71 |
72 | latdif = np.deg2rad(paired[:, 0] - paired[:, 2])
73 | londif = np.deg2rad(paired[:, 1] - paired[:, 3])
74 |
75 | a = np.sin(latdif / 2) ** 2
76 | b = np.cos(np.deg2rad(paired[:, 0]))
77 | c = np.cos(np.deg2rad(paired[:, 2]))
78 | d = np.sin(np.abs(londif) / 2) ** 2
79 |
80 | half_angles = np.arcsin(np.sqrt(a + b * c * d))
81 |
82 | dists = 2 * earth_radius * np.sin(half_angles)
83 |
84 | return dists.reshape(m, n)
85 |
86 |
87 | def get_nearest(latlon, dain, depth=None, lat_coord='Latitude', lon_coord='Longitude',
88 | depth_coord='depth', distance_threshold=1500):
89 | """Get nearest non-NaN to latlon from xarray.DataArray obj
90 |
91 | Finds the nearest not NaN to latlon, and optionally depth. It searches for a
92 | nearest depth first, if given, and then searches for nearest latlon. Note
93 | that this does not work with irregular grids, such as rotated polar, etc.
94 |
95 | Parameters
96 | ----------
97 | latlon : tuple
98 | Target latitude and longitude. Must be within -90 to 90 and -180 to 180.
99 | dain : xarray.DataArray
100 | Field with regular latlon coordinates.
101 | depth : float or int, optional
102 | Target depth to get nearest.
103 | lat_coord : str, optional
104 | Name of the latitude coordinate in ``da``.
105 | lon_coord : str, optional
106 | Name of the longitude coordinate in ``da``.
107 | depth_coord : str, optional
108 | Name of the depth coordinate in ``da``.
109 |     distance_threshold : float or int, optional
110 |         If the nearest distance (km) is larger than this, a DistanceThresholdError is raised.
111 |
112 | Returns
113 | -------
114 | nearest : xarray.DataArray
115 | Nearest points.
116 | nearest_distance : float
117 | Chordal distance (km) from target to matched gridpoint.
118 |
119 | Raises
120 | ------
121 | DistanceThresholdError
122 | """
123 | da = dain.copy()
124 |
125 | assert latlon[0] <= 90 and latlon[0] >= -90
126 | assert latlon[1] <= 180 and latlon[1] >= -180
127 |
128 | assert lat_coord in da.coords
129 | assert lon_coord in da.coords
130 |
131 | assert (da[lat_coord].ndim == 1) and (da[lon_coord].ndim == 1)
132 |
133 | # First, find the nearest depth index, if given.
134 | if depth is not None:
135 | assert depth_coord in da.coords
136 |
137 |         # Sort by depth and select the depth level nearest the target.
138 | da = da.sortby('depth')
139 | da = da.sel(**{depth_coord: depth}, method='nearest')
140 |
141 | # Now search for nearest latlon point.
142 | da_stack = da.stack(yx=[lat_coord, lon_coord]).dropna('yx')
143 | da_latlon_stack = np.vstack((da_stack[lat_coord], da_stack[lon_coord])).T
144 |
145 | # Any values above 180 become negative -- needed for 0-360 longitudes.
146 | highlon_msk = da_latlon_stack > 180
147 | da_latlon_stack[highlon_msk] = da_latlon_stack[highlon_msk] - 360
148 |
149 | distance = chord_distance(np.array([latlon]), da_latlon_stack)
150 | nearest = da_stack.isel(yx=np.argmin(distance))
151 | nearest_distance = np.min(distance)
152 |
153 | if nearest_distance > distance_threshold:
154 | raise DistanceThresholdError(nearest_distance, distance_threshold)
155 |
156 | return nearest, nearest_distance
157 |
158 |
159 | def get_netcdf_resource(fl, **kwargs):
160 | """Read NetCDF files as package resource, output for xarray.Dataset
161 |
162 | Parameters
163 | ----------
164 | fl : str
165 | NetCDF resource name.
166 | **kwargs :
167 | Passed to ``xarray.open_dataset``.
168 |
169 | Returns
170 | -------
171 | data : xarray.Dataset
172 | """
173 | here = os.path.abspath(os.path.dirname(__file__))
174 | flpath = os.path.join(here, fl)
175 | data = xr.open_dataset(flpath, **kwargs)
176 | return data
177 |
178 |
179 | def poly_dateline_wrap(p):
180 | """Split dateline crossing polygon into multipoly so wraps.
181 |
182 | Parameters
183 | ----------
184 | p : shapely.geometry.Polygon
185 |
186 | Returns
187 | -------
188 | out : shapely.geometry.Polygon or shapely.geometry.MultiPolygon
189 | """
190 | dateline = shapely.geometry.LineString([(180, 90), (180, -90)])
191 |
192 | if not dateline.crosses(p):
193 | return p
194 |
195 | right_ls = shapely.geometry.LineString([(360, 90), (360, -90)])
196 | left_ls = shapely.geometry.LineString([(-180, 90), (-180, -90)])
197 | left_clip = shapely.geometry.MultiLineString([left_ls, dateline]).convex_hull
198 | right_clip = shapely.geometry.MultiLineString([dateline, right_ls]).convex_hull
199 |
200 | northpacific_left = p.intersection(left_clip)
201 | dl_right_off = p.intersection(right_clip)
202 |
203 | # Shift right of dateline back so lon (x) is within [-180, 180]
204 | northpacific_lr = shapely.affinity.translate(shapely.geometry.LinearRing(dl_right_off.exterior.coords),
205 | xoff=-360)
206 | northpacific_right = shapely.geometry.Polygon(northpacific_lr)
207 | out = shapely.geometry.MultiPolygon([northpacific_left, northpacific_right])
208 | return out
209 |
210 |
211 | def find_seasonality(sitegrp, proxy_grp):
212 | """Return string list of ints giving site variable seasonality.
213 |
214 | Parameters
215 | ----------
216 | sitegrp : netCDF4.Group
217 | Proxy site netcdf group.
218 | proxy_grp: netCDF4.Group
219 | Proxy variable netcdf group
220 |
221 | Returns
222 | -------
223 | out : str
224 | """
225 | out = list(range(1, 13))
226 |
227 | proxy_type = str(proxy_grp.long_name)
228 | if proxy_type == "UK'37":
229 | log.debug('finding seasonality for UK37 proxy')
230 | # Jess Tierney polygons from BAYSPLINE
231 | mediterranean = shapely.geometry.Polygon([(-5.5, 36.25),
232 | (3, 47.5),
233 | (45, 47.5),
234 | (45, 30),
235 | (-5.5, 30)])
236 | northatlantic = shapely.geometry.Polygon([(-55, 48),
237 | (-50, 70),
238 | (20, 70),
239 | (10, 62.5),
240 | (-4.5, 58.2),
241 | (-4.5, 48)])
242 | northpacific_raw = shapely.geometry.Polygon([(135, 45),
243 | (135, 70),
244 | (250, 70),
245 | (232, 52.5),
246 | (180, 45)])
247 | northpacific = poly_dateline_wrap(northpacific_raw)
248 |
249 | latlon = (float(sitegrp.latitude), float(sitegrp.longitude))
250 | assert -90 < latlon[0] < 90, 'site latitude must be -90 < lat < 90'
251 | assert -180 < latlon[1] < 180, 'site longitude must be -180 < lon < 180'
252 |
253 | site_location = shapely.geometry.Point(latlon[::-1])
254 | if mediterranean.contains(site_location):
255 | out = [1, 2, 3, 4, 5, 11, 12]
256 | if northatlantic.contains(site_location):
257 | out = [8, 9, 10]
258 | if northpacific.contains(site_location):
259 | out = [6, 7, 8]
260 |
261 | elif proxy_type == 'd18O':
262 | foram_type = str(proxy_grp.foraminifera_type)
263 | latlon = (float(sitegrp.latitude), float(sitegrp.longitude))
264 | log.debug('finding seasonality for d18O proxy ({}) @ {}'.format(foram_type, latlon))
265 | foram_seas = get_netcdf_resource('foram_seasons.nc', group='d18oc')
266 | foram_seas = foram_seas.where(foram_seas != '') # Replace '' with nan.
267 | # Need to normalize full species/subspecies names to the variable names used
268 | # in the FORAM_SEASONS_NETCDF group.
269 | foraminifera_map = {'Globigerina bulloides': 'G. bulloides',
270 | 'Neogloboquadrina pachyderma sinistral': 'N. pachyderma',
271 | 'Neogloboquadrina incompta': 'N. incompta',
272 | 'Globigerinoides ruber white': 'G. ruber',
273 | 'Globigerinoides ruber pink': 'G. ruber',
274 | 'Trilobatus sacculifer': 'T. sacculifer',
275 | }
276 | foram_spp_str = foraminifera_map.get(foram_type)
277 | if foram_spp_str is not None:
278 | nearest_seas, _ = get_nearest(latlon, foram_seas[foram_spp_str],
279 | lat_coord='lat', lon_coord='lon')
280 | out = nearest_seas.item()
281 |
282 | else:
283 | pass # foram type not in our species mapping so use annual.
284 |
285 | elif proxy_type == 'Mg/Ca':
286 | foram_type = str(proxy_grp.foraminifera_type)
287 | latlon = (float(sitegrp.latitude), float(sitegrp.longitude))
288 | log.debug('finding seasonality for Mg/Ca proxy ({}) @ {}'.format(foram_type, latlon))
289 | foram_seas = get_netcdf_resource('foram_seasons.nc', group='mgca')
290 | foram_seas = foram_seas.where(foram_seas != '') # Replace '' with nan.
291 | # Need to normalize full species/subspecies names to the variable names used
292 | # in the FORAM_SEASONS_NETCDF group.
293 | foraminifera_map = {'Globigerina bulloides': 'G. bulloides',
294 | 'Neogloboquadrina pachyderma sinistral': 'N. pachyderma sinistral',
295 | 'Neogloboquadrina incompta': 'N. pachyderma dextral',
296 | 'Globigerinoides ruber white': 'G. ruber white',
297 | 'Globigerinoides ruber pink': 'G. ruber pink',
298 | 'Trilobatus sacculifer': 'G. sacculifer',
299 | }
300 | foram_spp_str = foraminifera_map.get(foram_type)
301 | if foram_spp_str is not None:
302 | nearest_seas, _ = get_nearest(latlon, foram_seas[foram_spp_str],
303 | lat_coord='lat', lon_coord='lon')
304 | out = nearest_seas.item()
305 | else:
306 | pass # foram type not in our species mapping so use annual.
307 | log.debug('season months are: {}'.format(out))
308 | return str(out)
309 |
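# Minimal usage sketch (illustrative, not executed on import; the file path,
# group name, and variable name below are hypothetical):
#
#     nc = netCDF4.Dataset('proxies.nc', 'r')
#     site = nc.groups['some_site']
#     months = find_seasonality(site, site['data'].variables['d18o_ruber'])
#     # e.g. months == '[6, 7, 8]' for a summer-weighted species, or the full
#     # January-December list when the default annual seasonality is kept.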
310 |
311 | def icevol_correction(proxies, metadata):
312 | """Apply ice-volume correction to d18O proxies in LMR proxy dataframes.
313 |
314 | Returns modified copies of the original data.
315 | """
316 | p = proxies.copy()
317 | m = metadata.copy()
318 |
319 | matches = ['d18o' in c.lower() for c in p.columns]
320 |
321 | if not any(matches):
322 | return p, m
323 |
324 | m.set_index('Proxy ID', inplace=True)
325 |
326 | matches_idx = np.where(matches)
327 | matched_columns = p.columns[matches_idx]
328 |
329 | for c in matched_columns:
330 | proxy_raw = proxies[c].values
331 | age_raw = proxies[c].index.values
332 | age_yr = 1950 - age_raw # CE/BC to Yr BP
333 | proxy_adjusted = ef.icevol_correction(age_yr, proxy_raw, proxytype='d18o',
334 | timeunit='ya')
335 | p.loc[:, c] = proxy_adjusted
336 |
337 | m.loc[c, 'Oldest (C.E.)'] = p[c].dropna().index.min()
338 | m.loc[c, 'Youngest (C.E.)'] = p[c].dropna().index.max()
339 |
340 | m.reset_index(inplace=True)
341 | return p, m
342 |
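# Sketch of the calendar conversion used above (illustrative, not executed on
# import): LMR dataframes are indexed in years C.E., while erebusfall expects
# ages, so the index is converted with 1950 - year:
#
#     year_ce = np.array([1850.0, 0.0, -50.0])
#     1950 - year_ce  # -> array([ 100., 1950., 2000.]) years BP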
343 |
344 | def lmr_da_dfs(sitegrp=None, agemodel_iter=None, find_modern_seasonality=True):
345 | """Return proxy data and metadata pandas df needed for LMR DA proxy input
346 |
347 | If no args are passed, return empty proxy and metadata (template) pandas
348 | dataframes.
349 |
350 | Parameters
351 | ----------
352 | sitegrp : netCDF4.Group or None, optional
353 | Proxy site netcdf group.
354 | agemodel_iter : int or None, optional
355 | Age-depth model iteration to use, if available. This is the index of the
356 | 'agemodel_ensemble' column to use for the output data. If ``None`` (default),
357 | uses median age.
358 | find_modern_seasonality : bool
359 | Do you want to estimate sample seasonality using the modern record?
360 | Sets annual seasonality if False. Default is True.
361 |
362 | Returns
363 | -------
364 | data : pandas.DataFrame
365 | meta : pandas.DataFrame
366 | """
367 | data_template = pd.DataFrame()
368 | meta_template = pd.DataFrame({'Proxy ID': [], 'Site': [], 'Lat (N)': [], 'Lon (E)': [],
369 | 'Archive type': [], 'Proxy measurement': [],
370 | 'Resolution (yr)': [], 'Reference': [], 'Databases': [],
371 | 'Seasonality': [], 'Elev': [],
372 | 'Oldest (C.E.)': [], 'Youngest (C.E.)': []})
373 | meta_template = meta_template.astype(dtype={'Proxy ID': 'object', 'Site': 'object',
374 | 'Lat (N)': 'float64', 'Lon (E)': 'float64',
375 | 'Archive type': 'object',
376 | 'Proxy measurement': 'object',
377 | 'Resolution (yr)': 'float64',
378 | 'Reference': 'object', 'Databases': 'object',
379 | 'Seasonality': 'object', 'Elev': 'float64',
380 | 'Oldest (C.E.)': 'float64',
381 | 'Youngest (C.E.)': 'float64'})
382 |
383 | if sitegrp is None:
384 | return data_template.copy(), meta_template.copy()
385 |
386 | log.info('extracting LMR data from {}'.format(str(sitegrp.site_name)))
387 |
388 | variables_to_skip = ['depth', 'age_original', 'age_median', 'age_ensemble']
389 | proxy_variables = [(k, v) for k, v in sitegrp['data'].variables.items() if k not in variables_to_skip]
390 |
391 | latitude = float(sitegrp.latitude)
392 | longitude = float(sitegrp.longitude)
393 | elevation = float(sitegrp.elevation)
394 | data_df = data_template.copy()
395 | meta_df = meta_template.copy()
396 |
397 | for k, v in proxy_variables:
398 | log.debug('processing variable {}'.format(str(k)))
399 |
400 | # Convert years BP to years C.E. and find min/max.
401 | if agemodel_iter is None:
402 | try:
403 | age_yrs_ce = 1950 - sitegrp['data'].variables['age_median'][:]
404 | except KeyError:
405 | age_yrs_ce = 1950 - sitegrp['data'].variables['age_original'][:]
406 | else:
407 | idx = int(agemodel_iter)
408 | age_yrs_ce = 1950 - sitegrp['data'].variables['age_ensemble'][:, idx]
409 |
410 | # Make depth cutoff mask from depth cutoffs, if available.
411 | cut_deep = np.inf
412 | if hasattr(sitegrp['chronology'], 'cut_deep'):
413 | cut_deep = float(sitegrp['chronology'].cut_deep)
414 | cut_shallow = -np.inf
415 | if hasattr(sitegrp['chronology'], 'cut_shallow'):
416 | cut_shallow = float(sitegrp['chronology'].cut_shallow)
417 | depth = sitegrp['data'].variables['depth'][:]
418 | cutoff_msk = (depth >= cut_shallow) & (depth <= cut_deep)
419 |
420 | notnan_and_notcut = ~np.isnan(v[:].filled(np.nan)) & cutoff_msk
421 | youngest_ce = np.max(age_yrs_ce[notnan_and_notcut])
422 | oldest_ce = np.min(age_yrs_ce[notnan_and_notcut])
423 |
424 | # Put together proxy ID and proxy measurement strings.
425 | siteid = str(sitegrp.site_name).strip().lower()
426 | pmeasurement = str(k).strip().lower()
427 | # Append cleaning protocol info if available for Mg/Ca
428 | try:
429 | if v.long_name == 'Mg/Ca':
430 | log.debug('Mg/Ca variable, attempting to find cleaning protocol')
431 | # Should throw error if have Mg/Ca record w/o cleaning protocol info.
432 | cleaning_protocol = str(v.mgca_cleaning_protocol)
433 | cleaning_str = None
434 | if 'reductive' in cleaning_protocol.lower():
435 | cleaning_str = ':reductive'
436 | elif 'barker' in cleaning_protocol.lower():
437 | cleaning_str = ':barker'
438 | pmeasurement += cleaning_str
439 | except AttributeError: # caught if v lacks long_name or mgca_cleaning_protocol attributes.
440 | pass
441 | proxyid = siteid + ':' + pmeasurement
442 |
443 | this_meta = meta_template.copy()
444 | this_meta['Proxy ID'] = [proxyid]
445 | this_meta['Site'] = [siteid]
446 | this_meta['Lat (N)'] = latitude
447 | this_meta['Lon (E)'] = longitude
448 | this_meta['Archive type'] = ['Marine sediments']
449 | this_meta['Proxy measurement'] = [pmeasurement]
450 | this_meta['Resolution (yr)'] = [(youngest_ce - oldest_ce) / len(age_yrs_ce[notnan_and_notcut])]
451 | this_meta['Reference'] = [str(None)]
452 | this_meta['Databases'] = ['[DTDA]']
453 | if find_modern_seasonality:
454 | this_meta['Seasonality'] = find_seasonality(sitegrp, v)
455 | else:
456 | this_meta['Seasonality'] = str(list(range(1, 13)))
457 | this_meta['Elev'] = elevation
458 | this_meta['Oldest (C.E.)'] = oldest_ce
459 | this_meta['Youngest (C.E.)'] = youngest_ce
460 | meta_df = meta_df.append(this_meta, ignore_index=True)
461 |
462 | d = (pd.DataFrame({'Year C.E.': age_yrs_ce[cutoff_msk],
463 | proxyid: v[:].filled(np.nan)[cutoff_msk]})
464 | .set_index('Year C.E.')
465 | .dropna(how='any'))
466 | data_df = data_df.join(d, how='outer')
467 |
468 | data_df = data_df.sort_index(ascending=False)
469 |
470 | return data_df, meta_df
471 |
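# Minimal usage sketch (illustrative, not executed on import). With no
# arguments the function returns the empty template dataframes, which
# ``_lmr_df_from_nc_sites`` below uses as accumulators:
#
#     data_template, meta_template = lmr_da_dfs()
#     # list(meta_template.columns)[:4] == ['Proxy ID', 'Site', 'Lat (N)', 'Lon (E)']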
472 |
473 | def _lmr_df_from_nc_sites(fl, agemodel_iter=None, find_modern_seasonality=True):
474 | """Create LMR data and metadata dataframes from opened netCDF file group"""
475 | all_data_df, all_meta_df = lmr_da_dfs()
476 |
477 | for site_grp in fl.groups.values():
478 | try:
479 | site_data_df, site_meta_df = lmr_da_dfs(site_grp,
480 | agemodel_iter=agemodel_iter,
481 | find_modern_seasonality=find_modern_seasonality)
482 | except TypeError as e:
483 | errormsg = '{} raised - skipping site - {} - Mg/Ca variable likely missing cleaning protocol info'
484 | log.error(errormsg.format(e, site_grp.site_name))
485 | continue
486 |
487 | all_meta_df = all_meta_df.append(site_meta_df, ignore_index=True)
488 | all_data_df = all_data_df.join(site_data_df, how='outer')
489 |
490 | return all_meta_df, all_data_df
491 |
492 |
493 | def nc2lmrdf(path_or_buffer, agemodel_iter=None,
494 | icevol_cor=True, find_modern_seasonality=True):
495 | """Read proxy netCDF and output to LMR DA-format dataframes.
496 |
497 | Parameters
498 | ----------
499 | path_or_buffer : str or netCDF4.Dataset
500 | Input proxy netCDF file path or opened buffer.
501 | agemodel_iter : int or None, optional
502 | Optional index of age-model iteration to use in output -- if multiple
503 | age-model iterations are available.
504 | icevol_cor : bool, optional
505 | Do you want to apply ice-volume correction to the d18O foraminiferal
506 | records? This is done with the `erebusfall` package. `True` by default.
507 | find_modern_seasonality : bool, optional
508 | Do you want to estimate sample seasonality using the modern record?
509 | Sets annual seasonality if False. Default is True.
510 |
511 | Returns
512 | -------
513 | all_data_df : pandas.DataFrame
514 | LMR proxy data.
515 | all_meta_df : pandas.DataFrame
516 | LMR proxy metadata.
517 |
518 | Also see
519 | --------
520 | `proxysiphon.nc2lmrh5`
521 |
522 | """
523 | # Open proxy site netCDF file and collect needed data or read directly from
524 | # obj if it's already open.
525 | if isinstance(path_or_buffer, str):
526 | with netCDF4.Dataset(filename=path_or_buffer, mode='r') as fl:
527 | all_meta_df, all_data_df = _lmr_df_from_nc_sites(fl,
528 | agemodel_iter=agemodel_iter,
529 | find_modern_seasonality=find_modern_seasonality)
530 | else:
531 | all_meta_df, all_data_df = _lmr_df_from_nc_sites(path_or_buffer,
532 | agemodel_iter=agemodel_iter,
533 | find_modern_seasonality=find_modern_seasonality)
534 |
535 | all_data_df = all_data_df.sort_index(ascending=False)
536 |
537 | if icevol_cor:
538 | # Ice-volume correction to d18O foram proxies:
539 | all_data_df, all_meta_df = icevol_correction(all_data_df, all_meta_df)
540 |
541 | return all_data_df, all_meta_df
542 |
543 |
544 | def nc2lmrh5(path_or_buffer, h5file, agemodel_iter=None, icevol_cor=True,
545 | find_modern_seasonality=True):
546 | """Read proxy netCDF and output to LMR DA-format HDF5 file.
547 |
548 | Parameters
549 | ----------
550 | path_or_buffer : str or netCDF4.Dataset
551 | Input proxy netCDF file path or opened buffer.
552 | h5file : str
553 | Path to output HDF5 file.
554 | agemodel_iter : int or None, optional
555 | Optional index of age-model iteration to use in output -- if multiple
556 | age-model iterations are available.
557 | icevol_cor : bool, optional
558 | Do you want to apply ice-volume correction to the d18O foraminiferal
559 | records? This is done with the `erebusfall` package. `True` by default.
560 | find_modern_seasonality : bool, optional
561 | Do you want to estimate sample seasonality using the modern record?
562 | Sets annual seasonality if False. Default is True.
563 |
564 | Returns
565 | -------
566 | Nothing is returned, output is written to `h5file`.
567 |
568 | Also see
569 | --------
570 | `proxysiphon.nc2lmrdf`
571 | """
572 | all_data_df, all_meta_df = nc2lmrdf(path_or_buffer, agemodel_iter=agemodel_iter,
573 | icevol_cor=icevol_cor,
574 | find_modern_seasonality=find_modern_seasonality)
575 |
576 | # Write to H5 file.
577 | log.debug('writing to HDF5 file: {}'.format(h5file))
578 | all_meta_df.to_hdf(h5file, key='meta', mode='w', format='table',
579 | complevel=9, complib='blosc')
580 | all_data_df.to_hdf(h5file, key='proxy', mode='r+', format='table',
581 | complevel=9, complib='blosc')
582 |
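# End-to-end usage sketch (illustrative, not executed on import; the file
# paths are hypothetical):
#
#     data_df, meta_df = nc2lmrdf('sites.nc', icevol_cor=True)
#     nc2lmrh5('sites.nc', 'proxies_lmr.h5')  # writes 'meta' and 'proxy' keys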
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year> <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/proxysiphon/lgm.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import logging
3 | from copy import deepcopy
4 |
5 | import netCDF4
6 | import numpy as np
7 | import unidecode
8 |
9 | from proxysiphon.agemodel import get_deltar_online, fit_agedepthmodel, date_proxy
10 |
11 | log = logging.getLogger(__name__)
12 |
13 |
14 | def _normalize_to_ascii_array(a, dtype='S50'):
15 | """Normalize sequence of UTF-8 string to np.Array of ASCII"""
16 | normed = np.array([unidecode.unidecode(str(x)) for x in a], dtype=dtype)
17 | return normed
18 |
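# Minimal usage sketch (illustrative, not executed on import): accented
# characters are transliterated before being written as fixed-width netCDF
# byte strings:
#
#     _normalize_to_ascii_array(['Méditerranée', 'São Paulo'])
#     # -> array([b'Mediterranee', b'Sao Paulo'], dtype='|S50')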
19 |
20 | class RedateMixin:
21 | """Mixins to redate LGM proxy records"""
22 |
23 | def swapin_custom_deltar(self, d_r=None, d_std=None):
24 | """Swap-in custom proxy ΔR mean and standard-deviation values.
25 |
26 | ``d_r`` is put into self.chronology_information.df column "delta_R" and
27 | ``d_std`` is put into column "delta_R_1s_err". Any existing values in
28 | these columns are first moved to a new column "*_original", but only if
29 | the "*_original" columns don't already exist. If "*_original" columns
30 | already exist, then current values in "delta_R" or "delta_R_1s_err" are
31 | discarded in the returned proxy record.
32 |
33 | Parameters
34 | ----------
35 | d_r : ndarray, scalar or None
36 | Carbon reservoir (ΔR) mean value to swap-in. Ignored if None.
37 | d_std : ndarray, scalar or None
38 | ΔR standard-deviation value to swap-in. Ignored
39 | if None.
40 |
41 | Returns
42 | -------
43 | out : Modified copy of proxy record.
44 | """
45 | out = self.copy()
46 |
47 | if d_r is None and d_std is None:
48 | return out
49 |
50 | # If we're plugging in a new value, preserve the original by moving it to a new
51 | # *_original column.
52 | if d_r is not None:
53 | if 'delta_R_original' not in out.chronology_information.df.columns:
54 | out.chronology_information.df['delta_R_original'] = out.chronology_information.df['delta_R']
55 | out.chronology_information.df['delta_R'] = d_r
56 | out.chronology_information.df['delta_R'].replace(to_replace='None', value=np.nan, inplace=True)
57 |
58 | if d_std is not None:
59 | if 'delta_R_1s_err_original' not in out.chronology_information.df.columns:
60 | out.chronology_information.df['delta_R_1s_err_original'] = out.chronology_information.df[
61 | 'delta_R_1s_err']
62 | out.chronology_information.df['delta_R_1s_err'] = d_std
63 | out.chronology_information.df['delta_R_1s_err'].replace(to_replace='None', value=np.nan, inplace=True)
64 |
65 | return out
66 |
67 | def copy(self):
68 | """Return deep copy of self"""
69 | return deepcopy(self)
70 |
71 | def redate(self, **kwargs):
72 | """Redate proxy data with (snake)bacon
73 |
74 | Parameters
75 | ----------
76 | nsims : int
77 | Number of age-model simulations to retain.
78 | kwargs :
79 | Passed on to ``self._fit_agemodel``.
80 |
81 | Returns
82 | -------
83 | Updated copy of self. The ``snakebacon.AgeDepthModel`` used to redate
84 | the record is monkeypatched into
85 | ``self.chronology_information.bacon_agemodel`` in this returned copy.
86 | """
87 | rec = self.copy()
88 |
89 | agemodel, mcmc_kwargs = rec._fit_agemodel(**kwargs)
90 | a_median, a_ensemble = rec._date_sampledepths(agemodel, kwargs.pop('nsims', None))
91 | rec.chronology_information.bacon_agemodel = agemodel
92 | rec.data.age_median = a_median
93 | rec.data.age_ensemble = a_ensemble
94 | return rec
95 |
96 | def _fit_agemodel(self, **kwargs):
97 | """Fit snakebacon model to NcdcRecord
98 | """
99 | chron_df = self.chronology_information.df.copy()
100 | data_df = self.data.df.copy()
101 | myr = 1950 - self.recent_date()
102 | deltar = self.chronology_information.df['delta_R'].values
103 | deltar_error = self.chronology_information.df['delta_R_1s_err'].values
104 |
105 | agemodel, _, mcmc_kwargs = fit_agedepthmodel(chron_df, data_df, deltar, deltar_error,
106 | minyr=myr, mcmc_kws=kwargs)
107 | return agemodel, mcmc_kwargs
108 |
109 | def _date_sampledepths(self, agemodel, nsims=1000):
110 | """Date self proxy sample depths given a snakebacon.AgeModel
111 |
112 | Parameters
113 | ----------
114 | agemodel : snakebacon.AgeModel
115 | Age model to date from.
116 | nsims : scalar or None, optional
117 | Number of draws to include in returned ensemble. ``None`` returns
118 | 1000 draws.
119 |
120 | Returns
121 | -------
122 | p_median : pandas.DataFrame
123 | Median age per depth.
124 |         p_ensemble : pandas.DataFrame
125 |             Ensemble of ages per depth.
126 | """
127 | if nsims is None:
128 | nsims = 1000
129 |
130 | data_df = self.data.df.copy()
131 | p_median, p_ensemble = date_proxy(agemodel, data_df, nsims)
132 |
133 | p_median = (p_median[['depth', 'age']].rename(columns={'age': 'age_median'})
134 | .set_index('depth'))
135 | p_ensemble = (p_ensemble[['depth', 'age', 'mciter']].rename(columns={'mciter': 'draw'})
136 | .pivot(index='depth', columns='draw', values='age'))
137 | return p_median, p_ensemble
138 |
139 | def recent_date(self):
140 | """Get the most recent date from self metadata.
141 |
142 |         First try "Collection_Year" in the "Data_Collection" section. If it
143 |         can't be found, fall back to the earliest year in the publications list.
144 | """
145 | out = None
146 | col_year = self.data_collection.collection_year
147 | if col_year is not None:
148 | out = col_year
149 | else:
150 | # Return earliest year in publications.
151 | out = min([int(p.published_date_or_year) for p in self.publication if p.published_date_or_year is not None])
152 | return out
153 |
154 | def update_deltar(self):
155 |         """Set delta_R and delta_R_1s_err in self.chronology_information.df, returning an updated copy
156 |
157 |         If these variables are changed/updated, the originals are moved to *_original columns.
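
        Examples
        --------
        A rough sketch; ``get_deltar_online`` looks up ΔR values for the site
        location (via ``carbonferret``), so this may need network access::

            >>> updated = rec.update_deltar()  # doctest: +SKIP
            >>> updated.chronology_information.df[['delta_R', 'delta_R_1s_err']].head()  # doctest: +SKIP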
158 | """
159 | x = self.copy()
160 | latlon = (float(x.site_information.northernmost_latitude),
161 | float(x.site_information.easternmost_longitude))
162 |
163 | chron_df = x.chronology_information.df.copy()
164 |
165 | delta_r_used = None
166 | delta_r_1s_err_used = None
167 |
168 |         n_unique_deltar = len(chron_df['delta_R'].dropna().unique())
169 |         n_unique_deltarerror = len(chron_df['delta_R_1s_err'].dropna().unique())
170 |
171 | if (n_unique_deltarerror > 1) and (n_unique_deltar > 1):
172 |             log.info("Found multi-depth deltar and deltar_error. Using NcdcRecord's original values")
173 | elif n_unique_deltar > 1:
174 | log.info('Found multi-depth deltar, without deltar_error. Using carbonferret "delta_R_1s_err".')
175 | delta_r_1s_err_used = get_deltar_online(latlon)[1]
176 | else:
177 | delta_r_used, delta_r_1s_err_used = get_deltar_online(latlon)
178 | log.debug('deltar(deltar_error): {}({})'.format(delta_r_used, delta_r_1s_err_used))
179 |
180 | if delta_r_used is not None:
181 | chron_df['delta_R_original'] = chron_df['delta_R'].copy()
182 | chron_df['delta_R'] = delta_r_used
183 |
184 | if delta_r_1s_err_used is not None:
185 | chron_df['delta_R_1s_err_original'] = chron_df['delta_R_1s_err'].copy()
186 | chron_df['delta_R_1s_err'] = delta_r_1s_err_used
187 |
188 | x.chronology_information.df = chron_df.copy()
189 | return x
190 |
191 | def average_duplicate_datadepths(self):
192 |         """Average observations at the same depth in self.data.df, returning a copy of self
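
        Examples
        --------
        A minimal sketch, assuming ``rec.data.df`` has a 'depth' column::

            >>> averaged = rec.average_duplicate_datadepths()  # doctest: +SKIP
            >>> averaged.data.df['depth'].duplicated().any()  # doctest: +SKIP
            False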
193 | """
194 | if not self.data.df['depth'].duplicated().any():
195 | log.debug('Found no duplicate depths in Data')
196 | # Return if no duplicates
197 | return self
198 |
199 | x = self.copy()
200 | log.debug('Found duplicate depths in Data')
201 | x.data.df = x.data.df.groupby('depth', as_index=False).mean()
202 | return x
203 |
204 | def has_chron(self):
205 | """True if self has populated chronology information"""
206 | result = False
207 | try:
208 | chron = self.chronology_information.df
209 | except KeyError:
210 | return result
211 | if len(chron) > 1: # Needs to have more than one date.
212 | result = True
213 | return result
214 |
215 | def has_data(self):
216 | """True if self has populated data information"""
217 | result = False
218 | try:
219 | d = self.data.df
220 | except KeyError:
221 | return result
222 | if len(d) > 0:
223 | result = True
224 | return result
225 |
226 | def chronology_depth_range(self, chronology='original'):
227 | """Get (min, max) range of chronology determinant depths
228 |
229 | Parameters
230 | ----------
231 | chronology : str, optional
232 |             Which chronology information to get depth ranges from. Can be
233 |             'original' or 'redate'. 'original' uses the depths in
234 |             self.chronology_information.df. 'redate' looks to
235 |             self.chronology_information.bacon_agemodel.mcmcsetup.coredates.depth,
236 |             assuming that the record has been redated with self.redate().
237 |
238 | Returns
239 | -------
240 | out : tuple
241 | Depths range (min, max).
242 |
243 | See Also
244 | --------
245 | LgmRecord.slice_datadepths : Cut self.data.df to given depths
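
        Examples
        --------
        A minimal sketch, assuming ``rec`` has populated chronology information::

            >>> dmin, dmax = rec.chronology_depth_range(chronology='original')  # doctest: +SKIP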
246 | """
247 | assert chronology in ['original', 'redate']
248 | if chronology == 'redate':
249 | depths = np.array(self.chronology_information.bacon_agemodel.mcmcsetup.coredates.depth)
250 | elif chronology == 'original':
251 | depths = np.concatenate([self.chronology_information.df['depth_top'],
252 | self.chronology_information.df['depth_bottom']])
253 | out = (np.nanmin(depths), np.nanmax(depths))
254 | return out
255 |
256 | def slice_datadepths(self, shallow=None, deep=None):
257 | """Cut self.data to given depths, return updated copy of self
258 |
259 |         The cut is inclusive for `shallow` and `deep` limits. Both `shallow` and
260 |         `deep` must be non-negative. If no value is given for `shallow` or `deep`,
261 |         the corresponding cut defaults to the shallowest or deepest sample depth
262 |         in `self.data.df`.
263 | 
264 |         Note that the cut applies to self.data.df as well as to self.data.age_median
265 |         and self.data.age_ensemble, if available.
266 |
267 | See Also
268 | --------
269 | LgmRecord.chronology_depth_range : Get range of chronology determinant depths
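
        Examples
        --------
        A minimal sketch that trims the proxy data to the depth range covered
        by the chronology, assuming ``rec`` has both populated::

            >>> shallow, deep = rec.chronology_depth_range()  # doctest: +SKIP
            >>> trimmed = rec.slice_datadepths(shallow=shallow, deep=deep)  # doctest: +SKIP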
270 | """
271 | if len(self.data.df) < 1 or 'depth' not in self.data.df.columns:
272 | return self
273 |
274 | out = self.copy()
275 |
276 | if shallow is None:
277 | shallow = out.data.df['depth'].min()
278 | if deep is None:
279 | deep = out.data.df['depth'].max()
280 |
281 |         assert shallow >= 0 and deep >= 0, 'cut depths must be non-negative'
282 |
283 | out.data.df = out.data.df[(out.data.df['depth'] >= shallow) & (out.data.df['depth'] <= deep)]
284 |
285 | if hasattr(out.data, 'age_ensemble'):
286 | out.data.age_ensemble = out.data.age_ensemble[
287 | (out.data.age_ensemble.index >= shallow) & (out.data.age_ensemble.index <= deep)]
288 | if hasattr(out.data, 'age_median'):
289 | out.data.age_median = out.data.age_median[
290 | (out.data.age_median.index >= shallow) & (out.data.age_median.index <= deep)]
291 |
292 | return out
293 |
294 |
295 | class NetcdfMixin:
296 |     """Mixin methods to export LGM proxy records to a NetCDF file"""
297 |
298 | @staticmethod
299 | def _variable_attributes(varname):
300 | """Get dict of netCDF4 variable attributes for a given NcdcRecord Data column name"""
301 | proxy_map = {'d13c': ('d13C', 'per mil'),
302 | 'd18o': ('d18O', 'per mil'),
303 |                      'mgca': ('Mg/Ca', 'mmol/mol'),
304 | 'percent': ('Percent foraminifera', '%'),
305 | 'tex86': ('TEX86', 'index'),
306 | 'uk37': ("UK'37", 'index')}
307 |         # Map the second part of the column name (after '_') to a foraminifera species.
308 | foraminifera_map = {'bulloides': 'Globigerina bulloides',
309 | 'crassaformis': 'Globorotalia crassaformis',
310 | 'dutertrei': 'Neogloboquadrina dutertrei',
311 | 'inflata': 'Globoconella inflata',
312 | 'mabahethi': 'Cibicides mabahethi',
313 | 'marginata': 'Bulimina marginata',
314 | 'menardii': 'Globorotalia menardii',
315 | 'obliquiloculata': 'Pulleniatina obliquiloculata',
316 | 'pachyderma': 'Neogloboquadrina pachyderma sinistral',
317 | 'pachysin': 'Neogloboquadrina pachyderma sinistral',
318 | 'pachyderma_d': 'Neogloboquadrina incompta',
319 | 'peregrina': 'Uvigerina peregrina',
320 | 'quinqueloba': 'Turborotalita quinqueloba',
321 | 'ruber': 'Globigerinoides ruber white',
322 | 'ruber_lato': 'Globigerinoides ruber white',
323 | 'ruber_pink': 'Globigerinoides ruber pink',
324 | 'ruber_stricto': 'Globigerinoides ruber white',
325 | 'sacculifer': 'Trilobatus sacculifer',
326 |                              'truncatulinoides': 'Globorotalia truncatulinoides',
327 | 'tumida': 'Globorotalia tumida',
328 | 'acicula': 'Creseis acicula',
329 | }
330 | varname = varname.lower()
331 | try:
332 | if '_' not in varname:
333 | out = {'long_name': proxy_map[varname][0], 'units': proxy_map[varname][1]}
334 | else:
335 | proxy, foram = varname.split('_', 1)
336 | out = {'long_name': proxy_map[proxy][0], 'units': proxy_map[proxy][1],
337 | 'foraminifera_type': foraminifera_map[foram]}
338 | except KeyError: # Variable name not found.
339 | out = {}
340 | return out
341 |
342 | def _attach_site_ncgroup(self, parent):
343 | """Create the NetCDF4 site group and populate it
344 |
345 | This is run whenever self.to_netcdf() is called.
346 |
347 | Parameters
348 | ----------
349 |         parent : netCDF4.Group or netCDF4.Dataset
350 | Object the created site group will use as a parent.
351 |
352 | Returns
353 | -------
354 |         this_site : netCDF4.Group
355 |             Reference to the created site group.
356 | """
357 | site_name = self.site_information.site_name.strip()
358 |         # Normalize site group name to ASCII and replace spaces with underscores.
359 | grp_name = unidecode.unidecode(site_name.lower()).replace(' ', '_')
360 |
361 | # Create and populate site group
362 | this_site = parent.createGroup(grp_name)
363 | this_site.site_name = str(site_name)
364 | this_site.comment = str(self.description)
365 | this_site.latitude = float(self.site_information.northernmost_latitude)
366 | this_site.longitude = float(self.site_information.easternmost_longitude)
367 | this_site.elevation = int(self.site_information.elevation)
368 | try:
369 | this_site.collection_year = int(self.data_collection.collection_year)
370 | except TypeError: # If collection year doesn't exist
371 | pass
372 | publication_str = '\n\n'.join([x.to_citationstr() for x in self.publication])
373 | this_site.references = publication_str
374 | return this_site
375 |
376 | def _attach_chronology_ncgroup(self, parent):
377 | chron = parent.createGroup('chronology')
378 | chron.createDimension('depth_top', None)
379 | chron.createDimension('str_dim', 50)
380 |
381 | labcode = chron.createVariable('labcode', 'S1', ('depth_top', 'str_dim'))
382 | labcode.long_name = 'Lab sample code'
383 | labcode.missing_value = 'NA'
384 | labcode._Encoding = 'ascii'
385 | labcode[:] = _normalize_to_ascii_array(self.chronology_information.df['Labcode'].fillna('NA'))
386 |
387 | depth_top = chron.createVariable('depth_top', 'f4', ('depth_top',))
388 | depth_top.long_name = 'Sample top depth'
389 | depth_top.axis = 'Z'
390 | depth_top.positive = 'down'
391 | depth_top.units = 'cm'
392 | depth_top.missing_value = np.nan
393 | depth_top[:] = self.chronology_information.df['depth_top'].values
394 |
395 | depth_bottom = chron.createVariable('depth_bottom', 'f4', ('depth_top',))
396 | depth_bottom.long_name = 'Sample bottom depth'
397 | depth_bottom.units = 'cm'
398 | depth_bottom.positive = 'down'
399 | depth_bottom.missing_value = np.nan
400 | depth_bottom[:] = self.chronology_information.df['depth_bottom'].values
401 |
402 | mat_dated = chron.createVariable('mat_dated', 'S1', ('depth_top', 'str_dim'))
403 | mat_dated.long_name = 'Material dated'
404 | mat_dated.missing_value = 'NA'
405 | mat_dated._Encoding = 'ascii'
406 | mat_dated[:] = _normalize_to_ascii_array(self.chronology_information.df['mat_dated'].fillna('NA'))
407 |
408 | c14_date = chron.createVariable('c14_date', 'f4', ('depth_top',))
409 | c14_date.long_name = '14C date'
410 | c14_date.units = 'RC yr BP'
411 | c14_date.missing_value = np.nan
412 | c14_date[:] = self.chronology_information.df['14C_date'].values
413 |
414 | c14_1s_err = chron.createVariable('c14_1s_err', 'f4', ('depth_top',))
415 | c14_1s_err.long_name = '14C 1-sigma error'
416 | c14_1s_err.units = 'RC yr BP'
417 | c14_1s_err.missing_value = np.nan
418 | c14_1s_err[:] = self.chronology_information.df['14C_1s_err'].values
419 |
420 | delta_r = chron.createVariable('delta_r', 'f4', ('depth_top',))
421 | delta_r.long_name = 'delta R'
422 | delta_r.missing_value = np.nan
423 | delta_r[:] = self.chronology_information.df['delta_R'].values
424 |
425 | if 'delta_R_original' in self.chronology_information.df.columns:
426 | delta_r_orig = chron.createVariable('delta_r_original', 'f4', ('depth_top',))
427 | delta_r_orig.long_name = 'Original delta R'
428 |             delta_r_orig.description = 'Carbon reservoir correction (delta R) value(s) given in the original proxy site data set.'
429 | delta_r_orig.missing_value = np.nan
430 | delta_r_orig[:] = self.chronology_information.df['delta_R_original'].values
431 |
432 | delta_r_1s_error = chron.createVariable('delta_r_1s_error', 'f4', ('depth_top',))
433 | delta_r_1s_error.missing_value = np.nan
434 | delta_r_1s_error.long_name = 'delta R 1-sigma error'
435 | delta_r_1s_error[:] = self.chronology_information.df['delta_R_1s_err'].values
436 |
437 | if 'delta_R_1s_err_original' in self.chronology_information.df.columns:
438 | delta_r_1s_error_orig = chron.createVariable('delta_r_1s_error_original', 'f4', ('depth_top',))
439 | delta_r_1s_error_orig.long_name = 'Original delta R 1-sigma error'
440 |             delta_r_1s_error_orig.description = 'Carbon reservoir correction 1-sigma error value(s) given in the original proxy site data set.'
441 | delta_r_1s_error_orig.missing_value = np.nan
442 | delta_r_1s_error_orig[:] = self.chronology_information.df['delta_R_1s_err_original'].values
443 |
444 | other_date = chron.createVariable('other_date', 'f4', ('depth_top',))
445 | other_date.missing_value = np.nan
446 | other_date.long_name = 'Other date'
447 | other_date[:] = self.chronology_information.df['other_date'].values
448 |
449 | other_1s_err = chron.createVariable('other_1s_err', 'f4', ('depth_top',))
450 | other_1s_err.long_name = 'Other date 1-sigma error'
451 | other_1s_err.missing_value = np.nan
452 | other_1s_err[:] = self.chronology_information.df['other_1s_err'].values
453 |
454 | other_type = chron.createVariable('other_type', 'S1', ('depth_top', 'str_dim'))
455 | other_type.long_name = 'Other date type'
456 | other_type.missing_value = 'NA'
457 | other_type._Encoding = 'ascii'
458 | other_type[:] = _normalize_to_ascii_array(self.chronology_information.df['other_type'].fillna('NA'))
459 |
460 | # Add depth cutoff value attributes to chronology group if
461 | # self.chronology_information has `cut_shallow` and `cut_deep` attributes.
462 | if hasattr(self.chronology_information, 'cut_shallow'):
463 | chron.cut_shallow = self.chronology_information.cut_shallow
464 | if hasattr(self.chronology_information, 'cut_deep'):
465 | chron.cut_deep = self.chronology_information.cut_deep
466 |
467 | return chron
468 |
469 | def _attach_data_ncgroup(self, parent):
470 | """Create and populate data group"""
471 | data = parent.createGroup('data')
472 | data.createDimension('depth', None)
473 | depth = data.createVariable('depth', 'f4', ('depth',), zlib=True)
474 | depth.long_name = 'Sample depth'
475 | depth.positive = 'down'
476 | depth.axis = 'Z'
477 | depth[:] = self.data.df['depth'].values
478 |
479 | file_depth_unit = str(self.variables['depth'].units)
480 |         if file_depth_unit in ('', 'None'):  # str() maps a missing (None) unit to 'None'
481 | depth.units = 'cm'
482 | else:
483 | depth.units = file_depth_unit
484 |
486 | age_original = data.createVariable('age_original', 'f4', ('depth',),
487 | zlib=True)
488 | age_original.missing_value = np.nan
489 | age_original.long_name = 'Original age'
490 | age_original[:] = self.data.df['age'].values
491 |
492 | file_age_unit = str(self.variables['age'].units)
493 |         if file_age_unit in ('', 'None'):  # str() maps a missing (None) unit to 'None'
494 | age_original.units = 'cal years BP'
495 | else:
496 | age_original.units = file_age_unit
497 |
498 | if hasattr(self.data, 'age_ensemble') and hasattr(self.data, 'age_median'):
499 | data.createDimension('draw', self.data.age_ensemble.shape[1])
500 |
501 | age_median = data.createVariable('age_median', 'f4', ('depth',),
502 | zlib=True)
503 | age_median.units = 'cal years BP'
504 | age_median.long_name = 'Median age'
505 | age_median.missing_value = np.nan
506 | age_median[:] = self.data.age_median['age_median'].values
507 |
508 | agedraw = data.createVariable('age_ensemble', 'f4', ('depth', 'draw'),
509 | zlib=True)
510 | agedraw.units = 'cal years BP'
511 | agedraw.long_name = 'Age ensemble'
512 | agedraw.missing_value = np.nan
513 | agedraw[:] = self.data.age_ensemble.values
514 |
515 | for col in list(self.data.df.columns):
516 | col_name = col.lower()
517 |
518 | if col_name in ['depth', 'age']:
519 | continue
520 |
521 | var = data.createVariable(col_name, 'f4', ('depth',), zlib=True)
522 | var.missing_value = np.nan
523 |
524 | # Add more attributes to variable.
525 | attrib_dict = self._variable_attributes(col_name)
526 | for k, v in attrib_dict.items():
527 | setattr(var, k, v)
528 |
529 | # Overwrite units attributes with whatever units are given in the NcdcRecord.
530 | var.units = str(self.variables[col].units)
531 | var.comments = str(self.variables[col].detail)
532 |
533 | # Grab Mg/Ca cleaning information from "Data Collection Information - Notes"
534 | if 'mgca' in col_name:
535 | cleaning_note = str(self.data_collection.notes)
536 | if 'mg_bcp' in cleaning_note:
537 | var.mgca_cleaning_protocol = 'Barker cleaning with hydrogen peroxide'
538 | elif 'mg_red' in cleaning_note:
539 | var.mgca_cleaning_protocol = 'Fully reductive cleaning'
540 | else:
541 | var.mgca_cleaning_protocol = 'NA'
542 | var[:] = self.data.df[col].values
543 | return data
544 |
545 | def _attach_ncgroups(self, fl):
546 | """Dump contents into netCDF4.Dataset
547 |
548 | This is run whenever self.to_netcdf() is called.
549 |
550 | This runs all of the self._attach_*ncgroup() methods and attaches them
551 | to a netcdf4.Dataset object.
552 | """
553 | site_group = self._attach_site_ncgroup(fl)
554 | # Create and populate chronology group, if chronology_information exists
555 | if not self.chronology_information.df.empty:
556 | self._attach_chronology_ncgroup(site_group)
557 | self._attach_data_ncgroup(site_group)
558 |
559 | def to_netcdf(self, path_or_buffer):
560 | """Write NcdcRecord contents to a netCDF file
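
        If ``path_or_buffer`` is a path string, the file is opened in append
        mode when it already exists and is created otherwise. A minimal
        sketch::

            >>> rec.to_netcdf('proxy_sites.nc')  # doctest: +SKIP

        An open ``netCDF4.Dataset`` can also be passed instead of a path.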
561 | """
562 | if isinstance(path_or_buffer, str):
563 |             # Append to the file if it exists; otherwise create it.
564 | try:
565 | with netCDF4.Dataset(filename=path_or_buffer, mode='a', format='NETCDF4') as fl:
566 | self._attach_ncgroups(fl)
567 | except FileNotFoundError:
568 | with netCDF4.Dataset(filename=path_or_buffer, mode='w', format='NETCDF4') as fl:
569 | self._attach_ncgroups(fl)
570 |
571 | else:
572 | self._attach_ncgroups(path_or_buffer)
573 |
574 |
575 | class QcPlotMixin:
576 |     """Mixin methods to add QC plots to LGM proxy records"""
577 |
578 | @staticmethod
579 | def _ax_setup(*args, **kwargs):
580 | try:
581 | import matplotlib.pylab as plt
582 | except ModuleNotFoundError:
583 | raise ModuleNotFoundError('matplotlib needs to be installed for plots')
584 |
585 | return plt.gca(*args, **kwargs)
586 |
587 | def plot_datavariable(self, variable, ax=None):
588 | """
589 | Plot a variable in the record data as timeseries.
590 |
591 |         The plot also compares the variable on the redated age model with
592 |         the original (file) age model, if the record has been redated.
593 |
594 | Parameters
595 | ----------
596 | variable : str
597 | Name of variable to plot. Must be in ``self.data``.
598 | ax : :class:`mpl.axes.Axes` or None, optional
599 | Existing axes to plot onto.
600 |
601 | Returns
602 | -------
603 | ax : :class:`mpl.axes.Axes`
604 |
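        Examples
        --------
        A minimal sketch; 'd18o_ruber' is only an illustrative column name::

            >>> ax = rec.plot_datavariable('d18o_ruber')  # doctest: +SKIP
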
605 | """
606 | if ax is None:
607 | ax = self._ax_setup()
608 |
609 | if variable not in self.data.df.columns:
610 | raise KeyError('{} not found'.format(variable))
611 |
612 | if hasattr(self.data, 'age_median'):
613 | proxy_df = (self.data.df.set_index('depth')
614 | .join(self.data.age_median, lsuffix='__', sort=True))
615 |
616 | new_age = proxy_df.loc[:, ('age_median', variable)].dropna()
617 | ax.plot(new_age.loc[:, 'age_median'], new_age.loc[:, variable], '.',
618 | color='C3', label='MCMC median age')
619 | ax.plot(new_age.loc[:, 'age_median'], new_age.loc[:, variable],
620 | color='C3', linewidth=0.5, label='_nolegend_')
621 | log.debug('Found new agemodel proxy timeseries')
622 | else:
623 | proxy_df = self.data.df.set_index('depth')
624 | log.debug('Assuming no new agemodel proxy timeseries')
625 |
626 | old_age = proxy_df.loc[:, ('age', variable)].dropna()
627 | ax.plot(old_age.loc[:, 'age'], old_age.loc[:, variable], 'x',
628 | color='C0', label='File age')
629 | ax.plot(old_age.loc[:, 'age'], old_age.loc[:, variable],
630 | color='C0', linewidth=0.5, label='_nolegend_')
631 |
632 | if 'd18o' in variable.lower():
633 | ax.invert_yaxis()
634 | ax.set_ylabel(variable)
635 | ax.grid()
636 |
637 | return ax
638 |
639 | def plot_sitemap(self, ax=None):
640 | """
641 | Plot sample site map using cartopy.
642 |
643 |         Requires ``cartopy`` to be installed. Uses the site's northernmost
644 |         latitude and easternmost longitude to plot, so it assumes
645 |         ``self.site_information.northernmost_latitude`` and
646 |         ``self.site_information.easternmost_longitude`` are populated.
647 |
648 | Parameters
649 | ----------
650 | ax : :class:`mpl.axes.Axes` or None, optional
651 | Existing axes to plot onto.
652 |
653 | Returns
654 | -------
655 | ax : :class:`mpl.axes.Axes`
656 |
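        Examples
        --------
        A minimal sketch; requires ``cartopy``::

            >>> ax = rec.plot_sitemap()  # doctest: +SKIP
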
657 | """
658 | try:
659 | import cartopy.crs as ccrs
660 | import cartopy.feature as cfeature
661 | except ModuleNotFoundError:
662 | raise ModuleNotFoundError('cartopy needs to be installed for mapping')
663 |
664 | latlon = (self.site_information.northernmost_latitude,
665 | self.site_information.easternmost_longitude)
666 |
667 | if ax is None:
668 | ax = self._ax_setup(projection=ccrs.Robinson(central_longitude=latlon[1]))
669 |
670 | ax.set_global()
671 | ax.add_feature(cfeature.LAND, facecolor='#B0B0B0')
672 | ax.outline_patch.set_linewidth(0.5)
673 | ax.plot(latlon[1], latlon[0], 'o', color='C0', transform=ccrs.Geodetic())
674 |
675 | return ax
676 |
677 | def plot_sedrate(self, ax=None):
678 | """
679 | Plot prior and posterior sediment rates for record agemodel.
680 |
681 |         Requires ``self.chronology_information.bacon_agemodel`` to be populated
682 |         with a ``snakebacon.AgeDepthModel``-like instance. You can do this with
683 |         :meth:`self.redate`, for example.
684 |
685 | Parameters
686 | ----------
687 | ax : :class:`mpl.axes.Axes` or None, optional
688 | Existing axes to plot onto.
689 |
690 | Returns
691 | -------
692 | ax : :class:`mpl.axes.Axes`
693 |
694 | """
695 | if ax is None:
696 | ax = self._ax_setup()
697 |
698 | ax = self.chronology_information.bacon_agemodel.plot_sediment_rate(ax)
699 | ax.lines[-1].set_color('C3')
700 |
701 | return ax
702 |
703 | def plot_sedmemory(self, ax=None):
704 | """
705 | Plot prior and posterior sediment memory for record agemodel.
706 |
707 |         Requires ``self.chronology_information.bacon_agemodel`` to be populated
708 |         with a ``snakebacon.AgeDepthModel``-like instance. You can do this with
709 |         :meth:`self.redate`, for example.
710 |
711 | Parameters
712 | ----------
713 | ax : :class:`mpl.axes.Axes` or None, optional
714 | Existing axes to plot onto.
715 |
716 | Returns
717 | -------
718 | ax : :class:`mpl.axes.Axes`
719 |
720 | """
721 | if ax is None:
722 | ax = self._ax_setup()
723 |
724 | ax = self.chronology_information.bacon_agemodel.plot_sediment_memory(ax)
725 | ax.lines[-1].set_color('C3')
726 |
727 | return ax
728 |
729 | def plot_agedepth(self, maxage=None, prior_dwidth=30, ax=None):
730 | """
731 | Plot age models in relation to core depth.
732 |
733 | Parameters
734 | ----------
735 |         maxage : float, int, or None, optional
736 |             Cutoff age for the plotted age model.
737 |         prior_dwidth : int, optional
738 |             Passed to :meth:`snakebacon.AgeDepthModel.plot_prior_dates`.
739 | ax : :class:`mpl.axes.Axes` or None, optional
740 | Existing axes to plot onto.
741 |
742 | Returns
743 | -------
744 | ax : :class:`mpl.axes.Axes`
745 |
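        Examples
        --------
        A minimal sketch, assuming the record has been redated so that
        ``self.chronology_information.bacon_agemodel`` is populated::

            >>> ax = rec.plot_agedepth(maxage=25000)  # doctest: +SKIP
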
746 | """
747 | if ax is None:
748 | ax = self._ax_setup()
749 |
750 | agemodel = self.chronology_information.bacon_agemodel
751 | data_df = self.data.df
752 | # Hack to copy and crop the age model to a certain age.
753 | if maxage is not None:
754 | agemodel = deepcopy(agemodel)
755 |             not_too_old = ~(agemodel.age_median() > maxage)  # mask of samples no older than maxage
756 |             agemodel._depth = agemodel.depth[not_too_old]
757 |             agemodel._age_ensemble = agemodel.age_ensemble[not_too_old]
758 |
759 | # data_df = self.data.df.copy()
760 | # data_df = data_df.loc[data_df['age'] <= maxage, ('depth', 'age')]
761 |
762 | if len(agemodel._depth) > 0:
763 | # Skip age model plotting if maxage cut-out all samples.
764 | ax = agemodel.plot(ax=ax)
765 | ax.collections[-1].set_cmap('Greys')
766 |
767 | for l in ax.lines:
768 | l.set_color('C3')
769 |
770 | ax.plot(data_df.loc[:, 'depth'], data_df.loc[:, 'age'], 'C0',
771 | label='File age model')
772 | ax = agemodel.plot_prior_dates(dwidth=prior_dwidth, ax=ax)
773 | ax.collections[-1].set_color('k')
774 | ax.collections[-1].set_zorder(10)
775 |
776 | ax.autoscale_view()
777 | else:
778 |             log.warning('No age-depth data to plot')
779 |
780 |         # Plot depth cut-off information, if available.
781 | if hasattr(self.chronology_information, 'cut_shallow'):
782 | ax.axvline(x=float(self.chronology_information.cut_shallow), color='C4',
783 | linestyle='-.', zorder=1.5)
784 | if hasattr(self.chronology_information, 'cut_deep'):
785 | ax.axvline(x=float(self.chronology_information.cut_deep), color='C4',
786 | linestyle='-.', zorder=1.5)
787 |
788 | ax.set_title('Age model')
789 |
790 | return ax
791 |
792 | def plot_deltar(self, ax=None):
793 | """
794 | Plot a description of the site carbon reservoir information.
795 |
796 | Parameters
797 | ----------
798 | ax : :class:`mpl.axes.Axes` or None, optional
799 | Existing axes to plot onto.
800 |
801 | Returns
802 | -------
803 | ax : :class:`mpl.axes.Axes`
804 |
805 | """
806 | if ax is None:
807 | ax = self._ax_setup()
808 |
809 | latlon = (self.site_information.northernmost_latitude,
810 | self.site_information.easternmost_longitude)
811 |
812 | elevation = self.site_information.elevation
813 |
814 | # These checks are not well written.
815 | try:
816 |             deltar_original = int(np.round(self.chronology_information.df['delta_R_original'].values[0]))
817 | if np.isnan(deltar_original):
818 | deltar_original = None
819 | except (KeyError, ValueError, IndexError) as e:
820 | deltar_original = None
821 |
822 | try:
823 |             deltar_std_original = int(np.round(self.chronology_information.df['delta_R_1s_err_original'].values[0]))
824 | if np.isnan(deltar_std_original):
825 | deltar_std_original = None
826 | except (KeyError, ValueError, IndexError) as e:
827 | deltar_std_original = None
828 |
829 | try:
830 |             deltar_used = int(np.round(self.chronology_information.df['delta_R'].values[0]))
831 | except (KeyError, TypeError):
832 | deltar_used = None
833 |
834 | try:
835 |             deltar_error_used = int(np.round(self.chronology_information.df['delta_R_1s_err'].values[0]))
836 | except (KeyError, TypeError):
837 | deltar_error_used = None
838 |
839 | text_template = 'Latitude: {}°\nLongitude: {}°\nElevation: {} m ' \
840 | '\n\nΔR: {}\nΔRσ: {}\nFile ΔR: {}\nFile ΔRσ: {}'
841 | text_str = text_template.format(latlon[0], latlon[1], elevation,
842 | deltar_used, deltar_error_used,
843 | deltar_original, deltar_std_original)
844 |
845 | ax.text(0.05, 0.9, text_str, verticalalignment='top',
846 | horizontalalignment='left', transform=ax.transAxes)
847 | ax.set_title('{}\n{}'.format(self.site_information.site_name, datetime.date.today().isoformat()))
848 | ax.axis('off')
849 |
850 | return ax
851 |
852 | def to_qcpdf(self, pdfpath, proxy_vars=None, plot_agedepth_kws=None):
853 | """
854 | Write quality-control report PDF.
855 |
856 |         Requires :mod:`matplotlib` and :mod:`cartopy` to be installed.
857 |
858 | Parameters
859 | ----------
860 | pdfpath : str
861 | Path to write PDF to.
862 | proxy_vars : iterable or None, optional
863 |             Names of series to include in the time series plot. Attempts to use all
864 | available proxies if ``None``.
865 | plot_agedepth_kws : dict or None, optional
866 |             Keyword arguments to pass to both calls to ``self.plot_agedepth``
867 |             when plotting.
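
        Examples
        --------
        A rough sketch; requires ``matplotlib`` and ``cartopy``, and the
        ``proxy_vars`` names here are only illustrative::

            >>> rec.to_qcpdf('core_qc_report.pdf', proxy_vars=['d18o_ruber'])  # doctest: +SKIP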
868 | """
869 | log.debug('Writing QC report plots')
870 |
871 | try:
872 | from matplotlib.backends.backend_pdf import PdfPages
873 | import matplotlib.pylab as plt
874 | except ModuleNotFoundError:
875 | raise ModuleNotFoundError('matplotlib needs to be installed for plots')
876 |
877 | try:
878 | import cartopy.crs as ccrs
879 | except ModuleNotFoundError:
880 | raise ModuleNotFoundError('cartopy needs to be installed for mapping')
881 |
882 | if plot_agedepth_kws is None:
883 | plot_agedepth_kws = {}
884 |
885 | # Find "non-dimension" data variables to plot, if none were passed.
886 | not_proxy = ['age', 'age_median', 'depth', 'age_ensemble']
887 | if proxy_vars is None:
888 | # TODO(brews): This logic might be good candidate for a more general method.
889 | proxy_vars = [str(x) for x in self.data.df.columns if str(x).lower() not in not_proxy]
890 |
891 | latlon = (self.site_information.northernmost_latitude,
892 | self.site_information.easternmost_longitude)
893 |
894 | n_vars = len(proxy_vars)
895 | n_cols = n_vars + 1
896 |
897 | has_baconagemodel = hasattr(self.chronology_information, 'bacon_agemodel')
898 |
899 | with PdfPages(pdfpath) as pdf:
900 | fig = plt.figure(figsize=(6.5, 9))
901 |
902 | ax2 = plt.subplot2grid((n_cols, 2), (0, 1),
903 | projection=ccrs.Robinson(central_longitude=latlon[1]))
904 | ax3 = plt.subplot2grid((n_cols, 2), (0, 0))
905 |
906 | self.plot_sitemap(ax=ax2)
907 |
908 | self.plot_deltar(ax=ax3)
909 |
910 | for i, varstr in enumerate(proxy_vars):
911 | this_ax = plt.subplot2grid((n_cols, 1), (1 + i, 0), colspan=2)
912 | this_ax = self.plot_datavariable(varstr, ax=this_ax)
913 | this_ax.xaxis.label.set_visible(False)
914 | this_ax.title.set_visible(False)
915 | if i == 0:
916 | this_ax.title.set_visible(True)
917 | this_ax.set_title('Proxy variables')
918 |
919 | this_ax.xaxis.label.set_visible(True)
920 | this_ax.set_xlabel('Age (cal yr BP)')
921 | fig.tight_layout()
922 | pdf.savefig(bbox_inches='tight')
923 | plt.close()
924 |
925 | fig = plt.figure(figsize=(6.5, 9))
926 | ax1 = plt.subplot2grid((3, 2), (0, 0))
927 | ax2 = plt.subplot2grid((3, 2), (0, 1))
928 | ax4 = plt.subplot2grid((3, 2), (1, 0), rowspan=2, colspan=2)
929 |
930 | if has_baconagemodel:
931 | self.plot_sedrate(ax=ax1)
932 | self.plot_sedmemory(ax=ax2)
933 | self.plot_agedepth(maxage=50000, ax=ax4, **plot_agedepth_kws)
934 |
935 | fig.tight_layout()
936 | pdf.savefig(bbox_inches='tight')
937 | plt.close()
938 |
939 | fig = plt.figure(figsize=(6.5, 9))
940 | ax1 = plt.subplot(1, 1, 1)
941 | if has_baconagemodel:
942 | self.plot_agedepth(maxage=25000, ax=ax1, **plot_agedepth_kws)
943 | fig.tight_layout()
944 | pdf.savefig(bbox_inches='tight')
945 | plt.close()
946 |
947 | log.debug('QC report plot saved to {}'.format(pdfpath))
948 |
--------------------------------------------------------------------------------