├── tests
├── __init__.py
├── test_scoring.py
├── test_mie.py
├── test_plots.py
├── test_utils.py
├── test_distributions.py
└── test_models.py
├── docs
├── _config.yml
├── .gitignore
├── tutorial
│ ├── Makefile
│ ├── nephelometer.rst
│ ├── nephelometer.ipynb
│ └── aerosol_distributions.rst
├── _templates
│ └── autosummary
│ │ ├── base.rst
│ │ └── class.rst
├── tutorial.rst
├── introduction.rst
├── tools
│ └── nb_to_doc.py
├── _static
│ ├── style.css
│ └── copybutton.js
├── Makefile
├── api.rst
├── installing.rst
├── contributing.rst
├── index.rst
├── conf.py
└── sphinxext
│ └── gallery_generator.py
├── .gitattributes
├── src
└── opcsim
│ ├── equations
│ ├── __init__.py
│ ├── cdf.py
│ └── pdf.py
│ ├── rc_style.py
│ ├── __init__.py
│ ├── metrics.py
│ ├── mie.py
│ ├── utils.py
│ ├── distributions.py
│ ├── plots.py
│ └── models.py
├── examples
├── urban_distribution_pdf.py
├── ten_bin_opc.py
├── volume_mass_comparison.py
├── build_your_own_distribution.py
├── cumulative_mass.py
├── three_weights.py
├── opc_with_dist.py
├── hygroscopic_growth_pdf.py
└── opc_with_dist_number_and_vol.py
├── pyproject.toml
├── LICENSE
├── .github
└── workflows
│ ├── test-and-verify.yml
│ ├── docs.yml
│ └── release.yml
├── .gitignore
└── README.md
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | opcsim/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/src/opcsim/equations/__init__.py:
--------------------------------------------------------------------------------
1 | from .cdf import *
2 | from .pdf import *
3 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | *_files/
2 | #_build/
3 | #generated/
4 | #examples/
5 | #example_thumbs/
6 |
--------------------------------------------------------------------------------
/docs/tutorial/Makefile:
--------------------------------------------------------------------------------
1 | rst_files := $(patsubst %.ipynb,%.rst,$(wildcard *.ipynb))
2 |
3 | tutorial: ${rst_files}
4 |
5 | %.rst: %.ipynb
6 | ../tools/nb_to_doc.py $*
--------------------------------------------------------------------------------
/docs/_templates/autosummary/base.rst:
--------------------------------------------------------------------------------
1 | .. raw:: html
2 |
3 |
4 |
5 |
6 | {{ fullname | escape | underline}}
7 |
8 | .. currentmodule:: {{ module }}
9 |
10 | .. auto{{ objtype }}:: {{ objname }}
--------------------------------------------------------------------------------
/src/opcsim/rc_style.py:
--------------------------------------------------------------------------------
1 | """Make changes to the default styling which seaborn doesn't support
2 | """
3 | import matplotlib as mpl
4 |
5 | def set(**kwargs):
6 | mpl.rcParams['figure.autolayout'] = kwargs.pop('figure.autolayout', False)
7 | mpl.rcParams['mathtext.default'] = kwargs.pop('mathtext.default', 'regular')
8 |
9 | return
10 |
--------------------------------------------------------------------------------
/src/opcsim/__init__.py:
--------------------------------------------------------------------------------
1 | from importlib.metadata import version
2 |
3 | import warnings
4 | import pandas as pd
5 | import numpy as np
6 | import math
7 | import os
8 |
9 | from .distributions import *
10 | from .models import *
11 | from .plots import *
12 | from .utils import *
13 | from .metrics import *
14 | from .mie import *
15 | from .rc_style import set
16 |
17 | set()
18 |
19 | __version__ = version("opcsim")
--------------------------------------------------------------------------------
/docs/tutorial.rst:
--------------------------------------------------------------------------------
1 | .. _tutorial:
2 |
3 | opcsim Tutorial
4 | ===============
5 |
6 | Aerosol Distributions
7 | ---------------------
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | tutorial/aerosol_distributions
13 |
14 |
15 | Simulating and Evaluating an OPC
16 | --------------------------------
17 |
18 | .. toctree::
19 | :maxdepth: 2
20 |
21 | tutorial/opcs
22 | tutorial/scoring
23 |
24 | Simulating and Evaluating a nephelometer
25 | ----------------------------------------
26 |
27 | .. toctree::
28 | :maxdepth: 2
29 |
30 | tutorial/nephelometer
--------------------------------------------------------------------------------
/docs/introduction.rst:
--------------------------------------------------------------------------------
1 |
2 | .. _introduction:
3 |
4 | .. currentmodule:: opcsim
5 |
6 | An introduction to OPCSIM
7 | ==========================
8 |
9 | .. raw:: html
10 |
11 |
12 |
13 | OPCSIM is a Python library for simulating the response of low-cost optical particle counters (OPCs) to aerosol size distributions. It is built on top of `matplotlib <https://matplotlib.org>`_ and closely integrated with `pandas <https://pandas.pydata.org>`_ data structures.
14 |
15 | Here is some of the functionality that opcsim offers:
16 |
17 | - tools for building, loading, and visualizing aerosol size distributions
18 |
19 | Here's an example of what this means:
20 |
21 | .. raw:: html
22 |
23 |
24 |
--------------------------------------------------------------------------------
/examples/urban_distribution_pdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Visualize the Urban Aerosol Distribution
3 | ========================================
4 | _thumb: .4, .4
5 | """
6 | import seaborn as sns
7 | import opcsim
8 | sns.set(style='ticks', font_scale=1.25)
9 |
10 | # Load the example urban distribution
11 | d = opcsim.load_distribution("Urban")
12 |
13 | # Plot the number-weighted pdf with modes
14 | ax = opcsim.plots.pdfplot(d, with_modes=True)
15 |
16 | # Set the title and axes labels
17 | ax.set_title("Urban Distribution", fontsize=18)
18 |
19 | # Add a legend
20 | ax.legend(loc='best')
21 |
22 | # Set the ylim
23 | ax.set_ylim(0, None)
24 |
25 | # Remove the top and right spines
26 | sns.despine()
27 |
--------------------------------------------------------------------------------
/examples/ten_bin_opc.py:
--------------------------------------------------------------------------------
1 | """
2 | Urban Response of a 10-bin OPC
3 | ==============================
4 | _thumb: .4, .4
5 | """
6 | import seaborn as sns
7 | import opcsim
8 | sns.set(style='ticks', font_scale=1.5)
9 |
10 | # Load the example urban distribution
11 | d = opcsim.load_distribution("Urban")
12 |
13 | # Build a 10-bin OPC
14 | opc = opcsim.OPC(wl=0.658, n_bins=10, dmin=0.3)
15 |
16 | # calibrate the OPC using PSl's
17 | opc.calibrate("psl", method='spline')
18 |
19 | # compute the values
20 | vals = opc.histogram(d, weight="number", rh=0.0)
21 |
22 | # Plot the histogram response
23 | ax = opcsim.plots.histplot(vals, bins=opc.bins)
24 |
25 | # Remove the top and right spines
26 | sns.despine()
27 |
--------------------------------------------------------------------------------
/docs/_templates/autosummary/class.rst:
--------------------------------------------------------------------------------
1 | .. raw:: html
2 |
3 |
4 |
5 |
6 | {{ fullname | escape | underline}}
7 |
8 | .. currentmodule:: {{ module }}
9 |
10 | .. autoclass:: {{ objname }}
11 |
12 | {% block methods %}
13 | .. automethod:: __init__
14 |
15 | {% if methods %}
16 | .. rubric:: Methods
17 |
18 | .. autosummary::
19 | {% for item in methods %}
20 | ~{{ name }}.{{ item }}
21 | {%- endfor %}
22 | {% endif %}
23 | {% endblock %}
24 |
25 | {% block attributes %}
26 | {% if attributes %}
27 | .. rubric:: Attributes
28 |
29 | .. autosummary::
30 | {% for item in attributes %}
31 | ~{{ name }}.{{ item }}
32 | {%- endfor %}
33 | {% endif %}
34 | {% endblock %}
--------------------------------------------------------------------------------
/examples/volume_mass_comparison.py:
--------------------------------------------------------------------------------
"""
Compare the Number and Mass-weighted Urban Distribution
=======================================================
_thumb: .4, .4
"""
import seaborn as sns
import matplotlib.pyplot as plt  # required for plt.subplots below
import opcsim

sns.set(style='ticks', font_scale=1.25)

# Load the example urban distribution
urban = opcsim.load_distribution("Urban")

# Set up a figure with two vertically-stacked subplots sharing the x-axis
fig, ax = plt.subplots(2, sharex=True)

# Plot the number-weighted pdf on the top axis
opcsim.plots.pdfplot(urban, ax=ax[0])

# Plot the mass-weighted pdf on the bottom axis
opcsim.plots.pdfplot(urban, weight='mass', ax=ax[1])

# Remove the x-axis label from the top subplot (shared with the bottom one)
ax[0].set_xlabel("")

# Remove the top and right spines
sns.despine()
--------------------------------------------------------------------------------
/examples/build_your_own_distribution.py:
--------------------------------------------------------------------------------
1 | """
2 | Build Your Own Aerosol Distribution
3 | ===================================
4 |
5 | _thumb: .6, .6
6 | """
7 | import seaborn as sns
8 | import opcsim
9 |
10 | sns.set(style='ticks', font_scale=1.75)
11 |
12 | # Load the example urban distribution
13 | d = opcsim.AerosolDistribution()
14 |
15 | # Add a mode with 1000 particles/cc, GM=100nm, and GSD=1.5
16 | d.add_mode(n=1000, gm=0.1, gsd=1.5, label="Mode I",
17 | kappa=0., rho=1.5, refr=complex(1.5, 0))
18 |
19 | # Overlay the distribution
20 | ax = opcsim.plots.pdfplot(d)
21 |
22 | # Fix the y-axis to begin at 0
23 | ax.set_ylim(0, None)
24 |
25 | # Set the xlim to the "visible" region
26 | ax.set_xlim(0.01, 1)
27 |
28 | # Remove the top and right spines
29 | sns.despine()
--------------------------------------------------------------------------------
/examples/cumulative_mass.py:
--------------------------------------------------------------------------------
1 | """
2 | Cumulative Mass Loading for Various Distributions
3 | =================================================
4 | _thumb: .6, .5
5 | """
6 | import seaborn as sns
7 | import opcsim
8 |
9 | sns.set(style='ticks', font_scale=1.5)
10 |
11 | # Load the example urban distribution
12 | urban = opcsim.load_distribution("Urban")
13 | rural = opcsim.load_distribution("Rural")
14 |
15 | # Plot the mass-weighted cdf [urban]
16 | ax = opcsim.plots.cdfplot(urban, weight='mass')
17 |
18 | # Plot the mass-weighted cdf [rural]
19 | ax = opcsim.plots.cdfplot(rural, weight='mass', ax=ax)
20 |
21 | # Add a legend
22 | ax.legend(loc='best')
23 |
24 | # Set the x and y-axis limits
25 | ax.set_ylim(0, None)
26 | ax.set_xlim(.01, None)
27 |
28 | # Remove the top and right spines
29 | sns.despine()
30 |
--------------------------------------------------------------------------------
/examples/three_weights.py:
--------------------------------------------------------------------------------
"""
Plot the Urban Distribution in N, SA, and V
===========================================
_thumb: .4, .4
"""
import seaborn as sns
import matplotlib.pyplot as plt  # required for plt.subplots below
import opcsim

sns.set(style='ticks')

# Load the example urban distribution
d = opcsim.load_distribution("Urban")

# Set up a figure with three vertically-stacked subplots sharing the x-axis
fig, ax = plt.subplots(3, sharex=True)

# Plot the number-weighted pdf
opcsim.plots.pdfplot(d, ax=ax[0])

# Plot the surface-area-weighted pdf
opcsim.plots.pdfplot(d, weight='surface', ax=ax[1])

# Plot the volume-weighted pdf
opcsim.plots.pdfplot(d, weight='volume', ax=ax[2])

# Remove the x-axis labels from the upper subplots (shared with the bottom one)
ax[0].set_xlabel("")
ax[1].set_xlabel("")

# Remove the top and right spines
sns.despine()
--------------------------------------------------------------------------------
/tests/test_scoring.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import unittest
5 | import opcsim
6 | import pandas as pd
7 | import numpy as np
8 |
9 | from opcsim.distributions import *
10 | from opcsim.models import *
11 | from opcsim.metrics import compute_bin_assessment
12 |
13 | class SetupTestCase(unittest.TestCase):
14 | def setUp(self):
15 | pass
16 |
17 | def tearDown(self):
18 | pass
19 |
20 | def test_compute_bin_assessments(self):
21 | opc = OPC(wl=0.658, n_bins=5)
22 |
23 | opc.calibrate("psl")
24 |
25 | d = AerosolDistribution("test")
26 | d.add_mode(n=10000, gm=0.1, gsd=2, label="mode 1", kappa=0.53, refr=complex(1.55, 0))
27 |
28 | rv = compute_bin_assessment(opc, refr=complex(1.55, 0), kappa=0.53)
29 |
30 | self.assertTrue(isinstance(rv, pd.DataFrame))
31 |
--------------------------------------------------------------------------------
/examples/opc_with_dist.py:
--------------------------------------------------------------------------------
1 | """
2 | 10-Bin OPC Response with Distribution
3 | =====================================
4 | _thumb: .6, .5
5 | """
6 | import seaborn as sns
7 | import opcsim
8 |
9 | sns.set(style='ticks', font_scale=1.5)
10 |
11 | # Load the example urban distribution
12 | d = opcsim.load_distribution("Urban")
13 |
14 | # Build a 10-bin OPC with a dmin of 300 nm
15 | opc = opcsim.OPC(wl=0.658, n_bins=10, dmin=0.3)
16 |
17 | # calibrate the OPC using PSl's
18 | opc.calibrate("psl", method='spline')
19 |
20 | # compute the values
21 | vals = opc.histogram(d, rh=0.0)
22 |
23 | # Plot the histogram response
24 | ax = opcsim.plots.histplot(vals, bins=opc.bins)
25 |
26 | # Overlay the distribution
27 | ax = opcsim.plots.pdfplot(d, ax=ax, fill=True, fill_kws=dict(alpha=.2),
28 | plot_kws=dict(linewidth=1))
29 |
30 | # Remove the top and right spines
31 | sns.despine()
32 |
33 |
--------------------------------------------------------------------------------
/docs/tools/nb_to_doc.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | """
3 | Convert empty IPython notebook to a sphinx doc page.
4 | """
5 | import sys
6 | from subprocess import check_call as sh
7 |
8 |
9 | def convert_nb(nbname):
10 |
11 | # Execute the notebook
12 | sh(["jupyter", "nbconvert", "--to", "notebook",
13 | "--execute", "--inplace", nbname + ".ipynb",
14 | "--ExecutePreprocessor.timeout=60"])
15 |
16 | # Convert to .rst for Sphinx
17 | sh(["jupyter", "nbconvert", "--to", "rst", nbname + ".ipynb",
18 | "--ExecutePreprocessor.timeout=60"])
19 |
20 | # Clear notebook output
21 | sh(["jupyter", "nbconvert", "--to", "notebook", "--inplace",
22 | "--ClearOutputPreprocessor.enabled=True", nbname + ".ipynb",
23 | "--ExecutePreprocessor.timeout=60"])
24 |
25 |
26 | if __name__ == "__main__":
27 |
28 | for nbname in sys.argv[1:]:
29 | convert_nb(nbname)
30 |
--------------------------------------------------------------------------------
/examples/hygroscopic_growth_pdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Visualize the Impact of Hygroscopic Growth
3 | ==========================================
4 | _thumb: .4, .4
5 | """
6 | import seaborn as sns
7 | import numpy as np
8 | import opcsim
9 | sns.set(style='ticks', font_scale=1.25)
10 |
11 | # build a distribution for a single mode of ammonium sulfate
12 | d = opcsim.AerosolDistribution("Ammonium Sulfate")
13 |
14 | # add a single mode
15 | d.add_mode(1e3, 0.8e-2, 1.5, refr=(1.521+0j), rho=1.77, kappa=0.53)
16 |
17 | # iterate over a few RH's and plot
18 | ax = None
19 | cpal = sns.color_palette("GnBu_d", 5)
20 |
21 | for i, rh in enumerate(np.linspace(5, 95, 5)):
22 | ax = opcsim.plots.pdfplot(d, rh=rh, plot_kws=dict(color=cpal[i]),
23 | ax=ax, weight='volume', label="RH={:.0f}%".format(rh))
24 |
25 | # Set the title and axes labels
26 | ax.set_title("Ammonium Sulfate", fontsize=18)
27 |
28 | # Add a legend
29 | ax.legend(loc='best')
30 |
31 | # Set the ylim
32 | ax.set_ylim(0, None)
33 |
34 | # Remove the top and right spines
35 | sns.despine()
36 |
--------------------------------------------------------------------------------
/examples/opc_with_dist_number_and_vol.py:
--------------------------------------------------------------------------------
1 | """
2 | 10-Bin OPC Response in Number and Volume Space
3 | ==============================================
4 | _thumb: .4, .4
5 | """
6 | import seaborn as sns
7 | import matplotlib.pyplot as plt
8 | import opcsim
9 | sns.set(style='ticks', font_scale=1.25)
10 |
11 | # Load the example urban distribution
12 | d = opcsim.load_distribution("Urban")
13 |
14 | # Build a 10-bin OPC
15 | opc = opcsim.OPC(wl=0.658, n_bins=10, dmin=0.3)
16 |
17 | # calibrate the OPC
18 | opc.calibrate("psl")
19 |
20 | # Set up the subplots
21 | fig, (ax1, ax2) = plt.subplots(2)
22 |
23 | # # Plot the histogram response
24 | ax1 = opcsim.plots.histplot(opc.histogram(d), bins=opc.bins, ax=ax1)
25 |
26 | # Overlay the distribution
27 | ax1 = opcsim.plots.pdfplot(d, ax=ax1)
28 |
29 | # # Repeat the above step but weight by volume
30 | ax2 = opcsim.plots.histplot(opc.histogram(d, weight='volume'), bins=opc.bins, ax=ax2)
31 |
32 | # Overlay the distribution
33 | ax2 = opcsim.plots.pdfplot(d, weight='volume', ax=ax2)
34 |
35 | # Remove axis labels
36 | ax1.set_xlabel("")
37 |
38 | # Remove the top and right spines
39 | sns.despine()
40 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "opcsim"
3 | version = "1.1.0a3"
4 | license = "MIT"
5 | description = "OPCSIM: simulating low-cost optical particle counters"
6 | authors = ["David H Hagan <david.hagan@quant-aq.com>", "Jesse H Kroll"]
7 | maintainers = ["David H Hagan <david.hagan@quant-aq.com>"]
8 | readme = "README.md"
9 | homepage = "https://dhhagan.github.io/opcsim/"
10 | repository = "https://github.com/dhhagan/opcsim"
11 | documentation = "https://dhhagan.github.io/opcsim/"
12 | keywords = ["aerosols", "atmospheric chemistry"]
13 | packages = [
14 | { include = "opcsim", from="src" },
15 | ]
16 |
17 | [tool.poetry.dependencies]
18 | python = ">=3.8,<3.12"
19 | pandas = ">=1.2"
20 | seaborn = ">=0.12"
21 | numpy = ">=1.20,<1.24.0 || >1.24.0"
22 | scipy = ">=1.7"
23 | matplotlib = ">=3.4,<3.6.1 || >=3.6.1"
24 |
25 | [tool.poetry.urls]
26 | "Bug Tracker" = "https://github.com/dhhagan/opcsim/issues"
27 |
28 | [tool.poetry.group.dev.dependencies]
29 | pandoc = "^2.4"
30 | sphinx-bootstrap-theme = "^0.8.1"
31 | pytest = "^8.1.1"
32 | pytest-cov = "^5.0.0"
33 | jupyter = "^1.0.0"
34 | sphinx = "<6.0.0"
35 | numpydoc = "^1.5"
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016-2020 David H Hagan and Jesse H Kroll
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/tests/test_mie.py:
--------------------------------------------------------------------------------
import unittest
import opcsim
import pandas as pd
import numpy as np

from opcsim.distributions import *
from opcsim.models import *


class SetupTestCase(unittest.TestCase):
    """Unit tests for the Mie-theory helpers in ``opcsim.mie``."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_coef_pi_tau(self):
        # Angular functions pi_n and tau_n at theta=30 deg, size parameter 0.5
        pi, tau = opcsim.mie.coef_pi_tau(theta=30.0, x=.5)
        self.assertAlmostEqual(pi[0], 1.)
        self.assertAlmostEqual(0.86, round(tau[0], 2), places=1)

    def test_coef_ab(self):
        # Mie coefficients a_n and b_n for a non-absorbing sphere (m=1.5)
        a, b = opcsim.mie.coef_ab(refr=complex(1.5, 0), x=.5)
        self.assertAlmostEqual(a[0].real, 6.06e-4, places=2)
        self.assertAlmostEqual(b[0].real, 7.5e-7, places=2)

    def test_s1s2(self):
        # Complex scattering amplitudes S1/S2 at a single angle
        s1, s2 = opcsim.mie.s1s2(refr=complex(1.5, 0), x=.5, theta=30.)
        self.assertAlmostEqual(s1.real, 9.1e-4, places=2)
        self.assertAlmostEqual(s2.real, 1.8e-4, places=2)

    def test_cscat(self):
        # Partial scattering cross-section integrated between two angles
        rv = opcsim.mie.cscat(dp=0.5, wl=0.658, refr=complex(1.9, .5),
                              theta1=32., theta2=88.)
        # FIX: the original `assertTrue(type(rv), float)` always passed —
        # the second argument of assertTrue is the failure *message*, not a
        # type to compare against. Assert the actual type instead.
        self.assertIsInstance(rv, float)
--------------------------------------------------------------------------------
/.github/workflows/test-and-verify.yml:
--------------------------------------------------------------------------------
1 | name: run and build
2 | on: [pull_request, push]
3 | jobs:
4 | tests:
5 | runs-on: ubuntu-latest
6 | strategy:
7 | matrix:
8 | python: ["3.8", "3.9", "3.10", "3.11"]
9 | name: Python ${{ matrix.python }} tests
10 | steps:
11 | - name: Checkout branch
12 | uses: actions/checkout@v2
13 |
14 | - name: Setup Python ${{ matrix.python }}
15 | uses: actions/setup-python@master
16 | with:
17 | python-version: ${{ matrix.python }}
18 |
19 | - name: Install poetry
20 | uses: snok/install-poetry@v1
21 | with:
22 | virtualenvs-create: true
23 |
24 | - name: Install dependencies
25 | run: poetry install --no-interaction
26 |
27 | - name: Run tests and generate coverage report
28 | run: |
29 | poetry run pytest tests/ --cov=./ --cov-report=xml
30 |
31 | - name: Upload coverage to Codecov
32 | uses: codecov/codecov-action@v1
33 | with:
34 | token: ${{ secrets.CODECOV_TOKEN }}
35 | file: ./coverage.xml
36 | flags: unittests
37 | name: codecov-umbrella
38 | fail_ci_if_error: true
39 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: docs/gh-pages
2 | on: [workflow_dispatch]
3 |
4 | jobs:
5 | build-docs:
6 | name: Build docs and push to gh-pages
7 | runs-on: ubuntu-latest
8 | steps:
9 | - name: Checkout branch
10 | uses: actions/checkout@v2
11 |
12 | - name: Setup python
13 | uses: actions/setup-python@master
14 | with:
15 | python-version: 3.8
16 |
17 | - name: Install poetry
18 | uses: snok/install-poetry@v1
19 | with:
20 | virtualenvs-create: true
21 |
22 | - name: Install dependencies
23 | run: poetry install --no-interaction
24 |
25 | - name: Install pandoc
26 | run: sudo apt-get install pandoc
27 |
28 | - name: build docs
29 | run: |
30 | source $(poetry env info --path)/activate
31 | cd docs
32 | make clean
33 | make tutorials
34 | make html
35 | cd ..
36 |
37 | - name: deploy to gh-pages
38 | uses: Cecilapp/GitHub-Pages-deploy@v3
39 | env:
40 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
41 | with:
42 | email: david.hagan@quant-aq.com
43 | build_dir: docs/_build/html
44 |
45 |
--------------------------------------------------------------------------------
/docs/_static/style.css:
--------------------------------------------------------------------------------
1 | body { color: #444444 !important; }
2 |
3 | h1 { font-size: 40px !important; }
4 | h2 { font-size: 32px !important; }
5 | h3 { font-size: 24px !important; }
6 | h4 { font-size: 18px !important; }
7 | h5 { font-size: 14px !important; }
8 | h6 { font-size: 10px !important; }
9 |
10 | footer a{
11 |
12 | color: #4c72b0 !important;
13 | }
14 | a.reference {
15 | color: #4c72b0 !important;
16 | }
17 |
18 | blockquote p {
19 | font-size: 14px !important;
20 | }
21 |
22 | blockquote {
23 | padding-top: 4px !important;
24 | padding-bottom: 4px !important;
25 | margin: 0 0 0px !important;
26 | }
27 |
28 | pre {
29 | background-color: #f6f6f9 !important;
30 | }
31 |
32 | code {
33 | color: #49759c !important;
34 | background-color: #ffffff !important;
35 | }
36 |
37 | code.descclassname {
38 | padding-right: 0px !important;
39 | }
40 |
41 | code.descname {
42 | padding-left: 0px !important;
43 | }
44 |
45 | dt:target, span.highlighted {
46 | background-color: #ffffff !important;
47 | }
48 |
49 | ul {
50 | padding-left: 20px !important;
51 | }
52 |
53 | ul.dropdown-menu {
54 | padding-left: 0px !important;
55 | }
56 |
57 | .alert-info {
58 | background-color: #adb8cb !important;
59 | border-color: #adb8cb !important;
60 | color: #2c3e50 !important;
61 | }
62 |
63 |
64 | /* From https://github.com/twbs/bootstrap/issues/1768 */
65 | *[id]:before {
66 | display: block;
67 | content: " ";
68 | margin-top: -75px;
69 | height: 75px;
70 | visibility: hidden;
71 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | SPHINXPROJ = opcsim
9 | SOURCEDIR = .
10 | BUILDDIR = _build
11 |
12 | # Internal Variables
13 | PAPEROPT_a4 = -D latex_paper_size=a4
14 | PAPEROPT_letter = -D latex_paper_size=letter
15 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
18 |
19 | # Put it first so that "make" without argument is like "make help".
20 | help:
21 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
22 |
23 | .PHONY: help clean html Makefile
24 |
25 | # Catch-all target: route all unknown targets to Sphinx using the new
26 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
27 | %: Makefile
28 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
29 |
30 | clean:
31 | @echo "Cleaning..."
32 | -rm -rf $(BUILDDIR)/*
33 | -rm -rf examples/*
34 | -rm -rf example_thumbs/*
35 | -rm -rf tutorial/*_files/
36 | -rm -rf tutorial/*.rst
37 | -rm -rf generated/*
38 |
39 | tutorials:
40 | make -C tutorial
41 |
42 | introduction:
43 | tools/nb_to_doc.py introduction
44 |
45 | notebooks:
46 | make -C tutorial
47 | tools/nb_to_doc.py introduction
48 |
49 | html:
50 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
53 |
54 |
55 | upload:
56 | @echo "Uploading to github@gh-pages"
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /docs/tutorial/garbage*
2 | /docker/examples/*.ipynb_checkpoints
3 | *.pytest_cache*
4 |
5 | README.PVT.md
6 | # deploy.sh
7 |
8 | # Byte-compiled / optimized / DLL files
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 | *.cache
13 | *.DS_Store
14 |
15 | # C extensions
16 | *.so
17 |
18 | # Distribution / packaging
19 | .Python
20 | env/
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *,cover
55 | .hypothesis/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | # docs/_build/
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # IPython Notebook
79 | .ipynb_checkpoints
80 |
81 | # pyenv
82 | .python-version
83 |
84 | # celery beat schedule file
85 | celerybeat-schedule
86 |
87 | # dotenv
88 | .env
89 |
90 | # virtualenv
91 | venv/
92 | ENV/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: build & release
2 | on:
3 | release:
4 | types: [published]
5 |
6 | jobs:
7 | build-n-publish:
8 | name: Build and publish to PyPI
9 | runs-on: ubuntu-20.04
10 | steps:
11 | - name: Checkout branch
12 | uses: actions/checkout@v2
13 |
14 | - name: Setup python
15 | uses: actions/setup-python@master
16 | with:
17 | python-version: 3.8
18 |
19 | - name: Install poetry
20 | uses: snok/install-poetry@v1
21 | with:
22 | virtualenvs-create: true
23 |
24 | - name: Install dependencies
25 | run: poetry install --no-interaction
26 |
27 | - name: Build and publish to PyPI
28 | run: |
29 | poetry build
30 | poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }}
31 |
32 | # publish-to-docker:
33 | # name: Build Docker image and push to DockerHub
34 | # runs-on: ubuntu-20.04
35 | # steps:
36 | # - name: Checkout branch
37 | # uses: actions/checkout@v2
38 |
39 | # - name: Build and deploy Docker image
40 | # uses: docker/build-push-action@v1
41 | # with:
42 | # username: ${{ secrets.DOCKER_USERNAME }}
43 | # password: ${{ secrets.DOCKER_PASSWORD }}
44 | # path: docker/
45 | # repository: dhhagan/opcsim
46 | # tags: latest
47 | # tag_with_ref: true
48 |
49 |
--------------------------------------------------------------------------------
/tests/test_plots.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import unittest
5 | import opcsim
6 | import pandas as pd
7 | import os
8 | import matplotlib as mpl
9 |
10 | from opcsim.distributions import *
11 | from opcsim.models import *
12 | from opcsim.plots import *
13 |
class SetupTestCase(unittest.TestCase):
    """Smoke tests for the plotting helpers in ``opcsim.plots``.

    Each test builds a distribution (and, where needed, a calibrated OPC),
    calls the plotting function, and asserts a matplotlib Axes is returned.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_histplot(self):
        # build and calibrate a simple 10-bin OPC, then plot its response
        opc = opcsim.OPC(wl=0.658, n_bins=10)
        opc.calibrate("psl")

        d = opcsim.load_distribution("Urban")

        ax = opcsim.plots.histplot(opc.evaluate(d), opc.bins)

        self.assertIsNotNone(ax)

    def test_pdfplot(self):
        d = opcsim.load_distribution("Urban")

        ax = opcsim.plots.pdfplot(d)

        self.assertIsNotNone(ax)

        # try with a different weight
        ax = opcsim.plots.pdfplot(d, weight='volume')

        self.assertIsNotNone(ax)

        # a non-distribution argument should raise
        with self.assertRaises(Exception):
            ax = opcsim.plots.pdfplot(1)

        # an invalid weight should raise
        with self.assertRaises(ValueError):
            ax = opcsim.plots.pdfplot(d, weight='mass2')

        # with_modes=True overlays the individual lognormal modes
        ax = opcsim.plots.pdfplot(d, with_modes=True)
        self.assertIsNotNone(ax)

    def test_pdf_plot_with_fill(self):
        d = opcsim.load_distribution("Urban")

        ax = opcsim.plots.pdfplot(d, fill=True)

        self.assertIsNotNone(ax)

        # try with a different weight
        ax = opcsim.plots.pdfplot(d, weight='volume')
        # FIX: this return value was previously unchecked
        self.assertIsNotNone(ax)

    def test_cdfplot(self):
        d = opcsim.load_distribution("Urban")

        ax = opcsim.plots.cdfplot(d)

        self.assertIsNotNone(ax)

        # try with a different weight
        ax = opcsim.plots.cdfplot(d, weight='volume')

        self.assertIsNotNone(ax)

        # a non-distribution argument should raise
        with self.assertRaises(Exception):
            ax = opcsim.plots.cdfplot(1)

        # an invalid weight should raise
        with self.assertRaises(ValueError):
            ax = opcsim.plots.cdfplot(d, weight='mass2')

    def test_calplot(self):
        opc = opcsim.OPC(wl=0.658, n_bins=10)

        opc.calibrate("psl", method="spline")

        ax = opcsim.plots.calplot(opc)
        # FIX: the original test made no assertion at all
        self.assertIsNotNone(ax)
--------------------------------------------------------------------------------
/docs/_static/copybutton.js:
--------------------------------------------------------------------------------
// originally taken from scikit-learn's Sphinx theme
$(document).ready(function() {
    /* Add a [>>>] button on the top-right corner of code samples to hide
     * the >>> and ... prompts and the output and thus make the code
     * copyable.
     * Note: This JS snippet was taken from the official python.org
     * documentation site.*/
    var div = $('.highlight-python .highlight,' +
                '.highlight-python3 .highlight,' +
                '.highlight-pycon .highlight')
    var pre = div.find('pre');

    // get the styles from the current theme
    pre.parent().parent().css('position', 'relative');
    var hide_text = 'Hide the prompts and output';
    var show_text = 'Show the prompts and output';
    var border_width = pre.css('border-top-width');
    var border_style = pre.css('border-top-style');
    var border_color = pre.css('border-top-color');
    var button_styles = {
        'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
        'border-color': border_color, 'border-style': border_style,
        'border-width': border_width, 'color': border_color, 'text-size': '75%',
        'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em'
    }

    // create and add the button to all the code blocks that contain >>>
    div.each(function(index) {
        var jthis = $(this);
        if (jthis.find('.gp').length > 0) {
            // FIX: the element markup had been stripped down to a bare '>>>'
            // string, which turns $() into a (failing) selector lookup instead
            // of element creation; restore the original <span> markup.
            var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
            button.css(button_styles)
            button.attr('title', hide_text);
            jthis.prepend(button);
        }
        // tracebacks (.gt) contain bare text elements that need to be
        // wrapped in a span to work with .nextUntil() (see later)
        jthis.find('pre:has(.gt)').contents().filter(function() {
            return ((this.nodeType == 3) && (this.data.trim().length > 0));
        }).wrap('<span>');  // FIX: wrap target markup had also been stripped
    });

    // define the behavior of the button when it's clicked
    // NOTE(review): $.fn.toggle(handler, handler) was removed in jQuery 1.9;
    // confirm the bundled jQuery is < 1.9 or port this to a click handler.
    $('.copybutton').toggle(
        function() {
            var button = $(this);
            button.parent().find('.go, .gp, .gt').hide();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
            button.css('text-decoration', 'line-through');
            button.attr('title', show_text);
        },
        function() {
            var button = $(this);
            button.parent().find('.go, .gp, .gt').show();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
            button.css('text-decoration', 'none');
            button.attr('title', hide_text);
        });
});
60 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import opcsim
3 | import numpy as np
4 | import random
5 |
class SetupTestCase(unittest.TestCase):
    """Unit tests for the helper functions in ``opcsim.utils``."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_make_bins(self):
        dmin = 0.5
        dmax = 10.
        n_bins = 2

        bins = opcsim.utils.make_bins(dmin, dmax, n_bins)

        # bins span exactly [dmin, dmax] with one row per bin
        self.assertEqual(bins.shape[0], n_bins)
        self.assertEqual(bins[0, 0], dmin)
        self.assertEqual(bins[-1, -1], dmax)

        ba = opcsim.utils.make_bins(dmin, dmax, n_bins, base=None)

        # with base=None, midpoints are arithmetic means and sit above the
        # (geometric) midpoints of the log-spaced bins
        self.assertEqual(np.mean([ba[0, 0], ba[0, 2]]), ba[0, 1])
        self.assertGreater(ba[0, 1], bins[0, 1])

    def test_midpoints(self):
        dmin = 0.5
        dmax = 17.

        arr = np.array([[dmin, 2.5], [2.5, 10], [10, dmax]])

        bins = opcsim.utils.midpoints(arr)

        self.assertEqual(bins[0, 0], dmin)
        self.assertEqual(bins[-1, -1], dmax)

        # a 4-column array is an invalid bin specification
        with self.assertRaises(ValueError):
            bins = opcsim.utils.midpoints(np.array([[1, 2, 3, 4]]))

    def test_k_eff(self):

        # a degenerate weighting picks out a single kappa
        k1 = opcsim.utils.k_eff(kappas=[0.5, 1.5], weights=[1, 0])
        self.assertEqual(k1, 0.5)

        # equal weights average the kappas
        k1 = opcsim.utils.k_eff(kappas=[0, 1], weights=[0.5, 0.5])
        self.assertEqual(k1, 0.5)

        # weights computed from diameters (volume-weighted): 2^3 / (1^3 + 2^3)
        k1 = opcsim.utils.k_eff(kappas=[0, 1], diams=[1., 2.])
        self.assertEqual(k1, 8./9.)

    def test_ri_eff(self):

        # a degenerate weighting picks out a single species
        r1 = opcsim.utils.ri_eff(
            species=[complex(1.5, 0), complex(0, 0)], weights=[1, 0])
        self.assertEqual(r1, complex(1.5, 0))

        # equal weights average the refractive indices
        r1 = opcsim.utils.ri_eff(
            species=[complex(1.5, 0), complex(2.0, 1.0)], weights=[0.5, 0.5])
        self.assertEqual(r1, complex(1.75, 0.5))

    def test_power_law_fit(self):
        # FIX: this test was never collected (missing `test_` prefix) and
        # compared sums on a plain Python list, which has no .sum() attribute.
        xs = np.linspace(1, 10, 10)
        a, b = 2, 0.1
        y = np.array([x * np.random.uniform(0.99, 1.01) for x in a * np.power(xs, b)])

        fitted = opcsim.utils.power_law_fit(xs, cscat=y)
        # the fit should roughly conserve the total signal; the 5% tolerance
        # accommodates the injected +/-1% noise
        self.assertAlmostEqual(y.sum(), fitted.sum(), delta=0.05 * y.sum())

        # test with weights
        fitted = opcsim.utils.power_law_fit(xs, cscat=y, fit_kws=dict(sigma=np.power(y, 10)))
        self.assertAlmostEqual(y.sum(), fitted.sum(), delta=0.05 * y.sum())

    def test_squash_dips(self):
        xs = np.array([1, 2, 3, 4, 3, 6, 7, 8, 9, 10])
        s = opcsim.utils.squash_dips(xs)

        # the squashed series must be monotonically non-decreasing
        self.assertFalse((np.diff(s) < 0).any())
88 |
89 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
.. _api_ref:
2 |
3 | .. currentmodule:: opcsim
4 |
5 | API Reference
6 | =============
7 |
8 | .. _distributions_api:
9 |
10 | Aerosol Distributions
11 | ---------------------
12 |
13 | .. rubric:: Aerosol Distribution Class
14 |
15 | .. autosummary::
16 | :toctree: generated/
17 |
18 | opcsim.AerosolDistribution
19 |
20 | .. rubric:: AerosolDistribution Methods
21 |
22 | .. autosummary::
23 | :toctree: generated/
24 |
25 | opcsim.AerosolDistribution.add_mode
26 | opcsim.AerosolDistribution.pdf
27 | opcsim.AerosolDistribution.cdf
28 |
29 | .. _models_api:
30 |
31 | Models
32 | ------
33 |
34 | OPC
35 | ^^^
36 |
37 | .. rubric:: OPC Class
38 |
39 | .. autosummary::
40 | :toctree: generated/
41 |
42 | opcsim.OPC
43 |
44 | .. rubric:: OPC Methods
45 |
46 | .. autosummary::
47 | :toctree: generated/
48 |
49 | opcsim.OPC.calibrate
50 | opcsim.OPC.evaluate
51 | opcsim.OPC.histogram
52 | opcsim.OPC.integrate
53 |
54 |
55 | Nephelometer
56 | ^^^^^^^^^^^^
57 |
58 | .. rubric:: Nephelometer Class
59 |
60 | .. autosummary::
61 | :toctree: generated/
62 |
63 | opcsim.Nephelometer
64 |
65 | .. rubric:: Nephelometer Methods
66 |
67 | .. autosummary::
68 | :toctree: generated/
69 |
70 | opcsim.Nephelometer.calibrate
71 | opcsim.Nephelometer.evaluate
72 |
73 |
74 | .. _plots_api:
75 |
76 | Visualization
77 | -------------
78 |
79 | .. autosummary::
80 | :toctree: generated/
81 |
82 | opcsim.plots.histplot
83 | opcsim.plots.pdfplot
84 | opcsim.plots.cdfplot
85 | opcsim.plots.calplot
86 |
87 |
.. _metrics_api:
89 |
90 | Metrics Functions
91 | -----------------
92 |
93 | .. autosummary::
94 | :toctree: generated/
95 |
96 | opcsim.metrics.compute_bin_assessment
97 |
98 | .. _utils_api:
99 |
100 | Utility Functions
101 | -----------------
102 |
103 | .. autosummary::
104 | :toctree: generated/
105 |
106 | opcsim.load_distribution
107 |
108 | opcsim.utils.make_bins
109 | opcsim.utils.midpoints
110 |
111 | opcsim.utils.k_kohler
112 | opcsim.utils.rho_eff
113 | opcsim.utils.k_eff
114 | opcsim.utils.ri_eff
115 |
116 |
117 | .. _mie_theory_api:
118 |
119 | Mie Theory Calculations
120 | -----------------------
121 |
122 | .. autosummary::
123 | :toctree: generated/
124 |
125 | opcsim.mie.coef_pi_tau
126 | opcsim.mie.coef_ab
127 | opcsim.mie.s1s2
128 | opcsim.mie.cscat
129 |
130 |
131 | .. _equations_api:
132 |
133 | Equations
134 | ---------
135 |
136 | .. autosummary::
137 | :toctree: generated/
138 |
139 | opcsim.equations.pdf.dn_ddp
140 | opcsim.equations.pdf.ds_ddp
141 | opcsim.equations.pdf.dv_ddp
142 |
143 | opcsim.equations.pdf.dn_dlndp
144 | opcsim.equations.pdf.ds_dlndp
145 | opcsim.equations.pdf.dv_dlndp
146 |
147 | opcsim.equations.pdf.dn_dlogdp
148 | opcsim.equations.pdf.ds_dlogdp
149 | opcsim.equations.pdf.dv_dlogdp
150 |
151 | opcsim.equations.cdf.nt
152 | opcsim.equations.cdf.st
153 | opcsim.equations.cdf.vt
154 |
155 |
156 |
157 |
--------------------------------------------------------------------------------
/src/opcsim/metrics.py:
--------------------------------------------------------------------------------
1 | """Contains the scoring algorithms used in the model.
2 | """
3 |
4 | import numpy as np
5 | import pandas as pd
6 | from .models import OPC
7 | from .utils import k_kohler, ri_eff
8 | from .mie import cscat
9 |
10 |
def compute_bin_assessment(opc, refr, kappa, rh_values=[0., 35., 95.]):
    """Assess the ability of an OPC to assign particles to their correct bin.

    For every OPC bin and every relative humidity in `rh_values`, the dry
    bin-edge diameters are grown hygroscopically (kappa-Kohler), the
    scattering cross-section of the grown particle (using a water-adjusted
    effective refractive index) is computed, and the OPC's calibration
    function is used to determine which bin the grown particle would
    actually be assigned to.

    Parameters
    ----------
    opc: opcsim.OPC
    refr: complex
        The complex refractive index of the material to assess
    kappa: float
        The kappa value to use for hygroscopic growth
    rh_values: list-like
        A list of relative humidities to assess the OPC at.

    Returns
    -------
    rv: pd.DataFrame
        One row per (rh, bin) pair with columns: ``bin_true`` (index of the
        dry bin), ``bin_lo``/``bin_hi`` (bin assigned to the grown lower/upper
        edge, or -99 when the calibration matched no bin), ``refr_eff`` (the
        effective wet refractive index), ``rh``, and
        ``cscat_lo_ratio``/``cscat_hi_ratio`` (wet-to-dry scattering ratios).

    Examples
    --------

    """
    assert(isinstance(opc, OPC)), "opc must be an instance of the opcsim.OPC class"

    # accumulate one dict per (rh, bin); converted to a DataFrame at the end
    rv = list()

    for rh in rh_values:
        for i, _bins in enumerate(opc.bins):
            # grow the dry bin-edge diameters via kappa-Kohler theory
            wet_diam_lo = k_kohler(diam_dry=_bins[0], kappa=kappa, rh=rh)
            wet_diam_hi = k_kohler(diam_dry=_bins[-1], kappa=kappa, rh=rh)

            # dry-volume fraction of the grown particle (D_dry^3 / D_wet^3).
            # NOTE(review): computed from the lower bin edge only, but the
            # resulting effective RI is applied to BOTH edges — confirm intended.
            pct_dry = (_bins[0]**3) / (wet_diam_lo**3)

            # volume-weighted effective RI mixing dry material with water (n = 1.333)
            ri = ri_eff(species=[refr, complex(1.333, 0)], weights=[pct_dry, 1-pct_dry])

            # expected (dry-particle) scattering cross-sections at the bin edges
            cscat_lo_exp = cscat(
                dp=_bins[0], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])
            cscat_hi_exp = cscat(
                dp=_bins[-1], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])

            # actual (grown-particle) scattering cross-sections
            cscat_lo = cscat(
                dp=wet_diam_lo, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])
            cscat_hi = cscat(
                dp=wet_diam_hi, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])

            # map the wet cross-sections back to bins via the OPC's calibration
            bin_assign_lo = opc.calibration_function(values=[cscat_lo])
            bin_assign_hi = opc.calibration_function(values=[cscat_hi])

            # -99 is the sentinel for "no bin matched"
            rv.append({
                "bin_true": i,
                "bin_lo": bin_assign_lo[0] if len(bin_assign_lo) > 0 else -99,
                "bin_hi": bin_assign_hi[0] if len(bin_assign_hi) > 0 else -99,
                "refr_eff": ri,
                "rh": rh,
                "cscat_hi_ratio": cscat_hi / cscat_hi_exp,
                "cscat_lo_ratio": cscat_lo / cscat_lo_exp,
            })

    rv = pd.DataFrame(rv)

    # force datatypes to be correct
    rv["bin_true"] = rv["bin_true"].astype(int)
    rv["bin_lo"] = rv["bin_lo"].astype(int)
    rv["bin_hi"] = rv["bin_hi"].astype(int)
    rv["rh"] = rv["rh"].astype(float)
    rv["cscat_hi_ratio"] = rv["cscat_hi_ratio"].astype(float)
    rv["cscat_lo_ratio"] = rv["cscat_lo_ratio"].astype(float)

    return rv
87 |
--------------------------------------------------------------------------------
/docs/installing.rst:
--------------------------------------------------------------------------------
1 | .. _installing:
2 |
3 | ================================
4 | Installation and Getting Started
5 | ================================
6 |
7 | If you are familiar with Python and installing python libraries, feel free to skip
8 | below to the installation section.
9 |
10 |
11 | ``opcsim`` requires python3.5+ to be installed on your computer. If
12 | you do not have an existing installation, I would recommend checking out the
`Anaconda Python distribution <https://www.anaconda.com/download/>`_. If you
encounter issues installing Anaconda, check out `StackOverflow <https://stackoverflow.com/>`_ or a simple
Google search.
16 |
17 | Once you have python installed, you can go ahead and install the
18 | ``opcsim`` python library.
19 |
20 |
21 | **Dependencies**
22 |
23 | + Python3.5+
+ `numpy <https://numpy.org/>`_
+ `scipy <https://scipy.org/>`_
+ `pandas <https://pandas.pydata.org/>`_
+ `matplotlib <https://matplotlib.org/>`_
+ `seaborn <https://seaborn.pydata.org/>`_
29 |
30 |
31 | ---------------------
32 | Installing ``opcsim``
33 | ---------------------
34 |
35 | There are several ways to install the package from source depending on your
36 | local development environment and familiarity with installing python libraries. If you
37 | are new to python (and don't have `git` installed), install using the `Install
38 | from Source` option.
39 |
40 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
41 | Install directly from pypi (best option)
42 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43 |
44 | Installing directly from pypi using pip is the easiest way forward and will
45 | automagically install any dependencies that aren't already installed.
46 |
47 | .. code-block:: shell
48 |
49 | $ pip install opcsim [--upgrade]
50 |
51 |
52 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
53 | Install from GitHub using `pip`
54 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55 |
56 | Note: must have git installed
57 |
58 | .. code-block:: shell
59 |
60 | $ pip install --upgrade git+git://github.com/dhhagan/opcsim.git
61 |
62 |
63 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64 | Clone Repository and Install from GitHub
65 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
66 |
67 | If you plan on contributing to ``opcsim``, you will probably want to fork the
68 | library, and then clone it to your local environment. You can then install from
69 | source directly.
70 |
71 | .. code-block:: shell
72 |
73 | $ git clone https://github.com/dhhagan/opcsim.git
74 | $ cd opcsim/
75 | $ python3 setup.py install
76 |
77 |
78 | ~~~~~~~~~~~~~~~~~~~
79 | Install from Source
80 | ~~~~~~~~~~~~~~~~~~~
81 |
82 | .. code-block:: shell
83 |
84 | $ wget https://github.com/dhhagan/opcsim/archive/master.zip
85 | $ unzip master.zip
86 | $ cd opcsim-master/
87 | $ python3 setup.py install
88 |
89 |
90 | -------
91 | Testing
92 | -------
93 |
94 | Testing is automated using `unittests`. To run the unittests with coverage
95 | reporting, run the following commands from the main directory:
96 |
97 | .. code-block:: shell
98 |
99 | $ coverage run --source opcsim setup.py test
100 | $ coverage report -m
101 |
102 | Unittests are also run automatically through continuous integration via TravisCI
upon every pull request and code coverage is tracked online with `Code Climate <https://codeclimate.com/>`_.
104 |
105 | -------------------------------
106 | Reporting Bugs and other Issues
107 | -------------------------------
108 |
Please report any bugs or issues you find through the `GitHub issues tracker
<https://github.com/dhhagan/opcsim/issues>`_. Please provide as much
111 | information as possible that will make it easier to solve/fix the problem. Useful
112 | information to include would be the operating system, python version, and version
113 | of the ``opcsim`` library as well as any dependencies. If there are issues with
114 | graphics, screenshots are very helpful!
115 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://badge.fury.io/py/opcsim)
2 | [](https://zenodo.org/badge/latestdoi/72774719)
3 |
4 | [](https://github.com/dhhagan/opcsim/blob/master/LICENSE)
5 | 
6 | [](https://codecov.io/gh/dhhagan/opcsim)
7 | 
8 | 
9 |
10 | # opcsim
11 |
12 | opcsim is a Python library for simulating low-cost Optical Particle Sensors (both Optical Particle Counters and Nephelometers) and
13 | their response to various aerosol distributions.
14 |
15 | ## Citation
16 |
17 | The paper for this library can be found on the AMT website [here](https://amt.copernicus.org/articles/13/6343/2020/amt-13-6343-2020.html). It should be cited as:
18 |
19 | Hagan, D.H. and Kroll, J.H.: Assessing the accuracy of low-cost optical particle sensors using a physics-based approach, **Atmos. Meas. Tech.**, 13, 6343-6355, https://doi.org/10.5194/amt-13-6343-2020, 2020.
20 |
21 | ## Documentation
22 |
23 | Full online documentation can be found [here][1].
24 |
25 | The docs include a [tutorial][2], an [example gallery][3], and an [API Reference][4].
26 |
27 | In addition, documentation can be built locally for development purposes. To do so, please check out the complete details in the *contributing to opcsim* section of the documentation.
28 |
29 | ## Docker
30 |
31 | If you are familiar with Docker, there is a Docker image available to get up and running with OPCSIM with ease. To get started
32 | with an ephemeral container with a jupyter lab interface, navigate to your preferred working directory and execute:
33 |
34 | ```sh
$ docker run --rm -p 8888:8888 -e JUPYTER_ENABLE_LAB=yes -v "$PWD":/home/jovyan/work dhhagan/opcsim:latest
36 | ```
37 |
38 | Once executed, you should see the url with token in your terminal that will allow you to bring up the jupyter lab instance.
39 |
40 |
41 | ## Dependencies
42 |
43 | Opcsim is supported for python3.6.1+.
44 |
45 | Installation requires [scipy][5], [numpy][6], [pandas][7], [matplotlib][8],
46 | and [seaborn][9].
47 |
48 |
49 | ## Installation
50 |
51 | To install (or upgrade to) the latest stable release:
52 |
53 | ```sh
54 |
55 | $ pip install opcsim [--upgrade]
56 | ```
57 |
58 | To install the development version directly from GitHub using pip:
59 |
60 | ```sh
61 |
62 | $ pip install git+https://github.com/dhhagan/opcsim.git
63 | ```
64 |
65 | In addition, you can either clone the repository and install from source or download/unzip the zip file and install from source using poetry:
66 |
67 | ```sh
68 |
69 | $ git clone https://github.com/dhhagan/opcsim.git
70 | $ cd /opcsim
71 | $ poetry install
72 | ```
73 |
74 | ## Testing
75 |
76 | All tests are automagically run via GitHub actions and Travis.ci. For results of these tests, please click on the link in the above travis badge. In addition, you can run tests locally using poetry.
77 |
78 | To run tests locally:
79 |
80 | ```sh
81 |
82 | $ poetry run pytest tests
83 | ```
84 |
85 |
86 | ## Development
87 |
88 | **opcsim** development takes place on GitHub. Issues and bugs can be submitted and tracked via the [GitHub Issue Tracker][10] for this repository. As of `v0.5.0`, *opcsim* uses [poetry][11] for versioning and managing dependencies and releases.
89 |
90 |
91 | [1]: https://dhhagan.github.io/opcsim/
92 | [2]: https://dhhagan.github.io/opcsim/tutorial.html
93 | [3]: https://dhhagan.github.io/opcsim/examples/index.html
94 | [4]: https://dhhagan.github.io/opcsim/api.html
95 | [5]: https://www.scipy.org/
96 | [6]: http://www.numpy.org/
97 | [7]: http://pandas.pydata.org/
98 | [8]: http://matplotlib.org/
99 | [9]: https://seaborn.pydata.org/
100 | [10]: https://github.com/dhhagan/opcsim/issues
101 | [11]: https://python-poetry.org/
102 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | .. _contributing:
2 |
3 | Contributing to *opcsim*
4 | ========================
5 |
All development, issue-tracking, and conversation surrounding `opcsim` takes place on GitHub. If you aren't familiar with GitHub or, more generally Git, we recommend you check out some tutorials that will catch you up to speed. Some of our favorites are `Codecademy <https://www.codecademy.com/learn/learn-git>`_ and the `GitHub Guide <https://guides.github.com/>`_.
7 |
8 | Language Syntax
9 | ---------------
10 |
11 | The documentation for ``opcsim`` is built with `Sphinx `_ and `sphinx-autodoc `_ using reStructuredText (reST). There are three primary parts that are all pulled together to build the final documentation; they include the API documentation which is pulled directly from the source-code docstrings, the tutorials which are written as jupyter notebooks, and the gallery/examples which are individual python files. More details on each can be found below.
12 |
13 | Editing the Main (Overview) Text (.rst files)
14 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
15 |
16 | There are several files that contain some text/information that is not pulled from docstrings (including this file) which can be found in the ``/docs`` directory. They are:
17 |
18 | 1. ``introduction.rst``
19 | 2. ``installing.rst``
20 | 3. ``contributing.rst``
21 | 4. ``api.rst``
22 | 5. ``tutorial.rst``
23 |
24 | You can edit the individual files, paying close attention to some that bring in text through the automatic generation process (take a look at ``api.rst`` for example). For the most part, you can simply edit these files using the sphinx rst syntax.
25 |
26 | Editing docstrings
27 | ^^^^^^^^^^^^^^^^^^
28 |
29 | Each function and/or class within the source code has an accompanying docstring. Using sphinx-autodoc, we pull these docstrings and format them in a human-readable way to generate nice, clean documentation! Thus, it is imperative to use the correct formatting of the docstrings, so autodoc can do its job.
30 |
31 | Docstrings are written using reST, which allows for very rich documentation, including things like equations, images, and examples. For examples, simply check out any of the files in the source code, or click on "source" on one of the documentation pages. For a primer on how and why we document python code, check out `this post `_.
32 |
33 | Editing and/or Adding Tutorials
34 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
35 |
36 | Tutorials are written as jupyter notebooks and are located in the ``/docs/tutorial`` folder. If editing an existing tutorial, one can simply fire up the jupyter notebook as one typically would (``$ jupyter notebook``) from terminal/command line, edit, and save. Changes will take effect once the changes are pushed to the master branch of the repository.
37 |
If interested in adding a new tutorial, you can create a new notebook file in the same directory and then edit/save as you did above. It is important to note that the first/top cell in the notebook needs to follow a special format, which can be copied and pasted from an existing tutorial notebook file by clicking within the cell, and copying the raw text. Once you are finished with the new tutorial, you must add the name of the file to the ``Makefile`` within the tutorial directory so that it is built when ``$ make notebooks`` is executed. To do so, simply add a new line below the last entry in the file with form ``tools/nb_to_doc.py <name-of-notebook>.ipynb``.
39 |
40 |
41 | Editing and/or Adding Examples
42 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
43 |
44 | Each example is a single python executable file that will output a figure, along with the formatted and highlighted code snippet. They are located within the ``/docs/examples`` folder and you can easily add new ones by simply adding a new file. We recommend you start by copying another file (``$ cp old-file.py new-file.py``) and then editing it. Make sure to save the file with a name that is fairly descriptive! Other notes:
45 |
46 | * Where it says "_thumb" in the docstring at the top of the file, the following two numbers describe the center point of the thumbnail image that will be created. You can play around with these numbers (always between 0-1) to see what looks best for your example.
47 |
48 |
49 | Building a Local Version of the Documentation
50 | ---------------------------------------------
51 |
52 | Once you have edited the documentation you are working on, you can render it by building a local copy. To do so, follow these instructions:
53 |
54 | First, navigate to the *docs* directory::
55 |
56 | $ cd /docs
57 |
58 | Next, clean out the old files::
59 |
60 | $ make clean
61 |
62 | Next, build the notebooks::
63 |
64 | $ make notebooks
65 |
66 | Finally, build the html::
67 |
68 | $ make html
69 |
70 |
71 | Once these steps are completed, you can open up the *index.html* file located in ``/docs/_build/html/`` in your favorite browser. Once you are satisfied with the local version, feel free to send a Pull Request to the repository and it will be integrated into the official documentation.
72 |
--------------------------------------------------------------------------------
/tests/test_distributions.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import opcsim
3 | import numpy as np
4 | import random
5 |
class SetupTestCase(unittest.TestCase):
    """Unit tests for opcsim.AerosolDistribution construction, pdf, and cdf."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_add_modes(self):
        # a fresh distribution accepts a lognormal mode (N, GM, GSD, label)
        tmp = opcsim.AerosolDistribution()

        n, gm, gsd = random.random(), random.random(), random.random()

        tmp.add_mode(n, gm, gsd, "label")

        self.assertEqual(len(tmp.modes), 1)

    def test_get_modes(self):
        tmp = opcsim.AerosolDistribution()

        label = 'mode1'
        n, gm, gsd = random.random(), random.random(), random.random()

        tmp.add_mode(n, gm, gsd, label)

        # Get the mode by label and check its stored number concentration
        m = tmp._get_mode(label)

        self.assertEqual(m['N'], n)

        # Getting a non-existing mode returns None rather than raising
        ne = tmp._get_mode('none')

        self.assertEqual(ne, None)

    def test_pdf(self):
        # Use a sample Urban distribution
        d = opcsim.load_distribution("Urban")

        dp = 0.1

        dps = np.linspace(0.01, 1., 100)

        # Test to make sure the evaluation works for an individual diameter
        # Number-Weighted
        pdf = d.pdf(dp, base=None)

        # Evaluate an array (result must be elementwise, same length)
        pdf_arr = d.pdf(dps)

        # Evaluate for a single mode (must not exceed the full distribution)
        pdf_1 = d.pdf(dp, mode="Mode I")

        self.assertGreaterEqual(pdf, 0.0)
        self.assertGreaterEqual(pdf, pdf_1)
        self.assertEqual(len(pdf_arr), len(dps))

        # Check the log and log10 weighted versions
        pdf_log = d.pdf(dp, base='log')
        pdf_log10 = d.pdf(dp, base='log10')

        # Conversion identities: dN/dlnDp = Dp*dN/dDp and
        # dN/dlogDp = ln(10)*Dp*dN/dDp (checked to 3 decimal places)
        self.assertEqual(round(pdf * dp, 3), round(pdf_log, 3))
        self.assertEqual(round(pdf * dp * np.log(10), 3), round(pdf_log10, 3))

        # Various Weights for Number-Weighted (smoke tests only)
        pdf_s = d.pdf(dp, weight='surface', base=None)
        pdf_v = d.pdf(dp, weight='volume', base=None)
        pdf_m = d.pdf(dp, weight='mass', base=None)

        # Various Weights for log-weighted
        pdf_s = d.pdf(dp, weight='surface', base='log')
        pdf_v = d.pdf(dp, weight='volume', base='log')
        pdf_m = d.pdf(dp, weight='mass', base='log')

        # Various Weights for log10-weighted
        pdf_s = d.pdf(dp, weight='surface', base='log10')
        pdf_v = d.pdf(dp, weight='volume', base='log10')
        pdf_m = d.pdf(dp, weight='mass', base='log10')

        # Invalid weight/base arguments must raise
        with self.assertRaises(Exception):
            d.pdf(dp, weight='error')

        with self.assertRaises(Exception):
            d.pdf(dp, base='error')

    def test_cdf_number(self):
        # Use a sample Urban distribution
        d = opcsim.load_distribution("Urban")

        cdf_1 = d.cdf(dmax=1.0)
        cdf_25 = d.cdf(dmax=2.5)
        cdf_diff = d.cdf(dmin=1.0, dmax=2.5)

        # CDF is non-decreasing and additive over adjacent intervals
        self.assertGreaterEqual(cdf_25, cdf_1)
        self.assertEqual(round(cdf_diff, 3), round(cdf_25 - cdf_1, 3))

        with self.assertRaises(Exception):
            d.cdf(0.1, weight='error')

    def test_cdf_surface(self):
        d = opcsim.load_distribution("Urban")

        # Test the surface area weighted versions
        cdf_sa = d.cdf(dmax=1.0, weight='surface')
        cdf_sa2 = d.cdf(dmax=2.5, weight='surface')
        cdf_sa_diff = d.cdf(dmin=1.0, dmax=2.5, weight='surface')

        self.assertGreaterEqual(cdf_sa2, cdf_sa)
        self.assertEqual(round(cdf_sa_diff, 3), round(cdf_sa2 - cdf_sa, 3))

    def test_cdf_volume(self):
        d = opcsim.load_distribution("Urban")

        # Test the volume weighted versions
        cdf_v = d.cdf(dmax=1.0, weight='volume')
        cdf_v2 = d.cdf(dmax=2.5, weight='volume')
        cdf_v_diff = d.cdf(dmin=1.0, dmax=2.5, weight='volume')

        self.assertGreaterEqual(cdf_v2, cdf_v)
        self.assertEqual(round(cdf_v_diff, 3), round(cdf_v2 - cdf_v, 3))

        # dmin > dmax is an invalid interval
        with self.assertRaises(ValueError):
            d.cdf(dmin=2.5, dmax=1.)

        # Test a single mode
        cdf = d.cdf(dmax=2.5, mode='Mode I')

        self.assertIsNotNone(cdf)

    def test_cdf_mass(self):
        d = opcsim.load_distribution("Urban")

        # Test the mass weighted versions
        cdf_m = d.cdf(dmax=1.0, weight='mass')
        cdf_m2 = d.cdf(dmax=2.5, weight='mass')
        cdf_m_diff = d.cdf(dmin=1.0, dmax=2.5, weight='mass')

        self.assertGreaterEqual(cdf_m2, cdf_m)
        self.assertEqual(round(cdf_m_diff, 3), round(cdf_m2 - cdf_m, 3))

        # Sanity-check the PM1 mass loading is in a physically plausible range
        # for an urban distribution (units per the library's conventions)
        self.assertGreaterEqual(cdf_m, 3.)
        self.assertLessEqual(cdf_m, 20.)

    def test_bad_distribution(self):
        # Unknown sample-distribution names must raise
        with self.assertRaises(ValueError):
            d = opcsim.load_distribution("None")

    def test_repr(self):
        d = opcsim.load_distribution("Urban")

        self.assertTrue(repr(d) == "AerosolDistribution: urban")
158 |
--------------------------------------------------------------------------------
/src/opcsim/equations/cdf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import math
5 | import numpy as np
6 | from scipy.special import erf, erfc
7 |
def nt(n, gm, gsd, dmin=None, dmax=10.):
    """Total number of particles between two diameters for a lognormal mode.

    Implements the lognormal CDF, equation 8.39 of Seinfeld and Pandis:

    .. math::

        N_t(D_p)=∫_{D_{min}}^{D_{max}}n_N(D_p^*)dD^*_p=\\frac{N_t}{2}+\\frac{N_t}{2}*erf\Big(\\frac{ln(D_p/D_{pg})}{\sqrt{2} lnσ_g}\Big) \\;\\;(cm^{-3})

    Parameters
    ----------
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default value is 0 :math:`\mu m`.
    dmax : float
        The maximum particle diameter in microns. Default value is 10 :math:`\mu m`.

    Returns
    -------
    N | float
        Returns the total number of particles between dmin and dmax in units of
        [:math:`particles*cm^{-3}`]

    See Also
    --------
    opcsim.equations.pdf.dn_ddp
    opcsim.equations.pdf.dn_dlndp
    opcsim.equations.pdf.dn_dlogdp

    Examples
    --------

    Evaluate the number of particles in a simple distribution between 0 and
    2.5 :math:`\mu m`:

    >>> d = opcsim.AerosolDistribution()
    >>> d.add_mode(1e3, 0.1, 1.5, "mode 1")
    >>> n = opcsim.equations.cdf.nt(1e3, 0.1, 1.5, dmax=2.5)

    """
    denom = np.sqrt(2) * np.log(gsd)

    def _below(diam):
        # cumulative number of particles with diameter < diam (S&P eq. 8.39)
        return (n / 2.) * (1 + erf(np.log(diam / gm) / denom))

    total = _below(dmax)

    # subtract everything below the lower bound, if one was given
    if dmin is not None and dmin > 0.0:
        total -= _below(dmin)

    return total
62 |
def st(n, gm, gsd, dmin=None, dmax=10.):
    """Evaluate the total surface area of the particles between two diameters.

    Uses the analytical CDF of the lognormal distribution, equation 8.11
    of Seinfeld and Pandis.

    Mathematically, it is represented as:

    .. math::

        S_t=π∫_{-∞}^{∞}D_p^2n_N^e(ln D_p)d lnD_p \\;\\;(\mu m^2 cm^{-3})

    Parameters
    ----------
    n : float
        Total number of particles in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of :math:`\mu m`.
    gsd : float
        Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default value is 0 :math:`\mu m`.
    dmax : float
        The maximum particle diameter in microns. Default value is 10 :math:`\mu m`.

    Returns
    -------
    Surface Area | float
        The total surface area of particles between :math:`D_{min}`
        and :math:`D_{max}` in units of :math:`\mu m^2 \\; cm^{-3}`

    See Also
    --------
    opcsim.equations.pdf.ds_ddp
    opcsim.equations.pdf.ds_dlndp
    opcsim.equations.pdf.ds_dlogdp

    Examples
    --------

    Integrate a sample distribution between 0 and 2.5 microns:

    >>> d = opcsim.AerosolDistribution()
    >>> d.add_mode(1e3, 0.1, 1.5, "mode 1")
    >>> n = opcsim.equations.cdf.st(1e3, 0.1, 1.5, dmax=2.5)

    """
    ln_gsd = np.log(gsd)

    # half the total surface area of the (unbounded) distribution
    prefactor = (np.pi/2.)*n*(gm**2) * np.exp(2*(ln_gsd** 2))

    # erfc argument from S&P eq. 8.11, evaluated at the upper bound
    arg = (np.sqrt(2) * ln_gsd) - (np.log(dmax/gm) / (np.sqrt(2) * ln_gsd))

    surface = prefactor * erfc(arg)

    # with no (or a non-positive) lower bound, we already have the answer
    if dmin is None or dmin <= 0.0:
        return surface

    # otherwise, subtract the surface area contributed below dmin
    return surface - st(n, gm, gsd, dmax=dmin)
117 |
def vt(n, gm, gsd, dmin=None, dmax=10.):
    """Evaluate the total volume of the particles between two diameters.

    Uses the analytical CDF of the lognormal distribution, equation 8.12
    of Seinfeld and Pandis.

    Mathematically, it is represented as:

    .. math::

        V_t=\\frac{π}{6}∫_{-∞}^{∞}D_p^3n_N^e(ln D_p)d lnD_p \\;\\;(\mu m^3 cm^{-3})

    Parameters
    ----------
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of :math:`\mu m`.
    gsd : float
        Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default value is 0 :math:`\mu m`.
    dmax : float
        The maximum particle diameter in microns. Default value is 10 :math:`\mu m`.

    Returns
    -------
    Volume | float
        The total volume of particles between :math:`D_{min}` and
        :math:`D_{max}` in units of :math:`\mu m^3 cm^{-3}`

    See Also
    --------
    opcsim.equations.pdf.dv_ddp
    opcsim.equations.pdf.dv_dlndp
    opcsim.equations.pdf.dv_dlogdp

    Examples
    --------

    Integrate a sample distribution between 0 and 2.5 microns:

    >>> d = opcsim.AerosolDistribution()
    >>> d.add_mode(1e3, 0.1, 1.5, "mode 1")
    >>> n = opcsim.equations.cdf.vt(1e3, 0.1, 1.5, dmax=2.5)

    """
    ln_gsd = np.log(gsd)

    # half the total volume of the (unbounded) distribution
    prefactor = (np.pi/12.)*n*(gm**3) * np.exp(9./2.*(ln_gsd**2))

    # erfc argument from S&P eq. 8.12, evaluated at the upper bound
    arg = (1.5*np.sqrt(2) * ln_gsd) - (np.log(dmax/gm) / (np.sqrt(2) * ln_gsd))

    volume = prefactor * erfc(arg)

    # with no (or a non-positive) lower bound, we already have the answer
    if dmin is None or dmin <= 0.0:
        return volume

    # otherwise, subtract the volume contributed below dmin
    return volume - vt(n, gm, gsd, dmax=dmin)
173 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. raw:: html
2 |
3 |
21 |
22 | opcsim: Simulating Optical Particle Sensors
23 | ============================================
24 |
25 | .. raw:: html
26 |
27 |
28 |
29 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | `opcsim` is a Python library for simulating the response of low-cost optical
73 | particle sensors (OPS's) to various aerosol distributions to better understand
74 | the sources of error and limitations of these devices. It provides an
75 | easy-to-use API for building simple OPC and Nephelometer models, as well as
76 | for modeling and visualizing aerosol distributions.
77 |
78 | For more information, please read our paper available in `Atmospheric Measurement Techniques `_.
79 |
80 |
81 | To view the source code or report a bug, please visit the `github repository
82 | `_.
83 |
84 |
85 | .. raw:: html
86 |
87 |
88 |
89 |
90 |
91 |
92 |
Contents
93 |
94 |
95 |
96 |
97 | .. toctree::
98 | :maxdepth: 1
99 |
100 | Installation
101 | Contributing
102 | API reference
103 | Tutorial
104 | Example gallery
105 |
106 | .. raw:: html
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
Features
116 |
117 |
118 |
119 |
120 | * Simulate optical particle counters
121 | * Simulate nephelometers
122 | * Understand how particle sensors react to changes in aerosol size and composition
123 | * Easily visualize aerosol distributions
124 |
125 | .. raw:: html
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 | .. raw:: html
137 |
138 |
139 |
140 |
Abstract
141 |
142 |
143 | Low-cost sensors for measuring particulate matter (PM) offer the ability to understand
144 | human exposure to air pollution at spatiotemporal scales that have previously been
145 | impractical. However, such low-cost PM sensors tend to be poorly characterized, and
146 | their measurements of mass concentration can be subject to considerable error. Recent
147 | studies have investigated how individual factors can contribute to this error, but these
148 | studies are largely based on empirical comparisons and generally do not examine the
149 | role of multiple factors simultaneously. Here, we present a new physics-based framework
150 | and open-source software package (opcsim) for evaluating the ability of low-cost optical
151 | particle sensors (optical particle counters and nephelometers) to accurately characterize
152 | the size distribution and/or mass loading of aerosol particles. This framework, which uses
153 | Mie Theory to calculate the response of a given sensor to a given particle population, is
154 | used to estimate the relative error in mass loading for different sensor types, given
155 | variations in relative humidity, aerosol optical properties, and the underlying particle size
156 | distribution. Results indicate that such error, which can be substantial, is dependent on
157 | the sensor technology (nephelometer vs. optical particle counter), the specific
158 | parameters of the individual sensor, and differences between the aerosol used to
159 | calibrate the sensor and the aerosol being measured. We conclude with a summary of
160 | likely sources of error for different sensor types, environmental conditions, and particle
161 | classes, and offer general recommendations for choice of calibrant under different
162 | measurement scenarios.
163 |
164 |
165 |
166 |
167 | To cite this work, please use the following:
168 |
169 |
170 | Hagan, D.H. and Kroll, Jesse H.: Assessing the accuracy of low-cost optical particle sensors using a
171 | physics-based approach, Atmos. Meas. Tech. Disc., submitted, 2020.
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
--------------------------------------------------------------------------------
/docs/tutorial/nephelometer.rst:
--------------------------------------------------------------------------------
1 | .. _nephelometer_tutorial:
2 |
3 |
4 | Using OPCSIM to Simulate a Nephelometer
5 | =======================================
6 |
7 | This section of the tutorial will walk you through how we model
8 | Nephelometers, how you can build/model a Nephelometer, and how we can
9 | evaluate Nephelometers across a wide range of conditions using this
10 | tool.
11 |
12 | .. code:: ipython3
13 |
14 | # Make imports
15 | import opcsim
16 | import numpy as np
17 | import matplotlib.pyplot as plt
18 | import matplotlib.ticker as mticks
19 | import seaborn as sns
20 |
21 | %matplotlib inline
22 |
23 | # turn off warnings temporarily
24 | import warnings
25 | warnings.simplefilter('ignore')
26 |
27 | # Let's set some default seaborn settings
28 | sns.set(context='notebook', style='ticks', palette='dark', font_scale=1.75,
29 | rc={'figure.figsize': (12,6), **opcsim.plots.rc_log})
30 |
31 | Nephelometer Representation
32 | ---------------------------
33 |
34 | In OPCSIM, we define a Nephelometer using two parameters: the wavelength
35 | of light used in the device and its viewing angle. Unlike photometers
36 | and some optical particle counters, most low-cost commercial
37 | nephelometers gather light across as wide a range of angles as possible.
38 | This minimizes some of the uncertainty associated with the Mie resonance
39 | and allows manufacturers to use cheap photo-detectors while still
40 | gathering enough signal to distinguish from noise.
41 |
42 | To build a Nephelometer, simply initialize using the
43 | ``opcsim.Nephelometer`` class:
44 |
45 | .. code:: ipython3
46 |
47 | # init a nephelometer with a 658 nm laser, gathering light from between 7-173 degrees
48 | neph = opcsim.Nephelometer(wl=0.658, theta=(7., 173))
49 |
50 | neph
51 |
52 |
53 |
54 |
55 | .. parsed-literal::
56 |
57 |
58 |
59 |
60 |
61 | Calibration
62 | -----------
63 |
64 | Nephelometers gather the total scattered light from many angles across
65 | an entire aerosol distribution. Typically, users of low-cost
66 | nephelometers co-locate their device with a reference device of higher
67 | (or known) quality and simply compare the output signal from the
68 | nephelometer to the integrated mass value (i.e. :math:`PM_1`,
69 | :math:`PM_{2.5}`, or :math:`PM_{10}`) from the reference device. To keep
70 | things as simple and realistic as possible, we follow this approach.
71 |
72 | To calibrate a nephelometer in OPCSIM, you provide an aerosol
73 | distribution to the ``calibrate`` method - the actual mass values for
74 | :math:`PM_1`, :math:`PM_{2.5}`, and :math:`PM_{10}` are calculated
75 | exactly and the total scattered light is computed as well. The ratio
76 | between the total scattered light and each of the mass loadings are
77 | stored as calibration factors and are used again when evaluating
78 | previously unseen distributions.
79 |
80 | To calibrate our nephelometer above to a synthetic distribution of
81 | Ammonium Sulfate:
82 |
83 | .. code:: ipython3
84 |
85 | d1 = opcsim.AerosolDistribution("AmmSulf")
86 |
87 | d1.add_mode(n=1e4, gm=125e-3, gsd=1.5, refr=complex(1.521, 0), kappa=0.53, rho=1.77)
88 |
89 | # calibrate the nephelometer at 0% RH
90 | neph.calibrate(d1, rh=0.)
91 |
92 | We can explore the calibration factors that were just determined - the
93 | units are a bit arbitrary, since we don’t consider the intensity/power
94 | of the laser as we assume it is constant. Thus, these units are
95 | something like :math:`cm^2/(\mu g/ m^3)`
96 |
97 | .. code:: ipython3
98 |
99 | neph.pm1_ratio
100 |
101 |
102 |
103 |
104 | .. parsed-literal::
105 |
106 | 1.1744058563022682e-08
107 |
108 |
109 |
110 | Similarly, we get ratios for :math:`PM_{2.5}` and :math:`PM_{10}`:
111 |
112 | .. code:: ipython3
113 |
114 | neph.pm25_ratio
115 |
116 |
117 |
118 |
119 | .. parsed-literal::
120 |
121 | 1.1743521379654175e-08
122 |
123 |
124 |
125 | .. code:: ipython3
126 |
127 | neph.pm10_ratio
128 |
129 |
130 |
131 |
132 | .. parsed-literal::
133 |
134 | 1.1743521375694496e-08
135 |
136 |
137 |
138 | Evaluating a Nephelometer for New Aerosol Distributions
139 | -------------------------------------------------------
140 |
141 | The entire point of this tool is to be able to simulate what would
142 | happen under different circumstances. To do so, we use the ``evaluate``
143 | method, which takes an AerosolDistribution as an argument (as well as an
144 | optional relative humidity) and returns the total scattered light,
145 | :math:`PM_1`, :math:`PM_{2.5}`, and :math:`PM_{10}`.
146 |
147 | .. code:: ipython3
148 |
149 | # evaluate the same distribution we used to calibrate
150 | neph.evaluate(d1, rh=0.)
151 |
152 |
153 |
154 |
155 | .. parsed-literal::
156 |
157 | (4.454460852883902e-07, 37.92948433439533, 37.93121934108556, 37.9312193538752)
158 |
159 |
160 |
161 | .. code:: ipython3
162 |
163 | # evaluate the same distribution we used to calibrate, but at a higher RH
164 | neph.evaluate(d1, rh=85.0)
165 |
166 |
167 |
168 |
169 | .. parsed-literal::
170 |
171 | (2.0830803505720828e-06,
172 | 177.37312355805727,
173 | 177.38123712884368,
174 | 177.38123718865307)
175 |
176 |
177 |
178 | What if we went ahead and tried to evaluate on a totally unseen
179 | distribution? Let’s go ahead and evaluate on an **urban** distribution:
180 |
181 | .. code:: ipython3
182 |
183 | d2 = opcsim.load_distribution("urban")
184 |
185 | d2
186 |
187 |
188 |
189 |
190 | .. parsed-literal::
191 |
192 | AerosolDistribution: urban
193 |
194 |
195 |
196 | First, let’s determine the actual :math:`PM_1`, :math:`PM_{2.5}`, and
197 | :math:`PM_{10}` loadings for this distribution:
198 |
199 | .. code:: ipython3
200 |
201 | print ("PM1 = {:.2f} ug/m3".format(d2.cdf(dmin=0., dmax=1., weight='mass', rho=1.65)))
202 | print ("PM2.5 = {:.2f} ug/m3".format(d2.cdf(dmin=0., dmax=2.5, weight='mass', rho=1.65)))
203 | print ("PM10 = {:.2f} ug/m3".format(d2.cdf(dmin=0., dmax=10., weight='mass', rho=1.65)))
204 |
205 |
206 | .. parsed-literal::
207 |
208 | PM1 = 8.97 ug/m3
209 | PM2.5 = 9.00 ug/m3
210 | PM10 = 9.00 ug/m3
211 |
212 |
213 | Next, let’s evaluate the Nephelometer:
214 |
215 | .. code:: ipython3
216 |
217 | neph.evaluate(d2, rh=0.)
218 |
219 |
220 |
221 |
222 | .. parsed-literal::
223 |
224 | (1.7166105544467467e-07,
225 | 14.616842595213745,
226 | 14.61751121278495,
227 | 14.617511217713679)
228 |
229 |
230 |
231 | So, we’re off by about a factor of 2, in part due to differences in
232 | assumed density and in part due to the fact the urban distribution
233 | scatters less light per unit mass than our calibration aerosol.
234 |
235 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # opcsim documentation build configuration file, created by
5 | # sphinx-quickstart on Sun Mar 5 20:29:37 2017.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #
20 | import sys, os
21 | import sphinx_bootstrap_theme
22 | import matplotlib as mpl
23 |
24 | mpl.use("Agg")
25 |
26 | # -- General configuration ------------------------------------------------
27 |
28 | # If your documentation needs a minimal Sphinx version, state it here.
29 | #
30 | # needs_sphinx = '1.0'
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
35 | sys.path.insert(0, os.path.abspath('sphinxext'))
36 | extensions = [
37 | 'sphinx.ext.autodoc',
38 | 'sphinx.ext.doctest',
39 | 'sphinx.ext.coverage',
40 | 'sphinx.ext.mathjax',
41 | 'sphinx.ext.autosummary',
42 | 'sphinx.ext.intersphinx',
43 | 'matplotlib.sphinxext.plot_directive',
44 | 'gallery_generator',
45 | 'numpydoc'
46 | ]
47 |
48 | autosummary_generate = True
49 | numpydoc_show_class_members = False
50 |
51 | plot_include_source = True
52 | plot_formats = [("png", 90)]
53 | plot_html_show_formats = False
54 | plot_html_show_source_link = False
55 |
56 | # Add any paths that contain templates here, relative to this directory.
57 | templates_path = ['_templates']
58 |
59 | # The suffix(es) of source filenames.
60 | # You can specify multiple suffix as a list of string:
61 | #
62 | # source_suffix = ['.rst', '.md']
63 | source_suffix = '.rst'
64 |
65 | # The master toctree document.
66 | master_doc = 'index'
67 |
68 | # General information about the project.
69 | project = 'opcsim'
70 |
71 | import time
72 |
73 | author = 'David H Hagan and Jesse H Kroll'
74 | copyright = '2016-{}, {}'.format(time.strftime("%Y"), author)
75 |
76 |
77 | # The version info for the project you're documenting, acts as replacement for
78 | # |version| and |release|, also used in various other places throughout the
79 | # built documents.
80 | #
81 | # The short X.Y version.
82 | sys.path.insert(0, os.path.abspath(os.path.pardir))
83 | import opcsim
84 |
85 | version = opcsim.__version__
86 | # The full version, including alpha/beta/rc tags.
87 | release = opcsim.__version__
88 |
89 | # The language for content autogenerated by Sphinx. Refer to documentation
90 | # for a list of supported languages.
91 | #
92 | # This is also used if you do content translation via gettext catalogs.
93 | # Usually you set "language" from the command line for these cases.
94 | language = None
95 |
96 | # List of patterns, relative to source directory, that match files and
97 | # directories to ignore when looking for source files.
98 | # This patterns also effect to html_static_path and html_extra_path
99 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
100 |
101 | # The name of the Pygments (syntax highlighting) style to use.
102 | pygments_style = 'sphinx'
103 |
104 | # If true, `todo` and `todoList` produce output, else they produce nothing.
105 | todo_include_todos = False
106 |
107 |
108 | mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"
109 |
110 | # -- Options for HTML output ----------------------------------------------
111 |
112 | # The theme to use for HTML and HTML Help pages. See the documentation for
113 | # a list of builtin themes.
114 | #
115 | html_theme = 'bootstrap'
116 |
117 | html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
118 | # Theme options are theme-specific and customize the look and feel of a theme
119 | # further. For a list of options available for each theme, see the
120 | # documentation.
121 | #
122 |
123 | html_theme_options = {
124 | 'source_link_position': 'footer',
125 | 'bootswatch_theme': 'paper',
126 | 'navbar_sidebarrel': False,
127 | 'bootstrap_version': "3",
128 | 'navbar_links': [
129 | ("API", "api"),
130 | ("Tutorial", "tutorial"),
131 | ("Gallery", "examples/index")
132 | ]
133 | }
134 |
135 | # Add any paths that contain custom static files (such as style sheets) here,
136 | # relative to this directory. They are copied after the builtin static files,
137 | # so a file named "default.css" will overwrite the builtin "default.css".
138 | html_static_path = ['_static', 'example_thumbs']
139 |
140 |
141 | # -- Options for HTMLHelp output ------------------------------------------
142 |
143 | # Output file base name for HTML help builder.
144 | htmlhelp_basename = 'opcsimdoc'
145 |
146 |
147 | # -- Options for LaTeX output ---------------------------------------------
148 |
149 | latex_elements = {
150 | # The paper size ('letterpaper' or 'a4paper').
151 | #
152 | # 'papersize': 'letterpaper',
153 |
154 | # The font size ('10pt', '11pt' or '12pt').
155 | #
156 | # 'pointsize': '10pt',
157 |
158 | # Additional stuff for the LaTeX preamble.
159 | #
160 | # 'preamble': '',
161 |
162 | # Latex figure (float) alignment
163 | #
164 | # 'figure_align': 'htbp',
165 | }
166 |
167 | # Grouping the document tree into LaTeX files. List of tuples
168 | # (source start file, target name, title,
169 | # author, documentclass [howto, manual, or own class]).
170 | latex_documents = [
171 | (master_doc, 'opcsim.tex', 'opcsim Documentation',
172 | 'David H Hagan & Jesse H Kroll', 'manual'),
173 | ]
174 |
175 |
176 | # -- Options for manual page output ---------------------------------------
177 |
178 | # One entry per manual page. List of tuples
179 | # (source start file, name, description, authors, manual section).
180 | man_pages = [
181 | (master_doc, 'opcsim', 'opcsim Documentation',
182 | [author], 1)
183 | ]
184 |
185 | # -- Options for Texinfo output -------------------------------------------
186 |
187 | # Grouping the document tree into Texinfo files. List of tuples
188 | # (source start file, target name, title, author,
189 | # dir menu entry, description, category)
190 | texinfo_documents = [
191 | (master_doc, 'opcsim', 'opcsim Documentation',
192 | author, 'opcsim', 'One line description of project.',
193 | 'Miscellaneous'),
194 | ]
195 |
196 | mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"
197 |
198 | # Example configuration for intersphinx: refer to the Python standard library.
199 | intersphinx_mapping = {'numpy': ('http://docs.scipy.org/doc/numpy/', None),
200 | 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
201 | 'matplotlib': ('http://matplotlib.org/', None),
202 | 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
203 | 'statsmodels': ('http://www.statsmodels.org/stable/', None)}
204 |
def setup(app):
    # Sphinx extension hook: register the custom static assets with the
    # HTML builder.
    # NOTE(review): add_javascript/add_stylesheet were deprecated in Sphinx
    # 1.8 (renamed to add_js_file/add_css_file) and removed in Sphinx 4 —
    # confirm the pinned Sphinx version before upgrading.
    app.add_javascript('copybutton.js')
    app.add_stylesheet('style.css')
208 |
--------------------------------------------------------------------------------
/src/opcsim/mie.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import scipy.special as bessel
4 |
5 |
def coef_pi_tau(theta, x):
    """Compute the angle-dependent functions (pi and tau) via upward recurrence.

    Per Bohren and Huffman (1983) eq. 4.47, the angle-dependent functions
    :math:`\pi_n` and :math:`\\tau_n` satisfy the recurrence relations:

    .. math::

        \pi_n=\\frac{2n-1}{n-1}\mu\pi_{n-1}-\\frac{n}{n-1}\pi_{n-2}

    .. math::

        \\tau_n=n\mu\pi_n - (n+1)\pi_{n-1}

    where :math:`\mu = cos(\\theta)`. The arrays are 0-indexed, so entry
    ``k`` holds the function of order :math:`n = k + 1`.

    Parameters
    ----------
    theta: float
        The scattering angle in degrees.
    x: float
        The dimensionless size parameter, used to determine the number of elements to compute.

    Returns
    -------
    `\pi_n`, `\\tau_n`: np.ndarray of floats

    """
    mu = np.cos(math.radians(theta))

    # number of terms to keep, per the standard Mie series truncation rule
    n_terms = int(np.round(2 + x + 4*np.power(x, 1./3.)))

    pi = np.zeros(n_terms)
    tau = np.zeros(n_terms)

    # seed the recurrence with the analytical n=1 and n=2 values
    pi[0], pi[1] = 1, 3*mu
    tau[0] = mu
    tau[1] = 3*np.cos(2*np.arccos(mu))

    # upward recurrence; array index k corresponds to order n = k + 1
    for k in range(2, n_terms):
        pi[k] = (mu*pi[k-1]*(2*k+1) - (pi[k-2]*(k+1)))/k
        tau[k] = (k+1)*mu*pi[k] - (k+2)*pi[k-1]

    return pi, tau
55 |
56 |
def coef_ab(refr, x):
    """Compute the external field coefficients using the logarithmic derivative.

    Bohren and Huffman (1983) equations 4.88 and 4.89 show how to compute :math:`a_n` and :math:`b_n`
    using the logarithmic derivative (Aden, 1951).

    .. math::

        a_n=\\frac{[D_n(mx)/m + n/x]\psi_n(x) - \psi_{n-1}(x)}{[D_n(mx)/m + n/x]\\xi_n(x) - \\xi_{n-1}(x)},

    .. math::

        b_n=\\frac{[mD_n(mx) + n/x]\psi_n(x) - \psi_{n-1}(x)}{[mD_n(mx) + n/x]\\xi_n(x) - \\xi_{n-1}(x)}

    where the logarithmic derivative is computed as:

    .. math::

        D_{n-1}=\\frac{n}{\\rho}-\\frac{1}{D_n + n/\\rho}


    Parameters
    ----------
    refr: complex
        The complex refractive index of the material.
    x: float
        The dimensionless size parameter.

    Returns
    -------
    `a_n`, `b_n`: np.ndarray of floats
        The external field coefficients.

    """
    # number of series terms to keep (standard Mie truncation criterion)
    nc = int(np.round(2 + x + 4*np.power(x, 1/3)))

    # calculate z, the product of the RI and dimensionless size parameter
    z = refr*x

    # start the downward recurrence for D_n well beyond nc so that the
    # arbitrary starting value (zero) damps out before the terms we keep
    nmx = int(np.round(max(nc, np.abs(z)) + 16))

    # orders n = 1..nc; half-integer orders relate cylindrical Bessel
    # functions to the spherical ones used by the Riccati-Bessel functions
    n = np.arange(1, nc + 1)
    nu = n + 0.5

    # use scipy's bessel functions to compute the Riccati-Bessel functions
    sqx = np.sqrt(0.5 * np.pi * x)

    # psi_n(x) and the back-shifted sequence psi_{n-1}(x) (psi_0 = sin x)
    px = sqx * bessel.jv(nu, x)
    p1x = np.append(np.sin(x), px[0:nc-1])

    # chi_n(x) and chi_{n-1}(x) (chi_0 = cos x)
    chx = -sqx*bessel.yv(nu, x)
    ch1x = np.append(np.cos(x), chx[0:nc-1])

    # xi_n(x) = psi_n(x) - i*chi_n(x), plus the shifted xi_{n-1}(x)
    gsx = px - (0 + 1j)*chx
    gs1x = p1x - (0 + 1j)*ch1x

    # Bohren & Huffman eq. 4.89: downward recurrence for the logarithmic
    # derivative D_n(z); dn[k] holds D_k, seeded with D_{nmx-1} = 0
    dn = np.zeros(nmx, dtype=np.complex128)

    for i in range(nmx-1, 1, -1):
        dn[i-1] = (i/z) - (1 / (dn[i] + i/z))

    # drop terms beyond nc (keep D_1 .. D_nc; dn[0] is never used)
    d = dn[1:nc+1]

    # bracketed factors of B&H eqs. 4.88: [D_n/m + n/x] and [m*D_n + n/x]
    da = d/refr + n/x
    db = refr*d + n/x

    an = (da*px - p1x) / (da*gsx - gs1x)
    bn = (db*px - p1x) / (db*gsx - gs1x)

    return an, bn
132 |
def s1s2(refr, x, theta):
    """Compute the complex scattering amplitudes S1 and S2 at angle theta.

    Bohren and Huffman (1983) list the equations for computing the complex
    scattering amplitudes as Eq. 4.74:

    .. math::

        S_1=\sum_{n=1}^{n_c}\\frac{2n+1}{n(n+1)}(a_n\pi_n + b_n\\tau_n),

    .. math::

        S_2=\sum_{n=1}^{n_c}\\frac{2n+1}{n(n+1)}(a_n\\tau_n + b_n\pi_n)

    Parameters
    ----------
    refr: complex
        The complex refractive index.
    x: float
        The dimensionless size parameter.
    theta: float
        The scattering angle in degrees.

    Returns
    -------
    `S_1`, `S_2`: complex
        The complex scattering amplitudes.

    """
    # number of series terms (matches the truncation used by coef_ab)
    nc = int(np.round(2 + x + 4*np.power(x, 1/3)))

    # external field coefficients and angle-dependent functions
    an, bn = coef_ab(refr=refr, x=x)
    pi, tau = coef_pi_tau(theta=theta, x=x)

    # per-order series weight (2n+1)/(n(n+1)) for n = 1..nc
    orders = np.arange(1, nc+1)
    weight = (2*orders + 1) / (orders*(orders+1))

    # weighted sums over all retained orders
    S1 = np.sum(weight * (an*pi + bn*tau))
    S2 = np.sum(weight * (an*tau + bn*pi))

    return S1, S2
182 |
183 |
def cscat(dp, wl, refr, theta1, theta2, nsteps=100, **kwargs):
    """Compute the scattering cross section between two angles according to Jaenicke and Hanusch (1993).

    Following the lead of Jaenicke and Hanusch (1993), we can compute the scattering cross section for
    a given viewing angle [:math:`\Theta_1` - :math:`\Theta_2`] as:

    .. math::

        C_{sca}=\\frac{\lambda^2}{4\pi} \int_{\Theta_1}^{\Theta_2}[i_1(\Theta) + i_2(\Theta)]sin\Theta d\Theta

    where :math:`\lambda` is the incident wavelength of light and :math:`i_1` and :math:`i_2` are the intensity
    distribution functions, calculated as:

    .. math::

        i_1(\Theta)=\mid S_1(\Theta) \mid^2,

    .. math::

        i_2(\Theta)=\mid S_2(\Theta) \mid^2


    The integral is calculated step-wise using the numpy.trapz function.


    Parameters
    ----------
    dp: float
        The particle diameter in microns.
    wl: float
        The wavelength of incident light in microns.
    refr: complex
        The complex refractive index of the material.
    theta1: float
        The angle at which to begin the integration.
    theta2: float
        The angle at which to end the integration.
    nsteps: int
        The number of steps in theta to use in performing the step-wise integration.

    Returns
    -------
    `C_{scat}`: float
        The scattering cross-section.

    """
    # evaluation angles, kept in degrees for s1s2 and in radians for trapz
    theta_deg = np.linspace(theta1, theta2, nsteps)
    theta_rad = np.radians(theta_deg)

    # dimensionless size parameter
    x = dp*np.pi / wl

    # evaluate the integrand [i1 + i2]*sin(theta) at every angle
    integrand = np.zeros(nsteps)

    for idx, ang in enumerate(theta_deg):
        s1, s2 = s1s2(refr=refr, x=x, theta=ang)

        # intensity distribution functions i1 = |S1|^2, i2 = |S2|^2
        i1 = (s1 * np.conjugate(s1)).real
        i2 = (s2 * np.conjugate(s2)).real

        integrand[idx] = (i1 + i2) * np.sin(math.radians(ang))

    # trapezoidal integration over theta (radians); the wavelength is
    # converted to cm so units match common literature values
    return ((wl*1e-4)**2 / (4*np.pi)) * np.trapz(integrand, theta_rad)
256 |
--------------------------------------------------------------------------------
/tests/test_models.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import opcsim
3 | import pandas as pd
4 | import numpy as np
5 |
6 | from opcsim.distributions import *
7 | from opcsim.models import *
8 |
9 | class SetupTestCase(unittest.TestCase):
    def setUp(self):
        # no shared fixtures are needed; each test constructs its own OPC
        pass

    def tearDown(self):
        # nothing to clean up
        pass
15 |
16 | def test_opc_model(self):
17 | n_bins = 10
18 | dmin = 0.3
19 | dmax = 10.0
20 | wl = 0.658
21 | theta = (32.0, 88.0)
22 |
23 | # create an opc based on the above
24 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
25 | dmax=dmax, theta=theta)
26 |
27 | self.assertIsInstance(opc, opcsim.OPC)
28 | self.assertEqual(opc.n_bins, 10)
29 | self.assertEqual(opc.dmin, dmin)
30 | self.assertEqual(opc.dmax, dmax)
31 | self.assertEqual(opc.wl, wl)
32 | self.assertEqual(opc.theta, theta)
33 | self.assertEqual(opc.label, None)
34 | self.assertEqual(opc.calibration_function, None)
35 | self.assertEqual(len(opc.bin_boundaries), n_bins+1)
36 | self.assertTrue((opc.dlogdp == np.log10(
37 | opc.bins[:, -1]) - np.log10(opc.bins[:, 0])).all())
38 | self.assertTrue((opc.ddp == opc.bins[:, -1] - opc.bins[:, 0]).all())
39 |
40 | # test for bins manually set as list of values (boundaries)
41 | bins = [0.38, 0.54, 0.78, 1.05, 1.5, 2.5, 3.5, 5.]
42 | opc = opcsim.OPC(wl=wl, bins=bins, theta=theta)
43 |
44 | # test for bins manually set as 3xn array
45 | self.assertIsInstance(opc, opcsim.OPC)
46 | self.assertEqual(opc.n_bins, len(bins)-1)
47 | self.assertEqual(opc.dmin, bins[0])
48 | self.assertEqual(opc.dmax, bins[-1])
49 | self.assertEqual(opc.wl, wl)
50 | self.assertEqual(opc.theta, theta)
51 | self.assertEqual(opc.bins.shape[0], opc.n_bins)
52 |
53 | def test_opc_calibrate(self):
54 | n_bins = 10
55 | dmin = 0.3
56 | dmax = 10.0
57 | wl = 0.658
58 | theta = (32.0, 88.0)
59 |
60 | # create an opc based on the above
61 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
62 | dmax=dmax, theta=theta)
63 |
64 | # calibrate the OPC for PSL's
65 | self.assertIsNone(opc.calibration_function)
66 | opc.calibrate(material="psl")
67 | self.assertIsNotNone(opc.calibration_function)
68 |
69 | # test the calibration for a spec'd material
70 | # create an opc based on the above
71 |
72 | # calibrate the OPC for a random RI
73 | opc.calibrate(material=complex(1.5, 0))
74 | self.assertIsNotNone(opc.calibration_function)
75 |
76 | # try fitting the data this time...
77 | # create an opc based on the above
78 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
79 | dmax=dmax, theta=theta)
80 |
81 | # calibrate the OPC for PSL's
82 | self.assertIsNone(opc.calibration_function)
83 | opc.calibrate(material="psl", method="linear")
84 | self.assertIsNotNone(opc.calibration_function)
85 |
86 | # try for an integer
87 | opc.calibrate(material=1.5)
88 | self.assertIsNotNone(opc.calibration_function)
89 |
90 | # calibrate the OPC for PSL's
91 | opc.calibrate(material="psl", method="piecewise")
92 | self.assertIsNotNone(opc.calibration_function)
93 |
94 | # try for a bad string
95 | with self.assertRaises(ValueError):
96 | opc.calibrate(material="random_thing")
97 |
98 | # non-existent calibration method
99 | with self.assertRaises(ValueError):
100 | opc.calibrate("psl", method="wrong")
101 |
102 | def test_opc_evaluate(self):
103 | n_bins = 10
104 | dmin = 0.3
105 | dmax = 10.0
106 | wl = 0.658
107 | theta = (32.0, 88.0)
108 |
109 | # build a distribution
110 | d = opcsim.AerosolDistribution()
111 | d.add_mode(n=1e3, gm=0.4, gsd=1.5, rho=1.6, refr=complex(1.5, 0))
112 |
113 | # create an opc based on the above
114 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
115 | dmax=dmax, theta=theta)
116 |
117 | with self.assertRaises(Exception):
118 | h = opc.histogram(d)
119 |
120 | # calibrate the OPC for PSL's
121 | self.assertIsNone(opc.calibration_function)
122 | opc.calibrate(material="psl")
123 | self.assertIsNotNone(opc.calibration_function)
124 |
125 | # test the histogram
126 | h = opc.evaluate(d)
127 |
128 | def test_opc_histogram(self):
129 | n_bins = 10
130 | dmin = 0.3
131 | dmax = 10.0
132 | wl = 0.658
133 | theta = (32.0, 88.0)
134 |
135 | # build a distribution
136 | d = opcsim.AerosolDistribution()
137 | d.add_mode(n=1e3, gm=0.4, gsd=1.5, rho=1.6, refr=complex(1.5, 0))
138 |
139 | # create an opc based on the above
140 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
141 | dmax=dmax, theta=theta)
142 |
143 | opc.calibrate(material="psl")
144 | self.assertIsNotNone(opc.calibration_function)
145 |
146 | # create the histogram
147 | h = opc.histogram(d, weight='number')
148 |
149 | self.assertEqual(len(h), opc.n_bins)
150 |
151 | # calculate surface area, volume, and mass distributions
152 | h = opc.histogram(d, weight='surface')
153 | h = opc.histogram(d, weight='volume')
154 | h = opc.histogram(d, weight='mass', rho=1.65)
155 |
156 | # test dN/dDp
157 | h = opc.histogram(d, weight="number", base=None)
158 |
159 | # force error
160 | with self.assertRaises(ValueError):
161 | h = opc.histogram(d, weight="unknown")
162 |
163 | def test_opc_integrate(self):
164 | n_bins = 10
165 | dmin = 0.3
166 | dmax = 10.0
167 | wl = 0.658
168 | theta = (32.0, 88.0)
169 |
170 | # build a distribution
171 | d = opcsim.AerosolDistribution()
172 | d.add_mode(n=1e3, gm=0.4, gsd=1.5, rho=1.6, refr=complex(1.5, 0))
173 |
174 | # create an opc based on the above
175 | opc = opcsim.OPC(wl=wl, n_bins=n_bins, dmin=dmin,
176 | dmax=dmax, theta=theta)
177 |
178 | opc.calibrate(material="psl")
179 | self.assertIsNotNone(opc.calibration_function)
180 |
181 | # integrate in number space
182 | n1 = opc.integrate(d, dmin=0, dmax=1., weight="number")
183 | n2 = opc.integrate(d, dmin=0., dmax=2.5, weight="number")
184 | n3 = opc.integrate(d, dmin=0., dmax=10., weight="number")
185 |
186 | self.assertGreaterEqual(n2, n1)
187 | self.assertGreaterEqual(n3, n1)
188 | self.assertGreaterEqual(n3, n2)
189 |
190 | # force a value error
191 | with self.assertRaises(ValueError):
192 | n1 = opc.integrate(d, dmin=0., dmax=1., weight="bad-weight")
193 |
194 | # test in-between bounds
195 | n1 = opc.integrate(d, dmin=.6, dmax=1., weight="number")
196 | n2 = opc.integrate(d, dmin=.61, dmax=.62, weight="number")
197 |
198 | self.assertGreater(n1, n2)
199 |
200 | # integrate surface area, volume, and mass distributions
201 | n1 = opc.integrate(d, dmin=0., dmax=1., weight="surface")
202 | n2 = opc.integrate(d, dmin=0., dmax=1., weight="volume")
203 | n3 = opc.integrate(d, dmin=0., dmax=1., weight="mass", rho=1.5)
204 |
205 | def test_nephelometer(self):
206 | neph = opcsim.Nephelometer(wl=0.658, theta=(7., 173.))
207 |
208 | self.assertIsNone(neph.pm1_ratio)
209 | self.assertIsNone(neph.pm25_ratio)
210 | self.assertIsNone(neph.pm10_ratio)
211 |
212 | # calibrate the device to a distribution
213 | d = opcsim.AerosolDistribution()
214 | d.add_mode(n=1000, gm=.2, gsd=1.5, kappa=0.53, refr=complex(1.592, 0), rho=1.77)
215 |
216 | neph.calibrate(d, rh=0.)
217 |
218 | self.assertIsNotNone(neph.pm1_ratio)
219 | self.assertIsNotNone(neph.pm25_ratio)
220 | self.assertIsNotNone(neph.pm10_ratio)
221 |
222 | # test evaluate functionality
223 | vals = neph.evaluate(d, rh=0.)
224 | vals2 = neph.evaluate(d, rh=95.)
225 |
226 | self.assertGreaterEqual(vals2[1], vals[1])
227 | self.assertGreaterEqual(vals2[2], vals[2])
228 | self.assertGreaterEqual(vals2[3], vals[3])
229 |
230 |
--------------------------------------------------------------------------------
/src/opcsim/utils.py:
--------------------------------------------------------------------------------
1 | """Utility functions
2 | """
3 | import numpy as np
4 | from scipy.optimize import curve_fit
5 | import math
6 |
7 | # assuming STP
8 | RHO_H20 = 0.997
9 |
10 |
def make_bins(dmin, dmax, n_bins, base='log'):
    """Returns a 3xn array of bin diameters.

    Build a 3xn array of bin diameters that can then be fed directly into
    the OPC class. Bin boundaries are always spaced equally on a log10
    basis between `dmin` and `dmax`; `base` only controls how each bin's
    midpoint is computed (see `midpoints`).

    Parameters
    ----------

    dmin : float
        Minimum particle diameter in microns
    dmax : float
        Maximum particle diameter in microns
    n_bins : int
        Number of bins
    base : {'log' | 'none' | None}

    Returns
    -------
    bins : array
        Returns a 3xn_bins array in the format of [left edge, midpoint, right edge]


    Examples
    --------

    Build a set of bins for an OPC with dmin=0.5, dmax=2.5, and 3 bins:

    >>> bins = opcsim.utils.make_bins(0.5, 2.5, 3)

    """
    if base is None:
        base = 'none'

    # reciprocal of the log10 width of a single bin
    mult = 1. / ((np.log10(dmax) - np.log10(dmin)) / n_bins)

    edges = np.zeros((n_bins, 2)) * np.nan

    # pin the outermost boundaries
    edges[0, 0] = dmin
    edges[-1, -1] = dmax

    # walk left-to-right: each right edge is one log10 step past the left
    # edge, and becomes the next bin's left edge
    for i in range(n_bins):
        edges[i, -1] = 10**(np.log10(edges[i, 0]) + (1. / mult))

        if i < n_bins - 1:
            edges[i + 1, 0] = edges[i, -1]

    return midpoints(edges, base=base)
61 |
62 |
def midpoints(bins, base='log'):
    """Returns a 3xn array of bin diameters.

    Build a 3xn array of bin diameters from a 2xn array of bin boundaries
    (or a 3xn array, whose middle column is recomputed) that can then be
    fed directly into the OPC class. The default behaviour is to place the
    midpoint at the geometric mean of the edges (equal spacing on a log10
    basis).

    Parameters
    ----------

    bins : 2xn or 3xn array
        An array with bin boundaries.
    base : {'log' | 'none' | None}
        If 'log', midpoints are the geometric mean of the bin edges;
        otherwise the arithmetic mean.

    Returns
    -------
    bins : array
        Returns a 3xn_bins array in the format of [left edge, midpoint, right edge]

    Raises
    ------
    ValueError
        If `bins` is not a 2xn or 3xn array.

    Examples
    --------

    Build a set of bins for an OPC similar to a Dylos DC1100 Pro:

    >>> arr = np.array([[0.5, 2.5], [2.5, 10]])
    >>> bins = opcsim.utils.midpoints(arr)

    """
    # normalize the sentinel (previously mis-set to 'base', which only
    # worked by accident because anything != 'log' means arithmetic mean)
    base = 'none' if base is None else base

    # bug fix: the old check `bins.shape[1] != (2 or 3)` evaluated as
    # `!= 2` and wrongly rejected valid 3xn input
    if bins.shape[1] not in (2, 3):
        raise ValueError("Invalid bins array. Must be either 2xn or 3xn.")

    tmp = np.zeros((bins.shape[0], 3)) * np.nan
    tmp[:, 0] = bins[:, 0]
    tmp[:, 2] = bins[:, -1]

    for i in range(tmp.shape[0]):
        if base.lower() == 'log':
            # geometric mean: midpoint centered in log10-space
            tmp[i, 1] = 10**np.log10([tmp[i, 0], tmp[i, 2]]).mean()
        else:
            tmp[i, 1] = np.mean([tmp[i, 0], tmp[i, 2]])

    return tmp
108 |
109 |
def k_kohler(diam_dry, kappa=0., rh=0.):
    """Calculate the wet diameter of a particle based on the hygroscopic growth
    parameter, kappa (k-Kohler theory).

    .. math::

        D_w=D_d*\\sqrt[3]{1 + \\frac{a_w}{1-a_w}\\kappa_{eff}}

    Parameters
    ----------
    diam_dry : float
        The dry diameter in any units (nm or um most likely)
    kappa : float, optional
        The effective kappa value
    rh : float, optional
        The relative humidity as a percentage (0.0-100.0); must be
        strictly less than 100, otherwise the water activity term
        divides by zero.

    Returns
    -------
    diam_wet : float
        The wet diameter in the same units supplied for the dry diameter

    """
    # calculate the water activity
    aw = rh / 100.

    return diam_dry * math.pow(1 + kappa * (aw / (1 - aw)), 1./3.)
137 |
138 |
def rho_eff(rho, weights=None, diams=None):
    """Return the volume-weighted effective density of a particle.

    The effective density is the weighted sum of the per-species
    densities. Weights can be supplied directly as volume fractions, or
    computed from an array of per-species diameters (weights are then
    proportional to the diameter cubed).

    Parameters
    ----------
    rho: ndarray of floats
        An array of particle densities.
    weights: ndarray of floats
        An array of volumetric weight percentages.
    diams: ndarray of floats
        An array of diameters for each species; if given, overrides
        `weights`.

    Returns
    -------
    rho_eff: float
        The weighted density of the wet particle.
    """
    rho = np.asarray(rho)

    # derive volumetric weights from the diameters when provided
    if diams is not None:
        cubed = np.asarray(diams) ** 3
        weights = cubed / cubed.sum()

    weights = np.asarray(weights)

    return (weights * rho).sum()
169 |
170 |
def k_eff(kappas, weights=None, diams=None):
    """Return the effective k-Kohler coefficient.

    Computes the weighted sum of the per-species kappa values. Weights
    can be supplied directly, or computed from an array of per-species
    diameters (weights are then proportional to the diameter cubed).

    .. math::

        \\kappa=\\sum_{i=1} \\epsilon_i \\kappa_i

    Parameters
    ----------
    kappas: ndarray
        An array of k-kohler coefficients.
    weights: ndarray, optional
        An array of volumetric weights.
    diams: ndarray
        An array of diameters used to calculate the weights; if given,
        overrides `weights`.

    Returns
    -------
    k_eff: float
        The effective k-kohler coefficient.
    """
    kappas = np.asarray(kappas)

    # derive volumetric weights from the diameters when provided
    if diams is not None:
        cubed = np.asarray(diams) ** 3
        weights = cubed / cubed.sum()

    weights = np.asarray(weights)

    return (kappas * weights).sum()
205 |
206 |
def ri_eff(species, weights=None, diams=None):
    """Calculate the effective refractive index for an
    array of refractive indices and their respective weights.
    Alternatively, an array of diameters can be passed which
    will be used to calculate the volumetric weights.

    .. math::

        n_{eff}=\\sum_{i=1}^{N} \\frac{V_i}{V_{total}}*n_i

    Parameters
    ----------
    species: ndarray
        An array of refractive indices.
    weights: ndarray, optional
        An array of volumetric weights.
    diams: ndarray, optional
        An array of diameters used to calculate the volumetric weights;
        if provided, overrides `weights`.

    Returns
    -------
    ri_eff: complex
        The effective refractive index.
    """
    species = np.asarray(species)

    # the docstring always advertised diameter-derived weights, but they
    # were never implemented; mirror rho_eff/k_eff for consistency
    if diams is not None:
        diams = np.asarray(diams)
        weights = (diams**3) / (diams**3).sum()

    weights = np.asarray(weights)

    # calculate the real and imag parts separately
    real = (species.real * weights).sum()
    imag = (species.imag * weights).sum()

    return complex(real, imag)
237 |
238 |
def power_law_fit(diams, cscat, fit_kws=None):
    """Generate a power-law fit (linear in log-log space)
    between bin midpoints and the scattering cross-section.

    Fits :math:`C_{scat} = a D_p^b` and evaluates the fitted curve at the
    supplied diameters.

    Parameters
    ----------
    diams: ndarray
        An array of diameters corresponding to the boundaries of each bin.
    cscat: ndarray
        An array of Cscat values corresponding to the boundaries of each bin.
    fit_kws: dict, optional
        A dictionary of keyword arguments that is passed directly to scipy.curve_fit
        for the optimization. Please see the scipy.optimize.curve_fit docs for details.

    Returns
    -------
    rv: ndarray
        An array of fitted Cscat values for each bin boundary diameter.

    Examples
    --------

    """
    # bug fix: a mutable default argument ({}) is shared across calls;
    # use None as the sentinel instead
    if fit_kws is None:
        fit_kws = {}

    def f(dp, a, b):
        return a*np.power(dp, b)

    # fit the function
    popt, _ = curve_fit(f, diams, cscat, **fit_kws)

    return f(diams, *popt)
269 |
270 |
def squash_dips(cscat_vals):
    """Remove any dips in an array by interpolating
    around those points. If there are no dips and all
    points are monotonically increasing, the array is
    returned unchanged (as a copy).

    Parameters
    ----------
    cscat_vals: ndarray
        An array of scattering cross-section values.

    Returns
    -------
    rv: ndarray
        An array of smoothed scattering cross-section values.

    """
    smoothed = cscat_vals.copy()

    # indices i where the value at i+1 drops below the value at i;
    # fixes are applied in ascending order, so later averages can see
    # earlier replacements
    dips = np.where(np.diff(smoothed) < 0)[0]

    # NOTE(review): a dip at index 0 averages in smoothed[-1] via
    # negative indexing — presumably Cscat arrays never dip at the very
    # first element; confirm with callers
    for i in dips:
        smoothed[i] = np.mean([smoothed[i - 1], smoothed[i + 1]])

    return smoothed
298 |
--------------------------------------------------------------------------------
/src/opcsim/equations/pdf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | """
4 | This file contains all of the probability distribution functions
5 | """
6 | import math
7 | import numpy as np
8 |
def dn_ddp(dp, n, gm, gsd):
    """Evaluate the number distribution as a lognormal PDF.

    The PDF of a lognormal distribution is calculated using equation 8.34
    from Seinfeld and Pandis.

    .. math::

        n_N(D_p)=\\frac{dN}{dD_p}=\\frac{N_t}{\\sqrt{2π}D_p lnσ_g}exp\\Big(-\\frac{(lnD_p - lnD̄_{pg})^2}{2ln^2σ_g}\\Big)

    Parameters
    ----------
    dp : float or an array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    n(Dp) | float or an array of floats
        Returns the total number of particles at diameter dp in units of
        :math:`\\mu m^{-1} cm^{-3}`.

    See Also
    --------
    opcsim.equations.pdf.dn_dlndp
    opcsim.equations.pdf.dn_dlogdp
    opcsim.equations.cdf.nt

    """
    # normalization prefactor of the lognormal PDF
    scale = n / (np.sqrt(2*np.pi) * dp * np.log(gsd))

    # squared log-distance from the geometric mean, scaled by the spread
    arg = (np.log(dp) - np.log(gm)) ** 2 / (2*np.log(gsd) ** 2)

    return scale * np.exp(-arg)
47 |
def dn_dlndp(dp, n, gm, gsd):
    """Evaluate the lognormal number PDF on a natural-log basis.

    Implements equation 8.33 (by way of 8.21) from Seinfeld and Pandis:
    the ln-basis PDF is the linear-basis PDF scaled by the diameter.

    .. math::

        n_N^e(lnD_p)=\\frac{dN}{dlnD_p}=\\frac{N_t}{\\sqrt{2π} lnσ_g}exp(-\\frac{(lnD_p - lnD̄_{pg})^2}{2ln^2σ_g})

    Parameters
    ----------
    dp : float or an array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    dn/dlndp | float
        Returns the total number of particles at diameter dp in units of
        [cm-3]

    See Also
    --------
    opcsim.equations.pdf.dn_ddp
    opcsim.equations.pdf.dn_dlogdp
    opcsim.equations.cdf.nt

    """
    # jacobian of the ln transform: dN/dlnDp = Dp * dN/dDp
    return dn_ddp(dp, n, gm, gsd) * dp
81 |
def dn_dlogdp(dp, n, gm, gsd):
    """Evaluate the lognormal number PDF on a log10 basis.

    Implements equation 8.18 from Seinfeld and Pandis: the log10-basis
    PDF is the linear-basis PDF scaled by ln(10) times the diameter.

    .. math::

        n_N^o(logD_p)=\\frac{dN}{dlogD_p}=ln(10)D_pn_N(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    dn/dlogdp | float
        Returns the total number of particles at diameter dp in units of
        [cm-3]

    See Also
    --------
    opcsim.equations.pdf.dn_ddp
    opcsim.equations.pdf.dn_dlndp
    opcsim.equations.cdf.nt

    """
    # jacobian of the log10 transform: dN/dlogDp = ln(10) * Dp * dN/dDp
    jacobian = np.log(10) * dp
    return jacobian * dn_ddp(dp, n, gm, gsd)
115 |
def ds_ddp(dp, n, gm, gsd):
    """Evaluate the surface-area weighted lognormal PDF.

    Implements equation 8.4 from Seinfeld and Pandis: the number PDF is
    weighted by the surface area of a sphere of diameter dp.

    .. math::

        n_s(D_p)=πD_p^2n_N(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    ds/ddp | float
        Returns the surface area of particles at diameter dp in units of
        [um cm-3]

    See Also
    --------
    opcsim.equations.pdf.ds_dlndp
    opcsim.equations.pdf.ds_dlogdp
    opcsim.equations.cdf.st

    """
    # surface area of a sphere with diameter dp
    sphere_area = np.pi * dp ** 2
    return sphere_area * dn_ddp(dp, n, gm, gsd)
149 |
def ds_dlndp(dp, n, gm, gsd):
    """Evaluate the surface-area weighted lognormal PDF on a ln basis.

    Implements equation 8.10 from Seinfeld and Pandis: the ln-basis
    number PDF weighted by the surface area of a sphere of diameter dp.

    .. math::

        n_s^e(ln D_p)=πD_p^2n_N^e(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    ds/dlndp | float
        Returns the surface area of particles at diameter dp on a log basis
        in units of [um2 cm-3]

    See Also
    --------
    opcsim.equations.pdf.ds_ddp
    opcsim.equations.pdf.ds_dlogdp
    opcsim.equations.cdf.st

    """
    # surface area of a sphere with diameter dp
    sphere_area = np.pi * dp ** 2
    return sphere_area * dn_dlndp(dp, n, gm, gsd)
183 |
def ds_dlogdp(dp, n, gm, gsd):
    """Evaluate the surface-area weighted lognormal PDF on a log10 basis.

    Implements equation 8.19 from Seinfeld and Pandis: the linear-basis
    surface-area PDF scaled by ln(10) times the diameter.

    .. math::

        n_s^o(log D_p)=ln(10)D_p n_s(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    ds/dlogdp | float
        Returns the surface area of particles at diameter dp on a log10 basis
        in units of [um2 cm-3]

    See Also
    --------
    opcsim.equations.pdf.ds_ddp
    opcsim.equations.pdf.ds_dlndp
    opcsim.equations.cdf.st

    """
    # jacobian of the log10 transform applied to the surface-area PDF
    jacobian = np.log(10) * dp
    return jacobian * ds_ddp(dp, n, gm, gsd)
217 |
def dv_ddp(dp, n, gm, gsd):
    """Evaluate the volume weighted lognormal PDF.

    Implements equation 8.6 from Seinfeld and Pandis: the number PDF is
    weighted by the volume of a sphere of diameter dp.

    .. math::

        n_V(D_p)=\\frac{π}{6}D_p^3 n_N(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    dv/ddp | float
        Returns the volume of particles at diameter dp
        in units of [um2 cm-3]

    See Also
    --------
    opcsim.equations.pdf.dv_dlndp
    opcsim.equations.pdf.dv_dlogdp
    opcsim.equations.cdf.vt

    """
    # volume of a sphere with diameter dp: (pi/6) * dp^3
    sphere_vol = (np.pi / 6.) * dp ** 3
    return sphere_vol * dn_ddp(dp, n, gm, gsd)
251 |
def dv_dlndp(dp, n, gm, gsd):
    """Evaluate the volume weighted lognormal PDF on a ln basis.

    Implements equation 8.10 from Seinfeld and Pandis: the ln-basis
    number PDF weighted by the volume of a sphere of diameter dp.

    .. math::

        n_V^e(ln D_p)=\\frac{π}{6}D_p^3 n_N^e(ln D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    dV/dlndp | float
        Returns the volume of particles at diameter dp on a log basis
        in units of [um3 cm-3]

    See Also
    --------
    opcsim.equations.pdf.dv_ddp
    opcsim.equations.pdf.dv_dlogdp
    opcsim.equations.cdf.vt

    """
    # volume of a sphere with diameter dp: (pi/6) * dp^3
    sphere_vol = (np.pi / 6.) * dp ** 3
    return sphere_vol * dn_dlndp(dp, n, gm, gsd)
285 |
def dv_dlogdp(dp, n, gm, gsd):
    """Evaluate the volume weighted lognormal PDF on a log10 basis.

    Implements equation 8.20 from Seinfeld and Pandis: the linear-basis
    volume PDF scaled by ln(10) times the diameter.

    .. math::

        n_V^o(log D_p)=ln(10)D_p n_V(D_p)

    Parameters
    ----------
    dp : float or array of floats
        Particle diameter in microns.
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.

    Returns
    -------
    dV/dlogdp | float
        Returns the volume of particles at diameter dp on a log10 basis
        in units of [um3 cm-3]

    See Also
    --------
    opcsim.equations.pdf.dv_ddp
    opcsim.equations.pdf.dv_dlndp
    opcsim.equations.cdf.vt

    """
    # jacobian of the log10 transform applied to the volume PDF
    jacobian = np.log(10) * dp
    return jacobian * dv_ddp(dp, n, gm, gsd)
319 |
--------------------------------------------------------------------------------
/docs/tutorial/nephelometer.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "raw",
5 | "metadata": {},
6 | "source": [
7 | ".. _nephelometer_tutorial:\n"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "# Using OPCSIM to Simulate a Nephelometer\n",
15 | "\n",
16 | "This section of the tutorial will walk you through how we model Nephelometers, how you can build/model a Nephelometer, and how we can evaluate Nephelometers across a wide range of conditions using this tool."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {
23 | "execution": {
24 | "iopub.execute_input": "2024-10-04T01:15:57.641899Z",
25 | "iopub.status.busy": "2024-10-04T01:15:57.641664Z",
26 | "iopub.status.idle": "2024-10-04T01:15:58.388335Z",
27 | "shell.execute_reply": "2024-10-04T01:15:58.388022Z"
28 | }
29 | },
30 | "outputs": [],
31 | "source": [
32 | "# Make imports\n",
33 | "import opcsim\n",
34 | "import numpy as np\n",
35 | "import matplotlib.pyplot as plt\n",
36 | "import matplotlib.ticker as mticks\n",
37 | "import seaborn as sns\n",
38 | "\n",
39 | "%matplotlib inline\n",
40 | "\n",
41 | "# turn off warnings temporarily\n",
42 | "import warnings\n",
43 | "warnings.simplefilter('ignore')\n",
44 | "\n",
45 | "# Let's set some default seaborn settings\n",
46 | "sns.set(context='notebook', style='ticks', palette='dark', font_scale=1.75, \n",
47 | " rc={'figure.figsize': (12,6), **opcsim.plots.rc_log})"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "metadata": {},
53 | "source": [
54 | "## Nephelometer Representation\n",
55 | "\n",
56 | "In OPCSIM, we define a Nephelometer using two parameters: the wavelength of light used in the device and its viewing angle. Unlike photometers and some optical particle counters, most low-cost commercial nephelometers gather light across as wide a range of angles as possible. This minimizes some of the uncertainty associated with the Mie resonance and allows manufacturers to use cheap photo-detectors while still gathering enough signal to distinguish from noise.\n",
57 | "\n",
58 | "To build a Nephelometer, simply initialize using the `opcsim.Nephelometer` class:"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {
65 | "execution": {
66 | "iopub.execute_input": "2024-10-04T01:15:58.390546Z",
67 | "iopub.status.busy": "2024-10-04T01:15:58.390374Z",
68 | "iopub.status.idle": "2024-10-04T01:15:58.393269Z",
69 | "shell.execute_reply": "2024-10-04T01:15:58.393014Z"
70 | }
71 | },
72 | "outputs": [],
73 | "source": [
74 | "# init a nephelometer with a 658 nm laser, gathering light from between 7-173 degrees\n",
75 | "neph = opcsim.Nephelometer(wl=0.658, theta=(7., 173))\n",
76 | "\n",
77 | "neph"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "## Calibration\n",
85 | "\n",
86 |     "Nephelometers gather the total scattered light from many angles across an entire aerosol distribution. Typically, users of low-cost nephelometers co-locate their device with a reference device of higher (or known) quality and simply compare the output signal from the nephelometer to the integrated mass value (i.e. $PM_1$, $PM_{2.5}$, or $PM_{10}$) from the reference device. To keep things as simple and realistic as possible, we follow this approach. \n",
87 | "\n",
88 | "To calibrate a nephelometer in OPCSIM, you provide an aerosol distribution to the `calibrate` method - the actual mass values for $PM_1$, $PM_{2.5}$, and $PM_{10}$ are calculated exactly and the total scattered light is computed as well. The ratio between the total scattered light and each of the mass loadings are stored as calibration factors and are used again when evaluating previously unseen distributions.\n",
89 | "\n",
90 | "To calibrate our nephelometer above to a synthetic distribution of Ammonium Sulfate:"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {
97 | "execution": {
98 | "iopub.execute_input": "2024-10-04T01:15:58.413036Z",
99 | "iopub.status.busy": "2024-10-04T01:15:58.412908Z",
100 | "iopub.status.idle": "2024-10-04T01:15:58.904528Z",
101 | "shell.execute_reply": "2024-10-04T01:15:58.904191Z"
102 | }
103 | },
104 | "outputs": [],
105 | "source": [
106 | "d1 = opcsim.AerosolDistribution(\"AmmSulf\")\n",
107 | "\n",
108 | "d1.add_mode(n=1e4, gm=125e-3, gsd=1.5, refr=complex(1.521, 0), kappa=0.53, rho=1.77)\n",
109 | "\n",
110 | "# calibrate the nephelometer at 0% RH\n",
111 | "neph.calibrate(d1, rh=0.)"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {},
117 | "source": [
118 | "We can explore the calibration factors that were just determined - the units are a bit arbitrary, since we don't consider the intensity/power of the laser as we assume it is constant. Thus, these units are something like $cm^2/(\\mu g/ m^3)$"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {
125 | "execution": {
126 | "iopub.execute_input": "2024-10-04T01:15:58.906273Z",
127 | "iopub.status.busy": "2024-10-04T01:15:58.906189Z",
128 | "iopub.status.idle": "2024-10-04T01:15:58.908520Z",
129 | "shell.execute_reply": "2024-10-04T01:15:58.908261Z"
130 | }
131 | },
132 | "outputs": [],
133 | "source": [
134 | "neph.pm1_ratio"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 |     "Similarly, we get ratios for $PM_{2.5}$ and $PM_{10}$:"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {
148 | "execution": {
149 | "iopub.execute_input": "2024-10-04T01:15:58.910068Z",
150 | "iopub.status.busy": "2024-10-04T01:15:58.909971Z",
151 | "iopub.status.idle": "2024-10-04T01:15:58.912094Z",
152 | "shell.execute_reply": "2024-10-04T01:15:58.911811Z"
153 | }
154 | },
155 | "outputs": [],
156 | "source": [
157 | "neph.pm25_ratio"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "metadata": {
164 | "execution": {
165 | "iopub.execute_input": "2024-10-04T01:15:58.913483Z",
166 | "iopub.status.busy": "2024-10-04T01:15:58.913387Z",
167 | "iopub.status.idle": "2024-10-04T01:15:58.915444Z",
168 | "shell.execute_reply": "2024-10-04T01:15:58.915178Z"
169 | }
170 | },
171 | "outputs": [],
172 | "source": [
173 | "neph.pm10_ratio"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "metadata": {},
179 | "source": [
180 | "## Evaluating a Nephelometer for New Aerosol Distributions\n",
181 | "\n",
182 | "The entire point of this tool is to be able to simulate what would happen under different circumstances. To do so, we use the `evaluate` method, which takes an AerosolDistribution as an argument (as well as an optional relative humidity) and returns the total scattered light, $PM_1$, $PM_{2.5}$, and $PM_{10}$."
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {
189 | "execution": {
190 | "iopub.execute_input": "2024-10-04T01:15:58.916979Z",
191 | "iopub.status.busy": "2024-10-04T01:15:58.916882Z",
192 | "iopub.status.idle": "2024-10-04T01:15:59.512907Z",
193 | "shell.execute_reply": "2024-10-04T01:15:59.512608Z"
194 | }
195 | },
196 | "outputs": [],
197 | "source": [
198 | "# evaluate the same distribution we used to calibrate\n",
199 | "neph.evaluate(d1, rh=0.)"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "metadata": {
206 | "execution": {
207 | "iopub.execute_input": "2024-10-04T01:15:59.514520Z",
208 | "iopub.status.busy": "2024-10-04T01:15:59.514403Z",
209 | "iopub.status.idle": "2024-10-04T01:16:00.070104Z",
210 | "shell.execute_reply": "2024-10-04T01:16:00.069815Z"
211 | }
212 | },
213 | "outputs": [],
214 | "source": [
215 | "# evaluate the same distribution we used to calibrate, but at a higher RH\n",
216 | "neph.evaluate(d1, rh=85.0)"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "metadata": {},
222 | "source": [
223 | "What if we went ahead and tried to evaluate on a totally unseen distribution? Let's go ahead and evaluate on an **urban** distribution:"
224 | ]
225 | },
226 | {
227 | "cell_type": "code",
228 | "execution_count": null,
229 | "metadata": {
230 | "execution": {
231 | "iopub.execute_input": "2024-10-04T01:16:00.071778Z",
232 | "iopub.status.busy": "2024-10-04T01:16:00.071674Z",
233 | "iopub.status.idle": "2024-10-04T01:16:00.073936Z",
234 | "shell.execute_reply": "2024-10-04T01:16:00.073638Z"
235 | }
236 | },
237 | "outputs": [],
238 | "source": [
239 | "d2 = opcsim.load_distribution(\"urban\")\n",
240 | "\n",
241 | "d2"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": [
248 | "First, let's determine the actual $PM_1$, $PM_{2.5}$, and $PM_{10}$ loadings for this distribution:"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {
255 | "execution": {
256 | "iopub.execute_input": "2024-10-04T01:16:00.075483Z",
257 | "iopub.status.busy": "2024-10-04T01:16:00.075384Z",
258 | "iopub.status.idle": "2024-10-04T01:16:00.077853Z",
259 | "shell.execute_reply": "2024-10-04T01:16:00.077554Z"
260 | }
261 | },
262 | "outputs": [],
263 | "source": [
264 | "print (\"PM1 = {:.2f} ug/m3\".format(d2.cdf(dmin=0., dmax=1., weight='mass', rho=1.65)))\n",
265 | "print (\"PM2.5 = {:.2f} ug/m3\".format(d2.cdf(dmin=0., dmax=2.5, weight='mass', rho=1.65)))\n",
266 | "print (\"PM10 = {:.2f} ug/m3\".format(d2.cdf(dmin=0., dmax=10., weight='mass', rho=1.65)))"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "Next, let's evaluate the Nephelometer:"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": null,
279 | "metadata": {
280 | "execution": {
281 | "iopub.execute_input": "2024-10-04T01:16:00.079411Z",
282 | "iopub.status.busy": "2024-10-04T01:16:00.079315Z",
283 | "iopub.status.idle": "2024-10-04T01:16:01.501202Z",
284 | "shell.execute_reply": "2024-10-04T01:16:01.500883Z"
285 | }
286 | },
287 | "outputs": [],
288 | "source": [
289 | "neph.evaluate(d2, rh=0.)"
290 | ]
291 | },
292 | {
293 | "cell_type": "markdown",
294 | "metadata": {},
295 | "source": [
296 | "So, we're off by about a factor of 2, in part due to differences in assumed density and in part due to the fact the urban distribution scatters less light per unit mass than our calibration aerosol."
297 | ]
298 | },
299 | {
300 | "cell_type": "code",
301 | "execution_count": null,
302 | "metadata": {},
303 | "outputs": [],
304 | "source": []
305 | }
306 | ],
307 | "metadata": {
308 | "kernelspec": {
309 | "display_name": "Python 3",
310 | "language": "python",
311 | "name": "python3"
312 | },
313 | "language_info": {
314 | "codemirror_mode": {
315 | "name": "ipython",
316 | "version": 3
317 | },
318 | "file_extension": ".py",
319 | "mimetype": "text/x-python",
320 | "name": "python",
321 | "nbconvert_exporter": "python",
322 | "pygments_lexer": "ipython3",
323 | "version": "3.10.6"
324 | }
325 | },
326 | "nbformat": 4,
327 | "nbformat_minor": 4
328 | }
329 |
--------------------------------------------------------------------------------
/docs/sphinxext/gallery_generator.py:
--------------------------------------------------------------------------------
1 | """
2 | Sphinx plugin to run example scripts and create a gallery page.
3 | Lightly modified from the mpld3 project.
4 | """
5 | from __future__ import division
6 | import os
7 | import os.path as op
8 | import re
9 | import glob
10 | import token
11 | import tokenize
12 | import shutil
13 |
14 | from seaborn.external import six
15 |
16 | from matplotlib import image
17 | import matplotlib.pyplot as plt
18 |
19 | import matplotlib
20 | matplotlib.use('Agg')
21 |
22 |
# Py2/Py3 compatibility shim: re-create the Python 2 ``execfile`` builtin on
# Python 3 so ``ExampleGenerator.exec_file`` can run example scripts the same
# way on either interpreter.
if six.PY3:
    # Python 3 has no execfile
    def execfile(filename, globals=None, locals=None):
        """Execute *filename* in the given ``globals``/``locals`` namespaces.

        Reads the file as bytes and compiles it so the script's own source
        encoding declaration (if any) is honored.
        """
        with open(filename, "rb") as fp:
            six.exec_(compile(fp.read(), filename, 'exec'), globals, locals)
28 |
29 |
# reST page template for a single example: the example's docstring, the
# rendered image, a download link, and the source code (starting at line
# {end_line}, i.e. just past the module docstring).
# NOTE(review): the ``.. raw:: html`` sections appear to have had their HTML
# payload stripped from this copy of the file -- confirm against the upstream
# mpld3/seaborn version of gallery_generator.py.
RST_TEMPLATE = """

.. _{sphinx_tag}:

{docstring}

.. image:: {img_file}

**Python source code:** :download:`[download source: {fname}]<{fname}>`

.. raw:: html



.. literalinclude:: {fname}
    :lines: {end_line}-

.. raw:: html



"""


# reST template for the gallery index page ({toctree} and {contents} are
# accumulated across all examples in main()).
# NOTE(review): the HTML banner markup that originally sat inside the first
# ``raw:: html`` block is missing from this copy -- confirm upstream.
INDEX_TEMPLATE = """

.. raw:: html



.. _{sphinx_tag}:

Example gallery
===============

{toctree}

{contents}

.. raw:: html


"""
133 |
134 |
def create_thumbnail(infile, thumbfile,
                     width=275, height=275,
                     cx=0.5, cy=0.5, border=4):
    """Crop a ``width x height`` thumbnail out of *infile* and save it.

    Parameters
    ----------
    infile : str
        Path to the full-size source image.
    thumbfile : str
        Path the thumbnail image is written to.
    width, height : int, optional
        Size of the crop in pixels.
    cx, cy : float, optional
        Fractional (0-1) center of the crop within the source image.
    border : int, optional
        Width, in pixels, of the black frame drawn around the thumbnail.

    Returns
    -------
    matplotlib.figure.Figure
        The figure the thumbnail was rendered into (left open for the caller).
    """
    # NOTE: the original computed op.splitext(thumbfile) into unused locals;
    # that dead code has been removed.
    im = image.imread(infile)
    rows, cols = im.shape[:2]

    # Center the crop window on (cx, cy), expressed as fractions of the image.
    x0 = int(cx * cols - .5 * width)
    y0 = int(cy * rows - .5 * height)
    xslice = slice(x0, x0 + width)
    yslice = slice(y0, y0 + height)
    thumb = im[yslice, xslice]

    # Paint a black border on all four edges (color channels only).
    thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
    thumb[:, :border, :3] = thumb[:, -border:, :3] = 0

    dpi = 100
    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)

    # Fill the entire canvas -- no frame, ticks, or margins.
    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    ax.imshow(thumb, aspect='auto', resample=True,
              interpolation='bilinear')
    fig.savefig(thumbfile, dpi=dpi)
    return fig
159 |
160 |
def indent(s, N=4):
    """Return *s* with ``N`` spaces inserted after every newline.

    The first line is left untouched; only subsequent lines are indented.
    """
    pad = '\n' + ' ' * N
    return pad.join(s.split('\n'))
164 |
165 |
class ExampleGenerator(object):
    """Tools for generating an example page from a file"""

    def __init__(self, filename, target_dir):
        # Path of the example script and the directory the reST page goes to.
        self.filename = filename
        self.target_dir = target_dir
        # Default thumbnail crop center; may be overridden by a "_thumb: x, y"
        # line in the example's docstring (see extract_docstring).
        self.thumbloc = .5, .5
        self.extract_docstring()
        with open(filename, "r") as fid:
            self.filetext = fid.read()

        outfilename = op.join(target_dir, self.rstfilename)

        # Only actually run it if the output RST file doesn't
        # exist or it was modified less recently than the example
        if (not op.exists(outfilename)
            or (op.getmtime(outfilename) < op.getmtime(filename))):

            self.exec_file()
        else:

            print("skipping {0}".format(self.filename))

    @property
    def dirname(self):
        # Directory containing the example script.
        return op.split(self.filename)[0]

    @property
    def fname(self):
        # Base name of the example script, extension included.
        return op.split(self.filename)[1]

    @property
    def modulename(self):
        # Base name without extension; all output file names derive from it.
        return op.splitext(self.fname)[0]

    @property
    def pyfilename(self):
        return self.modulename + '.py'

    @property
    def rstfilename(self):
        return self.modulename + ".rst"

    @property
    def htmlfilename(self):
        return self.modulename + '.html'

    @property
    def pngfilename(self):
        # Rendered-figure path, relative to the generated reST page.
        pngfile = self.modulename + '.png'
        return "_images/" + pngfile

    @property
    def thumbfilename(self):
        # File name (no directory) of the cropped thumbnail image.
        pngfile = self.modulename + '_thumb.png'
        return pngfile

    @property
    def sphinxtag(self):
        # Anchor used for the page's ``.. _tag:`` cross-reference target.
        return self.modulename

    @property
    def pagetitle(self):
        # First line of the module docstring serves as the page title.
        return self.docstring.strip().split('\n')[0].strip()

    @property
    def plotfunc(self):
        # Name of the opcsim plotting entry point used by the example
        # (something ending in "plot", "map", or "Grid"); "" if none match.
        match = re.search(r"opcsim\.(.+plot)\(", self.filetext)
        if match:
            return match.group(1).split(".")[-1]
        match = re.search(r"opcsim\.(.+map)\(", self.filetext)
        if match:
            return match.group(1).split(".")[-1]
        match = re.search(r"opcsim\.(.+Grid)\(", self.filetext)
        if match:
            return match.group(1).split(".")[-1]
        return ""

    def extract_docstring(self):
        """ Extract a module-level docstring
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            # Skip a shebang line; remember the offset for end_line below.
            lines.pop(0)
            start_row = 1

        docstring = ''
        first_par = ''
        # Tokenize the file and stop at the first STRING token, which for a
        # well-formed example is the module docstring.
        line_iter = lines.__iter__()
        tokens = tokenize.generate_tokens(lambda: next(line_iter))
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break

        # An optional "_thumb: .x, .y" line overrides the thumbnail crop
        # center and is removed from the rendered docstring.
        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])

        self.docstring = docstring
        self.short_desc = first_par
        # First line of actual code after the docstring; consumed by the
        # literalinclude directive in RST_TEMPLATE.
        self.end_line = erow + 1 + start_row

    def exec_file(self):
        # Run the example script and capture its current matplotlib figure
        # as a PNG plus a cropped thumbnail.
        print("running {0}".format(self.filename))

        plt.close('all')
        my_globals = {'pl': plt,
                      'plt': plt}
        execfile(self.filename, my_globals)

        fig = plt.gcf()
        fig.canvas.draw()
        pngfile = op.join(self.target_dir, self.pngfilename)
        thumbfile = op.join("example_thumbs", self.thumbfilename)
        # NOTE(review): the HTML snippet assigned here looks truncated in this
        # copy of the file (the string literal is cut short) -- confirm
        # against the upstream mpld3/seaborn version.
        self.html = "
" % self.pngfilename
        fig.savefig(pngfile, dpi=75, bbox_inches="tight")

        cx, cy = self.thumbloc
        create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)

    def toctree_entry(self):
        # Entry for the hidden toctree on the gallery index page.
        return "   ./%s\n\n" % op.splitext(self.htmlfilename)[0]

    def contents_entry(self):
        # Thumbnail/link markup for the gallery index page.
        # NOTE(review): the template fragments below look truncated in this
        # copy of the file -- confirm against upstream.
        return (".. raw:: html\n\n"
                "        \n\n"
                "\n\n"
                "".format(self.htmlfilename,
                          self.thumbfilename,
                          self.plotfunc))
322 |
323 |
def main(app):
    """Build the example gallery.

    Connected to Sphinx's ``builder-inited`` event in :func:`setup`. For every
    ``*.py`` script in the repository-level ``examples`` directory this runs
    the script (rendering a PNG and thumbnail), writes a per-example reST
    page, and finally writes the gallery index page.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        The running Sphinx application; only its builder source directory
        is used.
    """
    static_dir = op.join(app.builder.srcdir, '_static')
    target_dir = op.join(app.builder.srcdir, 'examples')
    image_dir = op.join(app.builder.srcdir, 'examples/_images')
    thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
    source_dir = op.abspath(op.join(app.builder.srcdir,
                                    '..', 'examples'))

    # Make sure every output directory exists before generating anything
    # (collapses five identical exists/makedirs stanzas into one loop).
    for dirname in (static_dir, target_dir, image_dir, thumb_dir, source_dir):
        if not op.exists(dirname):
            os.makedirs(dirname)

    banner_data = []

    toctree = ("\n\n"
               ".. toctree::\n"
               "   :hidden:\n\n")
    contents = "\n\n"

    # Write individual example files
    for filename in glob.glob(op.join(source_dir, "*.py")):

        ex = ExampleGenerator(filename, target_dir)

        banner_data.append({"title": ex.pagetitle,
                            "url": op.join('examples', ex.htmlfilename),
                            "thumb": op.join(ex.thumbfilename)})
        shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
        output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                                     docstring=ex.docstring,
                                     end_line=ex.end_line,
                                     fname=ex.pyfilename,
                                     img_file=ex.pngfilename)
        with open(op.join(target_dir, ex.rstfilename), 'w') as f:
            f.write(output)

        toctree += ex.toctree_entry()
        contents += ex.contents_entry()

    # Pad the banner out to at least 10 entries by cycling what we have.
    if len(banner_data) < 10:
        banner_data = (4 * banner_data)[:10]

    # write index file
    index_file = op.join(target_dir, 'index.rst')
    with open(index_file, 'w') as index:
        index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
                                          toctree=toctree,
                                          contents=contents))
382 |
383 |
def setup(app):
    """Sphinx extension entry point: build the gallery when the builder starts."""
    app.connect('builder-inited', main)
386 |
--------------------------------------------------------------------------------
/src/opcsim/distributions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | """
4 | from .equations.pdf import *
5 | from .equations.cdf import *
6 | from .utils import k_kohler, rho_eff, RHO_H20
7 |
# Model aerosol distributions from Seinfeld & Pandis, Table 8.3 (see
# load_distribution). Each mode is a 4-tuple:
#   (N [# cm^-3], GM [um], log10(GSD), label)
# Note the third element is the *log10* of the geometric standard deviation;
# load_distribution() converts it back via 10**x before calling add_mode().
DISTRIBUTION_DATA = {
    'urban': [
        (7100, 0.0117, 0.232, "Mode I"),
        (6320, 0.0373, 0.250, "Mode II"),
        (960, 0.151, 0.204, "Mode III")
    ],
    'marine': [
        (133, 0.008, 0.657, "Mode I"),
        (66.6, 0.266, 0.210, "Mode II"),
        (3.1, 0.58, 0.396, "Mode III")
    ],
    'rural': [
        (6650, 0.015, 0.225, "Mode I"),
        (147, 0.054, 0.557, "Mode II"),
        (1990, 0.084, 0.266, "Mode III")
    ],
    'remote continental': [
        (3200, 0.02, 0.161, "Mode I"),
        (2900, 0.116, 0.217, "Mode II"),
        (0.3, 1.8, 0.380, "Mode III")
    ],
    'free troposphere': [
        (129, 0.007, 0.645, "Mode I"),
        (59.7, 0.250, 0.253, "Mode II"),
        (63.5, 0.52, 0.425, "Mode III")
    ],
    'polar': [
        (21.7, 0.138, 0.245, "Mode I"),
        (0.186, 0.75, 0.300, "Mode II"),
        (3e-4, 8.6, 0.291, "Mode III")
    ],
    'desert': [
        (726, 0.002, 0.247, "Mode I"),
        (114, 0.038, 0.770, "Mode II"),
        (0.178, 21.6, 0.438, "Mode III")
    ],
}
45 |
46 | def _get_pdf_func(base, weight, dp, n, gm, gsd, rho=1.):
47 | """"""
48 | weight = weight.lower()
49 |
50 | if weight not in ['number', 'surface', 'volume', 'mass']:
51 | raise Exception("Invalid argument for weight: ['number', 'surface', 'volume', 'mass']")
52 |
53 | if base not in [None, 'none', 'log', 'log10']:
54 | raise Exception("Invalid argument for base: ['none', 'log', 'log10']")
55 |
56 | if base == 'none' or base == None:
57 | if weight == 'number':
58 | return dn_ddp(dp, n, gm, gsd)
59 | elif weight == 'surface':
60 | return ds_ddp(dp, n, gm, gsd)
61 | elif weight == 'volume':
62 | return dv_ddp(dp, n, gm, gsd)
63 | elif weight == 'mass':
64 | return dv_ddp(dp, n, gm, gsd) * rho * 1e-6
65 | elif base == 'log':
66 | if weight == 'number':
67 | return dn_dlndp(dp, n, gm, gsd)
68 | elif weight == 'surface':
69 | return ds_dlndp(dp, n, gm, gsd)
70 | elif weight == 'volume':
71 | return dv_dlndp(dp, n, gm, gsd)
72 | elif weight == 'mass':
73 | return dv_dlndp(dp, n, gm, gsd) * rho *1e-6
74 | elif base == 'log10':
75 | if weight == 'number':
76 | return dn_dlogdp(dp, n, gm, gsd)
77 | elif weight == 'surface':
78 | return ds_dlogdp(dp, n, gm, gsd)
79 | elif weight == 'volume':
80 | return dv_dlogdp(dp, n, gm, gsd)
81 | elif weight == 'mass':
82 | return dv_dlogdp(dp, n, gm, gsd) * rho * 1e-6
83 |
84 |
85 | def _get_cdf_func(n, gm, gsd, dmin=None, dmax=10., weight='number', rho=1.):
86 | """"""
87 | weight = weight.lower()
88 | if weight not in ['number', 'surface', 'volume', 'mass']:
89 | raise Exception("Invalid argument for weight: ['number', 'surface', 'volume', 'mass']")
90 |
91 | if weight == 'number':
92 | return nt(n, gm, gsd, dmin, dmax)
93 | elif weight == 'surface':
94 | return st(n, gm, gsd, dmin, dmax)
95 | elif weight == 'volume':
96 | return vt(n, gm, gsd, dmin, dmax)
97 | elif weight == 'mass':
98 | return vt(n, gm, gsd, dmin, dmax) * rho
99 |
100 |
def load_distribution(label):
    """Load sample distributions as described by Seinfeld+Pandis Table 8.3.

    There are currently 7 options including: Urban, Marine, Rural, Remote
    continental, Free troposphere, Polar, and Desert.

    Parameters
    ----------

    label : {'Urban' | 'Marine' | 'Rural' | 'Remote Continental' | 'Free Troposphere' | 'Polar' | 'Desert'}
        Choose which sample distribution to load. Case-insensitive.

    Returns
    -------
    An instance of the AerosolDistribution class

    Raises
    ------
    ValueError
        If *label* is not one of the known distributions.

    Examples
    --------

    >>> d = opcsim.load_distribution("Urban")

    """
    label = label.lower()

    # Improved error message: name the offending label and the valid options
    # (the original raised a bare "Invalid label.").
    if label not in DISTRIBUTION_DATA:
        raise ValueError("Invalid label {!r}: must be one of {}".format(
            label, sorted(DISTRIBUTION_DATA)))

    dist = AerosolDistribution(label)

    # The third stored element is log10(GSD); convert it back before adding.
    for n, gm, log_gsd, mode_label in DISTRIBUTION_DATA[label]:
        dist.add_mode(n, gm, 10 ** log_gsd, mode_label)

    return dist
134 |
135 |
class AerosolDistribution(object):
    """Define an aerosol distribution.

    Assuming an aerosol distribution can be described as the sum of n lognormal
    modes, define as (Seinfeld and Pandis equation 8.54):

    .. math::

        n_N^o(logD_p)=Σ_{i=1}^{n}\\frac{N_i}{\sqrt{2π} logσ_i}exp(-\\frac{(logD_p - logD̄_{pi})^2}{2log^2σ_i})

    """
    def __init__(self, label=None):
        """Initialize an Aerosol Distribution.

        Parameters
        ----------
        label : string, optional
            Label the distribution

        Returns
        -------
        AerosolDistribution
            An instance of the AerosolDistribution class

        Examples
        --------

        >>> d = AerosolDistribution("Urban")

        """
        self.label = label

        # each entry is a dict of mode parameters as built in add_mode
        self.modes = []

    def _get_mode(self, label):
        """Return the mode with label=`label` (case-insensitive), or None."""
        for mode in self.modes:
            if mode['label'].lower() == label.lower():
                return mode

        return None

    def add_mode(self, n, gm, gsd, label=None, kappa=0., rho=1., refr=complex(1.5, 0)):
        """Add a mode to the distribution as defined using N, GM, and GSD. Additionally,
        each mode has optical and chemical properties that can be set including: the k-kohler
        coefficient, the density, and the refractive index.

        Parameters
        ----------
        n : float
            Total number of particles (#/cc)
        gm : float
            Median particle diameter (Geometric Mean) in units of microns.
        gsd : float
            Geometric Standard Deviation
        label : string, optional
            Label for the mode. Defaults to "Mode <i>" where <i> is the
            current number of modes.
        kappa: float, optional
            The k-kohler coefficient to describe hygroscopic growth. Default is 0.
        rho: float: optional
            The particle density in units of g/cm3. Default is 1.
        refr: complex: optional
            The complex refractive index. Default is 1.5+0i. This should be the dry
            refractive index. If/when the distribution is evaluated at a non-zero RH,
            the refractive index will be adjusted based on its water content at that
            RH using the defined kappa value.

        Examples
        --------

        Create a single-mode aerosol distribution with parameters N=7100,
        GM=11.7 nm, GSD=1.706. These are the sample parameters for the first
        mode of a typical Urban aerosol distribution as described in Seinfeld
        and Pandis (Table 8.3).

        >>> d = AerosolDistribution("Urban")
        >>> d.add_mode(n=7100, gm=0.0117, gsd=1.706, label="Mode 1")

        We can also go ahead and add the second and third modes if we so choose:

        >>> d.add_mode(n=6320, gm=0.0373, gsd=1.778, label="Mode 2")
        >>> d.add_mode(n=960, gm=0.151, gsd=1.599, label="Mode 3")

        """
        self.modes.append(
            {
                'label': label if label else "Mode {}".format(len(self.modes)),
                'N': n,
                'GM': gm,
                'GSD': gsd,
                "kappa": kappa,
                "rho": rho,
                "refr": refr
            }
        )

    def pdf(self, dp, base='log10', weight='number', mode=None, rh=0., rho=None):
        """Evaluate and return the probability distribution function at
        particle diameter `dp`.

        Using equation 8.54 from Seinfeld and Pandis, we can evaluate the
        probability distribution function for a multi-modal aerosol
        distribution by summing the individual pdf's. By default, the calculations
        are based on the dry diameter of the particle (assuming RH=0); however,
        you can adjust the relative humidity using the **rh** parameter which
        will calculate the size based on growth per k-kohler theory using the
        k-kohler coefficient set for each individual mode.

        Parameters
        ----------
        dp : float or an array of floats
            Particle diameter(s) to evaluate the pdf (um)
        base : {None | 'none' | 'log' | 'log10'}
            Base algorithm to use. Default is 'log10'
        weight : {'number' | 'surface' | 'volume' | 'mass'}
            Choose how to weight the pdf. Default is `number`.
        mode : string or None
            Choose to only evaluate the pdf for a single mode
            of the entire distribution. If set to `None`, the entire
            distribution will be evaluated.
        rh: float: optional
            The relative humidity as a percentage (0-100). Default is 0.
        rho: float: optional
            The particle density. If set, this will override the density set
            for individual modes.

        Returns
        -------
        float
            The evaluated pdf at particle diameter dp

        Examples
        --------

        Evaluate the PDF at 100 nm

        >>> d = opcsim.load_distribution("Urban")
        >>> d.pdf(0.1)

        Evaluate the PDF at a range of particle diameters

        >>> d.pdf(np.linspace(0.1, 1., 100))

        Evaluate the PDF for a volume-weighted distribution -> returns dVdlogDp

        >>> d.pdf(0.1, weight='volume')

        Evaluate the PDF for a volume-weighted distribution in ln-space

        >>> d.pdf(0.1, weight='volume', base='log')

        """
        value = 0.0

        if mode is not None:
            modes = [self._get_mode(mode)]
        else:
            modes = self.modes

        for each in modes:
            # calculate the wet diameter of the particle based on k-kohler theory
            gm = k_kohler(diam_dry=each['GM'], kappa=each["kappa"], rh=rh)

            # BUG FIX: keep the density in a per-mode local. The original
            # reassigned the `rho` argument inside this loop, so every mode
            # after the first reused the previous mode's *effective* density
            # instead of its own. Comparing against None (rather than
            # truthiness) also honors an explicit rho=0 override.
            mode_rho = rho if rho is not None else each["rho"]

            # calculate the effective density, taking hygroscopic growth into account
            eff_rho = rho_eff([mode_rho, RHO_H20], diams=[each['GM'], gm - each['GM']])

            value += _get_pdf_func(base, weight, dp, each['N'], gm, each['GSD'], eff_rho)

        return value

    def cdf(self, dmax, dmin=None, weight='number', mode=None, rh=0., rho=None):
        """Evaluate and return the cumulative probability distribution function
        between `dmin` and `dmax`.

        Using equation _ from Seinfeld and Pandis, we can evaluate the cdf of a
        a multi-modal particle size distribution by summing the individual
        distributions. For example, evaluating the cdf over the entire size
        range will return the total number of particles in that size range. If
        weighted by surface area or volume, it will return the integrated
        surface area or volume respectively. By default, the calculations
        are based on the dry diameter of the particle (assuming RH=0); however,
        you can adjust the relative humidity using the **rh** parameter which
        will calculate the size based on growth per k-kohler theory using the
        k-kohler coefficient set for each individual mode.

        Parameters
        ----------
        dmax : float
            The maximum particle diameter in the integration (um)
        dmin : float, optional
            The minimum particle diameter in the integration (um)
        weight : {'number' | 'surface' | 'volume' | 'mass'}
            Choose how to weight the pdf. Default is `number`
        mode : string or None
            Choose to only evaluate the pdf for a single mode
            of the entire distribution. If set to `None`, the entire
            distribution will be evaluated.
        rh: float: optional
            The relative humidity as a percentage (0-100). Default is 0.
        rho: float: optional
            The particle density. If set, this will override the density set
            for individual modes.

        Returns
        -------
        float
            The integrated distribution function representing the total number of
            {particles, surface area, volume} between dmin and dmax.

        Examples
        --------

        Evaluate the CDF over (nearly) the entire distribution:

        >>> d = opcsim.load_distribution("Urban")
        >>> d.cdf(dmax=10.)

        Evaluate the CDF up to 100 nm

        >>> d.cdf(dmax=0.1)

        Evaluate the total volume/mass under 2.5 microns

        >>> d.cdf(dmax=2.5, weight='volume')

        """
        if dmin is not None and dmin >= dmax:
            raise ValueError("dmin must be less than dmax")

        value = 0.0

        if mode is not None:
            modes = [self._get_mode(mode)]
        else:
            modes = self.modes

        for each in modes:
            # calculate the wet diameter of the particle based on k-kohler theory
            gm = k_kohler(diam_dry=each['GM'], kappa=each["kappa"], rh=rh)

            # BUG FIX: per-mode local density (see pdf for details) -- the
            # original clobbered the `rho` argument across loop iterations.
            mode_rho = rho if rho is not None else each["rho"]

            # calculate the effective density, taking hygroscopic growth into account
            eff_rho = rho_eff([mode_rho, RHO_H20], diams=[each['GM'], gm - each['GM']])

            value += _get_cdf_func(each['N'], gm, each['GSD'], dmin, dmax, weight, eff_rho)

        return value

    def __repr__(self):
        return "AerosolDistribution: {}".format(self.label)
390 |
--------------------------------------------------------------------------------
/src/opcsim/plots.py:
--------------------------------------------------------------------------------
1 | import seaborn as sns
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import matplotlib.ticker as mtick
5 | import itertools
6 |
7 | from .distributions import AerosolDistribution
8 | from .models import OPC
9 | from .mie import cscat
10 |
# Tick formatter that switches to scientific notation outside the
# 1e-3 -- 1e4 range; applied to the y-axis of pdf plots.
lrg_number_fmt = mtick.ScalarFormatter()
lrg_number_fmt.set_powerlimits((-3, 4))

# rc-parameter overrides for log-scaled axes: larger ticks, darker tick
# color, and a slightly heavier axis line.
rc_log = {
    'xtick.major.size': 10.0,
    'xtick.minor.size': 6.0,
    'ytick.major.size': 10.0,
    'ytick.minor.size': 8.0,
    'xtick.color': '0.1',
    'ytick.color': '0.1',
    'axes.linewidth': 1.75
}

# y-axis labels for pdfplot, keyed first by base ('none' | 'log' | 'log10')
# and then by weight ('number' | 'surface' | 'volume' | 'mass').
YLABEL = {
    'none': {
        'number': "$dN/dD_p \; [\mu m^{-1} cm^{-3}]$",
        'surface': "$dS/dD_p \; [\mu m cm^{-3}]$",
        'volume': "$dV/dD_p \; [\mu m^2 cm^{-3}]$",
        'mass': "$dM/dD_p$"
    },
    'log': {
        'number': "$dN/dlnD_p \; [# cm^{-3}]$",
        'surface': "$dS/dlnD_p \; [\mu m^2 cm^{-3}]$",
        'volume': "$dV/dlnD_p \; [\mu m^3 cm^{-3}]$",
        'mass': "$dM/dlnD_p \; [\mu g m^{-3}]$"
    },
    'log10': {
        'number': "$dN/dlogD_p \; [cm^{-3}]$",
        'surface': "$dS/dlogD_p \; [\mu m^2 cm^{-3}]$",
        'volume': "$dV/dlogD_p \; [\mu m^3 cm^{-3}]$",
        'mass': "$dM/dlogD_p \; [\mu g m^{-3}]$"
    }
}

# y-axis labels for cdfplot, keyed by weight.
YLABEL_CDF = {
    'number': 'Total Number [$cm^{-3}$]',
    'surface': 'Total Surface Area [$\mu m^2cm^{-3}$]',
    'volume': 'Total Volume [$\mu m^3 cm^{-3}$]',
    'mass': "Total Mass [$\mu g m^{-3}$]"
}
51 |
def histplot(data, bins, ax=None, plot_kws=None, fig_kws=None, **kwargs):
    """Plot the particle size distribution as a histogram/bar chart.

    Parameters
    ----------
    data : array of floats
        An array containing the y variable you are plotting (ex. dNdlogDp)
    bins : 3xn array
        A 3xn array containing the bin values for an OPC
    ax : matplotlib axis
        If an axis is provided, the histogram will be plotted on this axis.
        Otherwise, a new axis object will be created.
    plot_kws : dict, optional
        Optional keyword arguments to include. They are sent as an argument to
        the matplotlib bar plot.
    fig_kws : dict, optional
        Optional keyword arguments to include for the figure.

    Returns
    -------
    ax : matplotlib axis object

    Examples
    --------

    Plot a 10-bin OPC's response to the Urban Distribution

    .. plot::
        :context: close-figs

        >>> import opcsim, seaborn as sns
        >>> opc = opcsim.OPC(wl=0.658, n_bins=10, dmin=0.3)
        >>> opc.calibrate("psl")
        >>> d = opcsim.load_distribution("Urban")
        >>> ax = opcsim.plots.histplot(opc.evaluate(d), opc.bins)
        >>> ax.set_ylabel("$dN/dlogD_p$")
        >>> sns.despine()

    We can also plot the same OPC in volume (mass) space

    .. plot::
        :context: close-figs

        >>> ax = opcsim.plots.histplot(
        ...     opc.evaluate(d, weight='volume'), opc.bins)
        >>> ax.set_ylabel("$dV/dlogD_p$")
        >>> sns.despine()

    How about overlaying two OPC's

    .. plot::
        :context: close-figs

        >>> opcb = opcsim.OPC(wl=0.658, n_bins=5, dmin=0.3)
        >>> opcb.calibrate("psl")
        >>> ax = opcsim.plots.histplot(opc.evaluate(d),
        ...     opc.bins, label="10 bin OPC")
        >>> ax = opcsim.plots.histplot(opcb.evaluate(d), opcb.bins,
        ...     label="5 bin OPC", ax=ax)
        >>> ax.set_ylabel("$dN/dlogD_p$")
        >>> ax.legend(loc='best')
        >>> sns.despine()


    What if we want to fill in the boxes?

    .. plot::
        :context: close-figs

        >>> plot_kws = dict(fill=True)
        >>> ax = opcsim.plots.histplot(opc.evaluate(d),
        ...     opc.bins, plot_kws=plot_kws)
        >>> ax.set_ylabel("$dN/dlogD_p$")
        >>> sns.despine()

    """
    # Avoid mutable default arguments: the {} defaults were shared across
    # calls; None is the safe sentinel.
    # Set the default figure kws
    default_fig_kws = dict()
    fig_kws = dict(default_fig_kws, **(fig_kws or {}))

    # Make a new axis object if one wasn't set
    if ax is None:
        plt.figure(**fig_kws)
        ax = plt.gca()

    # Get the next color available in the palette.
    # NOTE(review): this relies on private matplotlib API; get_next_color()
    # replaces the ``prop_cycler`` attribute removed in matplotlib 3.8.
    nc = ax._get_lines.get_next_color()

    # Set the plot_kws as a mapping of default and kwargs
    default_plot_kws = dict(
        alpha=1,
        edgecolor=nc,
        color=nc,
        linewidth=5,
        fill=False,
        label=kwargs.pop('label', None))

    # Set the plot_kws
    plot_kws = dict(default_plot_kws, **(plot_kws or {}))

    # Plot the bar plot
    ax.bar(x=bins[:, 0], height=data, width=bins[:, -1] - bins[:, 0],
           align='edge', **plot_kws)

    # Set the xaxis to be log10
    ax.semilogx()

    # Set the xlabel
    ax.set_xlabel("$D_p \; [\mu m]$")

    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))

    return ax
165 |
166 |
def pdfplot(distribution, ax=None, weight='number', base='log10', with_modes=False,
            fill=False, plot_kws=None, fig_kws=None, fill_kws=None, **kwargs):
    """Plot the PDF of an aerosol size distribution.

    Parameters
    ----------
    distribution : valid `AerosolDistribution`
        An aerosol distribution with the method `pdf` that can be evaluated at
        an array of particle diameters.
    weight : {'number' | 'surface' | 'volume' | 'mass'}
        Choose how to weight the pdf. Default is `number`.
    base : {'none' | 'log' | 'log10'}
        Base algorithm to use. Default is 'log10'.
    ax : matplotlib axis
        If an axis is provided, the histogram will be plotted on this axis.
        Otherwise, a new axis object will be created.
    with_modes : bool
        If true, all modes of a given distribution will be plotted along with
        their sum. If false, only the sum will be plotted.
    fill : bool
        If true, the area under the PDF will be filled. Cannot be used while
        plotting individual modes (with_modes=True).
    plot_kws : dict, optional
        Optional keyword arguments to include. They are sent as an argument to
        the matplotlib plot call.
    fig_kws : dict, optional
        Optional keyword arguments to include for the figure.
    fill_kws : dict, optional
        Optional keyword arguments to include for the fill. Sent to
        matplotlib.axes.fill_between.

    Returns
    -------
    ax : matplotlib axis object

    Examples
    --------

    Plot the number-weighted Urban aerosol distribution

    .. plot::
        :context: close-figs

        >>> import opcsim, seaborn as sns
        >>> d = opcsim.load_distribution("Urban")
        >>> ax = opcsim.plots.pdfplot(d)
        >>> ax.set_title("Urban Aerosol Distribution", fontsize=16)
        >>> sns.despine()

    Let's take a look at the individual modes as well

    .. plot::
        :context: close-figs

        >>> ax = opcsim.plots.pdfplot(d, with_modes=True)
        >>> ax.set_title("Urban Aerosol Distribution", fontsize=16)
        >>> ax.legend(loc='best')
        >>> sns.despine()

    Let's plot the volume weighted version

    .. plot::
        :context: close-figs

        >>> ax = opcsim.plots.pdfplot(d, weight='volume', with_modes=True)
        >>> ax.set_title("Volume Weighted Urban Aerosol Distribution", fontsize=16)
        >>> ax.legend(loc='best')
        >>> sns.despine()

    Let's plot a few different distributions together

    .. plot::
        :context: close-figs

        >>> d2 = opcsim.load_distribution("Marine")
        >>> d3 = opcsim.load_distribution("Rural")
        >>> ax = opcsim.plots.pdfplot(d)
        >>> ax = opcsim.plots.pdfplot(d2, ax=ax)
        >>> ax = opcsim.plots.pdfplot(d3, ax=ax)
        >>> ax.set_title("Various Aerosol Distributions", fontsize=16)
        >>> ax.legend(loc='best')
        >>> sns.despine()

    """
    if not hasattr(distribution, 'pdf'):
        raise Exception("Invalid AerosolDistribution.")

    if weight not in ['number', 'surface', 'volume', 'mass']:
        raise ValueError("Invalid weight: ['number', 'surface', 'volume', 'mass']")

    # Set the default dp values to plot against
    dp = kwargs.pop('dp', np.logspace(-3, 1, 1000))

    # Get the current default color palette and cycle through "CN" aliases
    cp = sns.color_palette()
    cc = map("C{}".format, itertools.cycle(range(len(cp))))

    # Avoid mutable default arguments: the {} defaults were shared across
    # calls; None is the safe sentinel.
    default_fig_kws = dict()
    fig_kws = dict(default_fig_kws, **(fig_kws or {}))

    # Make a new axis object if one wasn't set
    if ax is None:
        plt.figure(**fig_kws)
        ax = plt.gca()

    # Plot the complete distribution
    nc = next(cc)

    # Set the plot_kws as a mapping of default and kwargs
    default_plot_kws = dict(
        alpha=1,
        linewidth=4,
        color=nc
    )

    # Set the plot_kws
    plot_kws = dict(default_plot_kws, **(plot_kws or {}))

    # Set the default fill_kws
    default_fill_kws = dict(color=plot_kws['color'])
    fill_kws = dict(default_fill_kws, **(fill_kws or {}))

    # If label kwarg is present, use -> otherwise use default
    label = kwargs.pop('label', distribution.label)

    # Get the data to plot
    data = distribution.pdf(dp, base=base, weight=weight, **kwargs)

    # If fill is selected, fill the gap, otherwise just plot a line
    ax.plot(dp, data, label=label, **plot_kws)

    if fill:
        ax.fill_between(dp, 0, data, label=label, **fill_kws)

    # Plot the individual modes on top of the summed distribution
    if with_modes is True:
        for m in distribution.modes:
            data = distribution.pdf(dp, base=base, weight=weight, mode=m['label'])

            # Pop off color from the plot_kws
            plot_kws['color'] = next(cc)

            ax.plot(dp, data, label=m['label'], ls='--', **plot_kws)

    # BUG FIX: the original condition was ``base is not ('none' or None)``,
    # which evaluates to ``base is not 'none'`` -- an identity comparison
    # against a string literal that also never matched base=None.
    if base not in ('none', None):
        ax.semilogx()
        ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.4g"))

    ax.set_xlabel("$D_p \; [\mu m]$")

    # YLABEL stores linear-space labels under 'none'; map base=None onto it
    # so a valid base choice can no longer raise a KeyError here.
    ax.set_ylabel(YLABEL['none' if base is None else base][weight])

    # Set the yaxis to show scientific notation when numbers > 1e4
    ax.yaxis.set_major_formatter(lrg_number_fmt)

    return ax
323 |
324 |
def cdfplot(distribution, ax=None, weight='number', plot_kws={},
            fig_kws={}, **kwargs):
    """Plot the cumulative distribution function (CDF) of a particle
    size distribution.

    Parameters
    ----------
    distribution : valid `AerosolDistribution`
        Any object exposing a `cdf` method that can be evaluated over
        an array of particle diameters.
    ax : matplotlib axis
        Axis to draw on; when omitted, a new figure and axis are
        created.
    weight : {'number' | 'surface' | 'volume' | 'mass'}
        How to weight the cdf. Default is `number`.
    plot_kws : dict
        Optional keyword arguments forwarded to the matplotlib plot
        call.
    fig_kws : dict
        Optional keyword arguments used when creating a new figure.

    Returns
    -------
    ax : matplotlib axis object

    Examples
    --------

    Plot the number-weighted Urban aerosol distribution

    .. plot::
        :context: close-figs

        >>> import opcsim, seaborn as sns
        >>> d = opcsim.load_distribution("Urban")
        >>> ax = opcsim.plots.cdfplot(d)
        >>> ax.set_title("Urban Aerosol Distribution", fontsize=16)
        >>> sns.despine()

    Let's plot the volume weighted version

    .. plot::
        :context: close-figs

        >>> ax = opcsim.plots.cdfplot(d, weight='volume')
        >>> ax.set_title("Volume Weighted Urban Aerosol Distribution", fontsize=16)
        >>> ax.legend(loc='best')
        >>> sns.despine()

    """
    # Guard clauses: validate the distribution and the requested weight
    if not hasattr(distribution, 'cdf'):
        raise Exception("Invalid AerosolDistribution.")

    if weight not in ['number', 'surface', 'volume', 'mass']:
        raise ValueError("Invalid weight: ['number', 'surface', 'volume', 'mass']")

    # Diameters at which to evaluate the CDF (log-spaced by default)
    diams = kwargs.pop('dp', np.logspace(-3, 1, 1000))

    # Build a fresh axis if the caller didn't supply one
    if ax is None:
        plt.figure(**dict(fig_kws))
        ax = plt.gca()

    # Merge caller-supplied plot kwargs over the defaults
    line_kws = dict(dict(alpha=1, linewidth=5), **plot_kws)

    # First color of the active seaborn palette ("C0", "C1", ...)
    palette = sns.color_palette()
    color_cycle = map("C{}".format, itertools.cycle(range(len(palette))))
    line_color = next(color_cycle)

    # Honor an explicit `label` kwarg, falling back to the
    # distribution's own label
    curve_label = kwargs.pop('label', distribution.label)

    # Evaluate and draw the CDF
    values = distribution.cdf(diams, weight=weight, **kwargs)
    ax.plot(diams, values, color=line_color, label=curve_label, **line_kws)

    # Log-scaled x axis with a general-format tick formatter
    ax.semilogx()
    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))

    ax.set_xlabel("$D_p \; [\mu m]$")
    ax.set_ylabel(YLABEL_CDF[weight])

    # Show scientific notation on the y axis for large numbers
    ax.yaxis.set_major_formatter(lrg_number_fmt)

    return ax
424 |
425 |
def calplot(opc, ax=None, plot_kws={}, fig_kws={}, **kwargs):
    """Plot the calibration curve of an OPC.

    Draws the theoretical scattering cross-section (Cscat) computed via
    Mie theory at the OPC's laser wavelength and viewing angle for its
    calibration refractive index, alongside the OPC's fitted
    calibration values at each bin boundary.

    Parameters
    ----------
    opc : OPC
        A calibrated OPC; its `wl`, `theta`, `calibration_refr`,
        `bin_boundaries`, and `calibration_vals` attributes are read.
    ax : matplotlib axis
        If an axis is provided, the curves are plotted on this axis.
        Otherwise, a new axis object is created.
    plot_kws : dict
        Optional keyword arguments sent to the matplotlib plot call
        for the Mie curve.
    fig_kws : dict
        Optional keyword arguments to include for the figure.
    dp : array-like, optional
        Supplied via **kwargs: diameters (in microns) at which the Mie
        curve is evaluated. Defaults to `np.logspace(-1.5, 1.5, 250)`.

    Returns
    -------
    ax : matplotlib axis object
    """
    # Make a new axis object if one wasn't set
    if ax is None:
        plt.figure(**fig_kws)
        ax = plt.gca()

    xs = kwargs.pop("dp", np.logspace(-1.5, 1.5, 250))

    # Set the plot_kws as a mapping of default and kwargs
    default_plot_kws = dict(alpha=1, linewidth=3)

    # Set the plot_kws
    plot_kws = dict(default_plot_kws, **plot_kws)

    # compute the Cscat values at each diameter in xs
    yvals = np.array([cscat(x, wl=opc.wl, refr=opc.calibration_refr,
                            theta1=opc.theta[0], theta2=opc.theta[1]) for x in xs])

    # use the first two colors of the current seaborn palette
    cp = sns.color_palette()
    cc = map("C{}".format, itertools.cycle(range(len(cp))))
    nc = next(cc)

    ax.plot(xs, yvals, color=nc, label="Mie", **plot_kws)
    ax.plot(opc.bin_boundaries, opc.calibration_vals, "o-",
            color=next(cc), label="Calibration")

    # log-log axes; Cscat spans many orders of magnitude
    ax.semilogx()
    ax.semilogy()
    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
    ax.set_xlabel("$D_p \; [\mu m]$")
    ax.set_ylabel("$C_{scat}\; [cm^2/particle]$")
    ax.legend(loc="upper left")

    return ax
462 |
463 |
# Public API of this module (consumed by `from opcsim.plots import *`)
__all__ = [
    'histplot',
    'pdfplot',
    'cdfplot',
    'calplot'
]
470 |
--------------------------------------------------------------------------------
/docs/tutorial/aerosol_distributions.rst:
--------------------------------------------------------------------------------
1 | .. _distribution_tutorial:
2 |
3 |
4 | The Aerosol Distribution Tutorial
5 | =================================
6 |
7 | The following tutorial will show you how an aerosol distribution is
8 | represented in the opcsim model. You will learn how to use the sample
9 | datasets and how to create your own distribution from scratch.
10 | Additional visualization tools are also discussed.
11 |
12 | First, we import the python libraries we need and set the styles used
13 | for plotting throughout this tutorial.
14 |
15 | .. code:: ipython3
16 |
17 | # Make imports
18 | import opcsim
19 | import numpy as np
20 | import matplotlib.pyplot as plt
21 | import seaborn as sns
22 |
23 | %matplotlib inline
24 |
25 | # Set mathtext to display equations without italics
26 | plt.rcParams.update({'mathtext.default': 'regular'})
27 |
28 | # turn off warnings temporarily
29 | import warnings
30 | warnings.simplefilter('ignore')
31 |
32 | # Let's set some default seaborn settings
33 | sns.set(context='notebook', style='ticks', palette='dark',
34 | font_scale=1.75, rc={'figure.figsize': (10, 5)})
35 |
36 | The Aerosol Distribution
37 | ========================
38 |
39 | For the purpose of evaluating the performance of low-cost optical
40 | particle counters, we are going to assume that every aerosol
41 | distribution can be described as the sum of :math:`n` lognormal
42 | distributions (S+P 8.54). Thus, it follows that:
43 |
44 | .. math:: n_N^o(logD_p)=\sum_{i=1}^n \frac{N_i}{\sqrt{2\pi} * log\sigma_i}exp\Big(-\frac{(logD_p - logD_{pi})^2}{2log^2\sigma_i}\Big)
45 |
46 | where :math:`N_i` is the number concentration, :math:`D_{pi}` is the
47 | median particle diameter (Geometric Mean), and :math:`\sigma_i` is the
geometric standard deviation. Thus, we need :math:`3n` parameters to describe
an entire aerosol distribution.
50 |
51 | Using the ``opcsim.AerosolDistribution`` class, we can build our own
52 | distributions by defining each aerosol mode as its own lognormal mode.
53 |
54 | Ex: Initialize an Aerosol Distribution with a single mode
55 | (:math:`N=1000`, :math:`D_{pg}=100\;nm`, :math:`\sigma=1.5`)
56 |
57 | .. code:: ipython3
58 |
59 | # Initialize a distribution
60 | sample = opcsim.AerosolDistribution()
61 |
62 | # Add a mode with N=1000, GM=0.1, GSD=1.5
63 | sample.add_mode(n=1000, gm=0.1, gsd=1.5, label="Mode I")
64 |
65 | Most aerosol distributions are composed of several lognormal modes.
66 | Table 8.3 in Seinfeld and Pandis (originally from Jaenicke (1993)) shows
67 | parameters for several model aerosol distributions. The urban aerosol
68 | distribution can be described as follows:
69 |
70 | ==== ========= ============== ===================
71 | Mode :math:`N` :math:`D_{pg}` :math:`log\sigma_i`
72 | ==== ========= ============== ===================
73 | I 7100 0.0117 0.232
74 | II 6320 0.0373 0.250
75 | III 960 0.151 0.204
76 | ==== ========= ============== ===================
77 |
78 | How would we go about building this distribution? We can add as many
79 | modes as we would like, following the same method we used above. Also,
80 | if you look at the API documentation for the
81 | ``opcsim.AerosolDistribution`` class, we see that you can add a label
82 | for the distribution as an argument upon initiation of the class
83 | instance.
84 |
85 | .. code:: ipython3
86 |
    # Initialize a new aerosol distribution named 'Urban'
88 | urban = opcsim.AerosolDistribution("Urban")
89 |
90 | # Individually add each mode
91 | urban.add_mode(7100, 0.0117, 10**0.232, "Mode I")
92 | urban.add_mode(6320, 0.0373, 10**0.25, "Mode II")
93 | urban.add_mode(960, 0.151, 10**0.204, "Mode III")
94 |
95 | To make things even easier, we have included the model distributions
96 | directly into the package which can be accessed via the
97 | ``opcsim.load_distribution`` function. To import the distribution,
98 | simply call ``opcsim.load_distribution`` and provide the name of the
99 | distribution you would like to use as an argument. Options include:
100 |
101 | - Urban
102 | - Marine
103 | - Rural
104 | - Remote Continental
105 | - Free Troposphere
106 | - Polar
107 | - Desert
108 |
109 | To read in the urban distribution, we would do the following:
110 |
111 | .. code:: ipython3
112 |
113 | urban = opcsim.load_distribution("Urban")
114 |
115 | Incorporating Optical and Chemical Properties of Aerosols
116 | ---------------------------------------------------------
117 |
118 | This library was created for evaluating particle sensors’ response to
119 | aerosol distributions. Thus, it is extremely important to consider the
120 | effects of aerosol optical properties as well. In addition to the size
121 | parameters described above, each mode has a few additional properties
122 | you can set:
123 |
124 | - **kappa**: the :math:`\kappa`-kohler coefficient for describing water
125 | uptake
126 | - **rho**: the particle density, :math:`\rho`
127 | - **refr**: the complex refractive index, :math:`n`
128 |
129 | Each of these is set at the individual mode level and has defaults of
130 | :math:`\kappa=0`, :math:`\rho=1`, and :math:`RI=1.5+0i`.
131 |
132 | Thus, if we wanted to create a distribution with a single mode of
133 | Ammonium Sulfate:
134 |
135 | .. code:: ipython3
136 |
137 | amm_sulf = opcsim.AerosolDistribution("Ammonium Sulfate")
138 |
139 | # add a single mode of ammonium sulfate
140 | amm_sulf.add_mode(n=1000, gm=0.08, gsd=1.5, label="mode_1",
141 | refr=(1.521+0j), rho=1.77, kappa=0.53)
142 |
143 | amm_sulf
144 |
145 |
146 |
147 |
148 | .. parsed-literal::
149 |
150 | AerosolDistribution: Ammonium Sulfate
151 |
152 |
153 |
154 | If you wanted, you could also add multiple modes with different optical
155 | properties. In addition, there are a few helper functions, found in the
156 | utilities, that make it simple to make homogeneous aerosol mixtures. At
157 | this point in time, the library does not support heterogeneous mixtures
or core-shell models, though it may in the future if there is demand.
159 |
160 | Probability Distribution Function
161 | =================================
162 |
163 | Number Distribution
164 | -------------------
165 |
Aerosol distributions are typically depicted using the probability
167 | distribution function (PDF). In Seinfeld and Pandis, they refer to it as
168 | the **Number Distribution Function**. When plotted in log-space
169 | (i.e. :math:`dN/dlogD_p`), the area under the curve is the aerosol
170 | number concentration.
171 |
172 | Mathematically, the PDF in number-space looks like the following:
173 |
174 | .. math:: n_N^o(logD_p)=\frac{dN}{dlogD_p}=\frac{N_t}{\sqrt{2\pi} \; log\sigma_g}exp\Big(-\frac{(logD_p - logD_{pg})^2}{2log^2\sigma_g}\Big)
175 |
176 | All three representations of the number distribution are available:
177 |
178 | - :math:`dN/dD_p`: ``opcsim.equations.pdf.dn_ddp``
179 | - :math:`dN/dlnD_p`: ``opcsim.equations.pdf.dn_dlndp``
180 | - :math:`dN/dlogD_p`: ``opcsim.equations.pdf.dn_dlogdp``
181 |
182 | While mathematically, representing the aerosol distribution in any base
{log, log10, none} is equivalent, visually it is not. For this reason,
184 | when plotting, we use the log10-base so that the area under the curve
185 | represents the total aerosol number concentration. For example, if we
186 | were to plot all three bases on the same plot for the Urban
187 | distribution, we would get:
188 |
189 | .. code:: ipython3
190 |
191 | sample = opcsim.AerosolDistribution("Sample")
192 | sample.add_mode(1000, 0.8, 2)
193 |
194 | fig, ax = plt.subplots(1, figsize=(10, 6))
195 |
196 | ax = opcsim.plots.pdfplot(sample, ax=ax, weight='number', base='none', label='$n_N(D_p)$')
197 | ax = opcsim.plots.pdfplot(sample, ax=ax, weight='number', base='log', label='$n_N^e(lnD_p)$')
198 | ax = opcsim.plots.pdfplot(sample, ax=ax, weight='number', base='log10', label='$n_N^0(logD_p)$')
199 |
200 | ax.legend(loc='upper left')
201 | ax.set_ylim(0, None)
202 | ax.set_xlim(0.01, 10)
203 | sns.despine()
204 |
205 |
206 |
207 | .. image:: aerosol_distributions_files/aerosol_distributions_13_0.png
208 |
209 |
210 | Surface Area Distribution
211 | -------------------------
212 |
213 | It is also quite useful to look at the surface area and volume
214 | distributions. The surface area probability distribution can easily be
215 | obtained by relating to the number probability distribution in the
216 | following way:
217 |
218 | .. math:: n_S^o(logD_p)=\pi D_p^2 n_N^o(logD_p)=\frac{dS}{dlogD_p}=\frac{\pi D_p^2 N_t}{\sqrt{2\pi} \; log\sigma_g}exp\Big(-\frac{(logD_p - logD_{pg})^2}{2log^2\sigma_g}\Big)
219 |
220 | All three representations of the surface area distribution are
221 | available:
222 |
223 | - :math:`dS/dD_p`: ``opcsim.equations.pdf.ds_ddp``
224 | - :math:`dS/dlnD_p`: ``opcsim.equations.pdf.ds_dlndp``
225 | - :math:`dS/dlogD_p`: ``opcsim.equations.pdf.ds_dlogdp``
226 |
227 | Volume Distribution
228 | -------------------
229 |
230 | Likewise, for the volume distribution, we get:
231 |
232 | .. math:: n_V^o(logD_p)=\frac{\pi}{6} D_p^3 n_N^o(logD_p)=\frac{dV}{dlogD_p}=\frac{\pi D_p^3 N_t}{6\sqrt{2\pi} \; log\sigma_g}exp\Big(-\frac{(logD_p - logD_{pg})^2}{2log^2\sigma_g}\Big)
233 |
234 | All three representations of the volume distribution are available:
235 |
236 | - :math:`dV/dD_p`: ``opcsim.equations.pdf.dv_ddp``
237 | - :math:`dV/dlnD_p`: ``opcsim.equations.pdf.dv_dlndp``
238 | - :math:`dV/dlogD_p`: ``opcsim.equations.pdf.dv_dlogdp``
239 |
240 | Evaluating the PDF
241 | ~~~~~~~~~~~~~~~~~~
242 |
243 | ``opcsim`` provides the ``AerosolDistribution.pdf`` method to easily
244 | calculate the distribution at any particle diameter. The arguments of
245 | the function are the particle diameter (``dp``), the base (``none``,
246 | ``log``, or ``log10``), the weight (``number``, ``surface``, ``volume``,
247 | or ``mass``), and an optional ``mode`` parameter in case you would like
248 | to examine only one of the modes of the distribution at a time. The
249 | default arguments are set to be the most useful/common ones
250 | (i.e. ``weight='number'``, ``base='log10'``). If calculating the
251 | mass-weighted PDF, you can also provide an optional keyword argument
252 | ``rho``; the default value for particle density is :math:`1\;gcm^{-3}`.
253 |
254 | To calculate the number probability for the urban aerosol distribution
255 | at :math:`0.1 \; \mu m`, we can do the following:
256 |
257 | .. code:: ipython3
258 |
259 | urban.pdf(0.1)
260 |
261 |
262 |
263 |
264 | .. parsed-literal::
265 |
266 | 3606.2139576648124
267 |
268 |
269 |
This gives us the number concentration probability at :math:`0.1\;\mu m` in units
271 | of :math:`particles\;cm^{-3}`. We can also calculate a whole range of
272 | values by providing an array for the ``dp`` value:
273 |
274 | .. code:: ipython3
275 |
276 | urban.pdf(np.array([0.1, 0.2, 0.3]))
277 |
278 |
279 |
280 |
281 | .. parsed-literal::
282 |
283 | array([3606.21395766, 1712.82519467, 659.56432207])
284 |
285 |
286 |
287 | To calculate the volume-weighted PDF at some particle diameter
288 | (:math:`dV/dlogD_p`), we could do the following:
289 |
290 | .. code:: ipython3
291 |
292 | urban.pdf(0.1, weight='volume')
293 |
294 |
295 |
296 |
297 | .. parsed-literal::
298 |
299 | 1.8882092127787917
300 |
301 |
302 |
303 | This returns :math:`dV/dlogDp` at particle diameter
304 | :math:`D_p=0.1\;\mu m` in units of :math:`\mu m^3 cm^{-3}`.
305 |
306 | Visualizing the Distribution
307 | ============================
308 |
309 | Visualizing the PDF for an aerosol distribution is extremely helpful.
310 | The function ``opcsim.plots.pdfplot`` has been included to make this
311 | simple.
312 |
313 | To plot the pdf of an aerosol distribution, the only required input is
314 | the ``opcsim.AerosolDistribution`` object. The function returns a
matplotlib axis object, which makes it extremely easy to modify
316 | the plot using normal matplotlib syntax.
317 |
318 | Let’s plot the urban distribution we built earlier.
319 |
320 | .. code:: ipython3
321 |
322 | ax = opcsim.plots.pdfplot(urban)
323 |
324 | # Set the y-lim to start at 0
325 | ax.set_ylim(0, None)
326 |
327 | # Remove the right and top spines
328 | sns.despine();
329 |
330 |
331 |
332 | .. image:: aerosol_distributions_files/aerosol_distributions_23_0.png
333 |
334 |
335 | kwargs for the PDF Plot
336 | -----------------------
337 |
338 | We can also send a number of kwargs to the PDF plot to change its
339 | appearance. We can add ``plot_kws`` to the matplotlib plot call (things
340 | like linewidth, color, etc). We can add ``fig_kws`` which are sent when
341 | creating the figure (think figsize, etc). We can set ``fill_kws`` that
342 | are sent to the matplotlib fill_between call if and only if
343 | ``fill=True``.
344 |
345 | .. code:: ipython3
346 |
347 | ax = opcsim.plots.pdfplot(urban, fill=True, fill_kws=dict(alpha=.3), plot_kws=dict(linewidth=1))
348 |
349 | # Set the y-lim to start at 0
350 | ax.set_ylim(0, None)
351 |
352 | # Remove the right and top spines
353 | sns.despine();
354 |
355 |
356 |
357 | .. image:: aerosol_distributions_files/aerosol_distributions_25_0.png
358 |
359 |
360 | We can also go ahead and plot each individual mode along with the entire
361 | distribution using the ``with_modes`` argument:
362 |
363 | .. code:: ipython3
364 |
365 | ax = opcsim.plots.pdfplot(urban, with_modes=True)
366 |
367 | ax.legend(loc='best')
368 |
369 | # Set the y-lim to start at 0
370 | ax.set_ylim(0, None)
371 |
372 | # Remove the right and top spines
373 | sns.despine();
374 |
375 |
376 |
377 | .. image:: aerosol_distributions_files/aerosol_distributions_27_0.png
378 |
379 |
380 | Still staying in number space, we can go ahead and plot all of the
381 | available sample distributions to get a feel for just how different they
382 | are!
383 |
384 | .. code:: ipython3
385 |
386 | fig, ax = plt.subplots(1, figsize=(12,6))
387 |
388 | # Iterate over every sample in the library
389 | for i, sample in enumerate(opcsim.distributions.DISTRIBUTION_DATA.keys()):
390 | # Load the sample dataset
391 | _sample = opcsim.load_distribution(sample)
392 |
393 | # if we've used more colors than we have available in this palette, change the linestyle
394 | ls = '-' if i < 6 else '--'
395 |
396 | opcsim.plots.pdfplot(_sample, ax=ax, plot_kws={'linestyle': ls}, dp=np.logspace(-4, 0, 1000))
397 |
398 | # Add a legend
399 | ax.legend(loc='upper left')
400 |
401 | # Set the y-lim
402 | ax.set_ylim(0, None)
403 |
404 | # remove the spine
405 | sns.despine()
406 |
407 |
408 |
409 | .. image:: aerosol_distributions_files/aerosol_distributions_29_0.png
410 |
411 |
412 | Finally, we can also go ahead and look at one distribution in number,
413 | surface area, and volume weighted views:
414 |
415 | .. code:: ipython3
416 |
417 | fig, ax = plt.subplots(3, figsize=(10,9), sharex=True)
418 |
419 | opcsim.plots.pdfplot(urban, weight='number', ax=ax[0])
420 | opcsim.plots.pdfplot(urban, weight='surface', ax=ax[1])
421 | opcsim.plots.pdfplot(urban, weight='volume', ax=ax[2])
422 |
423 | fig.subplots_adjust(hspace=0)
424 |
425 | ax[0].set_ylabel("Number")
426 | ax[1].set_ylabel("Surface Area")
427 | ax[2].set_ylabel("Volume")
428 |
429 | sns.despine()
430 |
431 | plt.tight_layout(h_pad=0)
432 | plt.show()
433 |
434 |
435 |
436 | .. image:: aerosol_distributions_files/aerosol_distributions_31_0.png
437 |
438 |
439 | Visualizing the Effects of Relative Humidity on Particle Growth
440 | ---------------------------------------------------------------
441 |
442 | If we define a distribution that has a non-zero kappa value, we can
443 | visualize changes in particle size due to water uptake as follows:
444 |
445 | .. code:: ipython3
446 |
447 | amm_sulf = opcsim.AerosolDistribution("Ammonium Sulfate")
448 |
449 | # add a single mode of ammonium sulfate
450 | amm_sulf.add_mode(n=1000, gm=0.08, gsd=1.5, label="mode_1",
451 | refr=complex(1.521, 0), rho=1.77, kappa=0.53)
452 |
453 | # set up a range of rh's to evaluate and plot
454 | rh = np.linspace(5, 95, 10)
455 |
456 | # define a color palette with the right number of colors
457 | cpal = sns.color_palette("GnBu_d", len(rh))
458 |
459 | # set up a figure
460 | fig, ax = plt.subplots(1, figsize=(8, 6))
461 |
462 | # iterate over each rh and plot
463 | for i, each in enumerate(rh):
464 | ax = opcsim.plots.pdfplot(amm_sulf, rh=each, plot_kws=dict(color=cpal[i]),
465 | ax=ax, weight='volume', label="RH={:.0f}%".format(each))
466 |
467 | # tidy up
468 | ax.set_ylim(0, None)
469 | ax.legend(bbox_to_anchor=(1.01, 1))
470 | sns.despine()
471 |
472 |
473 |
474 | .. image:: aerosol_distributions_files/aerosol_distributions_33_0.png
475 |
476 |
477 | Cumulative Distribution Function
478 | ================================
479 |
480 | We can easily obtain the integrated value for number of particles, total
481 | surface area, total volume, or total mass by integrating the correct
482 | CDF.
483 |
484 | Number CDF
485 | ----------
486 |
487 | The total number of particles between two particle diameters can be
488 | found by completing the following integration
489 |
490 | .. math:: N_t=\int_{D_{min}}^{D_{max}}n_N(D_p)dD_p
491 |
492 | Surface Area CDF
493 | ----------------
494 |
495 | We can find the total particle surface area between two diameters using
496 | the following integral:
497 |
498 | .. math:: S_t=\pi \int_{D_{min}}^{D_{max}}D_p^2 n_N(D_p)dD_p
499 |
500 | Volume CDF
501 | ----------
502 |
503 | We can find the total particle volume between two diameters using the
504 | following integral:
505 |
506 | .. math:: V_t=\frac{\pi}{6} \int_{D_{min}}^{D_{max}}D_p^3 n_N(D_p)dD_p
507 |
508 | To evaluate the CDF, we use the ``opcsim.AerosolDistribution.cdf``
509 | method. For example, to evaluate the number of particles with diameter
510 | less than :math:`D_p=2.5\;\mu m`, we do the following:
511 |
512 | .. code:: ipython3
513 |
514 | urban.cdf(dmax=2.5)
515 |
516 |
517 |
518 |
519 | .. parsed-literal::
520 |
521 | 14379.999998896783
522 |
523 |
524 |
525 | If we want to calculate the total number of particles within some size
526 | range, we can add the ``dmin`` argument. For example, let’s find the
527 | total number of particles between 1 and 2.5 microns:
528 |
529 | .. code:: ipython3
530 |
531 | urban.cdf(dmin=1, dmax=2.5)
532 |
533 |
534 |
535 |
536 | .. parsed-literal::
537 |
538 | 0.027425959342281203
539 |
540 |
541 |
What about the total mass of particles less than :math:`D_p=1 \; \mu m`
(i.e. :math:`PM_1`)? Here we integrate the volume-weighted distribution;
with the default particle density of :math:`1\;gcm^{-3}`, the total volume
in :math:`\mu m^3 cm^{-3}` is numerically equal to the mass in
:math:`\mu g \; m^{-3}`.
544 |
545 | .. code:: ipython3
546 |
547 | urban.cdf(dmax=1, weight='volume')
548 |
549 |
550 |
551 |
552 | .. parsed-literal::
553 |
554 | 5.434530991819962
555 |
556 |
557 |
558 | Last, how about the total mass of particles in the Urban distribution if
559 | we set the particle density :math:`\rho=1.65\;gcm^{-3}`:
560 |
561 | .. code:: ipython3
562 |
563 | urban.cdf(dmax=10, weight='mass', rho=1.65)
564 |
565 |
566 |
567 |
568 | .. parsed-literal::
569 |
570 | 9.001358556308155
571 |
572 |
573 |
Although we wouldn't normally plot the CDF, we easily can do so to visualize
575 | where most of the [number, surface area, mass] is within the
576 | distribution using the ``opcsim.plots.cdfplot`` function:
577 |
578 | .. code:: ipython3
579 |
580 | ax = opcsim.plots.cdfplot(urban)
581 |
582 | ax.set_ylim(0, None)
583 | sns.despine()
584 |
585 |
586 |
587 | .. image:: aerosol_distributions_files/aerosol_distributions_43_0.png
588 |
589 |
590 | Lastly, we can plot the total volume CDF to get an idea of where the
591 | mass is distributed:
592 |
593 | .. code:: ipython3
594 |
595 | ax = opcsim.plots.cdfplot(urban, weight='mass', rho=1.65)
596 |
597 | ax.set_ylim(0, None)
598 | sns.despine()
599 |
600 |
601 |
602 | .. image:: aerosol_distributions_files/aerosol_distributions_45_0.png
603 |
604 |
605 |
--------------------------------------------------------------------------------
/src/opcsim/models.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import math
4 | import scipy
5 |
6 | from .distributions import AerosolDistribution
7 | from .utils import make_bins, midpoints, squash_dips, power_law_fit, \
8 | ri_eff, rho_eff, k_kohler
9 | from .mie import cscat
10 | import functools
11 |
# Lookup table of complex refractive indices for common aerosol
# materials, keyed by lowercase material name (used by OPC.calibrate).
# NOTE(review): refractive index is wavelength dependent; these values
# appear to be for visible wavelengths -- confirm against the source
# literature before using them at other wavelengths.
RI_COMMON = {
    "psl": complex(1.59, 0),
    "ammonium_sulfate": complex(1.521, 0),
    "sodium_chloride": complex(1.5405, 0),
    "sodium_nitrate": complex(1.448, 0),
    "black_carbon": complex(1.95, 0.79),
    "sulfuric_acid": complex(1.427, 0),
    "soa": complex(1.4, 0.002),
    "h2o": complex(1.333, 0.),
    "urban_low": complex(1.6, 0.034),
    "urban_high": complex(1.73, 0.086)
}
24 |
25 |
26 | class OPC(object):
27 | """Define an Optical Particle Counter (OPC) with unique properties
28 | for wavelength, bins, and viewing angle.
29 | """
30 | def __init__(self, wl, bins=None, n_bins=5, dmin=0.5,
31 | dmax=2.5, theta=(30., 90.), **kwargs):
32 | """
33 |
34 | Parameters
35 | ----------
36 | wl: float
37 | The laser wavelength in units of microns.
38 | bins: 3xn array
39 | An array of bin diameters containing the (left boundary, midpoint, right boundary).
40 | n_bins: int
41 | The number of desired bins. This should be used with dmin and dmax to generate
42 | a 3xn array of bins.
43 | dmin: float
44 | The left-most bin boundary of the OPC.
45 | dmax: float
46 | The right-most bin boundary of the OPC.
47 | theta: tuple of floats
48 | The viewing range in units of degrees.
49 |
50 | Returns
51 | -------
52 | OPC:
53 | An instance of the OPC class.
54 |
55 | Examples
56 | --------
57 |
58 | Initialize an OPC with 5 bins between 0.5 - 2.5 microns.
59 |
60 | >>> opc = opcsim.OPC(wl=0.658, n_bins=5, dmin=0.5, dmax=2.5, theta=(30., 90.))
61 |
62 | Initialize an OPC with known bins as defined by its bin boundaries.
63 |
64 | >>> opc = opcsim.OPC(wl=0.658, bins=[0.38, 0.54, 0.78, 1.05, 1.5, 2.5], theta=(32., 88.))
65 |
66 | """
67 | # set some params
68 | self.n_bins = n_bins
69 | self.dmin = dmin
70 | self.dmax = dmax
71 | self.bins = bins
72 | self.wl = wl
73 | self.theta = theta
74 | self.label = kwargs.pop("label", None)
75 | self.calibration_function = None
76 | self.calibration_refr = None
77 | self.calibration_vals = None
78 | self._cscat_boundaries = None
79 |
80 | # generate the bins
81 | if self.bins is None:
82 | self.bins = make_bins(self.dmin, self.dmax, self.n_bins)
83 | else:
84 | self.bins = np.asarray(self.bins)
85 |
86 | if self.bins.ndim == 1:
87 | self.bins = midpoints(np.array([self.bins[0:-1], self.bins[1:]]).T)
88 |
89 | # reset a few fields
90 | self.n_bins = self.bins.shape[0]
91 | self.dmin = self.bins[0, 0]
92 | self.dmax = self.bins[-1, -1]
93 |
94 | self.bin_boundaries = np.append(self.bins[:, 0], self.bins[-1, -1])
95 |
96 | return
97 |
98 | @property
99 | def dlogdp(self):
100 | """Log-weighted bin width in microns"""
101 | return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])
102 |
103 | @property
104 | def ddp(self):
105 | """Bin width in microns"""
106 | return self.bins[:, -1] - self.bins[:, 0]
107 |
108 | @property
109 | def midpoints(self):
110 | """Bin midpoints in microns"""
111 | return self.bins[:, 1]
112 |
113 | def calibrate(self, material, method='spline', mie_kws={}, fit_kws={}):
114 | """Calibrate the OPC assuming a specific material.
115 |
116 | By calibration, we mean a method used to relate the peak height,
117 | which is related to the scattering cross-section (Cscat) to the
118 | particle size. At its simplest, we use this function to determine
119 | which bin a particle belongs to based on its Cscat value. Once
120 | calibrated, a calibration function is saved as a digitizer which
121 | will take as an input a Cscat value and return the bin number of
122 | the OPC it belongs to.
123 |
124 | Parameters
125 | ----------
126 | material: string or complex number
127 | Either a string containing the material available in the
128 | lookup table, or the complex refractive index. The option for
129 | lookup values are: ['ammonium_sulfate', 'bc', 'psl']. Since
130 | the refractive index is wavelength dependant, it is
131 | recommended you define the refractive index at your OPC's
132 | wavelength if you are looking for the best results.
133 | method: string or callable
134 | The method to use for creating a calibration curve. Options
135 | include (1) 'spline' which removes any non-monotonicly increasing
136 | points from the Cscat to Dp curve; or (2) 'linear' fits a linear model
137 | (in log-log space) between Cscat and Dp.
138 | mie_kws: dict
139 | Optional dictionary containing keyword arguments that is sent
140 | directly to opcsim.mie.cscat when computing the scattering
141 | cross section values used in the optimization.
142 | fit_kws: dict
143 | Optional dictionary containing keyword arguments that is
144 | sent directly to scipy.optimize.curve_fit when generating
145 | a fit using the 'fit_linear' approach. Please see the
146 | scipy.optimize.curve_fit docs for more details.
147 |
148 | Examples
149 | --------
150 |
151 | Calibrate an OPC using PSL's
152 |
153 | >>> opc = opcsim.OPC(n_bins=5)
154 | >>> opc.calibrate(material="psl", method="spline")
155 |
156 | Calibrate an OPC using a custom material
157 |
158 | >>> opc = opcsim.OPC(n_bins=5)
159 | >>> opc.calibrate(material=complex(1.9, 0.4), method="spline")
160 |
161 | Calibrate an OPC where the calibration curve is a fitted line (PSL's)
162 |
163 | >>> opc = opcsim.OPC(n_bins=5)
164 | >>> opc.calibrate(material="psl", method="linear")
165 |
166 | """
167 | # determine the complex refractive index
168 | if type(material) == str:
169 | try:
170 | refr = RI_COMMON[material.lower()]
171 | except:
172 | raise ValueError("This material does not exist in our (limited) database.")
173 | else:
174 | if type(material) != complex:
175 | refr = complex(material, 0)
176 | else:
177 | refr = material
178 |
179 | # calculate Cscat at all bin boundaries
180 | yvals = np.array([cscat(dp=dp, wl=self.wl, refr=refr, theta1=self.theta[0],
181 | theta2=self.theta[1], **mie_kws) for dp in self.bin_boundaries])
182 |
183 | # generate the fitted Cscat values based on the method chosen
184 | if method == "spline":
185 | yvals = squash_dips(yvals)
186 | elif method == "linear":
187 | # define the function we fit dp to Cscat
188 | def f(dp, a, b):
189 | return a * np.power(dp, b)
190 |
191 | # if log-weight, set sigma to weight in log-units
192 | fit_kws["sigma"] = np.power(10, np.log10(yvals) + 1)
193 |
194 | # fit the data with optional kwargs
195 | popt, _ = scipy.optimize.curve_fit(f, self.bin_boundaries, yvals, **fit_kws)
196 |
197 | # set the yvals to the fitted values
198 | yvals = f(self.bin_boundaries, *popt)
199 | elif method == "piecewise":
200 | # define the function to fit
201 | def f(x, x0, a1, a2, b1, b2):
202 | return np.piecewise(x, [x < x0], [
203 | lambda x: a1*np.power(x, b1),
204 | lambda x: a2*np.power(x, b2)
205 | ])
206 |
207 | # if log-weight, set sigma
208 | fit_kws["sigma"] = np.power(10, np.log10(yvals) + 1)
209 |
210 | # fit the data with optional kwargs
211 | popt, _ = scipy.optimize.curve_fit(f, self.bin_boundaries, yvals, **fit_kws)
212 |
213 | # set the yvals to the fitted vals
214 | yvals = f(self.bin_boundaries, *popt)
215 | else:
216 | raise ValueError("The `method` chosen is not currently supported.")
217 |
218 | # generate the digitizer and set as the calibration function
219 | # the calibration function must take just one argument (cscat(s)) and return
220 | # a bin number(s)
221 | self.calibration_function = functools.partial(self._digitize_opc_bins, cscat_boundaries=yvals)
222 | self.calibration_refr = refr
223 | self.calibration_vals = yvals
224 |
225 | # save the fitted boundaries for potential future use
226 | self._cscat_boundaries = yvals
227 |
    def evaluate(self, distribution, rh=0., **kwargs):
        """Return the total number of particles in each bin for a given AerosolDistribution.

        We evaluate an OPC for a given distribution by calculating the Cscat value
        for every particle in the distribution and assigning it to a bin of the OPC.
        Since we are beginning with a PDF and not a set of discrete particles, we
        first discretize the PDF into a chosen number of narrow sub-bins; this
        limits the computation by performing a single Mie calculation per sub-bin
        and replicating it for every particle in that sub-bin (i.e. we can do one
        calculation for a tiny bin and then replicate it without needing to re-do
        the Mie calculations).

        Parameters
        ----------
        distribution: AerosolDistribution
            A valid instance of the AerosolDistribution class.
        rh: float
            The relative humidity in % (0-100)

        Returns
        -------
        dN: array
            The number of particles in each OPC bin (size is the number of bins)

        Examples
        --------

        Evaluate an OPC for the Urban distribution

        >>> opc = opcsim.OPC(wl=0.658, n_bins=5)
        >>> opc.calibrate(material="psl")
        >>> d = opcsim.load_distribution("urban")
        >>> vals = opc.evaluate(d, rh=0.)

        Evaluate a distribution of Ammonium Sulfate at various RH's

        >>> opc = opcsim.OPC(wl=0.658, n_bins=5)
        >>> d = opcsim.AerosolDistribution()
        >>> d.add_mode(n=1000, gm=500e-3, gsd=1.5, kappa=0.53, refr=complex(1.521, 0), rho=1.77)
        >>> vals_0 = opc.evaluate(d, rh=0.)
        >>> vals_50 = opc.evaluate(d, rh=50.)
        >>> vals_100 = opc.evaluate(d, rh=100.)

        """
        if not self.calibration_function:
            raise Exception("The OPC must be calibrated before computing a histogram.")

        # create the bins for the distribution PDF
        # lower edge defaults to half the OPC's minimum diameter; upper edge to 10 um
        bounds = kwargs.pop("bounds", (self.dmin / 2, 10.))

        # calculate the total number of particles in the distribution
        # (used only to pick a sensible number of sub-bins below)
        ntot = distribution.cdf(dmin=0, dmax=100., weight='number')

        # log-spaced sub-bin edges; sub-bin count is capped at 250
        # NOTE(review): if ntot < 3 this yields num <= 0 — presumably ntot is
        # always large in practice; confirm upstream
        bounds = np.logspace(start=np.log10(bounds[0]), stop=np.log10(
            bounds[1]), num=kwargs.pop("n_bins", int(min(ntot/3, 250))))

        # we need to calculate the midpoint of the new bounds to compute the cscats!
        diams = np.mean([bounds[:-1], bounds[1:]], axis=0)

        # for each mode...
        rv = np.zeros(self.n_bins)

        for m in distribution.modes:
            # divide our PDF into bins to make the computations a bit easier
            n = np.array([distribution.cdf(dmin=a, dmax=b, mode=m["label"], rh=rh)
                          for a, b in zip(bounds[0:-1], bounds[1:])])

            # calculate the % dry based on hygroscopic growth:
            # the kappa-Kohler growth factor cubed gives the wet/dry volume
            # ratio, so its inverse is the dry volume fraction
            pct_dry = 1. / (k_kohler(diam_dry=1., kappa=m["kappa"], rh=rh)**3)

            # volume-weighted effective refractive index of the (dry material + water) mix
            refr = ri_eff([m["refr"], RI_COMMON['h2o']], weights=[pct_dry, 1-pct_dry])

            # iterate over each bin and calculate the Cscat value and build an array
            for dp, dn in zip(diams, n):
                # amend the RI based on the RH
                v = cscat(
                    dp, wl=self.wl, refr=refr, theta1=self.theta[0], theta2=self.theta[1])

                # map the scattering signal onto an OPC bin (empty if out of range)
                bin_assign = self.calibration_function(values=[v])

                if len(bin_assign) > 0:
                    rv[bin_assign] += dn

        return rv
312 |
313 | def histogram(self, distribution, weight="number", base="log10", rh=0., **kwargs):
314 | """Return a histogram containing the [weight] of particles in each OPC bin.
315 |
316 | This represents what the OPC 'sees'. All calculations are made assuming the
317 | center of the bin is reprentative of the entire bin.
318 |
319 | Parameters
320 | ----------
321 | distribution: AerosolDistribution
322 | weight : {'number' | 'surface' | 'volume'}
323 | Choose how to weight the pdf. Default is `number`.
324 | base : {'none' | 'log10'}
325 | Base algorithm to use. Default is 'log10'.
326 | rh: float
327 | The relative humidity in percent (0-100).
328 |
329 | Returns
330 | -------
331 | d[weight]/d[base]Dp : array
332 | Returns an array with the evaluated
333 | PDF. This data can be
334 | directly plotted as a histogram using matplotlib bar plots. By
335 | default, dN/dlogDp is returned.
336 |
337 | Examples
338 | --------
339 |
340 | Evaluate an OPC for the Urban distribution and return dN/dlogDp
341 |
342 | >>> opc = opcsim.OPC(n_bins=5)
343 | >>> opc.calibrate(material="psl")
344 | >>> d = opcsim.load_distribution("urban")
345 | >>> hist = opc.histogram(d, weight="number", rh=0.)
346 |
347 | Evaluate an OPC for the Urban distribution and return dV/dlogDp
348 |
349 | >>> opc = opcsim.OPC(n_bins=5)
350 | >>> opc.calibrate(material="psl")
351 | >>> d = opcsim.load_distribution("urban")
352 | >>> hist = opc.histogram(d, weight="volume", rh=0.)
353 |
354 | Evaluate an OPC for the Urban distribution and return dN/dDp
355 |
356 | >>> opc = opcsim.OPC(n_bins=5)
357 | >>> opc.calibrate(material="psl")
358 | >>> d = opcsim.load_distribution("urban")
359 | >>> hist = opc.histogram(d, weight="number", base=None, rh=0.)
360 |
361 | Evaluate a distribution of Ammonium Sulfate at 50% RH
362 |
363 | >>> opc = opcsim.OPC(n_bins=5)
364 | >>> d = opcsim.AerosolDistribution()
365 | >>> d.add_mode(n=1000, gm=500e-3, gsd=1.5, kappa=0.53, refr=complex(1.521, 0), rho=1.77)
366 | >>> hist = opc.histogram(d, weight="number", base="log10", rh=50.)
367 |
368 | """
369 | # get the density if needed [units of g/cc]
370 | rho = kwargs.pop("rho", 1.65)
371 |
372 | # get the binned values in units of dN/bin
373 | rv = self.evaluate(distribution, rh=rh, **kwargs)
374 |
375 | if weight not in ["number", "surface", "volume", "mass"]:
376 | raise ValueError("Invalid `weight` parameter")
377 |
378 | # convert the values into the proper weight
379 | if weight == "surface":
380 | rv *= (np.pi * self.midpoints**2)
381 | elif weight == "volume":
382 | rv *= (self.midpoints**3*np.pi/6.) # dp3 * pi/6*N
383 | elif weight == "mass":
384 | rv *= rho * (self.midpoints**3*np.pi/6.) # rho*dp3 * pi/6*N
385 | else:
386 | pass
387 |
388 | # divide the values by the proper base
389 | if base == "log10":
390 | rv = rv / self.dlogdp
391 | else:
392 | rv = rv / self.ddp
393 |
394 | return rv
395 |
396 | def integrate(self, distribution, dmin=0., dmax=1., weight="number", rh=0., **kwargs):
397 | """Integrate the distribution according to the OPC for any [weight].
398 |
399 | By default, this method returns the total number of particles between 0-1 microns.
400 | It can be used to calculate PM values at any breakpoint. When calculating a PM value
401 | for a breakpoint that falls within a bin, a simple percentage of the bin is used. For
402 | example, if you calculate PM1 and the bin that contains 1-micron actually covers 0.5
403 | to 1.5 microns, then 1/2 the mass from that bin will count towards the PM1 value.
404 |
405 | Parameters
406 | ----------
407 | distribution: AerosolDistribution
408 | The aerosol distribution to be evaluated.
409 | dmin: float
410 | The minimum particle diameter [microns] to integrate from.
411 | dmax: float
412 | The maximum particle diameter [microns] to integrate to.
413 | weight : {'number' | 'surface' | 'volume'}
414 | Choose how to weight the pdf. Default is `number`.
415 | rh: float
416 | The relative humidity in percent (0-100).
417 |
418 | Returns
419 | -------
420 | rv: float
421 | The total [weight] between dmin and dmax. By default,
422 | the total number of particles (i.e. weight='number')
423 | are returned.
424 |
425 | Examples
426 | --------
427 |
428 | Calculate the total number of particles between 0-1 microns
429 | for the Urban distribution.
430 |
431 | >>> opc = opcsim.OPC(n_bins=5)
432 | >>> opc.calibrate(material="psl")
433 | >>> d = opcsim.load_distribution("urban")
434 | >>> ntot = opc.integrate(d, dmin=0., dmax=1., weight="number", rh=0.)
435 |
436 | Calculate PM1 for the Urban Distribution when RH = 0%
437 |
438 | >>> opc = opcsim.OPC(n_bins=5)
439 | >>> opc.calibrate(material="psl")
440 | >>> d = opcsim.load_distribution("urban")
441 | >>> ntot = opc.integrate(d, dmin=0., dmax=1., weight="mass", rh=0., rho=1.5)
442 |
443 | Calculate PM2.5 for the Urban Distribution when RH = 50%
444 |
445 | >>> opc = opcsim.OPC(n_bins=5)
446 | >>> opc.calibrate(material="psl")
447 | >>> d = opcsim.load_distribution("urban")
448 | >>> ntot = opc.integrate(d, dmin=0., dmax=2.5, weight="mass", rh=50., rho=1.5)
449 |
450 | """
451 | rho = kwargs.pop("rho", 1.65)
452 |
453 | # calculate dN
454 | rv = self.evaluate(distribution, rh=rh, **kwargs)
455 |
456 | if weight not in ["number", "surface", "volume", "mass"]:
457 | raise ValueError("Invalid argument for `weight`")
458 |
459 | if weight == "surface":
460 | # convert dN to dS
461 | rv *= (np.pi * self.midpoints**2)
462 | elif weight == "volume":
463 | # convert dN to dV
464 | rv *= (self.midpoints**3*np.pi/6.)
465 | elif weight == "mass":
466 | # convert dN to dM
467 | rv *= rho*(self.midpoints**3*np.pi/6.)
468 | else:
469 | pass
470 |
471 | # cleave the array
472 | # for bins that are outside diameters of interest, set to 0
473 | # for bins that are partially within the range of diams, set to
474 | # a fraction based on % within range
475 | factors = np.zeros(self.bins.shape[0])
476 | for i, b in enumerate(self.bins):
477 | if (b[0] >= dmin) and (b[-1] <= dmax):
478 | factors[i] = 1.
479 | elif (dmin >= b[0]) and (dmax <= b[-1]):
480 | factors[i] = (dmax - dmin) / (b[-1] - b[0])
481 | elif (dmin >= b[0]) and (dmax >= b[-1]):
482 | factors[i] = (b[-1] - dmin) / (b[-1] - b[0])
483 | elif (dmax >= b[0]) and (dmax <= b[-1]):
484 | factors[i] = (dmax - b[-1]) / (b[-1] / b[0])
485 | else:
486 | factors[i] = 0.
487 |
488 | # return the sum
489 | return (rv*factors).sum()
490 |
491 | def _digitize_opc_bins(self, cscat_boundaries, values):
492 | """Return the bin (or bins) corresponding to the :math:`C_{scat}` value(s).
493 |
494 | Parameters
495 | ----------
496 | cscat_boundaries: array-like
497 | An array of Cscat values at every bin boundary. The length of
498 | the array should be one greater than the number of bins.
499 | values: array-like
500 | Either a single-value or array of computed Cscat values
501 | that need to be assigned to OPC bins.
502 |
503 | Returns
504 | -------
505 | bins: np.ndarray
506 | An array of assigned bins associated with every value. If
507 | a value was too small (i.e. value < smallest in cscat_boundaries)
508 | or too large, it will be dropped. Thus, the size of bins is less than
509 | or equal to the size of values.
510 |
511 | """
512 | _binb = np.asarray(cscat_boundaries)
513 | _vals = np.asarray(values)
514 |
515 | # create a digitized version of all values. 0 implies it is too small and
516 | # values that are too large will be assigned the value for the length of the array
517 | digitized = np.digitize(_vals, bins=_binb)
518 |
519 | # keep only valid bins
520 | digitized = digitized[np.where(
521 | (digitized != 0) & (digitized != len(_binb)))]
522 |
523 | # subtract 1 from every bin so they are 0-indexed
524 | digitized -= 1
525 |
526 | return digitized
527 |
    def __repr__(self): # pragma: no cover
        # minimal repr: just the class identity, e.g. "<class '...OPC'>"
        return str(self.__class__)
530 |
531 |
class Nephelometer(object):
    """Define a Nephelometer by its wavelength and range of viewing angles.

    Unlike an OPC, a nephelometer does not size individual particles; it
    measures the total light scattered by the entire aerosol ensemble and
    converts that signal to PM mass via ratios learned during calibration.
    """
    def __init__(self, wl, theta=(7., 173.), **kwargs):
        """
        Parameters
        ----------
        wl: float
            The wavelength of laser used in the device
        theta: tuple of floats
            The viewing angle range in degrees

        Returns
        -------
        Nephelometer
            An instance of the opcsim.Nephelometer class

        Examples
        --------

        Build a simple nephelometer

        >>> neph = opcsim.Nephelometer(wl=0.658)

        """
        self.wl = wl
        self.theta = theta

        # Cscat-per-unit-mass ratios; populated by calibrate() and
        # required before evaluate() can be used
        self.pm1_ratio = None
        self.pm25_ratio = None
        self.pm10_ratio = None

    def calibrate(self, distribution, rh=0., **kwargs):
        """Calibrate a Nephelometer using any AerosolDistribution.

        The calibration stores the ratio of the total scattered light to
        the PM1, PM2.5, and PM10 mass loadings of the given distribution.

        Parameters
        ----------
        distribution: opcsim.AerosolDistribution
            The aerosol distribution used to calibrate the Nephelometer.
        rh: float
            The relative humidity at which the calibration takes place.
            Default is 0 %.
        n_bins: int, optional
            The number of sub-bins used to discretize each mode when
            summing the scattered light. Default is 100.

        Returns
        -------
        None

        Examples
        --------

        Build and calibrate a Nephelometer to a simple 1-mode distribution of Ammonium Sulfate.

        >>> neph = Nephelometer(wl=0.6, theta=(7., 173.))
        >>>
        >>> d = opcsim.AerosolDistribution("AmmSulf")
        >>> d.add_mode(n=1000, gm=0.2, gsd=1.25, refr=complex(1.521, 0), rho=1.77, kappa=0.53)
        >>>
        >>> neph.calibrate(d, rh=0.)

        """
        n_bins = kwargs.pop("n_bins", 100)

        # compute the PM1, PM2.5, and PM10 masses of the calibration aerosol
        pm1 = distribution.cdf(dmin=0., dmax=1., weight="mass")
        pm25 = distribution.cdf(dmin=0., dmax=2.5, weight="mass")
        pm10 = distribution.cdf(dmin=0., dmax=10., weight="mass")

        # compute the total scattered light across the distribution
        total_cscat = self._sum_across_distribution(distribution, n_bins=n_bins, rh=rh)

        # store the signal-to-mass ratios used by evaluate()
        self.pm1_ratio = total_cscat / pm1
        self.pm25_ratio = total_cscat / pm25
        self.pm10_ratio = total_cscat / pm10

    def _sum_across_distribution(self, distribution, n_bins=100, rh=0., **kwargs):
        """Return the total Cscat signal summed over every mode of the distribution.

        Each mode is discretized into `n_bins` log-spaced sub-bins spanning
        GM/GSD**4 to GM*GSD**4 (after hygroscopic growth); one Mie
        calculation is made per sub-bin midpoint and weighted by the
        number of particles in that sub-bin.
        """
        total_cscat = 0.

        for m in distribution.modes:
            gm = m["GM"]
            gsd = m["GSD"]
            refr = m["refr"]

            # grow the geometric mean diameter per kappa-Kohler theory at this RH
            gm = k_kohler(diam_dry=gm, kappa=m["kappa"], rh=rh)

            # dry volume fraction of the grown particle (dry/wet diameter cubed)
            pct_dry = m["GM"]**3 / gm**3

            # volume-weighted effective refractive index of the wet particle
            refr = ri_eff(species=[refr, RI_COMMON["h2o"]], weights=[pct_dry, 1-pct_dry])

            # compute the log-spaced sub-bin boundaries covering the mode
            bounds = np.logspace(start=np.log10(gm/(gsd**4)), stop=np.log10(gm*(gsd**4)), num=n_bins)

            # compute the midpoints of each bin
            midpoints = np.mean([bounds[:-1], bounds[1:]], axis=0)

            # compute the number of particles in each bin
            n = np.array([distribution.cdf(dmin=a, dmax=b, mode=m["label"], rh=rh)
                          for a, b in zip(bounds[:-1], bounds[1:])])

            # compute the mean Cscat for each bin
            mean_cscat = np.array([cscat(dp, wl=self.wl, refr=refr,
                                         theta1=self.theta[0], theta2=self.theta[1]) for dp in midpoints])

            # add to the running total
            total_cscat += (n*mean_cscat).sum()

        return total_cscat

    def evaluate(self, distribution, rh=0., **kwargs):
        """Evaluate a Nephelometer for an AerosolDistribution at
        a given relative humidity.

        Parameters
        ----------
        distribution: opcsim.AerosolDistribution
            The aerosol distribution to evaluate.
        rh: float
            The relative humidity at which the evaluation takes place.
            Default is 0 %.

        Returns
        -------
        cscat: float
            The total summed scattering signal across the entire size distribution.
        pm1: float
            The total PM1 value in ug/m3
        pm25: float
            The total PM2.5 value in ug/m3
        pm10: float
            The total PM10 value in ug/m3

        Raises
        ------
        Exception
            If the Nephelometer has not been calibrated yet.

        Examples
        --------

        Build and calibrate a Nephelometer to a simple 1-mode distribution of Ammonium Sulfate.

        >>> neph = Nephelometer(wl=0.6, theta=(7., 173.))
        >>>
        >>> d = opcsim.AerosolDistribution("AmmSulf")
        >>> d.add_mode(n=1000, gm=0.2, gsd=1.25, refr=complex(1.521, 0), rho=1.77, kappa=0.53)
        >>>
        >>> neph.calibrate(d, rh=0.)
        >>>
        >>> neph.evaluate(d, rh=85.)

        """
        # guard against use before calibration; previously this crashed
        # later with an opaque TypeError when dividing by a None ratio
        if self.pm1_ratio is None or self.pm25_ratio is None or self.pm10_ratio is None:
            raise Exception("The Nephelometer must be calibrated before it can be evaluated.")

        total_cscat = self._sum_across_distribution(distribution, rh=rh, **kwargs)

        pm1 = total_cscat / self.pm1_ratio
        pm25 = total_cscat / self.pm25_ratio
        pm10 = total_cscat / self.pm10_ratio

        return total_cscat, pm1, pm25, pm10
689 |
690 |
# explicitly declare the public API exported by `from ... import *`
__all__ = [
    'OPC',
    'Nephelometer'
]
695 |
--------------------------------------------------------------------------------