├── .github ├── dependabot.yml └── workflows │ ├── continuous_distribution.yml │ ├── continuous_integration.yml │ └── draft-pdf.yml ├── .gitignore ├── .readthedocs.yaml ├── CITATION.cff ├── CONTRIBUTING.md ├── LICENSE.md ├── PythonicDISORT_optional_dependencies.txt ├── README.md ├── all_optional_dependencies.txt ├── docs ├── JOSS_Paper │ ├── paper.bib │ ├── paper.md │ └── sample.md ├── Makefile ├── Pythonic-DISORT.ipynb ├── conf.py ├── index.rst ├── make.bat ├── requirements.txt ├── section6_testresults1.npz ├── section6_testresults2.npz └── source │ └── PythonicDISORT.rst ├── pydisotest ├── 11_test.py ├── 1_test.ipynb ├── 1_test.py ├── 2_test.ipynb ├── 2_test.py ├── 3_test.ipynb ├── 3_test.py ├── 4_test.ipynb ├── 4_test.py ├── 5_test.ipynb ├── 5_test.py ├── 6_test.ipynb ├── 6_test.py ├── 7_test.ipynb ├── 7_test.py ├── 8_test.ipynb ├── 8_test.py ├── 9_test.ipynb ├── 9_test.py ├── ARTS_data │ ├── inpydis.py │ └── pydisort_data.py └── Stamnes_results │ ├── 1a_test.npz │ ├── 1b_test.npz │ ├── 1c_test.npz │ ├── 1d_test.npz │ ├── 1e_test.npz │ ├── 1f_test.npz │ ├── 2a_test.npz │ ├── 2b_test.npz │ ├── 2c_test.npz │ ├── 2d_test.npz │ ├── 3a_test.npz │ ├── 3b_test.npz │ ├── 4a_test.npz │ ├── 4b_test.npz │ ├── 4c_test.npz │ ├── 5a_test.npz │ ├── 5b_test.npz │ ├── 6b_test.npz │ ├── 6c_test.npz │ ├── 6d_test.npz │ ├── 6e_test.npz │ ├── 6f_test.npz │ ├── 6g_test.npz │ ├── 6h_test.npz │ ├── 7a_test.npz │ ├── 7b_test.npz │ ├── 7c_test.npz │ ├── 7d_test.npz │ ├── 7e_test.npz │ ├── 8ARTS_A_test.npy │ ├── 8ARTS_B0_test.npz │ ├── 8ARTS_B1_test.npz │ ├── 8ARTS_B2_test.npz │ ├── 8a_test.npz │ ├── 8b_test.npz │ ├── 8c_test.npz │ ├── 9a_test.npz │ ├── 9b_test.npz │ ├── 9c_test.npz │ └── 9corrections_test.npz ├── pyproject.toml └── src └── PythonicDISORT ├── __init__.py ├── _assemble_intensity_and_fluxes.py ├── _solve_for_coeffs.py ├── _solve_for_gen_and_part_sols.py ├── pydisort.py └── subroutines.py /.github/dependabot.yml: 
-------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | target-branch: "main" 9 | -------------------------------------------------------------------------------- /.github/workflows/continuous_distribution.yml: -------------------------------------------------------------------------------- 1 | name: Build and Upload PythonicDISORT to PyPI 2 | on: 3 | release: 4 | types: 5 | - published 6 | 7 | jobs: 8 | build-artifacts: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | with: 13 | fetch-depth: 0 14 | - uses: actions/setup-python@v5 15 | name: Install Python 16 | with: 17 | python-version: 3.8 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | python -m pip install setuptools setuptools-scm[toml] build twine 22 | - name: Check python version 23 | run: | 24 | python --version 25 | - name: Build tarball and wheels 26 | run: | 27 | git clean -xdf 28 | git restore -SW . 
29 | python -m build 30 | - name: Check built artifacts 31 | run: | 32 | python -m twine check dist/* 33 | pwd 34 | if [ -f dist/PythonicDISORT-0.0.0.tar.gz ]; then 35 | echo "❌ INVALID VERSION NUMBER" 36 | exit 1 37 | else 38 | echo "✅ Looks good" 39 | fi 40 | - uses: actions/upload-artifact@v4 41 | with: 42 | name: releases 43 | path: dist 44 | upload-to-pypi: 45 | needs: build-artifacts 46 | if: "!github.event.release.prerelease" 47 | runs-on: ubuntu-latest 48 | steps: 49 | - uses: actions/download-artifact@v4.1.9 50 | with: 51 | name: releases 52 | path: dist 53 | - name: Publish package to PyPI 54 | uses: pypa/gh-action-pypi-publish@v1.12.4 55 | with: 56 | user: __token__ 57 | password: ${{ secrets.PYPI_TOKEN }} 58 | verbose: true 59 | -------------------------------------------------------------------------------- /.github/workflows/continuous_integration.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Run PyTests 5 | 6 | on: 7 | push: 8 | pull_request: 9 | branches: [ "main" ] 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: ["3.8", "3.9", "3.10", "3.11"] 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | python -m pip install . 
30 | python -m pip install pytest flake8 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | - name: Lint with flake8 33 | run: | 34 | # stop the build if there are Python syntax errors or undefined names 35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 37 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 38 | - name: Test with pytest 39 | run: | 40 | cd pydisotest 41 | pytest 42 | -------------------------------------------------------------------------------- /.github/workflows/draft-pdf.yml: -------------------------------------------------------------------------------- 1 | name: Generate JOSS paper PDF 2 | 3 | on: [push] 4 | 5 | jobs: 6 | paper: 7 | runs-on: ubuntu-latest 8 | name: Paper Draft 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v4 12 | - name: Build draft PDF 13 | uses: openjournals/openjournals-draft-action@master 14 | with: 15 | journal: joss 16 | # This should be the path to the paper within your repo. 17 | paper-path: docs/JOSS_Paper/paper.md 18 | - name: Upload 19 | uses: actions/upload-artifact@v4 20 | with: 21 | name: paper 22 | # This is the output path where Pandoc will write the compiled 23 | # PDF. 
Note, this should be the same directory as the input 24 | # paper.md 25 | path: docs/JOSS_Paper/paper.pdf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.bundle.* 2 | lib/ 3 | node_modules/ 4 | *.egg-info/ 5 | .ipynb_checkpoints/ 6 | *.tsbuildinfo 7 | 8 | # Created by https://www.gitignore.io/api/python 9 | # Edit at https://www.gitignore.io/?templates=python 10 | 11 | ### Python ### 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/* 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | pip-wheel-metadata/ 35 | share/python-wheels/ 36 | .installed.cfg 37 | *.egg 38 | MANIFEST 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .nox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | _build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # pyenv 77 | .python-version 78 | 79 | # celery beat schedule file 80 | celerybeat-schedule 81 | 82 | # SageMath parsed files 83 | *.sage.py 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | .spyproject 88 | 89 | # Rope project settings 90 | .ropeproject 91 | 92 | # Mr Developer 93 | .mr.developer.cfg 94 | .project 95 | .pydevproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | .dmypy.json 103 | dmypy.json 104 | 105 | # Pyre type checker 106 | .pyre/ 107 | 108 | # OS X stuff 109 | *.DS_Store 110 | 111 | # End of https://www.gitignore.io/api/python 112 | 113 | _temp_extension 114 | junit.xml 115 | [uU]ntitled* 116 | notebook/static/* 117 | !notebook/static/favicons 118 | notebook/labextension 119 | notebook/schemas 120 | source/changelog.md 121 | source/contributing.md 122 | 123 | # playwright 124 | ui-tests/test-results 125 | ui-tests/playwright-report 126 | 127 | # VSCode 128 | .vscode 129 | 130 | # RTC 131 | .jupyter_ystore.db 132 | 133 | # Author's addition 134 | **/Error-analysis-images/** 135 | **/.ipynb_checkpoints/** 136 | **/disort4.0.99_f2py/** -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version 
of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | # You can also specify other tool versions: 14 | # nodejs: "19" 15 | # rust: "1.64" 16 | # golang: "1.19" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | 26 | # Optionally declare the Python requirements required to build your docs 27 | python: 28 | install: 29 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: PythonicDISORT 6 | message: >- 7 | If you use this software, please cite it using the 8 | metadata from this file. 9 | type: software 10 | authors: 11 | - given-names: Dion J. X. 12 | family-names: Ho 13 | email: dh3065@columbia.edu 14 | affiliation: Columbia University, Department of Applied Physics and Applied Mathematics 15 | orcid: 'https://orcid.org/0009-0000-5829-5081' 16 | repository-code: 'https://github.com/LDEO-CREW/Pythonic-DISORT' 17 | abstract: >- 18 | PythonicDISORT is a Discrete Ordinates Solver for the (1D) Radiative Transfer 19 | Equation in a single or multi-layer plane-parallel atmosphere. 20 | It is coded entirely in Python 3 and is based on Stamnes' 21 | FORTRAN DISORT (see references in the Jupyter Notebook) 22 | and has its main features. 
23 | keywords: 24 | - Python 25 | - Radiative Transfer 26 | - Discrete Ordinates Method 27 | - Atmospheric Science 28 | - Climate Science 29 | - DISORT 30 | license: MIT 31 | date-released: 2023-05-30 32 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributing to PythonicDISORT 3 | 4 | First off, thanks for taking the time to contribute! ❤️ 5 | 6 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 7 | 8 | > And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: 9 | > - Star the project 10 | > - Tweet about it 11 | > - Refer this project in your project's readme 12 | > - Mention the project at local meetups and tell your friends/colleagues 13 | 14 | 15 | ## Table of Contents 16 | 17 | - [I Have a Question](#i-have-a-question) 18 | - [I Want To Contribute](#i-want-to-contribute) 19 | - [Reporting Bugs](#reporting-bugs) 20 | - [Suggesting Enhancements](#suggesting-enhancements) 21 | - [Your First Code Contribution](#your-first-code-contribution) 22 | - [Improving The Documentation](#improving-the-documentation) 23 | - [Styleguides](#styleguides) 24 | - [Commit Messages](#commit-messages) 25 | - [Join The Project Team](#join-the-project-team) 26 | 27 | 28 | 29 | ## I Have a Question 30 | 31 | > If you want to ask a question, we assume that you have read the available 
[Documentation](https://pythonic-disort.readthedocs.io/en/latest/). 32 | 33 | Before you ask a question, it is best to search for existing [Issues](https://github.com/LDEO-CREW/Pythonic-DISORT/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. 34 | 35 | If you then still feel the need to ask a question and need clarification, we recommend the following: 36 | 37 | - Open an [Issue](https://github.com/LDEO-CREW/Pythonic-DISORT/issues/new). 38 | - Provide as much context as you can about what you're running into. 39 | - Provide project and platform versions (e.g. Python, NumPy, and SciPy versions, and your OS), depending on what seems relevant. 40 | 41 | We will then take care of the issue as soon as possible. 42 | 43 | 57 | 58 | ## I Want To Contribute 59 | 60 | > ### Legal Notice 61 | > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. 62 | 63 | ### Reporting Bugs 64 | 65 | 66 | #### Before Submitting a Bug Report 67 | 68 | A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. 69 | 70 | - Make sure that you are using the latest version. 71 | - Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://pythonic-disort.readthedocs.io/en/latest/). If you are looking for support, you might want to check [this section](#i-have-a-question)). 
72 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/LDEO-CREW/Pythonic-DISORT/issues?q=label%3Abug). 73 | - Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. 74 | - Collect information about the bug: 75 | - Stack trace (Traceback) 76 | - OS, Platform and Version (Windows, Linux, macOS, x86, ARM) 77 | - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. 78 | - Possibly your input and the output 79 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions? 80 | 81 | 82 | #### How Do I Submit a Good Bug Report? 83 | 84 | > You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to dh3065@columbia.edu. 85 | 86 | 87 | We use GitHub issues to track bugs and errors. If you run into an issue with the project: 88 | 89 | - Open an [Issue](https://github.com/LDEO-CREW/Pythonic-DISORT/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) 90 | - Explain the behavior you would expect and the actual behavior. 91 | - Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. 92 | - Provide the information you collected in the previous section. 93 | 94 | Once it's filed: 95 | 96 | - The project team will label the issue accordingly. 97 | - A team member will try to reproduce the issue with your provided steps. 
If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. 98 | - If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be [implemented by someone](#your-first-code-contribution). 99 | 100 | 101 | 102 | 103 | ### Suggesting Enhancements 104 | 105 | This section guides you through submitting an enhancement suggestion for PythonicDISORT, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. 106 | 107 | 108 | #### Before Submitting an Enhancement 109 | 110 | - Make sure that you are using the latest version. 111 | - Read the [documentation](https://pythonic-disort.readthedocs.io/en/latest/) carefully and find out if the functionality is already covered, maybe by an individual configuration. 112 | - Perform a [search](https://github.com/LDEO-CREW/Pythonic-DISORT/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. 113 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. 114 | 115 | 116 | #### How Do I Submit a Good Enhancement Suggestion? 117 | 118 | Enhancement suggestions are tracked as [GitHub issues](https://github.com/LDEO-CREW/Pythonic-DISORT/issues). 
119 | 120 | - Use a **clear and descriptive title** for the issue to identify the suggestion. 121 | - Provide a **step-by-step description of the suggested enhancement** in as many details as possible. 122 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. 123 | - You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. 124 | - **Explain why this enhancement would be useful** to most PythonicDISORT users. You may also want to point out the other projects that solved it better and which could serve as inspiration. 125 | 126 | 127 | 128 | 129 | ## Attribution 130 | This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! 131 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 HO Jia Xu Dion 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PythonicDISORT_optional_dependencies.txt: -------------------------------------------------------------------------------- 1 | # Optional dependencies for the PythonicDISORT package 2 | # Install through "pip install -r PythonicDISORT_optional_dependencies.txt" 3 | 4 | PythonicDISORT 5 | pytest -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Run PyTests](https://github.com/LDEO-CREW/Pythonic-DISORT/actions/workflows/continuous_integration.yml/badge.svg)](https://github.com/LDEO-CREW/Pythonic-DISORT/actions/workflows/continuous_integration.yml) 2 | [![DOI](https://joss.theoj.org/papers/10.21105/joss.06442/status.svg)](https://doi.org/10.21105/joss.06442) 3 | 4 | # Introduction 5 | The PythonicDISORT package is a Discrete Ordinates Solver for the (1D) Radiative Transfer Equation 6 | in a plane-parallel, horizontally homogeneous atmosphere. 7 | It is coded entirely in Python 3 and is a reimplementation instead of a wrapper. 8 | While PythonicDISORT has been optimized for speed, it will naturally be slower than similar FORTRAN algorithms. 9 | On the other hand, PythonicDISORT should be easier to install, use, and modify than FORTRAN-based Discrete Ordinates Solvers. 
10 | 11 | PythonicDISORT is based on Stamnes' FORTRAN DISORT (see References, in particular [2, 3, 8]) and has its main features: multi-layer solver, 12 | delta-_M_ scaling, Nakajima-Tanaka (NT) corrections, only flux option, direct beam source, isotropic internal source (blackbody emission), 13 | Dirichlet boundary conditions (diffuse flux boundary sources), Bi-Directional Reflectance Function (BDRF) for surface reflection, 14 | and interpolation with respect to polar angle. 15 | In addition, we added a subroutine to compute actinic fluxes to satisfy a user request, and integration with respect to optical depth was also added. 16 | Further feature requests as well as feedback are welcome. 17 | 18 | You may contact me, Dion, through dh3065@columbia.edu. 19 | 20 | The **GitHub repository** is https://github.com/LDEO-CREW/Pythonic-DISORT. 21 | 22 | Accompanying Journal of Open Source Software paper: https://joss.theoj.org/papers/10.21105/joss.06442. 23 | 24 | # Documentation 25 | https://pythonic-disort.readthedocs.io/en/latest/ 26 | 27 | Also see the accompanying Jupyter Notebook `Pythonic-DISORT.ipynb` in the `docs` directory 28 | of our GitHub repository. 29 | This Jupyter Notebook provides comprehensive documentation, suggested inputs, explanations, 30 | mathematical derivations and verification tests. 31 | It is highly recommended that new users read the non-optional parts of sections 1 and 2. 32 | 33 | ## PyTest and examples of how to use PythonicDISORT 34 | 35 | Not only are there verification tests in `Pythonic-DISORT.ipynb`, 36 | most of the test problems in Stamnes' `disotest.f90` (download DISORT 4.0.99 from http://www.rtatmocn.com/disort/) have also been recreated and enhanced. 37 | In these tests, the solutions from PythonicDISORT are compared against solutions 38 | from a F2PY-wrapped Stamnes' DISORT (version 4.0.99; wrapper inspired by https://github.com/kconnour/pyRT_DISORT). 
With PyTest installed, execute the console command `pytest` 39 | in the `pydisotest` directory to run these tests. The `pydisotest` directory also contains Jupyter Notebooks to show the implementation of each test. 40 | These notebooks double up as examples of how to use PythonicDISORT. The tests which have been implemented are: 41 | 42 | * Test Problem 1: Isotropic Scattering 43 | * Test Problem 2: Rayleigh Scattering, Beam Source 44 | * Test Problem 3: Henyey-Greenstein Scattering 45 | * Test Problem 4: Haze-L Scattering, Beam Source 46 | * Test Problem 5: Cloud C.1 Scattering, Beam Source 47 | * Test Problem 6: No Scattering, Increasingly Complex Sources (relevant for modeling longwave radiation) 48 | * Test Problem 7: Absorption + Scattering + All Possible Sources, Lambertian and Hapke Surface Reflectivities (one layer) 49 | * Test Problem 8: Absorbing / Isotropic-Scattering Medium (multiple layers) 50 | * Test Problem 9: General Emitting / Absorbing / Scattering Medium (multiple layers) 51 | * Test Problem 11: Single-Layer vs. 
Multiple Layers (no corresponding Jupyter Notebook) 52 | 53 | # Installation 54 | 55 | * From PyPI: `pip install PythonicDISORT` 56 | * From Conda-forge: (TODO: need to first publish on Conda-forge) 57 | * By cloning repository: `pip install .` in the `Pythonic-DISORT` directory; `pip install -r all_optional_dependencies.txt` to install all optional dependencies (see *Requirements to run PythonicDISORT*) 58 | 59 | ## Requirements to run PythonicDISORT 60 | * Python 3.8+ 61 | * `numpy >= 1.8.0` 62 | * `scipy >= 1.8.0` 63 | * (OPTIONAL) `pytest >= 6.2.5` (Required to use the command `pytest`, see *PyTest and examples of how to use PythonicDISORT*) 64 | 65 | ## (OPTIONAL) Additional requirements to run the Jupyter Notebook 66 | * `autograd >= 1.5` 67 | * `jupyter > 1.0.0` 68 | * `notebook > 6.5.2` 69 | * `matplotlib >= 3.6.0` 70 | 71 | In addition, a F2PY-wrapped Stamnes' DISORT, or equivalent, is required to properly run the last section (section 6). 72 | 73 | ## Compatibility 74 | 75 | The PythonicDISORT package should be system agnostic given its minimal dependencies and pure Python code. 76 | Everything in the repository was built and tested on Windows 11. 77 | 78 | # Acknowledgements 79 | 80 | I acknowledge funding from NSF through the Learning the Earth with Artificial intelligence and Physics (LEAP) 81 | Science and Technology Center (STC) (Award #2019625) under which this package was initially created. 82 | 83 | # References 84 | 1) S. Chandrasekhar. 1960. *Radiative Transfer.* 85 | 86 | 2) Knut Stamnes and S-Chee Tsay and Warren Wiscombe and Kolf Jayaweera. 1988. *Numerically stable algorithm for discrete-ordinate-method radiative transfer in multiple scattering and emitting layered media.* http://opg.optica.org/ao/abstract.cfm?URI=ao-27-12-2502. 87 | 88 | 3) Stamnes, S.. 1999. *LLLab disort website.* http://www.rtatmocn.com/disort/. 89 | 90 | 4) Knut Stamnes and Paul Conklin. 1984. 
*A new multi-layer discrete ordinate approach to radiative transfer in vertically inhomogeneous atmospheres.* https://www.sciencedirect.com/science/article/pii/0022407384900311. 91 | 92 | 5) W. J. Wiscombe. 1977. *The Delta–M Method: Rapid Yet Accurate Radiative Flux Calculations for Strongly Asymmetric Phase Functions.* https://journals.ametsoc.org/view/journals/atsc/34/9/1520-0469_1977_034_1408_tdmrya_2_0_co_2.xml. 93 | 94 | 6) J. H. Joseph and W. J. Wiscombe and J. A. Weinman. 1976. *The Delta-Eddington Approximation for Radiative Flux Transfer.* https://journals.ametsoc.org/view/journals/atsc/33/12/1520-0469_1976_033_2452_tdeafr_2_0_co_2.xml. 95 | 96 | 7) Sykes, J. B.. 1951. *Approximate Integration of the Equation of Transfer.* https://doi.org/10.1093/mnras/111.4.377. 97 | 98 | 8) Stamnes, Knut and Tsay, Si-Chee and Wiscombe, Warren and Laszlo, Istvan and Einaudi, Franco. 2000. *General Purpose Fortran Program for Discrete-Ordinate-Method Radiative Transfer in Scattering and Emitting Layered Media: An Update of DISORT.* 99 | 100 | 9) Z. Lin and S. Stamnes and Z. Jin and I. Laszlo and S.-C. Tsay and W.J. Wiscombe and K. Stamnes. 2015. *Improved discrete ordinate solutions in the presence of an anisotropically reflecting lower boundary: Upgrades of the DISORT computational tool.* https://www.sciencedirect.com/science/article/pii/S0022407315000679. 101 | 102 | 10) Trefethen, L. N.. 1996. *Finite difference and spectral methods for ordinary and partial differential equations.* https://people.maths.ox.ac.uk/trefethen/pdetext.html. 103 | 104 | 11) Knut Stamnes. 1982. *On the computation of angular distributions of radiation in planetary atmospheres.* https://www.sciencedirect.com/science/article/pii/0022407382900966. 105 | 106 | 12) T. Nakajima and M. Tanaka. 1988. *Algorithms for radiative intensity calculations in moderately thick atmospheres using a truncation approximation.* https://www.sciencedirect.com/science/article/pii/0022407388900313. 
107 | 108 | 13) Connour, Kyle and Wolff, Michael. 2020. *pyRT_DISORT: A pre-processing front-end to help make DISORT simulations easier in Python.* https://github.com/kconnour/pyRT_DISORT. 109 | -------------------------------------------------------------------------------- /all_optional_dependencies.txt: -------------------------------------------------------------------------------- 1 | # All optional dependencies for the Pythonic DISORT project 2 | # Install through "pip install -r all_optional_dependencies.txt" 3 | 4 | PythonicDISORT 5 | pytest 6 | autograd 7 | jupyter 8 | notebook 9 | matplotlib -------------------------------------------------------------------------------- /docs/JOSS_Paper/paper.bib: -------------------------------------------------------------------------------- 1 | @article{Wis1977, 2 | author = "W. J. Wiscombe", 3 | title = "The Delta–$M$ Method: Rapid Yet Accurate Radiative Flux Calculations for Strongly Asymmetric Phase Functions", 4 | journal = "Journal of Atmospheric Sciences", 5 | year = "1977", 6 | publisher = "American Meteorological Society", 7 | address = "Boston MA, USA", 8 | volume = "34", 9 | number = "9", 10 | doi = "10.1175/1520-0469(1977)034<1408:TDMRYA>2.0.CO;2", 11 | pages= "1408 - 1422", 12 | url = "https://journals.ametsoc.org/view/journals/atsc/34/9/1520-0469_1977_034_1408_tdmrya_2_0_co_2.xml" 13 | } 14 | 15 | @article{NT1988, 16 | title = {Algorithms for radiative intensity calculations in moderately thick atmospheres using a truncation approximation}, 17 | journal = {Journal of Quantitative Spectroscopy and Radiative Transfer}, 18 | volume = {40}, 19 | number = {1}, 20 | pages = {51-69}, 21 | year = {1988}, 22 | issn = {0022-4073}, 23 | doi = {10.1016/0022-4073(88)90031-3}, 24 | url = {https://www.sciencedirect.com/science/article/pii/0022407388900313}, 25 | author = {T. Nakajima and M. 
Tanaka}, 26 | abstract = {The efficiency of numerical calculations is discussed for selected algorithms employing the discrete ordinate method and the truncation approximation for the solar radiative intensity in moderately thick, plane-parallel scattering atmospheres. It is found that truncation of the phase function causes a significant error in the computed intensity and the magnitude of this error depends significantly on how the intensity is retrieved from the truncated radiative transfer equation. A newly developed retrieval algorithm, the IMS- method, yields the intensity field with an error ⪅1% when the number of discrete path is as small as 10 in the hemisphere for aerosol-laden atmospheres with optical thickness ⪅1.} 27 | } 28 | 29 | @article{HP2024, 30 | author = {Ho, Dion J. X. and Pincus, Robert}, 31 | title = {Two-Streams Revisited: General Equations, Exact Coefficients, and Optimized Closures}, 32 | journal = {Journal of Advances in Modeling Earth Systems}, 33 | volume = {16}, 34 | number = {10}, 35 | pages = {e2024MS004504}, 36 | keywords = {two-stream equations, radiative transfer, atmospheric fluxes, optimization, two-stream closures, plane-parallel atmosphere}, 37 | doi = {10.1029/2024MS004504}, 38 | url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2024MS004504}, 39 | eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2024MS004504}, 40 | note = {e2024MS004504 2024MS004504}, 41 | abstract = {Abstract Two-Stream Equations are the most parsimonious general models for radiative flux transfer with one equation to model each of upward and downward fluxes; these are coupled due to the transfer of fluxes between hemispheres. Standard two-stream approximation of the Radiative Transfer Equation assumes that the ratios of flux transferred (coupling coefficients) are both invariant with optical depth and symmetric with respect to upwelling and downwelling radiation. 
Two-stream closures are derived by making additional assumptions about the angular distribution of the intensity field, but none currently works well for all parts of the optical parameter space. We determine the exact values of the two-stream coupling coefficients from multi-stream numerical solutions to the Radiative Transfer Equation for shortwave radiation. The resulting unique coefficients accurately reconstruct entire flux profiles but depend on optical depth. More importantly, they generally take on unphysical values when symmetry is assumed. We derive a general form of the Two-Stream Equations for which the four coupling coefficients are guaranteed to be physically explicable. While non-constant coupling coefficients are required to reconstruct entire flux profiles, numerically optimized constant coupling coefficients (which admit analytic solutions) reproduced shortwave reflectance and transmittance with relative errors no greater than 4×10−5 \$4\times 1{0}^{-5}\$ over a large range of optical parameters. The optimized coefficients show a dependence on solar zenith angle and total optical depth that diminishes as the latter increases. 
This explains why existing coupling coefficients, which often omit the former and mostly neglect the latter, tend to work well for only thin or only thick atmospheres.}, 42 | year = {2024} 43 | } 44 | 45 | @article{TLZWSY2020, 46 | author = {Teng, Shiwen and Liu, Chao and Zhang, Zhibo and Wang, Yuan and Sohn, Byung-Ju and Yung, Yuk L.}, 47 | title = {Retrieval of Ice-Over-Water Cloud Microphysical and Optical Properties Using Passive Radiometers}, 48 | journal = {Geophysical Research Letters}, 49 | volume = {47}, 50 | number = {16}, 51 | pages = {e2020GL088941}, 52 | keywords = {cloud properties, satellite radiometer, vertical distribution}, 53 | doi = {10.1029/2020GL088941}, 54 | url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2020GL088941}, 55 | eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2020GL088941}, 56 | note = {e2020GL088941 2020GL088941}, 57 | abstract = {Abstract Current satellite cloud products from passive radiometers provide effective single-layer cloud properties by assuming a homogeneous cloud in a pixel, resulting in inevitable biases when multiple-layer clouds are present in a vertical column. We devise a novel method to retrieve cloud vertical properties for ice-over-water clouds using passive radiometers. Based on the absorptivity differences of liquid water and ice clouds at four shortwave-infrared channels (centered at 0.87, 1.61, 2.13, and 2.25 μm), cloud optical thicknesses (COT) and effective radii of both upper-layer ice and lower-layer liquid water clouds are inferred simultaneously. The algorithm works most effectively for clouds with ice COT < 7 and liquid water COT > 5. The simulated spectral reflectances based on our retrieved ice-over-water clouds become more consistent with observations than those with a single-layer assumption. 
This new algorithm will improve our understanding of clouds, and we suggest that these four cloud channels should be all included in future satellite sensors.}, 58 | year = {2020} 59 | } 60 | 61 | @article{TCCGL1999, 62 | author = {Torricella, Francesca and Cattani, Elsa and Cervino, Marco and Guzzi, Rodolfo and Levoni, Chiara}, 63 | title = {Retrieval of aerosol properties over the ocean using Global Ozone Monitoring Experiment measurements: Method and applications to test cases}, 64 | journal = {Journal of Geophysical Research: Atmospheres}, 65 | volume = {104}, 66 | number = {D10}, 67 | pages = {12085-12098}, 68 | doi = {10.1029/1999JD900040}, 69 | url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/1999JD900040}, 70 | eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/1999JD900040}, 71 | abstract = {Satellite monitoring of aerosol properties using passive techniques is widely considered a crucial tool for the study of climatic effects of atmospheric particulate [Kaufman et al., 1997]. Only space-based observations can provide the required global coverage information on spatial distribution and temporal variation of the aerosol field. This paper describes a method for deriving aerosol optical thickness at 500 nm and aerosol type from Global Ozone Monitoring Experiment (GOME) data over the ocean under cloud-free conditions. GOME, flying on board the second European Remote Sensing satellite (ERS 2) since April 1995, is a spectrometer that measures radiation reflected from Earth in the spectral range 240–793 nm. The features of the instrument relevant to the aerosol retrieval task are its high relative radiometric accuracy (better than 1\%), its spectral coverage, and its spectral resolution, which allows wavelengths in spectral regions free of molecular absorption (atmospheric windows) to be selected. 
The method presented is based on a pseudo-inversion approach in which measured reflectance spectra are fitted to simulated equivalents computed using a suitable radiative transfer model. The crucial aspects of this method are the high accuracy and the nonapproximate nature of the radiative transfer model, which simulates the spectra during the fitting procedure, and careful selection of candidate aerosol classes. A test application of the proposed method to a Saharan dust outbreak which occurred in June 1997 is presented, showing that in spite of the instrument's low spatial resolution, information on both optical thickness and spectral characterization of the aerosol can be retrieved from GOME data. Preliminary comparisons of the results with independent estimates of the aerosol content show that a good correlation exists, encouraging planning of a systematic application of the method.}, 72 | year = {1999} 73 | } 74 | 75 | @ARTICLE{MRO/CRISM2008, 76 | author={McGuire, Patrick C. and Wolff, Michael J. and Smith, Michael D. and Arvidson, Raymond E. and Murchie, Scott L. and Clancy, R. Todd and Roush, Ted L. and Cull, Selby C. and Lichtenberg, Kim A. and Wiseman, Sandra M. and Green, Robert O. and Martin, Terry Z. and Milliken, Ralph E. and Cavender, Peter J. and Humm, David C. and Seelos, Frank P. and Seelos, Kim D. and Taylor, Howard W. and Ehlmann, Bethany L. and Mustard, John F. and Pelkey, Shannon M. and Titus, Timothy N. and Hash, Christopher D. 
and Malaret, Erick R.}, 77 | journal={IEEE Transactions on Geoscience and Remote Sensing}, 78 | title={MRO/CRISM Retrieval of Surface Lambert Albedos for Multispectral Mapping of Mars With DISORT-Based Radiative Transfer Modeling: Phase 1—Using Historical Climatology for Temperatures, Aerosol Optical Depths, and Atmospheric Pressures}, 79 | year={2008}, 80 | volume={46}, 81 | number={12}, 82 | pages={4020-4040}, 83 | keywords={Mars;Temperature;Aerosols;Optical surface waves;Optical variables control;Atmospheric waves;Surface reconstruction;Ice;Information retrieval;Atmospheric modeling;Atmospheric propagation;infrared spectroscopy;remote sensing;software verification and validation}, 84 | doi={10.1109/TGRS.2008.2000631}} 85 | 86 | @conference{JupyterNotebook, 87 | Title = {Jupyter Notebooks -- a publishing format for reproducible computational workflows}, 88 | Author = {Thomas Kluyver and Benjamin Ragan-Kelley and Fernando P{\'e}rez and Brian Granger and Matthias Bussonnier and Jonathan Frederic and Kyle Kelley and Jessica Hamrick and Jason Grout and Sylvain Corlay and Paul Ivanov and Dami{\'a}n Avila and Safia Abdalla and Carol Willing}, 89 | Booktitle = {Positioning and Power in Academic Publishing: Players, Agents and Agendas}, 90 | Editor = {F. Loizides and B. Schmidt}, 91 | Organization = {IOS Press}, 92 | Pages = {87 - 90}, 93 | Year = {2016} 94 | } 95 | 96 | @ARTICLE{SciPy, 97 | author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and 98 | Haberland, Matt and Reddy, Tyler and Cournapeau, David and 99 | Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and 100 | Bright, Jonathan and {van der Walt}, St{\'e}fan J. and 101 | Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and 102 | Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and 103 | Kern, Robert and Larson, Eric and Carey, C J and 104 | Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. 
and 105 | {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and 106 | Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and 107 | Harris, Charles R. and Archibald, Anne M. and 108 | Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and 109 | {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, 110 | title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific 111 | Computing in Python}}, 112 | journal = {Nature Methods}, 113 | year = {2020}, 114 | volume = {17}, 115 | pages = {261--272}, 116 | adsurl = {https://rdcu.be/b08Wh}, 117 | doi = {10.1038/s41592-019-0686-2}, 118 | } 119 | 120 | @Article{NumPy, 121 | title = {Array programming with {NumPy}}, 122 | author = {Charles R. Harris and K. Jarrod Millman and St{\'{e}}fan J. 123 | van der Walt and Ralf Gommers and Pauli Virtanen and David 124 | Cournapeau and Eric Wieser and Julian Taylor and Sebastian 125 | Berg and Nathaniel J. Smith and Robert Kern and Matti Picus 126 | and Stephan Hoyer and Marten H. van Kerkwijk and Matthew 127 | Brett and Allan Haldane and Jaime Fern{\'{a}}ndez del 128 | R{\'{i}}o and Mark Wiebe and Pearu Peterson and Pierre 129 | G{\'{e}}rard-Marchant and Kevin Sheppard and Tyler Reddy and 130 | Warren Weckesser and Hameer Abbasi and Christoph Gohlke and 131 | Travis E. Oliphant}, 132 | year = {2020}, 133 | month = sep, 134 | journal = {Nature}, 135 | volume = {585}, 136 | number = {7825}, 137 | pages = {357--362}, 138 | doi = {10.1038/s41586-020-2649-2}, 139 | publisher = {Springer Science and Business Media {LLC}}, 140 | url = {https://doi.org/10.1038/s41586-020-2649-2} 141 | } 142 | 143 | @book{Cha1960, 144 | author = "S. 
Chandrasekhar", 145 | title = "Radiative Transfer", 146 | year = "1960", 147 | publisher = "Dover", 148 | } 149 | 150 | @software{CM2020, 151 | author = {Connour, Kyle and Wolff, Michael}, 152 | license = {BSD-3-Clause}, 153 | title = {{pyRT_DISORT: A pre-processing front-end to help make DISORT simulations easier in Python}}, 154 | url = {https://github.com/kconnour/pyRT_DISORT}, 155 | version = {1.0.0}, 156 | year={2020} 157 | } 158 | 159 | @software{Hu2017, 160 | author={Zoey Hu}, 161 | title={pyDISORT}, 162 | url={https://github.com/chanGimeno/pyDISORT}, 163 | version = {0.8}, 164 | year={2017} 165 | } 166 | 167 | @article{STWJ1988, 168 | author = {Knut Stamnes and S-Chee Tsay and Warren Wiscombe and Kolf Jayaweera}, 169 | journal = {Appl. Opt.}, 170 | keywords = {Electromagnetic radiation; Multiple scattering; Optical depth; Radiative transfer; Reflection; Thermal emission}, 171 | number = {12}, 172 | pages = {2502--2509}, 173 | publisher = {Optica Publishing Group}, 174 | title = {Numerically stable algorithm for discrete-ordinate-method radiative transfer in multiple scattering and emitting layered media}, 175 | volume = {27}, 176 | month = {Jun}, 177 | year = {1988}, 178 | url = {http://opg.optica.org/ao/abstract.cfm?URI=ao-27-12-2502}, 179 | doi = {10.1364/AO.27.002502}, 180 | abstract = {We summarize an advanced, thoroughly documented, and quite general purpose discrete ordinate algorithm for time-independent transfer calculations in vertically inhomogeneous, nonisothermal, plane-parallel media. Atmospheric applications ranging from the UV to the radar region of the electromagnetic spectrum are possible. The physical processes included are thermal emission, scattering, absorption, and bidirectional reflection and emission at the lower boundary. The medium may be forced at the top boundary by parallel or diffuse radiation and by internal and boundary thermal sources as well.
We provide a brief account of the theoretical basis as well as a discussion of the numerical implementation of the theory. The recent advances made by ourselves and our collaborators---advances in both formulation and numerical solution---are all incorporated in the algorithm. Prominent among these advances are the complete conquest of two ill-conditioning problems which afflicted all previous discrete ordinate implementations: (1) the computation of eigenvalues and eigenvectors and (2) the inversion of the matrix determining the constants of integration. Copies of the fortran program on microcomputer diskettes are available for interested users.}, 181 | } 182 | 183 | @misc{Sta1999, 184 | title={LLLab disort website}, 185 | url={http://www.rtatmocn.com/disort/}, 186 | journal={Light and Life Lab (LLLab)}, 187 | author={Stamnes, S.}, 188 | year={1999} 189 | } 190 | 191 | @INPROCEEDINGS{Ber2014, 192 | author={Berk, A. and Conforti, P. and Kennett, R. and Perkins, T. and Hawes, F. and van den Bosch, J.}, 193 | booktitle={2014 6th Workshop on Hyperspectral Image and Signal Processing: Evolution in Remote Sensing (WHISPERS)}, 194 | title={MODTRAN® 6: A major upgrade of the MODTRAN® radiative transfer code}, 195 | year={2014}, 196 | volume={}, 197 | number={}, 198 | pages={1-4}, 199 | doi={10.1109/WHISPERS.2014.8077573}} 200 | 201 | @article{Key1998, 202 | title = {Tools for atmospheric radiative transfer: Streamer and FluxNet}, 203 | journal = {Computers & Geosciences}, 204 | volume = {24}, 205 | number = {5}, 206 | pages = {443-451}, 207 | year = {1998}, 208 | issn = {0098-3004}, 209 | doi = {10.1016/S0098-3004(97)00130-1}, 210 | url = {https://www.sciencedirect.com/science/article/pii/S0098300497001301}, 211 | author = {Jeffrey R Key and Axel J Schweiger}, 212 | keywords = {Radiative transfer models, Atmospheric processes, Radiation budget, Satellites}, 213 | abstract = {Two tools for the solution of radiative transfer problems are presented. 
Streamer is a flexible medium spectral resolution radiative transfer model based on the plane-parallel theory of radiative transfer. Capable of computing either fluxes or radiances, it is suitable for studying radiative processes at the surface or within the atmosphere and for the development of remote-sensing algorithms. FluxNet is a fast neural network-based implementation of Streamer for computing surface fluxes. It allows for a sophisticated treatment of radiative processes in the analysis of large data sets and potential integration into geophysical models where computational efficiency is an issue. Documentation and tools for the development of alternative versions of FluxNet are available. Collectively, Streamer and FluxNet solve a wide variety of problems related to radiative transfer: Streamer provides the detail and sophistication needed to perform basic research on most aspects of complex radiative processes, whereas the efficiency and simplicity of FluxNet make it ideal for operational use.} 214 | } 215 | 216 | @article {Ric1998, 217 | author = "Paul Ricchiazzi and Shiren Yang and Catherine Gautier and David Sowle", 218 | title = "SBDART: A Research and Teaching Software Tool for Plane-Parallel Radiative Transfer in the Earth's Atmosphere", 219 | journal = "Bulletin of the American Meteorological Society", 220 | year = "1998", 221 | publisher = "American Meteorological Society", 222 | address = "Boston MA, USA", 223 | volume = "79", 224 | number = "10", 225 | doi = "10.1175/1520-0477(1998)079<2101:SARATS>2.0.CO;2", 226 | pages= "2101 - 2114", 227 | url = "https://journals.ametsoc.org/view/journals/bams/79/10/1520-0477_1998_079_2101_sarats_2_0_co_2.xml" 228 | } 229 | 230 | @article{Syk1951, 231 | author = {Sykes, J. 
B.}, 232 | title = "{Approximate Integration of the Equation of Transfer}", 233 | journal = {Monthly Notices of the Royal Astronomical Society}, 234 | volume = {111}, 235 | number = {4}, 236 | pages = {377-386}, 237 | year = {1951}, 238 | month = {08}, 239 | abstract = "{The value of numerical integration in obtaining approximate solutions of an equation of transfer, and the different methods at our disposal, are discussed. It is shown that although the Newton-Cotes method, used by Kourganoff, is better than the Gauss method, used by Chandrasekhar, both are inferior to a new method, the double-Gauss, discovered by the author. The errors in the approximate values of the source-function and the limb-darkening in all three methods are tabulated for various approximations, and illustrated by graphs.}", 240 | issn = {0035-8711}, 241 | doi = {10.1093/mnras/111.4.377}, 242 | url = {https://doi.org/10.1093/mnras/111.4.377}, 243 | eprint = {https://academic.oup.com/mnras/article-pdf/111/4/377/8077435/mnras111-0377.pdf}, 244 | } -------------------------------------------------------------------------------- /docs/JOSS_Paper/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'PythonicDISORT: A Python reimplementation of the Discrete Ordinate Radiative Transfer package DISORT' 3 | tags: 4 | - Python 5 | - Radiative Transfer 6 | - Discrete Ordinates Method 7 | - Atmospheric Science 8 | - Climate Models 9 | - DISORT 10 | authors: 11 | - name: Dion J. X. Ho 12 | orcid: 0009-0000-5829-5081 13 | affiliation: "1" 14 | affiliations: 15 | - name: Columbia University, Department of Applied Physics and Applied Mathematics, United States of America 16 | index: 1 17 | date: 11 February 2024 18 | bibliography: paper.bib 19 | --- 20 | 21 | 59 | 60 | # Summary 61 | 70 | 71 | The Radiative Transfer Equation (RTE) models the processes of absorption, scattering and emission 72 | as electromagnetic radiation propagates through a medium. 
73 | Consider a plane-parallel, horizontally homogeneous atmosphere with vertical coordinate 74 | $\tau$ (optical depth) increasing from top to bottom, directional coordinates $\phi$ for the azimuthal angle (positive is counterclockwise), 75 | and $\mu=\cos\theta$ for the polar direction ($\theta$ is the polar angle measured from the surface normal) 76 | with $\mu > 0$ pointing up following the convention of @STWJ1988. 77 | Given three possible sources, namely blackbody emission from the atmosphere $s(\tau)$, 78 | scattering from a collimated beam of starlight with intensity $I_0$ as well as incident azimuthal and cosine polar angles $\phi_0$ and $\mu_0$, respectively, 79 | and radiation from other atmospheric layers or the Earth's surface which is modeled by Dirichlet boundary conditions, 80 | the diffuse intensity $u(\tau, \mu, \phi)$ propagating in direction $(\mu, \phi)$ 81 | is described by the 1D RTE [@Cha1960; @STWJ1988] 82 | 83 | \begin{align} 84 | \begin{split} 85 | \mu \frac{\partial u(\tau, \mu, \phi)}{\partial \tau} = u(\tau, \mu, \phi) &-\frac{\omega}{4 \pi} \int_{-1}^{1} \int_{0}^{2 \pi} p\left(\mu, \phi ; \mu', \phi'\right) u\left(\tau, \mu', \phi'\right) \mathrm{d} \phi' \mathrm{d} \mu' \\ 86 | &-\frac{\omega I_0}{4 \pi} p\left(\mu, \phi ;-\mu_{0}, \phi_{0}\right) \exp\left(-\mu_{0}^{-1} \tau\right) - s(\tau) 87 | \end{split} \label{RTE} 88 | \end{align} 89 | 90 | Here $\omega$ is the single-scattering albedo and $p$ the scattering phase function. 91 | These are assumed to be independent of $\tau$, i.e. homogeneous in the atmospheric layer. 92 | An atmosphere with $\tau$-dependent $\omega$ and $p$ can be modeled by 93 | a multi-layer atmosphere with different $\omega$ and $p$ for each layer. 94 | 95 | The RTE is important in many fields of science and engineering, 96 | for example, in the retrieval of optical properties of the medium from measurements [@TCCGL1999; @MRO/CRISM2008; @TLZWSY2020]. 
97 | The gold standard for numerically solving the 1D RTE is the Discrete Ordinate Radiative Transfer 98 | package `DISORT` which was written in FORTRAN 77 and first released in 1988 [@STWJ1988; @Sta1999]. 99 | It has been widely used, for example by `MODTRAN` [@Ber2014], `Streamer` [@Key1998], and `SBDART` [@Ric1998], 100 | all of which are comprehensive radiative transfer models that are themselves widely used in atmospheric science, 101 | and by the three retrieval papers: @TCCGL1999, @MRO/CRISM2008, and @TLZWSY2020. 102 | `DISORT` implements the Discrete Ordinates Method which has two key steps. 103 | First, the diffuse intensity function $u$ and phase function $p$ are expanded as the Fourier cosine series and Legendre series, respectively, 104 | 105 | $$ 106 | \begin{aligned} 107 | u\left(\tau, \mu, \phi\right) &\approx \sum_{m=0} u^m\left(\tau, \mu\right)\cos\left(m\left(\phi_0 - \phi\right)\right) \\ 108 | p\left(\mu, \phi ; \mu', \phi'\right) = p\left(\cos\gamma\right) &\approx \sum_{\ell=0} (2\ell + 1) g_\ell P_\ell\left(\cos\gamma\right) 109 | \end{aligned} 110 | $$ 111 | 112 | where $\gamma$ is the scattering angle, $g_\ell$ is the $\ell$th Legendre coefficient of the phase function $p$, 113 | and $P_\ell$ is the Legendre polynomial of order $\ell$. 114 | These address the $\phi'$ integral in (\ref{RTE}) and decompose the problem into solving the equation 115 | 116 | $$ 117 | \mu \frac{\partial u^m(\tau, \mu)}{\partial \tau}=u^m(\tau, \mu)-\int_{-1}^1 D^m\left(\mu, \mu'\right) u^m\left(\tau, \mu'\right) \mathrm{d} \mu' - Q^m(\tau, \mu) - \delta_{0m}s(\tau) 118 | $$ 119 | 120 | for each Fourier mode of $u$. The $D^m$ terms are derived from $p$ 121 | and are thus also independent of $\tau$. The $Q^m$ terms are derived from the direct beam source. 122 | The second key step is to discretize the $\mu'$ integral using a quadrature scheme. 123 | `DISORT` uses the double-Gauss quadrature scheme from @Syk1951. 
124 | This results in a system of ordinary differential equations that can be solved using standard methods, 125 | and post-hoc corrections are made to reduce the errors incurred 126 | by the truncation of the phase function Legendre series [@Wis1977; @NT1988]. 127 | 128 | My package `PythonicDISORT` is a Python 3 reimplementation of `DISORT` that replicates 129 | most of its functionality while being easier to install, use and modify, 130 | though at the cost of computational speed. It has `DISORT`'s main features: 131 | multi-layer solver, delta-$M$ scaling, Nakajima-Tanaka corrections, only flux option, 132 | direct beam source, isotropic internal source (blackbody emission), Dirichlet boundary conditions 133 | (diffuse flux boundary sources), Bi-Directional Reflectance Function 134 | for surface reflection, as well as additional features like actinic flux computation 135 | and integration of the solution functions with respect to optical depth. 136 | `PythonicDISORT` has been tested against `DISORT` on `DISORT`'s own test problems. While packages 137 | that wrap `DISORT` in Python already exist [@CM2020; @Hu2017], 138 | `PythonicDISORT` is the first reimplementation of `DISORT` from scratch in Python. 139 | 140 | # Statement of need 141 | 142 | `PythonicDISORT` is not meant to replace `DISORT`. Due to fundamental 143 | differences between Python and FORTRAN, `PythonicDISORT`, though quite optimized, 144 | remains slower than `DISORT`. Thus, projects that 145 | prioritize computational speed should still use `DISORT`. 146 | In addition, `PythonicDISORT` currently lacks `DISORT`'s latest features, 147 | most notably its pseudo-spherical correction. 148 | 149 | `PythonicDISORT` is instead designed with three goals in mind. 150 | First, it is meant to be a pedagogical and exploratory tool. 151 | `PythonicDISORT`'s ease of installation and use makes it a low-barrier 152 | introduction to Radiative Transfer and Discrete Ordinates Solvers. 
153 | Even researchers who are experienced in the field may find it useful to experiment 154 | with `PythonicDISORT` before upscaling with `DISORT`. 155 | Installation of `PythonicDISORT` through `pip` should be system agnostic 156 | as `PythonicDISORT`'s core dependencies are only `NumPy` [@NumPy] and `SciPy` [@SciPy]. 157 | In addition, using `PythonicDISORT` is as simple as calling the Python function `pydisort`. In contrast, 158 | `DISORT` requires FORTRAN compilers and manual memory allocation, has a lengthy and system-dependent 159 | installation, and each call requires a shell script for compilation and execution. 160 | 161 | Second, `PythonicDISORT` is designed to be modified by users to suit their needs. 162 | Given that Python is a widely used high-level language, `PythonicDISORT`'s 163 | code should be accessible to more people than `DISORT`'s FORTRAN code. 164 | Moreover, `PythonicDISORT` comes with a Jupyter Notebook [@JupyterNotebook] -- 165 | its [*Comprehensive Documentation*](https://pythonic-disort.readthedocs.io/en/latest/Pythonic-DISORT.html) -- 166 | that breaks down both the mathematics and code behind the solver. 167 | Users can in theory follow the Notebook to recode `PythonicDISORT` from scratch; 168 | it should at least help them make modifications. 169 | 170 | Third, `PythonicDISORT` is intended to be a testbed. 171 | For the same reasons given above, it should be easier 172 | to implement and test experimental features in `PythonicDISORT` than in `DISORT`. 173 | This should expedite research and development for `DISORT` and similar algorithms. 174 | 175 | `PythonicDISORT` was first released on [PyPI](https://pypi.org/project/PythonicDISORT/) 176 | and [GitHub](https://github.com/LDEO-CREW/Pythonic-DISORT) on May 30, 2023. 177 | It was used in @HP2024 and is being used in at least three ongoing projects: 178 | on the Two-Stream Approximations, on atmospheric photolysis, 179 | and on the topographic mapping of Mars through photoclinometry. 
180 | I will continue to maintain and upgrade `PythonicDISORT`. The latest version: 181 | `PythonicDISORT v0.9.3` was released on October 13, 2024. 182 | 183 | # Acknowledgements 184 | 185 | I acknowledge funding from NSF through the Learning the Earth with Artificial intelligence and Physics (LEAP) 186 | Science and Technology Center (STC) (Award #2019625). I am also grateful to my Columbia University PhD advisor 187 | Dr. Robert Pincus and co-advisor Dr. Kui Ren for their advice and contributions. Finally, I would like to thank three reviewers and the 188 | associate editor for their helpful feedback. 189 | 190 | # References -------------------------------------------------------------------------------- /docs/JOSS_Paper/sample.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'Gala: A Python package for galactic dynamics' 3 | tags: 4 | - Python 5 | - astronomy 6 | - dynamics 7 | - galactic dynamics 8 | - milky way 9 | authors: 10 | - name: Adrian M. Price-Whelan 11 | orcid: 0000-0000-0000-0000 12 | equal-contrib: true 13 | affiliation: "1, 2" # (Multiple affiliations must be quoted) 14 | - name: Author Without ORCID 15 | equal-contrib: true # (This is how you can denote equal contributions between multiple authors) 16 | affiliation: 2 17 | - name: Author with no affiliation 18 | corresponding: true # (This is how to denote the corresponding author) 19 | affiliation: 3 20 | - given-names: Ludwig 21 | dropping-particle: van 22 | surname: Beethoven 23 | affiliation: 3 24 | affiliations: 25 | - name: Lyman Spitzer, Jr. 
Fellow, Princeton University, USA 26 | index: 1 27 | - name: Institution Name, Country 28 | index: 2 29 | - name: Independent Researcher, Country 30 | index: 3 31 | date: 13 August 2017 32 | bibliography: paper.bib 33 | 34 | # Optional fields if submitting to a AAS journal too, see this blog post: 35 | # https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing 36 | aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it. 37 | aas-journal: Astrophysical Journal <- The name of the AAS journal. 38 | --- 39 | 40 | # Summary 41 | 42 | The forces on stars, galaxies, and dark matter under external gravitational 43 | fields lead to the dynamical evolution of structures in the universe. The orbits 44 | of these bodies are therefore key to understanding the formation, history, and 45 | future state of galaxies. The field of "galactic dynamics," which aims to model 46 | the gravitating components of galaxies to study their structure and evolution, 47 | is now well-established, commonly taught, and frequently used in astronomy. 48 | Aside from toy problems and demonstrations, the majority of problems require 49 | efficient numerical tools, many of which require the same base code (e.g., for 50 | performing numerical orbit integration). 51 | 52 | # Statement of need 53 | 54 | `Gala` is an Astropy-affiliated Python package for galactic dynamics. Python 55 | enables wrapping low-level languages (e.g., C) for speed without losing 56 | flexibility or ease-of-use in the user-interface. The API for `Gala` was 57 | designed to provide a class-based and user-friendly interface to fast (C or 58 | Cython-optimized) implementations of common operations such as gravitational 59 | potential and force evaluation, orbit integration, dynamical transformations, 60 | and chaos indicators for nonlinear dynamics. 
`Gala` also relies heavily on and 61 | interfaces well with the implementations of physical units and astronomical 62 | coordinate systems in the `Astropy` package [@astropy] (`astropy.units` and 63 | `astropy.coordinates`). 64 | 65 | `Gala` was designed to be used by both astronomical researchers and by 66 | students in courses on gravitational dynamics or astronomy. It has already been 67 | used in a number of scientific publications [@Pearson:2017] and has also been 68 | used in graduate courses on Galactic dynamics to, e.g., provide interactive 69 | visualizations of textbook material [@Binney:2008]. The combination of speed, 70 | design, and support for Astropy functionality in `Gala` will enable exciting 71 | scientific explorations of forthcoming data releases from the *Gaia* mission 72 | [@gaia] by students and experts alike. 73 | 74 | # Mathematics 75 | 76 | Single dollars ($) are required for inline mathematics e.g. $f(x) = e^{\pi/x}$ 77 | 78 | Double dollars make self-standing equations: 79 | 80 | $$\Theta(x) = \left\{\begin{array}{l} 81 | 0\textrm{ if } x < 0\cr 82 | 1\textrm{ else} 83 | \end{array}\right.$$ 84 | 85 | You can also use plain \LaTeX for equations 86 | \begin{equation}\label{eq:fourier} 87 | \hat f(\omega) = \int_{-\infty}^{\infty} f(x) e^{i\omega x} dx 88 | \end{equation} 89 | and refer to \autoref{eq:fourier} from text. 90 | 91 | # Citations 92 | 93 | Citations to entries in paper.bib should be in 94 | [rMarkdown](http://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html) 95 | format. 96 | 97 | If you want to cite a software repository URL (e.g. something on GitHub without a preferred 98 | citation) then you can do it with the example BibTeX entry below for @fidgit. 99 | 100 | For a quick reference, the following citation commands can be used: 101 | - `@author:2001` -> "Author et al. 
(2001)" 102 | - `[@author:2001]` -> "(Author et al., 2001)" 103 | - `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)" 104 | 105 | # Figures 106 | 107 | Figures can be included like this: 108 | ![Caption for example figure.\label{fig:example}](figure.png) 109 | and referenced from text using \autoref{fig:example}. 110 | 111 | Figure sizes can be customized by adding an optional second parameter: 112 | ![Caption for example figure.](figure.png){ width=20% } 113 | 114 | # Acknowledgements 115 | 116 | We acknowledge contributions from Brigitta Sipocz, Syrtis Major, and Semyeong 117 | Oh, and support from Kathryn Johnston during the genesis of this project. 118 | 119 | # References -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = 'Pythonic DISORT'
copyright = '2023, HO Jia Xu Dion'
author = 'Dion HO Jia Xu'
release = '1.1'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

import os
import sys

# Make the PythonicDISORT sources importable for autodoc. Anchor the path on
# this file's location rather than the working directory so the docs build
# regardless of where Sphinx is invoked from.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')))

extensions = [
    'sphinx.ext.napoleon',  # NumPy-style docstring support
    'nbsphinx',             # Render the Jupyter-notebook documentation
]

# The package uses NumPy-style (not Google-style) docstrings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_ivar = True

templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']



# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output


html_theme = 'python_docs_theme'
toctree:: 13 | :maxdepth: 4 14 | 15 | source/PythonicDISORT 16 | 17 | Jupyter Notebook (Comprehensive Documentation) 18 | ============================================== 19 | It is highly recommended that new users read the non-optional parts of sections 1 and 2. 20 | 21 | .. toctree:: 22 | :maxdepth: 4 23 | 24 | Pythonic-DISORT 25 | 26 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements for ReadTheDocs, not the PythonicDISORT package 2 | 3 | numpy 4 | scipy 5 | joblib 6 | autograd 7 | ipython 8 | matplotlib 9 | sphinx 10 | nbsphinx 11 | python_docs_theme 12 | 13 | -------------------------------------------------------------------------------- /docs/section6_testresults1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/docs/section6_testresults1.npz -------------------------------------------------------------------------------- /docs/section6_testresults2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/docs/section6_testresults2.npz -------------------------------------------------------------------------------- /docs/source/PythonicDISORT.rst: -------------------------------------------------------------------------------- 1 | PythonicDISORT package 2 | ====================== 3 | 4 | PythonicDISORT.pydisort module 5 | ------------------------------ 6 | 7 | .. automodule:: PythonicDISORT.pydisort 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | 12 | PythonicDISORT.subroutines module 13 | --------------------------------- 14 | 15 | .. 
def test_11a():
    """Sixteen identical stacked layers must reproduce the single-layer solution."""
    ######################################### PYDISORT ARGUMENTS #######################################
    tau_arr = np.arange(16) / 2 + 0.5
    NLayers = len(tau_arr)
    omega_arr = np.full(NLayers, 1 - 1e-6)
    NQuad = 16
    Leg_coeffs_all = np.tile(0.75 ** np.arange(32), (NLayers, 1))
    mu0 = 0.6
    I0 = pi / mu0
    phi0 = 0.9 * pi

    # Optional (used)
    f_arr = np.repeat(Leg_coeffs_all[0, NQuad], NLayers)
    NT_cor = True
    b_neg = 1
    b_pos = 1
    BDRF_Fourier_modes = [lambda mu, neg_mup: np.full((len(mu), len(neg_mup)), 1)]
    s_poly_coeffs = np.tile(np.array([1, 1]), (NLayers, 1))

    # Optional (unused)
    NLeg = None
    NFourier = None
    only_flux = False
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Test points
    Nphi = int((NQuad * pi) // 2) * 2 + 1
    phi_arr, full_weights_phi = PythonicDISORT.subroutines.Clenshaw_Curtis_quad(Nphi)
    Ntau = 100
    tau_test_arr = np.sort(np.random.random(Ntau) * tau_arr[-1])

    # Keyword arguments common to both pydisort calls
    shared_kwargs = dict(
        b_pos=b_pos,
        b_neg=b_neg,
        BDRF_Fourier_modes=BDRF_Fourier_modes,
        NT_cor=True,
    )

    # Solve the equivalent single-layer problem
    flux_up_1layer, flux_down_1layer, u0, u_1layer = PythonicDISORT.pydisort(
        tau_arr[-1], omega_arr[0],
        NQuad,
        Leg_coeffs_all[0, :],
        mu0, I0, phi0,
        f_arr=f_arr[0],
        s_poly_coeffs=s_poly_coeffs[0, :],
        **shared_kwargs,
    )[1:]

    # Solve the same problem split into 16 layers
    flux_up_16layers, flux_down_16layers, u0, u_16layers = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        f_arr=f_arr,
        s_poly_coeffs=s_poly_coeffs,
        **shared_kwargs,
    )[1:]

    # Both solutions must agree at every randomly drawn optical depth
    assert np.allclose(flux_up_1layer(tau_test_arr), flux_up_16layers(tau_test_arr))
    assert np.allclose(flux_down_1layer(tau_test_arr), flux_down_16layers(tau_test_arr))
    assert np.allclose(u_1layer(tau_test_arr, phi_arr), u_16layers(tau_test_arr, phi_arr))
# --------------------------------------------------------------------------------------------------
def test_1b():
    """Isotropic scattering, thin layer (tau = 0.03125), near-conservative scattering.

    Compares PythonicDISORT's fluxes and intensities against reference output
    from version 4.0.99 of Stamnes' DISORT.
    """
    print()
    print("################################################ Test 1b ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 0.03125
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 16
    Leg_coeffs_all = np.zeros(17)
    Leg_coeffs_all[0] = 1
    mu0 = 0.1
    I0 = pi / mu0
    phi0 = pi

    # Optional (used)

    # Optional (unused)
    # NOTE: a duplicated `autograd_compatible=False` line was removed here
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Call pydisort function
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
    )

    # mu_arr is arranged as it is for code efficiency and readability
    # For presentation purposes we re-arrange mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # We may not want to compare intensities around the direct beam
    deg_around_beam_to_not_compare = 0
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )

    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/1b_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u)

    # Fluxes must agree to 0.1% and intensities to 1% wherever differences exceed 1e-3
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
def test_1d():
    """Isotropic scattering, thick layer (tau = 32), low single-scattering albedo."""
    print()
    print("################################################ Test 1d ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 32
    omega_arr = 0.2
    NQuad = 16
    Leg_coeffs_all = np.zeros(17)
    Leg_coeffs_all[0] = 1
    mu0 = 0.1
    I0 = pi / mu0
    phi0 = pi

    # Optional (used)

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Solve with PythonicDISORT
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
    )

    # Re-arrange mu_arr from smallest to largest for presentation purposes
    # (mu_arr itself is ordered for code efficiency and readability)
    order = np.argsort(mu_arr)
    mu_sorted = mu_arr[order]

    # Optionally exclude intensities within some angle of the direct beam
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_sorted)) - np.arccos(mu0)) * 180 / pi > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    stamnes = np.load("Stamnes_results/1d_test.npz")

    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(stamnes, compare_mask, order, flux_up, flux_down, u)

    # Fluxes agree to 0.1% and intensities to 1% wherever differences exceed 1e-3
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
0.1 322 | I0 = pi / mu0 323 | phi0 = pi 324 | 325 | # Optional (used) 326 | 327 | # Optional (unused) 328 | NLeg=None 329 | NFourier=None 330 | b_pos=0 331 | b_neg=0 332 | only_flux=False 333 | f_arr=0 334 | NT_cor=False 335 | BDRF_Fourier_modes=[] 336 | s_poly_coeffs=np.array([[]]) 337 | use_banded_solver_NLayers=10 338 | autograd_compatible=False 339 | 340 | #################################################################################################### 341 | 342 | # Call pydisort function 343 | mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort( 344 | tau_arr, omega_arr, 345 | NQuad, 346 | Leg_coeffs_all, 347 | mu0, I0, phi0, 348 | ) 349 | 350 | # mu_arr is arranged as it is for code efficiency and readability 351 | # For presentation purposes we re-arrange mu_arr from smallest to largest 352 | reorder_mu = np.argsort(mu_arr) 353 | mu_arr_RO = mu_arr[reorder_mu] 354 | 355 | # We may not want to compare intensities around the direct beam 356 | deg_around_beam_to_not_compare = 0 357 | mu_to_compare = ( 358 | np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi 359 | > deg_around_beam_to_not_compare 360 | ) 361 | 362 | # Load results from version 4.0.99 of Stamnes' DISORT for comparison 363 | results = np.load("Stamnes_results/1e_test.npz") 364 | 365 | # Perform the comparisons 366 | ( 367 | diff_flux_up, 368 | ratio_flux_up, 369 | diff_flux_down_diffuse, 370 | ratio_flux_down_diffuse, 371 | diff_flux_down_direct, 372 | ratio_flux_down_direct, 373 | diff, 374 | diff_ratio, 375 | ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u) 376 | 377 | assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3 378 | assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3 379 | assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3 380 | assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2 381 | # 
def test_1f():
    """Isotropic scattering, thick layer (tau = 32), high single-scattering albedo."""
    print()
    print("################################################ Test 1f ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 32
    omega_arr = 0.99
    NQuad = 16
    Leg_coeffs_all = np.zeros(17)
    Leg_coeffs_all[0] = 1
    mu0 = 0.1
    I0 = pi / mu0
    phi0 = pi

    # Optional (used)

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Solve with PythonicDISORT
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
    )

    # Re-arrange mu_arr from smallest to largest for presentation purposes
    # (mu_arr itself is ordered for code efficiency and readability)
    order = np.argsort(mu_arr)
    mu_sorted = mu_arr[order]

    # Optionally exclude intensities within some angle of the direct beam
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_sorted)) - np.arccos(mu0)) * 180 / pi > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    stamnes = np.load("Stamnes_results/1f_test.npz")

    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(stamnes, compare_mask, order, flux_up, flux_down, u)

    # Fluxes agree to 0.1% and intensities to 1% wherever differences exceed 1e-3
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
def test_2b():
    """Rayleigh-like scattering, thin layer (tau = 0.2), near-conservative scattering."""
    print()
    print("################################################ Test 2b ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 0.2
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 16
    Leg_coeffs_all = np.zeros(17)
    Leg_coeffs_all[0] = 1
    Leg_coeffs_all[2] = 0.1
    mu0 = 0.080442
    I0 = pi
    phi0 = pi

    # Optional (used)

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Solve with PythonicDISORT
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
    )

    # Re-arrange mu_arr from smallest to largest for presentation purposes
    # (mu_arr itself is ordered for code efficiency and readability)
    order = np.argsort(mu_arr)
    mu_sorted = mu_arr[order]

    # Optionally exclude intensities within some angle of the direct beam
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_sorted)) - np.arccos(mu0)) * 180 / pi > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    stamnes = np.load("Stamnes_results/2b_test.npz")

    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(stamnes, compare_mask, order, flux_up, flux_down, u)

    # Fluxes agree to 0.1% and intensities to 1% wherever differences exceed 1e-3
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
def test_2d():
    """Rayleigh-like scattering, moderately thick layer (tau = 5), near-conservative scattering."""
    print()
    print("################################################ Test 2d ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 5
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 16
    Leg_coeffs_all = np.zeros(17)
    Leg_coeffs_all[0] = 1
    Leg_coeffs_all[2] = 0.1
    mu0 = 0.080442
    I0 = pi
    phi0 = pi

    # Optional (used)

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Solve with PythonicDISORT
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
    )

    # Re-arrange mu_arr from smallest to largest for presentation purposes
    # (mu_arr itself is ordered for code efficiency and readability)
    order = np.argsort(mu_arr)
    mu_sorted = mu_arr[order]

    # Optionally exclude intensities within some angle of the direct beam
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_sorted)) - np.arccos(mu0)) * 180 / pi > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    stamnes = np.load("Stamnes_results/2d_test.npz")

    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(stamnes, compare_mask, order, flux_up, flux_down, u)

    # Fluxes agree to 0.1% and intensities to 1% wherever differences exceed 1e-3
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
##################################################") 13 | print() 14 | ######################################### PYDISORT ARGUMENTS ####################################### 15 | 16 | tau_arr = 1 17 | omega_arr = 1 - 1e-6 # Reduced from 1 because we have not implemented that special case 18 | NQuad = 16 19 | Leg_coeffs_all = 0.75 ** np.arange(32) 20 | mu0 = 1 21 | I0 = pi / mu0 22 | phi0 = pi 23 | 24 | # Optional (used) 25 | f_arr = Leg_coeffs_all[NQuad] 26 | NT_cor = True 27 | 28 | # Optional (unused) 29 | NLeg=None 30 | NFourier=None 31 | b_pos=0 32 | b_neg=0 33 | only_flux=False 34 | BDRF_Fourier_modes=[] 35 | s_poly_coeffs=np.array([[]]) 36 | use_banded_solver_NLayers=10 37 | autograd_compatible=False 38 | 39 | #################################################################################################### 40 | 41 | # Call pydisort function 42 | mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort( 43 | tau_arr, omega_arr, 44 | NQuad, 45 | Leg_coeffs_all, 46 | mu0, I0, phi0, 47 | f_arr=f_arr, 48 | NT_cor=NT_cor, 49 | ) 50 | 51 | # mu_arr is arranged as it is for code efficiency and readability 52 | # For presentation purposes we re-arrange mu_arr from smallest to largest 53 | reorder_mu = np.argsort(mu_arr) 54 | mu_arr_RO = mu_arr[reorder_mu] 55 | 56 | # We may not want to compare intensities around the direct beam 57 | deg_around_beam_to_not_compare = 0 58 | mu_to_compare = ( 59 | np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi 60 | > deg_around_beam_to_not_compare 61 | ) 62 | 63 | 64 | # Load results from version 4.0.99 of Stamnes' DISORT for comparison 65 | results = np.load("Stamnes_results/3a_test.npz") 66 | 67 | # Perform the comparisons 68 | ( 69 | diff_flux_up, 70 | ratio_flux_up, 71 | diff_flux_down_diffuse, 72 | ratio_flux_down_diffuse, 73 | diff_flux_down_direct, 74 | ratio_flux_down_direct, 75 | diff, 76 | diff_ratio, 77 | ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u) 78 | 79 | assert 
def test_3b():
    """Test 3b: Henyey-Greenstein scattering (g = 0.75), thick layer (tau = 8)."""
    print()
    print("################################################ Test 3b ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 8
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 16
    Leg_coeffs_all = 0.75 ** np.arange(32)  # Henyey-Greenstein moments g^l
    mu0 = 1
    I0 = pi / mu0
    phi0 = pi

    # Optional (used)
    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    NT_cor = True

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Run PythonicDISORT with the arguments above
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        f_arr=f_arr,
        NT_cor=NT_cor,
    )

    # mu_arr is arranged as it is for code efficiency and readability;
    # sort the quadrature angles in increasing order for presentation
    mu_order = np.argsort(mu_arr)
    mu_arr_sorted = mu_arr[mu_order]

    # Optionally exclude intensities in a cone about the direct beam (0 degrees here)
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_arr_sorted)) - np.arccos(mu0)) * 180 / pi
        > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/3b_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    for abs_diff, rel_diff, tol in (
        (diff_flux_up, ratio_flux_up, 1e-3),
        (diff_flux_down_diffuse, ratio_flux_down_diffuse, 1e-3),
        (diff_flux_down_direct, ratio_flux_down_direct, 1e-3),
        (diff, diff_ratio, 1e-2),
    ):
        assert np.max(rel_diff[abs_diff > 1e-3], initial=0) < tol
# --------------------------------------------------------------------------------------------------
def test_4a():
    """Test 4a: Haze-L scattering, conservative layer of thickness 1, overhead beam."""
    print()
    print("################################################ Test 4a ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 1
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 32
    # Convert the tabulated (2l + 1)-weighted Haze-L moments to plain Legendre coefficients
    Leg_coeffs_all = Leg_coeffs_ALL / (2 * np.arange(83) + 1)
    mu0 = 1
    I0 = pi
    phi0 = pi

    # Optional (used)
    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    NT_cor = True

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Run PythonicDISORT with the arguments above
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all[: NQuad + 1],  # DISORT strangely does not use all moments
        mu0, I0, phi0,
        f_arr=f_arr,
        NT_cor=NT_cor
    )

    # mu_arr is arranged as it is for code efficiency and readability;
    # sort the quadrature angles in increasing order for presentation
    mu_order = np.argsort(mu_arr)
    mu_arr_sorted = mu_arr[mu_order]

    # Optionally exclude intensities in a cone about the direct beam (0 degrees here)
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_arr_sorted)) - np.arccos(mu0)) * 180 / pi
        > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/4a_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    for abs_diff, rel_diff, tol in (
        (diff_flux_up, ratio_flux_up, 1e-3),
        (diff_flux_down_diffuse, ratio_flux_down_diffuse, 1e-3),
        (diff_flux_down_direct, ratio_flux_down_direct, 1e-3),
        (diff, diff_ratio, 1e-2),
    ):
        assert np.max(rel_diff[abs_diff > 1e-3], initial=0) < tol
# --------------------------------------------------------------------------------------------------
def test_4c():
    """Test 4c: Haze-L scattering, absorbing layer (omega = 0.9), slanted beam (mu0 = 0.5)."""
    print()
    print("################################################ Test 4c ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 1
    omega_arr = 0.9
    NQuad = 32
    # Convert the tabulated (2l + 1)-weighted Haze-L moments to plain Legendre coefficients
    Leg_coeffs_all = Leg_coeffs_ALL / (2 * np.arange(83) + 1)
    mu0 = 0.5
    I0 = pi
    phi0 = pi

    # Optional (used)
    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    NT_cor = True

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Run PythonicDISORT with the arguments above
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all[: NQuad + 1],  # DISORT strangely does not use all moments
        mu0, I0, phi0,
        f_arr=f_arr,
        NT_cor=NT_cor
    )

    # mu_arr is arranged as it is for code efficiency and readability;
    # sort the quadrature angles in increasing order for presentation
    mu_order = np.argsort(mu_arr)
    mu_arr_sorted = mu_arr[mu_order]

    # Optionally exclude intensities in a cone about the direct beam (0 degrees here)
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_arr_sorted)) - np.arccos(mu0)) * 180 / pi
        > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/4c_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    for abs_diff, rel_diff, tol in (
        (diff_flux_up, ratio_flux_up, 1e-3),
        (diff_flux_down_diffuse, ratio_flux_down_diffuse, 1e-3),
        (diff_flux_down_direct, ratio_flux_down_direct, 1e-3),
        (diff, diff_ratio, 1e-2),
    ):
        assert np.max(rel_diff[abs_diff > 1e-3], initial=0) < tol
# --------------------------------------------------------------------------------------------------
def test_5a():
    """Test 5a: Cloud C.1 scattering, very thick (tau = 64) near-conservative layer."""
    print()
    print("################################################ Test 5a ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 64
    omega_arr = 1 - 1e-6  # Reduced from 1 because we have not implemented that special case
    NQuad = 48
    # Convert the tabulated (2l + 1)-weighted Cloud C.1 moments to plain Legendre coefficients
    Leg_coeffs_all = Leg_coeffs_ALL / (2 * np.arange(300) + 1)
    mu0 = 1
    I0 = pi
    phi0 = pi

    # Optional (used)
    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    NT_cor = True

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Run PythonicDISORT with the arguments above
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        f_arr=f_arr,
        NT_cor=NT_cor,
    )

    # mu_arr is arranged as it is for code efficiency and readability;
    # sort the quadrature angles in increasing order for presentation
    mu_order = np.argsort(mu_arr)
    mu_arr_sorted = mu_arr[mu_order]

    # By default we do not compare intensities 10 degrees around the direct beam
    exclusion_deg = 10  # This parameter changes the size of the excluded region
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_arr_sorted)) - np.arccos(mu0)) * 180 / pi
        > exclusion_deg
    )

    # Reference results from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/5a_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    for abs_diff, rel_diff, tol in (
        (diff_flux_up, ratio_flux_up, 1e-3),
        (diff_flux_down_diffuse, ratio_flux_down_diffuse, 1e-3),
        (diff_flux_down_direct, ratio_flux_down_direct, 1e-3),
        (diff, diff_ratio, 1e-2),
    ):
        assert np.max(rel_diff[abs_diff > 1e-3], initial=0) < tol
# --------------------------------------------------------------------------------------------------
##################################################") 127 | print() 128 | ######################################### PYDISORT ARGUMENTS ####################################### 129 | 130 | tau_arr = 64 131 | omega_arr = 0.9 132 | NQuad = 48 133 | Leg_coeffs_all = Leg_coeffs_ALL / (2 * np.arange(300) + 1) 134 | mu0 = 1 135 | I0 = pi 136 | phi0 = pi 137 | 138 | # Optional (used) 139 | f_arr = Leg_coeffs_all[NQuad] 140 | NT_cor = True 141 | 142 | # Optional (unused) 143 | NLeg=None 144 | NFourier=None 145 | b_pos=0 146 | b_neg=0 147 | only_flux=False 148 | BDRF_Fourier_modes=[] 149 | s_poly_coeffs=np.array([[]]) 150 | use_banded_solver_NLayers=10 151 | autograd_compatible=False 152 | 153 | #################################################################################################### 154 | 155 | # Call pydisort function 156 | mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort( 157 | tau_arr, omega_arr, 158 | NQuad, 159 | Leg_coeffs_all, 160 | mu0, I0, phi0, 161 | f_arr=f_arr, 162 | NT_cor=NT_cor, 163 | ) 164 | 165 | # mu_arr is arranged as it is for code efficiency and readability 166 | # For presentation purposes we re-arrange mu_arr from smallest to largest 167 | reorder_mu = np.argsort(mu_arr) 168 | mu_arr_RO = mu_arr[reorder_mu] 169 | 170 | # By default we do not compare intensities 10 degrees around the direct beam 171 | deg_around_beam_to_not_compare = 10 # This parameter changes the size of the region 172 | mu_to_compare = ( 173 | np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi 174 | > deg_around_beam_to_not_compare 175 | ) 176 | 177 | 178 | 179 | # Load results from version 4.0.99 of Stamnes' DISORT for comparison 180 | results = np.load("Stamnes_results/5b_test.npz") 181 | 182 | # Perform the comparisons 183 | ( 184 | diff_flux_up, 185 | ratio_flux_up, 186 | diff_flux_down_diffuse, 187 | ratio_flux_down_diffuse, 188 | diff_flux_down_direct, 189 | ratio_flux_down_direct, 190 | diff, 191 | diff_ratio, 192 | ) = _compare(results, 
def test_7a():
    """Test 7a: one medium-thick, weakly scattering layer with thermal emission only.

    There is no direct beam (mu0 = I0 = phi0 = 0); the only source is blackbody
    emission within the layer, with emissivity (1 - omega_arr) by Kirchoff's law
    of thermal radiation.  Fluxes and intensities are compared against reference
    output from version 4.0.99 of Stamnes' DISORT.
    """
    print()
    print("################################################ Test 7a ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 1                                  # One layer of thickness 1 (medium-thick atmosphere)
    omega_arr = 0.1                              # Very low scattering
    NQuad = 16                                   # 16 streams (8 quadrature nodes for each hemisphere)
    Leg_coeffs_all = 0.05 ** np.arange(NQuad + 1)  # Henyey-Greenstein phase function with g = 0.05
    mu0 = 0                                      # No direct beam
    I0 = 0                                       # No direct beam
    phi0 = 0                                     # No direct beam

    # Optional (used)
    TEMPER = np.array([200, 300])
    WVNMLO = 300
    WVNMHI = 800
    # Emissivity is (1 - omega_arr) by Kirchoff's law of thermal radiation
    s_poly_coeffs = generate_s_poly_coeffs(tau_arr, TEMPER, WVNMLO, WVNMHI) * (1 - omega_arr)

    # Optional (unused); listed to document pydisort's defaults
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Call pydisort function
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        s_poly_coeffs=s_poly_coeffs,
    )

    # Reorder mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # We may not want to compare intensities around the direct beam
    deg_around_beam_to_not_compare = 0
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )
    # NOTE(review): the previous revision also computed mu_arr_RO[mu_to_compare]
    # into an unused local; it has been removed.

    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/7a_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
def test_7c():
    """Test 7c: absorbing layer with every source active (beam, internal emission, boundary emission)."""
    print()
    print("################################################ Test 7c ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 1                                  # One layer of thickness 1 (Medium-thick atmosphere)
    omega_arr = 0.5                              # Low scattering
    NQuad = 12                                   # 12 streams (6 quadrature nodes for each hemisphere)
    Leg_coeffs_all = 0.8 ** np.arange(NQuad * 2)  # Henyey-Greenstein phase function with g = 0.8
    mu0 = 0.5                                    # Cosine of solar zenith angle (directly downwards)
    I0 = 200                                     # Intensity of direct beam
    phi0 = 0                                     # Azimuthal angle of direct beam

    # Optional (used)
    TEMPER = np.array([300, 200])
    WVNMLO = 0
    WVNMHI = 80000
    BTEMP = 320
    TTEMP = 100
    # Emissivity is (1 - omega_arr) by Kirchoff's law of thermal radiation
    s_poly_coeffs = generate_s_poly_coeffs(tau_arr, TEMPER, WVNMLO, WVNMHI, epsrel=1e-15) * (1 - omega_arr)
    b_pos = blackbody_contrib_to_BCs(BTEMP, WVNMLO, WVNMHI, epsrel=1e-15)        # Emissivity 1
    b_neg = blackbody_contrib_to_BCs(TTEMP, WVNMLO, WVNMHI, epsrel=1e-15) + 100  # Emissivity 1

    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    NT_cor = True

    # Optional (unused)
    NLeg = None
    NFourier = None
    only_flux = False
    BDRF_Fourier_modes = []
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    ####################################################################################################

    # Run PythonicDISORT with the arguments above
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_pos=b_pos,
        b_neg=b_neg,
        s_poly_coeffs=s_poly_coeffs,
        f_arr=f_arr,
        NT_cor=NT_cor,
    )

    # Sort the quadrature angles in increasing order for presentation
    mu_order = np.argsort(mu_arr)
    mu_arr_sorted = mu_arr[mu_order]

    # Optionally exclude intensities in a cone about the direct beam (0 degrees here)
    exclusion_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_arr_sorted)) - np.arccos(mu0)) * 180 / pi
        > exclusion_deg
    )
    mu_test_arr_RO = mu_arr_sorted[compare_mask]

    # Reference results from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/7c_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Relative error must be small wherever the absolute difference is non-negligible
    for abs_diff, rel_diff, tol in (
        (diff_flux_up, ratio_flux_up, 1e-3),
        (diff_flux_down_diffuse, ratio_flux_down_diffuse, 1e-3),
        (diff_flux_down_direct, ratio_flux_down_direct, 1e-3),
        (diff, diff_ratio, 1e-2),
    ):
        assert np.max(rel_diff[abs_diff > 1e-3], initial=0) < tol
# --------------------------------------------------------------------------------------------------
def test_7e():
    """Test problem 7e: thermal sources as in 7c/7d, but with a full Hapke
    bidirectional reflectance (BDRF) at the lower boundary, expanded into
    azimuthal Fourier modes by numerical quadrature, and fluxes only.

    Results are compared against saved output from version 4.0.99 of
    Stamnes' DISORT (``Stamnes_results/7e_test.npz``).
    """
    print()
    print("################################################ Test 7e ##################################################")
    print()
    def Hapke(mu, neg_mup, dphi, B0, HH, W):
        # Semi-empirical Hapke reflectance model; presumably B0 is the
        # hot-spot amplitude, HH its angular width, and W the single-scattering
        # albedo -- TODO confirm against the Hapke (1981) parameterization.
        # cos of the scattering angle, clipped into [-1, 1] to guard arccos
        # against floating-point overshoot.
        cos_alpha = (mu[:, None] * neg_mup[None, :] - np.sqrt(1 - mu**2)[:, None] * np.sqrt(
            (1 - neg_mup**2)[None, :]
        ) * np.cos(dphi)).clip(min=-1, max=1)
        alpha = np.arccos(cos_alpha)

        P = 1 + cos_alpha / 2
        B = B0 * HH / (HH + np.tan(alpha / 2))

        gamma = np.sqrt(1 - W)
        H0 = ((1 + 2 * neg_mup) / (1 + 2 * neg_mup * gamma))[None, :]
        H = ((1 + 2 * mu) / (1 + 2 * mu * gamma))[:, None]

        return W / 4 / (mu[:, None] + neg_mup[None, :]) * ((1 + B) * P + H0 * H - 1)

    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = 1  # One layer of thickness 1 (Medium-thick atmosphere)
    omega_arr = 0.5  # Low scattering
    NQuad = 12  # 12 streams (6 quadrature nodes for each hemisphere)
    Leg_coeffs_all = 0.8 ** np.arange(NQuad * 2)  # Henyey-Greenstein phase function with g = 0.8
    mu0 = 0.5  # Cosine of solar zenith angle (directly downwards)
    I0 = 200  # Intensity of direct beam
    phi0 = 0  # Azimuthal angle of direct beam

    # Optional (used)
    B0, HH, W = 1, 0.06, 0.6
    # Azimuthal Fourier expansion of the Hapke BDRF.  Note the `m=m` default
    # argument: it binds the loop variable at definition time (avoids the
    # late-binding-closure pitfall).  The (1 + (m == 0)) factor halves the
    # m = 0 mode, per the standard cosine-series normalization.
    BDRF_Fourier_modes = [
        lambda mu, neg_mup, m=m: (sc.integrate.quad_vec(
            lambda dphi: Hapke(mu, neg_mup, dphi, B0, HH, W) * np.cos(m * dphi),
            0,
            2 * pi,
        )[0] / ((1 + (m == 0)) * pi))
        for m in range(NQuad)
    ]
    TEMPER = np.array([300, 200])  # Temperatures for the thermal source
    WVNMLO = 0
    WVNMHI = 80000
    BTEMP = 320  # Bottom-boundary emission temperature
    TTEMP = 100  # Top-boundary emission temperature
    # Emissivity is (1 - omega_arr) by Kirchoff's law of thermal radiation
    s_poly_coeffs=generate_s_poly_coeffs(tau_arr, TEMPER, WVNMLO, WVNMHI, epsrel=1e-15) * (1 - omega_arr)
    # The emissivity of the surface should be consistent with the BDRF
    # in accordance with Kirchoff's law of thermal radiation
    emissivity = generate_emissivity_from_BDRF(NQuad // 2, BDRF_Fourier_modes[0])
    b_pos = emissivity * blackbody_contrib_to_BCs(BTEMP, WVNMLO, WVNMHI)
    b_neg = blackbody_contrib_to_BCs(TTEMP, WVNMLO, WVNMHI, epsrel=1e-15) + 100 # Emissivity 1

    f_arr = Leg_coeffs_all[NQuad]  # delta-M scaling fraction
    only_flux = True  # Fluxes only: pydisort returns 4 values (no intensity function u)

    # Optional (unused)
    NLeg = None
    NFourier = None
    NT_cor = False
    use_banded_solver_NLayers = 10
    autograd_compatible=False

    ####################################################################################################

    # Call pydisort function (only 4 return values because only_flux=True)
    mu_arr, flux_up, flux_down, u0 = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_pos=b_pos,
        b_neg=b_neg,
        s_poly_coeffs=s_poly_coeffs,
        BDRF_Fourier_modes=BDRF_Fourier_modes,
        f_arr=f_arr,
        only_flux=only_flux,
    )

    # Reorder mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # We may not want to compare intensities around the direct beam
    deg_around_beam_to_not_compare = 0
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )
    mu_test_arr_RO = mu_arr_RO[mu_to_compare]


    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/7e_test.npz")

    # Perform the comparisons (no intensity outputs since only_flux=True,
    # hence the commented-out diff / diff_ratio entries)
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        #diff,
        #diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down)

    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
# --------------------------------------------------------------------------------------------------
import numpy as np
import PythonicDISORT
from PythonicDISORT.subroutines import _compare
from math import pi

# ======================================================================================================
# Test Problem 8: Absorbing/Isotropic-Scattering Medium With Two Computational Layers
# ======================================================================================================

def test_8a():
    """Two-layer absorbing, isotropically scattering medium driven solely by
    isotropic incidence at the top boundary (no direct beam).

    Compared against saved output from version 4.0.99 of Stamnes' DISORT
    (``Stamnes_results/8a_test.npz``).
    """
    print()
    print("################################################ Test 8a ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = np.array([0.25, 0.5])
    omega_arr = np.array([0.5, 0.3])
    NQuad = 8
    # Isotropic phase function in both layers: only the zeroth Legendre
    # coefficient is nonzero (equals 1).
    Leg_coeffs_all = np.hstack((np.ones((2, 1)), np.zeros((2, 8))))
    mu0 = 0
    I0 = 0  # No direct beam
    phi0 = 0

    # Optional (used): isotropic downwelling intensity at the top boundary
    b_neg = 1 / pi

    # Optional (unused) -- listed only to document pydisort's keyword arguments
    NLeg = None
    NFourier = None
    b_pos = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10

    ####################################################################################################

    # Run PythonicDISORT
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_neg=b_neg,
    )

    # pydisort returns mu_arr in its internal ordering; sort ascending
    # for presentation / comparison purposes.
    mu_order = np.argsort(mu_arr)
    mu_sorted = mu_arr[mu_order]

    # Exclusion cone around the direct beam is 0 degrees here (there is no
    # direct beam), i.e. every quadrature angle is compared.
    cutoff_deg = 0
    compare_mask = (
        np.abs(np.arccos(np.abs(mu_sorted)) - np.arccos(mu0)) * 180 / pi
        > cutoff_deg
    )



    # Reference output from version 4.0.99 of Stamnes' DISORT
    results = np.load("Stamnes_results/8a_test.npz")

    # Perform the comparisons
    (
        dflux_up,
        rflux_up,
        dflux_down_diffuse,
        rflux_down_diffuse,
        dflux_down_direct,
        rflux_down_direct,
        du,
        ru,
    ) = _compare(results, compare_mask, mu_order, flux_up, flux_down, u)

    # Pass when each entry is close in either the absolute OR the relative sense
    assert np.max(rflux_up[dflux_up > 1e-3], initial=0) < 1e-3
    assert np.max(rflux_down_diffuse[dflux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(rflux_down_direct[dflux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(ru[du > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
def test_8c():
    """Same setup as 8a/8b but with optically thicker layers (tau = 1 and 3).

    Compared against saved output from version 4.0.99 of Stamnes' DISORT
    (``Stamnes_results/8c_test.npz``).
    """
    print()
    print("################################################ Test 8c ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    tau_arr = np.array([1, 3])
    omega_arr = np.array([0.8, 0.95])
    NQuad = 8
    # Isotropic phase function: only the zeroth Legendre coefficient is nonzero
    Leg_coeffs_all = np.zeros((2, 9))
    Leg_coeffs_all[:, 0] = 1
    mu0 = 0
    I0 = 0  # No direct beam
    phi0 = 0

    # Optional (used): isotropic downwelling intensity at the top boundary
    b_neg = 1 / pi

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10

    ####################################################################################################

    # Call pydisort function
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_neg=b_neg,
    )

    # mu_arr is arranged as it is for code efficiency and readability
    # For presentation purposes we re-arrange mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # By default we do not compare intensities 10 degrees around the direct beam
    deg_around_beam_to_not_compare = 0  # Changed to 0 since this test problem has no direct beam
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )



    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/8c_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u)

    # Pass when each entry is close in either the absolute OR the relative sense
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------


def test_8ARTS_A():
    """Purely absorbing/emitting (omega = 0) profiles from ARTS input data:
    run pydisort per profile and compare one intensity sample against saved
    ARTS results (``Stamnes_results/8ARTS_A_test.npy``)."""
    print()
    print("################################################ Test 8ARTS_A ##################################################")
    print()
    from ARTS_data.inpydis import src, tau

    nv = len(src)
    # pyth[i] holds the intensity field u evaluated at the layer boundaries
    # of profile i; shape (20, 8) per the data -- TODO confirm 20 matches len(tau[i])
    pyth = np.empty((nv, 20, 8))

    for i in range(nv):
        mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
            tau_arr=tau[i],
            omega_arr=tau[i] * 0,  # No scattering: pure absorption/emission
            NQuad=8,
            Leg_coeffs_all=np.ones((len(tau[i]), 1)),
            I0=0.0,  # No direct beam
            mu0=0.0,
            phi0=0.0,
            NLeg=1,
            NFourier=1,
            s_poly_coeffs=src[i] * 1e15,  # Rescaled thermal source coefficients
        )

        pyth[i] = u(tau[i], 0.0).T


    # Unused optional arguments
    NLeg = None
    NFourier = None
    b_pos = 0
    b_neg = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    use_banded_solver_NLayers = 10
    autograd_compatible = False

    # Compare the bottom-boundary, last-stream intensity against ARTS (1% tolerance)
    ARTS_results = np.load("Stamnes_results/8ARTS_A_test.npy")
    assert np.max(np.abs(pyth[:, -1, -1] - ARTS_results) / ARTS_results) < 1e-2
# --------------------------------------------------------------------------------------------------
| ) 292 | from scipy.constants import speed_of_light 293 | 294 | freqs = [31.5e9, 165e9, 666e9] 295 | WVNM = np.array(freqs) / (100.0 * speed_of_light) 296 | WVNMHI = np.ones(len(freqs)) * 50000 297 | WVNMLO = np.zeros(len(freqs)) 298 | 299 | for ifreq in range(len(freqs)): 300 | 301 | ######################################### PYDISORT ############################################## 302 | 303 | tau_arr = optical_thicknesses[ifreq] 304 | omega_arr = single_scattering_albedo[ifreq] 305 | NQuad = quadrature_dimension 306 | # Stamnes' DISORT needs an extra coefficient but by our settings it will not be used 307 | Leg_coeffs_all = np.hstack((legendre_coefficients[ifreq], np.zeros((len(tau_arr), 1)))) 308 | mu0 = 0 309 | I0 = 0 # No direct beam 310 | phi0 = 0 311 | 312 | # Optional (used) 313 | s_poly_coeffs = PythonicDISORT.subroutines.generate_s_poly_coeffs( 314 | tau_arr, TEMPER, WVNMLO[ifreq], WVNMHI[ifreq], np.array(omega_arr) 315 | ) 316 | b_pos = PythonicDISORT.subroutines.blackbody_contrib_to_BCs( 317 | np.mean(TEMPER), WVNMLO[ifreq], WVNMHI[ifreq] 318 | ) # Using an arbitrary temperature since surface temperature data is missing 319 | b_neg = PythonicDISORT.subroutines.blackbody_contrib_to_BCs( 320 | np.median(TEMPER), WVNMLO[ifreq], WVNMHI[ifreq] 321 | ) # Using an arbitrary temperature since upper boundary temperature data is missing 322 | 323 | # Call pydisort function 324 | mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort( 325 | tau_arr, omega_arr, 326 | NQuad, 327 | Leg_coeffs_all, 328 | mu0, I0, phi0, 329 | b_pos=b_pos, 330 | b_neg=b_neg, 331 | s_poly_coeffs=s_poly_coeffs 332 | ) 333 | 334 | ################################################################################################# 335 | ######################################### SETUP FOR TESTS ####################################### 336 | 337 | # Reorder mu_arr from smallest to largest 338 | reorder_mu = np.argsort(mu_arr) 339 | mu_arr_RO = mu_arr[reorder_mu] 340 | 341 | # We may not want 
import numpy as np
import PythonicDISORT
from PythonicDISORT.subroutines import _compare
from math import pi

# =======================================================================================================
# Test Problem 9: General Emitting/Absorbing/Scattering Medium with Every Computational Layer Different
# =======================================================================================================
def test_9a():
    """Six isotropically scattering layers, each with different optical depth
    (triangular-number cumulative taus: 1, 3, 6, 10, 15, 21) and different
    single-scattering albedo, driven by isotropic incidence at the top.

    Compared against saved output from version 4.0.99 of Stamnes' DISORT
    (``Stamnes_results/9a_test.npz``).
    """
    print()
    print("################################################ Test 9a ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    # Cumulative optical depths: tau_arr[i] = 1 + 2 + ... + (i + 1)
    tau_arr = np.empty(6)
    for i in range(6):
        tau_arr[i] = np.sum(np.arange(i + 2))
    omega_arr = 0.6 + np.arange(1, 7) * 0.05  # 0.65, 0.70, ..., 0.90
    NQuad = 8
    # Isotropic phase function in every layer
    Leg_coeffs_all = np.zeros((6, 9))
    Leg_coeffs_all[:, 0] = 1
    mu0 = 0
    I0 = 0  # No direct beam
    phi0 = 0

    # Optional (used): isotropic downwelling intensity at the top boundary
    b_neg = 1 / pi

    # Optional (unused)
    NLeg = None
    NFourier = None
    b_pos = 0
    only_flux = False
    f_arr = 0
    NT_cor = False
    BDRF_Fourier_modes = []
    s_poly_coeffs = np.array([[]])
    use_banded_solver_NLayers = 10

    ####################################################################################################

    # Call pydisort function
    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_neg=b_neg,
    )

    # mu_arr is arranged as it is for code efficiency and readability
    # For presentation purposes we re-arrange mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # By default we do not compare intensities 10 degrees around the direct beam
    deg_around_beam_to_not_compare = 0  # Changed to 0 since this test problem has no direct beam
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )


    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/9a_test.npz")

    # Perform the comparisons
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u)

    # Pass when each entry is close in either the absolute OR the relative sense
    assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3
    assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3
    assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2
# --------------------------------------------------------------------------------------------------
from smallest to largest 135 | reorder_mu = np.argsort(mu_arr) 136 | mu_arr_RO = mu_arr[reorder_mu] 137 | 138 | # By default we do not compare intensities 10 degrees around the direct beam 139 | deg_around_beam_to_not_compare = 0 # Changed to 0 since this test problem has no direct beam 140 | mu_to_compare = ( 141 | np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi 142 | > deg_around_beam_to_not_compare 143 | ) 144 | 145 | 146 | # Load results from version 4.0.99 of Stamnes' DISORT for comparison 147 | results = np.load("Stamnes_results/9b_test.npz") 148 | 149 | # Perform the comparisons 150 | ( 151 | diff_flux_up, 152 | ratio_flux_up, 153 | diff_flux_down_diffuse, 154 | ratio_flux_down_diffuse, 155 | diff_flux_down_direct, 156 | ratio_flux_down_direct, 157 | diff, 158 | diff_ratio, 159 | ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u) 160 | 161 | assert np.max(ratio_flux_up[diff_flux_up > 1e-3], initial=0) < 1e-3 162 | assert np.max(ratio_flux_down_diffuse[diff_flux_down_diffuse > 1e-3], initial=0) < 1e-3 163 | assert np.max(ratio_flux_down_direct[diff_flux_down_direct > 1e-3], initial=0) < 1e-3 164 | assert np.max(diff_ratio[diff > 1e-3], initial=0) < 1e-2 165 | # -------------------------------------------------------------------------------------------------- 166 | 167 | 168 | from PythonicDISORT.subroutines import blackbody_contrib_to_BCs 169 | from PythonicDISORT.subroutines import generate_s_poly_coeffs 170 | 171 | def test_9c(): 172 | print() 173 | print("################################################ Test 9c ##################################################") 174 | print() 175 | ######################################### PYDISORT ARGUMENTS ####################################### 176 | 177 | tau_arr = np.empty(6) 178 | for i in range(6): 179 | tau_arr[i] = np.sum(np.arange(i + 2)) 180 | omega_arr = 0.6 + np.arange(1, 7) * 0.05 181 | NQuad = 8 182 | Leg_coeffs_all = np.vstack([(l / 7) ** np.arange(NQuad + 1) for l in 
np.arange(1, 7)]) 183 | mu0 = 0.5 184 | I0 = pi 185 | phi0 = 0 186 | 187 | # Optional (used) 188 | omega_s = 0.5 189 | BDRF_Fourier_modes=[lambda mu, neg_mup: np.full((len(mu), len(neg_mup)), omega_s)] 190 | 191 | TEMPER = 600 + np.arange(7) * 10 192 | WVNMLO = 999 193 | WVNMHI = 1000 194 | BTEMP = 700 195 | TTEMP = 550 196 | # Emissivity is (1 - omega_arr) by Kirchoff's law of thermal radiation 197 | s_poly_coeffs=generate_s_poly_coeffs(tau_arr, TEMPER, WVNMLO, WVNMHI) * (1 - omega_arr)[:, None] 198 | b_pos = blackbody_contrib_to_BCs(BTEMP, WVNMLO, WVNMHI) * (1 - omega_s) 199 | b_neg = blackbody_contrib_to_BCs(TTEMP, WVNMLO, WVNMHI) + 1 # Emissivity 1 200 | 201 | # Optional (unused) 202 | NLeg = None 203 | NFourier = None 204 | only_flux = False 205 | f_arr = 0 206 | NT_cor = False 207 | use_banded_solver_NLayers=10 208 | autograd_compatible=False 209 | 210 | #################################################################################################### 211 | 212 | # Call pydisort function 213 | mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort( 214 | tau_arr, omega_arr, 215 | NQuad, 216 | Leg_coeffs_all, 217 | mu0, I0, phi0, 218 | b_pos=b_pos, 219 | b_neg=b_neg, 220 | s_poly_coeffs=s_poly_coeffs, 221 | BDRF_Fourier_modes=BDRF_Fourier_modes, 222 | ) 223 | 224 | # mu_arr is arranged as it is for code efficiency and readability 225 | # For presentation purposes we re-arrange mu_arr from smallest to largest 226 | reorder_mu = np.argsort(mu_arr) 227 | mu_arr_RO = mu_arr[reorder_mu] 228 | 229 | # By default we do not compare intensities 10 degrees around the direct beam 230 | deg_around_beam_to_not_compare = 0 # Changed to 0 since this test problem has no direct beam 231 | mu_to_compare = ( 232 | np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi 233 | > deg_around_beam_to_not_compare 234 | ) 235 | 236 | 237 | # Load results from version 4.0.99 of Stamnes' DISORT for comparison 238 | results = np.load("Stamnes_results/9c_test.npz") 239 | 
def test_9corrections():
    """Check that delta-M scaling and the Nakajima-Tanaka correction improve
    accuracy on average for a six-layer emitting/absorbing/scattering medium
    with a Lambertian-type BDRF.

    Runs pydisort twice -- once without and once with the corrections -- and
    asserts that the corrected run is, on average, closer to the saved output
    from version 4.0.99 of Stamnes' DISORT
    (``Stamnes_results/9corrections_test.npz``).

    Fix: the banner previously printed "Test 9c" (copy-paste from test_9c);
    it now correctly identifies this test.
    """
    print()
    print("################################################ Test 9corrections ##################################################")
    print()
    ######################################### PYDISORT ARGUMENTS #######################################

    # Cumulative optical depths: tau_arr[i] = 1 + 2 + ... + (i + 1)
    tau_arr = np.empty(6)
    for i in range(6):
        tau_arr[i] = np.sum(np.arange(i + 2))
    omega_arr = 0.9 + np.arange(1, 7) * 0.01  # Strongly scattering layers
    NQuad = 4
    # Each layer gets a different, strongly forward-peaked phase function
    Leg_coeffs_all = np.vstack([((l / 3 + 4) / 7) ** np.arange(NQuad * 5) for l in np.arange(1, 7)])
    mu0 = 0.5
    I0 = pi
    phi0 = 0

    # Optional (used)
    omega_s = 0.5  # Lambertian surface albedo (constant zeroth BDRF Fourier mode)
    BDRF_Fourier_modes=[lambda mu, neg_mup: np.full((len(mu), len(neg_mup)), omega_s)]

    TEMPER = 600 + np.arange(7) * 10  # Temperatures for the thermal source
    WVNMLO = 999
    WVNMHI = 1000
    BTEMP = 700  # Bottom-boundary emission temperature
    TTEMP = 550  # Top-boundary emission temperature
    # Emissivity is (1 - omega_arr) by Kirchoff's law of thermal radiation
    s_poly_coeffs=generate_s_poly_coeffs(tau_arr, TEMPER, WVNMLO, WVNMHI) * (1 - omega_arr)[:, None]
    b_pos = blackbody_contrib_to_BCs(BTEMP, WVNMLO, WVNMHI) * (1 - omega_s)
    b_neg = blackbody_contrib_to_BCs(TTEMP, WVNMLO, WVNMHI) + 1 # Emissivity 1

    # Optional (unused)
    NLeg = None
    NFourier = None
    only_flux = False
    #f_arr = 0
    #NT_cor = False
    use_banded_solver_NLayers=10
    autograd_compatible=False

    ####################################################################################################

    # Call pydisort function

    mu_arr, flux_up, flux_down, u0, u = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_pos=b_pos,
        b_neg=b_neg,
        s_poly_coeffs=s_poly_coeffs,
        BDRF_Fourier_modes=BDRF_Fourier_modes,
        # No corrections
    )

    mu_arr, flux_up_dM, flux_down_dM, u0, u_NT = PythonicDISORT.pydisort(
        tau_arr, omega_arr,
        NQuad,
        Leg_coeffs_all,
        mu0, I0, phi0,
        b_pos=b_pos,
        b_neg=b_neg,
        s_poly_coeffs=s_poly_coeffs,
        BDRF_Fourier_modes=BDRF_Fourier_modes,
        # Corrections: delta-M scaling and Nakajima-Tanaka
        f_arr=Leg_coeffs_all[:, NQuad],
        NT_cor=True,
    )

    # mu_arr is arranged as it is for code efficiency and readability
    # For presentation purposes we re-arrange mu_arr from smallest to largest
    reorder_mu = np.argsort(mu_arr)
    mu_arr_RO = mu_arr[reorder_mu]

    # By default we do not compare intensities 10 degrees around the direct beam
    deg_around_beam_to_not_compare = 0  # Changed to 0 since this test problem has no direct beam
    mu_to_compare = (
        np.abs(np.arccos(np.abs(mu_arr_RO)) - np.arccos(mu0)) * 180 / pi
        > deg_around_beam_to_not_compare
    )


    # Load results from version 4.0.99 of Stamnes' DISORT for comparison
    results = np.load("Stamnes_results/9corrections_test.npz")

    # Perform the comparisons: uncorrected run ...
    (
        diff_flux_up,
        ratio_flux_up,
        diff_flux_down_diffuse,
        ratio_flux_down_diffuse,
        diff_flux_down_direct,
        ratio_flux_down_direct,
        diff,
        diff_ratio,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up, flux_down, u)

    # ... and corrected run
    (
        diff_flux_up_dM,
        ratio_flux_up_dM,
        diff_flux_down_diffuse_dM,
        ratio_flux_down_diffuse_dM,
        diff_flux_down_direct_dM,
        ratio_flux_down_direct_dM,
        diff_NT,
        diff_ratio_NT,
    ) = _compare(results, mu_to_compare, reorder_mu, flux_up_dM, flux_down_dM, u_NT)

    # Check whether the corrections improve accuracy on average
    assert np.mean(diff_flux_up - diff_flux_up_dM) > 0
    assert np.mean(diff_flux_down_diffuse - diff_flux_down_diffuse_dM) > 0
    assert np.mean(diff - diff_NT) > 0
# --------------------------------------------------------------------------------------------------
https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/1d_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/1e_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/1e_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/1f_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/1f_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/2a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/2a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/2b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/2b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/2c_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/2c_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/2d_test.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/2d_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/3a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/3a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/3b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/3b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/4a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/4a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/4b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/4b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/4c_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/4c_test.npz 
-------------------------------------------------------------------------------- /pydisotest/Stamnes_results/5a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/5a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/5b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/5b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6c_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6c_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6d_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6d_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6e_test.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6e_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6f_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6f_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6g_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6g_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/6h_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/6h_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/7a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/7a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/7b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/7b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/7c_test.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/7c_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/7d_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/7d_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/7e_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/7e_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8ARTS_A_test.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8ARTS_A_test.npy -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8ARTS_B0_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8ARTS_B0_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8ARTS_B1_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8ARTS_B1_test.npz 
-------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8ARTS_B2_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8ARTS_B2_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8b_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/8c_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/8c_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/9a_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/9a_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/9b_test.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/9b_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/9c_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/9c_test.npz -------------------------------------------------------------------------------- /pydisotest/Stamnes_results/9corrections_test.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LDEO-CREW/Pythonic-DISORT/f6b251373489192c0bf08b99d1a6b54ca93ba524/pydisotest/Stamnes_results/9corrections_test.npz -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | dependencies = [ 7 | "numpy>=1.8.0", 8 | "scipy>=1.8.0", 9 | ] 10 | name = "PythonicDISORT" 11 | version = "1.1" 12 | authors = [ 13 | { name="Dion HO Jia Xu", email="dh3065@columbia.edu" }, 14 | ] 15 | description = "Discrete Ordinates Solver for the (1D) Radiative Transfer Equation in a single or multi-layer atmosphere." 
16 | readme = "README.md" 17 | requires-python = ">=3.8" 18 | classifiers = [ 19 | "Programming Language :: Python :: 3", 20 | "License :: OSI Approved :: MIT License", 21 | "Operating System :: OS Independent", 22 | ] 23 | 24 | [project.optional-dependencies] 25 | pytest = ["pytest >= 6.2.5"] 26 | notebook_dependencies = [ 27 | "autograd>=1.5", 28 | "jupyter>1.0.0", 29 | "notebook>6.5.2", 30 | ] 31 | 32 | [project.urls] 33 | "Homepage" = "https://github.com/LDEO-CREW/Pythonic-DISORT" -------------------------------------------------------------------------------- /src/PythonicDISORT/__init__.py: -------------------------------------------------------------------------------- 1 | from PythonicDISORT import subroutines 2 | from PythonicDISORT.pydisort import pydisort -------------------------------------------------------------------------------- /src/PythonicDISORT/_solve_for_coeffs.py: -------------------------------------------------------------------------------- 1 | from PythonicDISORT.subroutines import _mathscr_v 2 | from PythonicDISORT.subroutines import to_diag_ordered_form 3 | 4 | import numpy as np 5 | import scipy as sc 6 | from math import pi 7 | 8 | 9 | def _solve_for_coeffs( 10 | NFourier, # Number of intensity Fourier modes 11 | G_collect, # Eigenvector matrices 12 | K_collect, # Eigenvalues 13 | B_collect, # Coefficients vectors for particular solutions 14 | G_inv_collect_0, # Inverse of eigenvector matrix for the 0th Fourier mode 15 | scaled_tau_arr_with_0, # Delta-scaled lower boundary of layers with 0 inserted in front 16 | mu_arr, mu_arr_pos, mu_arr_pos_times_W, # Quadrature nodes for 1) both 2) upper hemispheres; 3) upper hemisphere quadrature nodes times weights 17 | N, NQuad, # Number of 1) upper 2) both hemispheres quadrature nodes 18 | NLayers, NBDRF, # Number of 1) layers; 2) BDRF Fourier modes 19 | is_atmos_multilayered, # Is the atmosphere multilayered? 
    BDRF_Fourier_modes,               # BDRF Fourier modes
    mu0, I0,                          # Properties of the direct beam
    there_is_beam_source,             # Is there a beam source?
    b_pos, b_neg,                     # Dirichlet BCs
    b_pos_is_scalar, b_neg_is_scalar, # Is each Dirichlet BC a scalar and hence isotropic?
    b_pos_is_vector, b_neg_is_vector, # Is each Dirichlet BC a vector?
    Nscoeffs,                         # Number of isotropic source polynomial coefficients
    scaled_s_poly_coeffs,             # Polynomial coefficients of isotropic source
    there_is_iso_source,              # Is there an isotropic source?
    use_banded_solver_NLayers,        # Number of layers above or equal which to use `scipy.linalg.solve_banded`
):
    """
    Uses the boundary conditions to solve for the unknown coefficients
    of the general solution to the system of ordinary differential equations for each Fourier mode.
    Returns the product of the coefficients and the eigenvectors.
    This function is wrapped by the `_assemble_intensity_and_fluxes` function.
    It has many seemingly redundant arguments to maximize precomputation in the `pydisort` function.
    See the Jupyter Notebook, especially section 3, for documentation, explanation and derivation.
    The labels in this file reference labels in the Jupyter Notebook, especially sections 3 and 4.

    Arguments of _solve_for_coeffs
    | Variable                       | Type / Shape                                 |
    | ------------------------------ | -------------------------------------------- |
    | `NFourier`                     | scalar                                       |
    | `G_collect`                    | `NFourier x NLayers x NQuad x NQuad`         |
    | `K_collect`                    | `NFourier x NLayers x NQuad`                 |
    | `B_collect`                    | `NFourier x NLayers x NQuad` or `None`       |
    | `G_inv_collect_0`              | `NLayers x NQuad x NQuad` or `None`          |
    | `scaled_tau_arr_with_0`        | `NLayers + 1`                                |
    | `mu_arr`                       | `NQuad`                                      |
    | `mu_arr_pos`                   | `NQuad/2`                                    |
    | `mu_arr_pos_times_W`           | `NQuad/2`                                    |
    | `N`                            | scalar                                       |
    | `NQuad`                        | scalar                                       |
    | `NLayers`                      | scalar                                       |
    | `NBDRF`                        | scalar                                       |
    | `is_atmos_multilayered`        | boolean                                      |
    | `BDRF_Fourier_modes`           | `NBDRF`                                      |
    | `mu0`                          | scalar                                       |
    | `I0`                           | scalar                                       |
    | `there_is_beam_source`         | boolean                                      |
    | `b_pos`                        | `NQuad/2 x NFourier` or `NQuad/2` or scalar  |
    | `b_neg`                        | `NQuad/2 x NFourier` or `NQuad/2` or scalar  |
    | `b_pos_is_scalar`              | boolean                                      |
    | `b_neg_is_scalar`              | boolean                                      |
    | `b_pos_is_vector`              | boolean                                      |
    | `b_neg_is_vector`              | boolean                                      |
    | `Nscoeffs`                     | scalar                                       |
    | `scaled_s_poly_coeffs`         | `NLayers x Nscoeffs`                         |
    | `there_is_iso_source`          | boolean                                      |
    | `use_banded_solver_NLayers`    | scalar                                       |

    Notable internal variables of _solve_for_coeffs
    | Variable     | Shape                                |
    | ------------ | ------------------------------------ |
    | `GC_collect` | `NFourier x NLayers x NQuad x NQuad` |

    """
    ################################## Solve for coefficients of homogeneous solution ##########################################
    ############# Refer to section 3.6.2 (single-layer) and 4 (multi-layer) of the Comprehensive Documentation #################

    dim = NLayers * NQuad  # Total number of unknown coefficients: one NQuad-sized block per layer
    GC_collect = np.empty((NFourier, NLayers, NQuad, NQuad))
    use_banded_solver = (NLayers >= use_banded_solver_NLayers)
    if use_banded_solver:
        Nsupsubdiags = 3 * N - 1  # The number of super-diagonals equals the number of sub-diagonals

    # The following loops can easily be parallelized, but the speed-up is unlikely to be worth the overhead
    for m in range(NFourier):
        m_equals_0 = (m == 0)
        there_is_BDRF_mode = (NBDRF > m)

        G_collect_m = G_collect[m, :, :, :]
        K_collect_m = K_collect[m, :, :]
        if there_is_beam_source:
            B_collect_m = B_collect[m, :, :]

        # Generate mathscr_D and mathscr_X (BDRF terms)
        # Just for this part, refer to section 3.4.2 of the Comprehensive Documentation
        # --------------------------------------------------------------------------------------------------------------------------
        if there_is_BDRF_mode:
            BDRF_Fourier_modes_m = BDRF_Fourier_modes[m]
            if np.isscalar(BDRF_Fourier_modes_m):
                # Scalar (isotropic) BDRF mode; the factor is 2 for m == 0 and 1 otherwise
                mathscr_D_neg = (1 + m_equals_0 * 1) * BDRF_Fourier_modes_m
                R = mathscr_D_neg * mu_arr_pos_times_W[None, :]
                if there_is_beam_source:
                    mathscr_X_pos = (mu0 * I0 / pi) * BDRF_Fourier_modes_m
            else:
                # Callable BDRF mode: evaluate on the upper-hemisphere quadrature nodes
                mathscr_D_neg = (1 + m_equals_0 * 1) * BDRF_Fourier_modes_m(mu_arr_pos, mu_arr_pos)
                R = mathscr_D_neg * mu_arr_pos_times_W[None, :]
                if there_is_beam_source:
                    mathscr_X_pos = (mu0 * I0 / pi) * BDRF_Fourier_modes_m(
                        mu_arr_pos, np.array([mu0])
                    )[:, 0]
        # --------------------------------------------------------------------------------------------------------------------------

        # Assemble RHS
        # --------------------------------------------------------------------------------------------------------------------------
        # Ensure the BCs are of the correct shape.
        # Scalar / vector BCs are isotropic in azimuth, so they contribute only to the m = 0 mode.
        if b_pos_is_scalar and m_equals_0:
            b_pos_m = np.full(N, b_pos)
        elif b_pos_is_vector and m_equals_0:
            b_pos_m = b_pos
        elif (b_pos_is_scalar or b_pos_is_vector) and not m_equals_0:
            b_pos_m = np.zeros(N)
        else:
            b_pos_m = b_pos[:, m]

        if b_neg_is_scalar and m_equals_0:
            b_neg_m = np.full(N, b_neg)
        elif b_neg_is_vector and m_equals_0:
            b_neg_m = b_neg
        elif (b_neg_is_scalar or b_neg_is_vector) and not m_equals_0:
            b_neg_m = np.zeros(N)
        else:
            b_neg_m = b_neg[:, m]

        # _mathscr_v_contribution (isotropic source terms; only the m = 0 mode is affected)
        if m_equals_0 and there_is_iso_source:
            # Top-of-atmosphere contribution
            _mathscr_v_contribution_top = -_mathscr_v(
                np.array([0]),
                np.array([0]),
                Nscoeffs,
                scaled_s_poly_coeffs[[0], :],
                G_collect_m[[0], N:, :],
                K_collect_m[[0], :],
                G_inv_collect_0[[0], :, :],
                mu_arr,
            ).ravel()

            # Interlayer contributions: difference between adjacent layers at each interface
            _mathscr_v_contribution_middle = np.array([])
            if is_atmos_multilayered:
                indices = np.arange(NLayers - 1)
                _mathscr_v_contribution_middle = (
                    _mathscr_v(
                        scaled_tau_arr_with_0[1:-1],
                        indices,
                        Nscoeffs,
                        scaled_s_poly_coeffs[indices + 1],
                        G_collect_m[indices + 1],
                        K_collect_m[indices + 1],
                        G_inv_collect_0[indices + 1],
                        mu_arr,
                    )
                    - _mathscr_v(
                        scaled_tau_arr_with_0[1:-1],
                        indices,
                        Nscoeffs,
                        scaled_s_poly_coeffs[indices],
                        G_collect_m[indices],
                        K_collect_m[indices],
                        G_inv_collect_0[indices],
                        mu_arr,
                    )
                ).ravel(order='F')

            # Bottom-of-atmosphere contribution
            _mathscr_v_contribution_bottom = -_mathscr_v(
                scaled_tau_arr_with_0[[-1]],
                np.array([0]),
                Nscoeffs,
                scaled_s_poly_coeffs[[-1], :],
                G_collect_m[[-1], :N, :],
                K_collect_m[[-1], :],
                G_inv_collect_0[[-1], :, :],
                mu_arr
            ).ravel()
            if NBDRF > 0:
                # Add the surface reflection (matrix R) of the downwelling isotropic-source field
                _mathscr_v_contribution_bottom = (
                    _mathscr_v_contribution_bottom
                    + R
                    @ _mathscr_v(
                        scaled_tau_arr_with_0[[-1]],
                        np.array([0]),
                        Nscoeffs,
                        scaled_s_poly_coeffs[[-1], :],
                        G_collect_m[[-1], N:, :],
                        K_collect_m[[-1], :],
                        G_inv_collect_0[[-1], :, :],
                        mu_arr
                    ).ravel()
                )

            _mathscr_v_contribution = np.concatenate(
                [
                    _mathscr_v_contribution_top,
                    _mathscr_v_contribution_middle,
                    _mathscr_v_contribution_bottom,
                ]
            )
        else:
            _mathscr_v_contribution = 0

        if there_is_beam_source:
            # Interface jumps of the beam particular solution
            RHS_middle = np.array([])
            if is_atmos_multilayered:
                l_range = np.arange(1, NLayers)
                RHS_middle = (
                    (B_collect_m[l_range, :] - B_collect_m[l_range - 1, :])
                    * np.exp(-scaled_tau_arr_with_0 / mu0)[l_range, None]
                ).ravel()

            if there_is_BDRF_mode:
                RHS = (
                    np.concatenate(
                        [
                            b_neg_m - B_collect_m[0, N:],
                            RHS_middle,
                            b_pos_m
                            + (mathscr_X_pos + R @ B_collect_m[-1, N:] - B_collect_m[-1, :N])
                            * np.exp(-scaled_tau_arr_with_0[-1] / mu0),
                        ]
                    )
                    + _mathscr_v_contribution
                )
            else:
                RHS = (
                    np.concatenate(
                        [
                            b_neg_m - B_collect_m[0, N:],
                            RHS_middle,
                            b_pos_m
                            - B_collect_m[-1, :N] * np.exp(-scaled_tau_arr_with_0[-1] / mu0),
                        ]
                    )
                    + _mathscr_v_contribution
                )
        else:
            RHS_middle = np.zeros(NQuad * (NLayers - 1))
            RHS = np.concatenate([b_neg_m, RHS_middle, b_pos_m]) + _mathscr_v_contribution
        # --------------------------------------------------------------------------------------------------------------------------

        # Assemble LHS (much of this code is replicated in section 4 of the Comprehensive Documentation)
        G_0_nn = G_collect_m[0, N:, :N]
        G_0_np = G_collect_m[0, N:, N:]
        G_L_pn = G_collect_m[-1, :N, :N]
        G_L_nn = G_collect_m[-1, N:, :N]
        G_L_pp = G_collect_m[-1, :N, N:]
        G_L_np = G_collect_m[-1, N:, N:]
        E_Lm1L = np.exp(
            K_collect_m[-1, :N] * (scaled_tau_arr_with_0[-1] - scaled_tau_arr_with_0[-2])
        )

        LHS = np.zeros((dim, dim))
        # BCs for the entire atmosphere
        LHS[:N, :N] = G_0_nn
        LHS[:N, N : NQuad] = (
            G_0_np
            * np.exp(K_collect_m[0, :N] * scaled_tau_arr_with_0[1])[None, :]
        )
        if there_is_BDRF_mode:
            # Bottom boundary with surface reflection
            LHS[-N:, -NQuad : -N] = (G_L_pn - R @ G_L_nn) * E_Lm1L[None, :]
            LHS[-N:, -N:] = G_L_pp - R @ G_L_np
        else:
            LHS[-N:, -NQuad : -N] = G_L_pn * E_Lm1L[None, :]
            LHS[-N:, -N:] = G_L_pp

        # Interlayer / continuity BCs
        for l in range(NLayers - 1):
            G_l_pn = G_collect_m[l, :N, :N]
            G_l_nn = G_collect_m[l, N:, :N]
            G_l_ap = G_collect_m[l, :, N:]
            G_lp1_an = G_collect_m[l + 1, :, :N]
            G_lp1_pp = G_collect_m[l + 1, :N, N:]
            G_lp1_np = G_collect_m[l + 1, N:, N:]
            scaled_tau_arr_lm1 = scaled_tau_arr_with_0[l]
            scaled_tau_arr_l = scaled_tau_arr_with_0[l + 1]
            scaled_tau_arr_lp1 = scaled_tau_arr_with_0[l + 2]
            # Positive eigenvalues
            K_l_pos = K_collect_m[l, N:]
            K_lp1_pos = K_collect_m[l + 1, N:]
            E_lm1l = np.exp(K_l_pos * (scaled_tau_arr_lm1 - scaled_tau_arr_l))
            E_llp1 = np.exp(K_lp1_pos * (scaled_tau_arr_l - scaled_tau_arr_lp1))

            # Fill the NQuad x (2 * NQuad) continuity band for interface l
            start_row = N + l * NQuad
            start_col = l * NQuad
            LHS[start_row : N + start_row, start_col : N + start_col] = G_l_pn * E_lm1l[None, :]
            LHS[N + start_row : 2 * N + start_row, start_col : N + start_col] = G_l_nn * E_lm1l[None, :]
            LHS[start_row : 2 * N + start_row, N + start_col : 2 * N + start_col] = G_l_ap
            LHS[start_row : 2 * N + start_row, 2 * N + start_col : 3 * N + start_col] = -G_lp1_an
            LHS[start_row : N + start_row, 3 * N + start_col : 4 * N + start_col] = -G_lp1_pp * E_llp1[None, :]
            LHS[N + start_row : 2 * N + start_row, 3 * N + start_col : 4 * N + start_col] = -G_lp1_np * E_llp1[None, :]

        # --------------------------------------------------------------------------------------------------------------------------

        # Solve the system
        if use_banded_solver:
            # Positional flags after RHS permit in-place overwrites of the banded matrix
            # and RHS -- see scipy.linalg.solve_banded (TODO confirm against pinned SciPy version)
            C_m = sc.linalg.solve_banded(
                (Nsupsubdiags, Nsupsubdiags),
                to_diag_ordered_form(LHS, Nsupsubdiags, Nsupsubdiags),
                RHS,
                True,
False, 313 | ) 314 | else: 315 | C_m = np.linalg.solve(LHS, RHS) 316 | 317 | GC_collect[m, :, :, :] = G_collect_m * C_m.reshape(NLayers, NQuad)[:, None, :] 318 | 319 | return GC_collect -------------------------------------------------------------------------------- /src/PythonicDISORT/_solve_for_gen_and_part_sols.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sc 3 | from math import pi 4 | 5 | 6 | def _solve_for_gen_and_part_sols( 7 | NFourier, # Number of intensity Fourier modes 8 | scaled_omega_arr, # Delta-scaled single-scattering albedos 9 | mu_arr_pos, mu_arr, # Quadrature nodes for 1) upper 2) both hemispheres 10 | M_inv, W, # 1) 1 / mu; 2) quadrature weights for each hemisphere 11 | N, NQuad, NLeg, # Number of 1) upper 2) both hemispheres quadrature nodes; 3) phase function Legendre coefficients 12 | NLayers, # Number of layers 13 | weighted_scaled_Leg_coeffs, # Weighted and delta-scaled Legendre coefficients 14 | mu0, I0, # Properties of the direct beam 15 | there_is_beam_source, # Is there a beam source? 16 | Nscoeffs, # Number of isotropic source polynomial coefficients 17 | there_is_iso_source, # Is there an isotropic source? 18 | ): 19 | """ 20 | Diagonalizes the coefficient matrix of the system of ordinary differential equations (ODEs) 21 | for each Fourier mode and returns the eigenpairs which give the general solution up to unknown coefficients. 22 | Also solves for the particular solution to each system of ODEs and returns its coefficient vector. 23 | This function is wrapped by the `_assemble_intensity_and_fluxes` function. 24 | It has many seemingly redundant arguments to maximize precomputation in the `pydisort` function. 25 | See the Jupyter Notebook, especially section 3, for documentation, explanation and derivation. 26 | The labels in this file reference labels in the Jupyter Notebook, especially sections 3 and 4. 
27 | 28 | Arguments of _solve_for_gen_and_part_sols 29 | | Variable | Type / Shape | 30 | | ------------------------------ | ---------------------------------- | 31 | | `NFourier` | scalar | 32 | | `scaled_omega_arr` | `NLayers` | 33 | | `mu_arr_pos` | `NQuad/2` | 34 | | `mu_arr` | `NQuad` | 35 | | `M_inv` | `NQuad/2` | 36 | | `W` | `NQuad/2` | 37 | | `N` | scalar | 38 | | `NQuad` | scalar | 39 | | `NLeg` | scalar | 40 | | `NLayers` | scalar | 41 | | `weighted_scaled_Leg_coeffs` | `NLayers x NLeg` | 42 | | `mu0` | scalar | 43 | | `I0` | scalar | 44 | | `there_is_beam_source` | boolean | 45 | | `Nscoeffs` | scalar | 46 | | `there_is_iso_source` | boolean | 47 | 48 | Notable internal variables of _solve_for_gen_and_part_sols 49 | | Variable | Type / Shape | 50 | | ------------------- | -------------------------------------- | 51 | | `ells_all` | `NLeg` | 52 | | `G_collect` | `NFourier*NLayers x NQuad x NQuad` | Reshaped to NFourier x NLayers x NQuad x NQuad 53 | | `K_collect` | `NFourier*NLayers x NQuad` | Reshaped to NFourier x NLayers x NQuad 54 | | `alpha_arr` | `NFourier*NLayers x NQuad/2 x NQuad/2` | Reshaped to NFourier x NLayers x NQuad/2 x NQuad/2 55 | | `beta_arr` | `NFourier*NLayers x NQuad/2 x NQuad/2` | Reshaped to NFourier x NLayers x NQuad/2 x NQuad/2 56 | | `B_collect` | `NFourier*NLayers x NQuad` or `None` | Reshaped to NFourier x NLayers x NQuad 57 | | `eigenvecs_GpG_arr` | `NFourier*NLayers x NQuad/2 x NQuad | 58 | | `eigenvecs_GmG_arr` | `NFourier*NLayers x NQuad/2 x NQuad | 59 | | `G_inv_collect_0` | `NLayers x NQuad x NQuad` or `None` | 60 | 61 | """ 62 | ############################### Assemble system and diagonalize coefficient matrix ######################################### 63 | ########################### Refer to section 3.4.2 of the Comprehensive Documentation ##################################### 64 | 65 | # Initialization 66 | # 
-------------------------------------------------------------------------------------------------------------------------- 67 | ells_all = np.arange(NLeg) 68 | ind = 0 69 | 70 | G_collect = np.empty((NFourier * NLayers, NQuad, NQuad)) 71 | K_collect = np.empty((NFourier * NLayers, NQuad)) 72 | alpha_arr = np.empty((NFourier * NLayers, N, N)) 73 | beta_arr = np.empty((NFourier * NLayers, N, N)) 74 | no_shortcut_indices = [] 75 | no_shortcut_indices_0 = [] 76 | 77 | if there_is_beam_source: 78 | B_collect = np.zeros((NFourier * NLayers, NQuad)) 79 | if there_is_iso_source: 80 | G_inv_collect_0 = np.empty((NLayers, NQuad, NQuad)) 81 | # -------------------------------------------------------------------------------------------------------------------------- 82 | 83 | # Loop over NFourier Fourier modes 84 | # These can easily be parallelized, but the speed-up is unlikely to be worth the overhead 85 | # -------------------------------------------------------------------------------------------------------------------------- 86 | for m in range(NFourier): 87 | # Setup 88 | # -------------------------------------------------------------------------------------------------------------------------- 89 | m_equals_0 = (m == 0) 90 | ells = ells_all[m:] 91 | degree_tile = np.tile(ells, (N, 1)).T 92 | fac = sc.special.poch(ells + m + 1, -2 * m) 93 | signs = np.ones(NLeg - m) 94 | signs[1::2] = -1 95 | 96 | asso_leg_term_pos = sc.special.lpmv(m, degree_tile, mu_arr_pos) 97 | asso_leg_term_neg = asso_leg_term_pos * signs[:, None] 98 | asso_leg_term_mu0 = sc.special.lpmv(m, ells, -mu0) 99 | # -------------------------------------------------------------------------------------------------------------------------- 100 | 101 | # Loop over NLayers atmospheric layers 102 | # -------------------------------------------------------------------------------------------------------------------------- 103 | for l in range(NLayers): 104 | weighted_scaled_Leg_coeffs_l = 
weighted_scaled_Leg_coeffs[l, :][ells] 105 | scaled_omega_l = scaled_omega_arr[l] 106 | omega_times_Leg_coeffs = (scaled_omega_l / 2) * weighted_scaled_Leg_coeffs_l 107 | 108 | if np.any(np.abs(omega_times_Leg_coeffs) > 1e-8): # There are shortcuts if multiple scattering is insignificant 109 | 110 | # Generate D 111 | # -------------------------------------------------------------------------------------------------------------------------- 112 | D_temp = omega_times_Leg_coeffs[None, :] * (fac[None, :] * asso_leg_term_pos.T) 113 | D_pos = D_temp @ asso_leg_term_pos 114 | D_neg = D_temp @ asso_leg_term_neg 115 | 116 | # -------------------------------------------------------------------------------------------------------------------------- 117 | 118 | 119 | # Assemble the coefficient matrix and additional terms 120 | # -------------------------------------------------------------------------------------------------------------------------- 121 | DW = D_pos * W[None, :] 122 | np.fill_diagonal(DW, np.diag(DW) - 1) 123 | alpha = M_inv[:, None] * DW 124 | beta = M_inv[:, None] * D_neg * W[None, :] 125 | 126 | # -------------------------------------------------------------------------------------------------------------------------- 127 | 128 | # Particular solution for the direct beam source (refer to section 3.6.1 of the Comprehensive Documentation) 129 | # -------------------------------------------------------------------------------------------------------------------------- 130 | if there_is_beam_source: 131 | # Generate X 132 | X_temp = ( 133 | (scaled_omega_l * I0 * (2 - (m == 0)) / (4 * pi)) 134 | * weighted_scaled_Leg_coeffs_l 135 | * (fac * asso_leg_term_mu0) 136 | ) 137 | X_pos = X_temp @ asso_leg_term_pos 138 | X_neg = X_temp @ asso_leg_term_neg 139 | X_tilde = np.concatenate([-M_inv * X_pos, M_inv * X_neg]) 140 | 141 | A = np.concatenate( 142 | [ 143 | np.concatenate([-alpha, -beta], axis=1), 144 | np.concatenate([beta, alpha], axis=1), 145 | ], 146 | 
axis=0, 147 | ) 148 | np.fill_diagonal(A, np.diag(A) + 1 / mu0) 149 | B_collect[ind, :] = -np.linalg.solve(A, X_tilde) # We moved the minus sign out 150 | 151 | # -------------------------------------------------------------------------------------------------------------------------- 152 | 153 | alpha_arr[ind, :, :] = alpha 154 | beta_arr[ind, :, :] = beta 155 | no_shortcut_indices.append(ind) 156 | if m_equals_0 and there_is_iso_source: # Keep the list empty if there is no isotropic source 157 | no_shortcut_indices_0.append(l) 158 | 159 | else: 160 | # This is a shortcut to the diagonalization results 161 | G = np.zeros((NQuad, NQuad)) 162 | np.fill_diagonal(G[N:, :N], 1) 163 | np.fill_diagonal(G[:N, N:], 1) 164 | 165 | G_collect[ind, :, :] = G 166 | K_collect[ind, :] = -1 / mu_arr 167 | if m_equals_0 and there_is_iso_source: 168 | G_inv_collect_0[l, :, :] = G 169 | 170 | ind += 1 171 | 172 | if len(no_shortcut_indices) > 0: 173 | 174 | # Diagonalization of coefficient matrix (refer to section 3.4.2 of the Comprehensive Documentation) 175 | # -------------------------------------------------------------------------------------------------------------------------- 176 | alpha_arr = alpha_arr[no_shortcut_indices, :, :] 177 | beta_arr = beta_arr[no_shortcut_indices, :, :] 178 | 179 | K_squared_arr, eigenvecs_GpG_arr = np.linalg.eig( 180 | np.einsum( 181 | "lij, ljk -> lik", alpha_arr - beta_arr, alpha_arr + beta_arr, optimize=True 182 | ), 183 | ) 184 | 185 | # Eigenvalues arranged negative then positive, from largest to smallest magnitude 186 | K_arr = np.concatenate((-np.sqrt(K_squared_arr), np.sqrt(K_squared_arr)), axis=1) 187 | eigenvecs_GpG_arr = np.concatenate((eigenvecs_GpG_arr, eigenvecs_GpG_arr), axis=2) 188 | eigenvecs_GmG_arr = ( 189 | np.einsum( 190 | "lij, ljk -> lik", alpha_arr + beta_arr, eigenvecs_GpG_arr, optimize=True 191 | ) 192 | / K_arr[:, None, :] 193 | ) 194 | 195 | # Eigenvector matrices 196 | G_arr = np.concatenate( 197 | ( 198 | 
(eigenvecs_GpG_arr - eigenvecs_GmG_arr) / 2, 199 | (eigenvecs_GpG_arr + eigenvecs_GmG_arr) / 2, 200 | ), 201 | axis=1, 202 | ) 203 | 204 | G_collect[no_shortcut_indices, :, :] = G_arr 205 | K_collect[no_shortcut_indices, :] = K_arr 206 | if len(no_shortcut_indices_0) > 0: # If there is no isotropic source this list will be empty 207 | G_inv_collect_0[no_shortcut_indices_0, :, :] = np.linalg.inv( 208 | G_collect[no_shortcut_indices_0, :, :] 209 | ) 210 | 211 | # -------------------------------------------------------------------------------------------------------------------------- 212 | 213 | 214 | if there_is_beam_source and there_is_iso_source: 215 | return ( 216 | G_collect.reshape((NFourier, NLayers, NQuad, NQuad)), 217 | K_collect.reshape((NFourier, NLayers, NQuad)), 218 | B_collect.reshape((NFourier, NLayers, NQuad)), 219 | G_inv_collect_0, 220 | ) 221 | elif there_is_beam_source and not there_is_iso_source: 222 | return ( 223 | G_collect.reshape((NFourier, NLayers, NQuad, NQuad)), 224 | K_collect.reshape((NFourier, NLayers, NQuad)), 225 | B_collect.reshape((NFourier, NLayers, NQuad)), 226 | ) 227 | elif not there_is_beam_source and there_is_iso_source: 228 | return ( 229 | G_collect.reshape((NFourier, NLayers, NQuad, NQuad)), 230 | K_collect.reshape((NFourier, NLayers, NQuad)), 231 | G_inv_collect_0, 232 | ) 233 | else: 234 | return ( 235 | G_collect.reshape((NFourier, NLayers, NQuad, NQuad)), 236 | K_collect.reshape((NFourier, NLayers, NQuad)), 237 | ) --------------------------------------------------------------------------------