├── .gitignore ├── .gitmodules ├── .rtd-environment.yml ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── README.md ├── ah_bootstrap.py ├── docs ├── Makefile ├── _templates │ └── autosummary │ │ ├── base.rst │ │ ├── class.rst │ │ └── module.rst ├── conf.py ├── index.rst └── make.bat ├── examples ├── README.txt ├── define_and_run_trivial_analytical_model.py ├── define_and_run_trivial_preprocessor_and_extrapolator.py ├── gaussian_example_data.py ├── potential_extrapolation_of_example_data.py ├── potential_extrapolation_of_hmi_data.py ├── potential_extrapolation_of_hmi_fits_file.py ├── potential_extrapolation_of_images.py └── potential_extrapolation_performance_tests.py ├── ez_setup.py ├── licenses ├── LICENSE.rst └── README.rst ├── pytest.ini ├── readthedocs.yml ├── setup.cfg ├── setup.py └── solarbextrapolation ├── __init__.py ├── _astropy_init.py ├── analyticalmodels ├── __init__.py ├── base.py └── titov_demoulin_equilibrium.py ├── conftest.py ├── data ├── __init__.py ├── _sample.py └── sample.py ├── example_data_generator.py ├── extrapolators ├── __init__.py ├── base.py ├── potential_field_extrapolator.py ├── potential_field_extrapolator_numba.py └── potential_field_extrapolator_python.py ├── map3dclasses.py ├── mayavi_seed_streamlines.py ├── preprocessors ├── __init__.py └── base.py ├── temp_gradient.py ├── tests ├── __init__.py ├── coveragerc ├── setup_package.py └── test_scripts.py ├── utilities.py └── visualisation_functions.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | 46 | # Translations 47 | *.mo 48 | *.pot 49 | 50 | # Django stuff: 51 | *.log 52 | 53 | # Sphinx documentation 54 | docs/_build/ 55 | docs/api/ 56 | solarbextrapolation/version.py 57 | docs/auto_examples/* 58 | 59 | # PyBuilder 60 | target/ 61 | 62 | cython_version.py 63 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "astropy_helpers"] 2 | path = astropy_helpers 3 | url = https://github.com/astropy/astropy-helpers.git 4 | -------------------------------------------------------------------------------- /.rtd-environment.yml: -------------------------------------------------------------------------------- 1 | name: solarbextrapolation-rtd 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3 6 | - astropy 7 | - numpy=1.12 8 | - matplotlib 9 | - mayavi 10 | - mock 11 | - scipy 12 | - sphinx 13 | - sphinx-gallery 14 | - sphinx_rtd_theme 15 | - sunpy 16 | - pip: 17 | - sunpy-sphinx-theme 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # The language in this case has no bearing - we are going to be making use of "conda" for a 2 | # python distribution for the scientific python stack. 3 | language: python 4 | sudo: false 5 | env: 6 | global: 7 | - PYTHON_VERSION=3.7 8 | - ASTROPY_VERSION='stable' 9 | - MAIN_CMD='python setup.py' 10 | - CONDA_DEPENDENCIES='scipy matplotlib pytest sunpy sphinx mayavi sphinx-gallery' 11 | - PIP_DEPENDENCIES='sunpy-sphinx-theme' 12 | - CONDA_CHANNELS='conda-forge' 13 | - NUMPY_VERSION='1.12' 14 | - SETUP_XVFB=True 15 | matrix: 16 | - SETUP_CMD='install' 17 | - SETUP_CMD='build_docs' 18 | - SETUP_CMD='test' 19 | install: 20 | - git clone https://github.com/astropy/ci-helpers.git 21 | - source ci-helpers/travis/setup_conda_${TRAVIS_OS_NAME}.sh 22 | script: 23 | - $MAIN_CMD $SETUP_CMD 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 SunPy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include CHANGES.rst 3 | 4 | include ez_setup.py 5 | include ah_bootstrap.py 6 | include setup.cfg 7 | 8 | recursive-include solarbextrapolation *.pyx *.c *.pxd 9 | recursive-include docs * 10 | recursive-include licenses * 11 | recursive-include cextern * 12 | recursive-include scripts * 13 | 14 | prune build 15 | prune docs/_build 16 | prune docs/api 17 | 18 | recursive-include astropy_helpers * 19 | exclude astropy_helpers/.git 20 | exclude astropy_helpers/.gitignore 21 | 22 | global-exclude *.pyc *.o 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This package is no longer functional. 2 | 3 | It has been unmaintained for a significant period of time, 4 | and would need enough work that any future coordinated effort to build 5 | a framework for general extrapolation techniques would be better off starting 6 | from scratch. 7 | 8 | If you wish to do full-Sun potential field extrapolations, you should look at 9 | [pfsspy](https://pfsspy.readthedocs.io/). 10 | 11 | # solarbextrapolation 12 | 13 | solarbextrapolation was a library for extrapolating 3D magnetic fields from line-of-sight magnetograms. 14 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = -W 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | #This is needed with git because git doesn't create a dir if it's empty 18 | $(shell [ -d "_static" ] || mkdir -p _static) 19 | 20 | help: 21 | @echo "Please use \`make <target>' where <target> is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and a HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " devhelp to make HTML files and a Devhelp project" 30 | @echo " epub to make an epub" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " text to make text files" 34 | @echo " man to make manual pages" 35 | @echo " changes to make an overview of all changed/added/deprecated items" 36 | @echo " linkcheck to check all external links for integrity" 37 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 38 | 39 | clean: 40 | -rm -rf $(BUILDDIR) 41 | -rm -rf api 42 | 43 | html: 44 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 45 | @echo 46 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 47 | 48 | dirhtml: 49 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 50 | @echo 51 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 52 | 53 | singlehtml: 54 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 55 | @echo 56 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 57 | 58 | pickle: 59 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 60 | @echo 61 | @echo "Build finished; now you can process the pickle files." 62 | 63 | json: 64 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 65 | @echo 66 | @echo "Build finished; now you can process the JSON files." 67 | 68 | htmlhelp: 69 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 70 | @echo 71 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 72 | ".hhp project file in $(BUILDDIR)/htmlhelp." 73 | 74 | qthelp: 75 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 76 | @echo 77 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 78 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 79 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Astropy.qhcp" 80 | @echo "To view the help file:" 81 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Astropy.qhc" 82 | 83 | devhelp: 84 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 85 | @echo 86 | @echo "Build finished." 87 | @echo "To view the help file:" 88 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Astropy" 89 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Astropy" 90 | @echo "# devhelp" 91 | 92 | epub: 93 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 94 | @echo 95 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 96 | 97 | latex: 98 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 99 | @echo 100 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
101 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 102 | "(use \`make latexpdf' here to do that automatically)." 103 | 104 | latexpdf: 105 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 106 | @echo "Running LaTeX files through pdflatex..." 107 | make -C $(BUILDDIR)/latex all-pdf 108 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 109 | 110 | text: 111 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 112 | @echo 113 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 114 | 115 | man: 116 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 117 | @echo 118 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 119 | 120 | changes: 121 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 122 | @echo 123 | @echo "The overview file is in $(BUILDDIR)/changes." 124 | 125 | linkcheck: 126 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 127 | @echo 128 | @echo "Link check complete; look for any errors in the above output " \ 129 | "or in $(BUILDDIR)/linkcheck/output.txt." 130 | 131 | doctest: 132 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 133 | @echo "Testing of doctests in the sources finished, look at the " \ 134 | "results in $(BUILDDIR)/doctest/output.txt." 135 | -------------------------------------------------------------------------------- /docs/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/base.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/class.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/module.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 3 | # 4 | # Astropy documentation build configuration file. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this file. 9 | # 10 | # All configuration values have a default. Some values are defined in 11 | # the global Astropy configuration which is loaded here before anything else. 12 | # See astropy.sphinx.conf for which values are set there. 
13 | 14 | # If extensions (or modules to document with autodoc) are in another directory, 15 | # add these directories to sys.path here. If the directory is relative to the 16 | # documentation root, use os.path.abspath to make it absolute, like shown here. 17 | # sys.path.insert(0, os.path.abspath('..')) 18 | # IMPORTANT: the above commented section was generated by sphinx-quickstart, but 19 | # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left 20 | # commented out with this explanation to make it clear why this should not be 21 | # done. If the sys.path entry above is added, when the astropy.sphinx.conf 22 | # import occurs, it will import the *source* version of astropy instead of the 23 | # version installed (if invoked as "make html" or directly with sphinx), or the 24 | # version in the build directory (if "python setup.py build_sphinx" is used). 25 | # Thus, any C-extensions that are needed to build the documentation will *not* 26 | # be accessible, and the documentation will not build correctly. 27 | 28 | import datetime 29 | import os 30 | import sys 31 | 32 | try: 33 | import astropy_helpers 34 | except ImportError: 35 | # Building from inside the docs/ directory? 36 | if os.path.basename(os.getcwd()) == 'docs': 37 | a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers')) 38 | if os.path.isdir(a_h_path): 39 | sys.path.insert(1, a_h_path) 40 | 41 | # Load all of the global Astropy configuration 42 | from sphinx_astropy.conf import * 43 | 44 | # Get configuration information from setup.cfg 45 | try: 46 | from ConfigParser import ConfigParser 47 | except ImportError: 48 | from configparser import ConfigParser 49 | conf = ConfigParser() 50 | conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')]) 51 | setup_cfg = dict(conf.items('metadata')) 52 | 53 | # -- Read the Docs Setup ----------------------------------------------------- 54 | 55 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 56 | 57 | if on_rtd: 58 | os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/solarbextrapolation/' 59 | os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/solarbextrapolation/' 60 | modules = {} 61 | from mock import Mock 62 | mock = Mock() 63 | modules.update({'mayavi':mock, 'mayavi.mlab':mock.module}) 64 | sys.modules.update(modules) 65 | else: 66 | # -- Sphinx Gallery ------------------------------------------------------------ 67 | 68 | extensions += ['sphinx_gallery.gen_gallery'] 69 | 70 | try: 71 | from mayavi import mlab 72 | find_mlab_figures = True 73 | mlab.options.offscreen = True 74 | except ImportError: 75 | find_mlab_figures = False 76 | 77 | sphinx_gallery_conf = {'find_mayavi_figures': find_mlab_figures, 78 | 'gallery_dirs': 'auto_examples', 79 | 'examples_dirs': '../examples'} 80 | 81 | 82 | # -- General configuration ---------------------------------------------------- 83 | 84 | # If your documentation needs a minimal Sphinx version, state it here. 85 | #needs_sphinx = '1.2' 86 | 87 | # To perform a Sphinx version check that needs to be more specific than 88 | # major.minor, call `check_sphinx_version("x.y.z")` here. 89 | # check_sphinx_version("1.2.1") 90 | 91 | # List of patterns, relative to source directory, that match files and 92 | # directories to ignore when looking for source files. 93 | exclude_patterns.append('_templates') 94 | 95 | # This is added to the end of RST files - a good place to put substitutions to 96 | # be used globally. 
97 | rst_epilog += """ 98 | """ 99 | 100 | # -- Project information ------------------------------------------------------ 101 | 102 | # This does not *have* to match the package name, but typically does 103 | project = setup_cfg['package_name'] 104 | author = setup_cfg['author'] 105 | copyright = '{0}, {1}'.format(datetime.datetime.now().year, 106 | setup_cfg['author']) 107 | 108 | # The version info for the project you're documenting, acts as replacement for 109 | # |version| and |release|, also used in various other places throughout the 110 | # built documents. 111 | 112 | __import__(setup_cfg['package_name']) 113 | package = sys.modules[setup_cfg['package_name']] 114 | 115 | # The short X.Y version. 116 | version = package.__version__.split('-', 1)[0] 117 | # The full version, including alpha/beta/rc tags. 118 | release = package.__version__ 119 | 120 | 121 | # -- Options for HTML output --------------------------------------------------- 122 | 123 | # A NOTE ON HTML THEMES 124 | # The global astropy configuration uses a custom theme, 'bootstrap-astropy', 125 | # which is installed along with astropy. A different theme can be used or 126 | # the options for this theme can be modified by overriding some of the 127 | # variables set in the global configuration. The variables set in the 128 | # global configuration are listed below, commented out. 129 | 130 | # Add any paths that contain custom themes here, relative to this directory. 131 | # To use a different custom theme, add the directory containing the theme. 132 | #html_theme_path = [] 133 | 134 | # The theme to use for HTML and HTML Help pages. See the documentation for 135 | # a list of builtin themes. To override the custom theme, set this to the 136 | # name of a builtin theme or the name of a custom theme in html_theme_path. 137 | from sunpy_sphinx_theme.conf import * 138 | # Custom sidebar templates, maps document names to template names. 139 | #html_sidebars = {} 140 | 141 | # The name of an image file (within the static path) to use as favicon of the 142 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 143 | # pixels large. 144 | html_favicon = '' 145 | 146 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 147 | # using the given strftime format. 148 | #html_last_updated_fmt = '' 149 | 150 | # The name for this set of Sphinx documents. If None, it defaults to 151 | # "<project> v<release> documentation". 152 | html_title = '{0} v{1}'.format(project, release) 153 | 154 | # Output file base name for HTML help builder. 155 | htmlhelp_basename = project + 'doc' 156 | 157 | # -- Options for LaTeX output -------------------------------------------------- 158 | 159 | # Grouping the document tree into LaTeX files. List of tuples 160 | # (source start file, target name, title, author, documentclass [howto/manual]). 161 | latex_documents = [('index', project + '.tex', project + u' Documentation', 162 | author, 'manual')] 163 | 164 | # -- Options for manual page output -------------------------------------------- 165 | 166 | # One entry per manual page. List of tuples 167 | # (source start file, name, description, authors, manual section).
168 | man_pages = [('index', project.lower(), project + u' Documentation', [author], 169 | 1)] 170 | 171 | ## -- Options for the edit_on_github extension ---------------------------------------- 172 | 173 | if eval(setup_cfg.get('edit_on_github')): 174 | extensions += ['astropy_helpers.sphinx.ext.edit_on_github'] 175 | 176 | versionmod = __import__(setup_cfg['package_name'] + '.version') 177 | edit_on_github_project = setup_cfg['github_project'] 178 | if versionmod.version.release: 179 | edit_on_github_branch = "v" + versionmod.version.version 180 | else: 181 | edit_on_github_branch = "master" 182 | 183 | edit_on_github_source_root = "" 184 | edit_on_github_doc_root = "docs" 185 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Solar Magnetic Field Extrapolation 2 | ================================== 3 | 4 | This is an affiliated package for Astropy. The documentation for 5 | this package is here: 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | self 11 | auto_examples/index 12 | 13 | .. automodapi:: solarbextrapolation.extrapolators 14 | .. automodapi:: solarbextrapolation.preprocessors 15 | .. automodapi:: solarbextrapolation.analyticalmodels 16 | .. automodapi:: solarbextrapolation.utilities 17 | .. automodapi:: solarbextrapolation.map3dclasses 18 | 19 | 20 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | if NOT "%PAPER%" == "" ( 11 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 12 | ) 13 | 14 | if "%1" == "" goto help 15 | 16 | if "%1" == "help" ( 17 | :help 18 | echo.Please use `make ^<target^>` where ^<target^> is one of 19 | echo. html to make standalone HTML files 20 | echo. dirhtml to make HTML files named index.html in directories 21 | echo. singlehtml to make a single large HTML file 22 | echo. pickle to make pickle files 23 | echo. json to make JSON files 24 | echo. htmlhelp to make HTML files and a HTML help project 25 | echo. qthelp to make HTML files and a qthelp project 26 | echo. devhelp to make HTML files and a Devhelp project 27 | echo. epub to make an epub 28 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 29 | echo. text to make text files 30 | echo. man to make manual pages 31 | echo. changes to make an overview over all changed/added/deprecated items 32 | echo. linkcheck to check all external links for integrity 33 | echo. doctest to run all doctests embedded in the documentation if enabled 34 | goto end 35 | ) 36 | 37 | if "%1" == "clean" ( 38 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 39 | del /q /s %BUILDDIR%\* 40 | goto end 41 | ) 42 | 43 | if "%1" == "html" ( 44 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 45 | if errorlevel 1 exit /b 1 46 | echo. 47 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 48 | goto end 49 | ) 50 | 51 | if "%1" == "dirhtml" ( 52 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 53 | if errorlevel 1 exit /b 1 54 | echo. 55 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
56 | goto end 57 | ) 58 | 59 | if "%1" == "singlehtml" ( 60 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 61 | if errorlevel 1 exit /b 1 62 | echo. 63 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 64 | goto end 65 | ) 66 | 67 | if "%1" == "pickle" ( 68 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 69 | if errorlevel 1 exit /b 1 70 | echo. 71 | echo.Build finished; now you can process the pickle files. 72 | goto end 73 | ) 74 | 75 | if "%1" == "json" ( 76 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished; now you can process the JSON files. 80 | goto end 81 | ) 82 | 83 | if "%1" == "htmlhelp" ( 84 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished; now you can run HTML Help Workshop with the ^ 88 | .hhp project file in %BUILDDIR%/htmlhelp. 89 | goto end 90 | ) 91 | 92 | if "%1" == "qthelp" ( 93 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 94 | if errorlevel 1 exit /b 1 95 | echo. 96 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 97 | .qhcp project file in %BUILDDIR%/qthelp, like this: 98 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Astropy.qhcp 99 | echo.To view the help file: 100 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Astropy.qhc 101 | goto end 102 | ) 103 | 104 | if "%1" == "devhelp" ( 105 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 106 | if errorlevel 1 exit /b 1 107 | echo. 108 | echo.Build finished. 109 | goto end 110 | ) 111 | 112 | if "%1" == "epub" ( 113 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 117 | goto end 118 | ) 119 | 120 | if "%1" == "latex" ( 121 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 122 | if errorlevel 1 exit /b 1 123 | echo. 124 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 125 | goto end 126 | ) 127 | 128 | if "%1" == "text" ( 129 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 130 | if errorlevel 1 exit /b 1 131 | echo. 132 | echo.Build finished. The text files are in %BUILDDIR%/text. 133 | goto end 134 | ) 135 | 136 | if "%1" == "man" ( 137 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 141 | goto end 142 | ) 143 | 144 | if "%1" == "changes" ( 145 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.The overview file is in %BUILDDIR%/changes. 149 | goto end 150 | ) 151 | 152 | if "%1" == "linkcheck" ( 153 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Link check complete; look for any errors in the above output ^ 157 | or in %BUILDDIR%/linkcheck/output.txt. 158 | goto end 159 | ) 160 | 161 | if "%1" == "doctest" ( 162 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 163 | if errorlevel 1 exit /b 1 164 | echo. 165 | echo.Testing of doctests in the sources finished, look at the ^ 166 | results in %BUILDDIR%/doctest/output.txt.
167 | goto end 168 | ) 169 | 170 | :end 171 | -------------------------------------------------------------------------------- /examples/README.txt: -------------------------------------------------------------------------------- 1 | SunPy Solar Magnetic Field Extrapolation Gallery 2 | ================================================ 3 | 4 | It does what it says on the tin. 5 | -------------------------------------------------------------------------------- /examples/define_and_run_trivial_analytical_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | =============================================== 4 | Defining and Running a Custom Analytical Model 5 | =============================================== 6 | 7 | Here you will be creating a trivial analytical model following the API. 8 | 9 | """ 10 | 11 | ############################################################################## 12 | # You can start by importing the necessary module components. 13 | 14 | # Module imports 15 | from solarbextrapolation.map3dclasses import Map3D 16 | from solarbextrapolation.analyticalmodels import AnalyticalModel 17 | from solarbextrapolation.visualisation_functions import visualise 18 | 19 | ############################################################################## 20 | # You also need the ability to convert astropy units, manipulate numpy arrays 21 | # and use MayaVi for visualisation. 22 | 23 | # General imports 24 | import astropy.units as u 25 | import numpy as np 26 | from mayavi import mlab 27 | 28 | 29 | ############################################################################## 30 | # You are going to define a 3D cuboid grid of 20x20x20 with ranges in 31 | # megametres; these parameters can be stored in the following lists and astropy 32 | # quantities. 33 | 34 | # Input parameters: 35 | qua_shape = u.Quantity([ 20, 20, 20] * u.pixel) 36 | qua_x_range = u.Quantity([ -80.0, 80 ] * u.Mm) 37 | qua_y_range = u.Quantity([ -80.0, 80 ] * u.Mm) 38 | qua_z_range = u.Quantity([ 0.0, 120 ] * u.Mm) 39 | 40 | ############################################################################## 41 | # From the above parameters you can derive the grid step size and total size in 42 | # each dimension. 43 | 44 | """ 45 | # Derived parameters (make SI where applicable) 46 | x_0 = x_range[0].to(u.m).value 47 | Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value 48 | x_size = Dx * tup_shape[0] 49 | y_0 = y_range[0].to(u.m).value 50 | Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value 51 | y_size = Dy * tup_shape[1] 52 | z_0 = z_range[0].to(u.m).value 53 | Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value 54 | z_size = Dz * tup_shape[2] 55 | """ 56 | 57 | ############################################################################## 58 | # You can define this analytical model as a child of the AnalyticalModel class. 59 | class AnaOnes(AnalyticalModel): 60 | def __init__(self, **kwargs): 61 | super(AnaOnes, self).__init__(**kwargs) 62 | 63 | def _generate_field(self, **kwargs): 64 | # Adding in custom parameters to the metadata 65 | self.meta['analytical_model_routine'] = 'Ones Model' 66 | 67 | # Generate a trivial field and return (X,Y,Z,Vec) 68 | arr_4d = np.ones(self.shape.value.tolist() + [3]) 69 | self.field = arr_4d 70 | 71 | # Extract the LoS Magnetogram from this: 72 | self.magnetogram.data = arr_4d[:,:,0,2] 73 | 74 | # Now return the vector field.
75 | return Map3D( arr_4d, self.meta ) 76 | 77 | 78 | ############################################################################## 79 | # You can instantiate a copy of the new analytical model. 80 | aAnaMod = AnaOnes(shape=qua_shape, xrange=qua_x_range, yrange=qua_y_range, zrange=qua_z_range) 81 | 82 | ############################################################################## 83 | # Note: you could use default ranges and grid shape using aAnaMod = AnaOnes(). 84 | 85 | ############################################################################## 86 | # You can now calculate the vector field. 87 | aMap3D = aAnaMod.generate() 88 | 89 | ############################################################################## 90 | # You can now see the 2D boundary data used for extrapolation. 91 | aMap2D = aAnaMod.to_los_magnetogram() 92 | aMap2D.peek() 93 | 94 | ############################################################################## 95 | # You can also visualise the 3D vector field: 96 | fig = visualise(aMap3D, 97 | show_boundary_axes=False, 98 | show_volume_axes=False, 99 | debug=False) 100 | mlab.show() 101 | 102 | # Note: you can add boundary axes using: 103 | """ 104 | fig = visualise(aMap3D, 105 | show_boundary_axes=False, 106 | boundary_units=[1.0*u.arcsec, 1.0*u.arcsec], 107 | show_volume_axes=True, 108 | debug=False) 109 | """ 110 | -------------------------------------------------------------------------------- /examples/define_and_run_trivial_preprocessor_and_extrapolator.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | =============================================== 4 | Defining a Custom Preprocessor and Extrapolator 5 | =============================================== 6 | 7 | Here you will be creating a trivial preprocessor and extrapolator 8 | following the API. 9 | 10 | """ 11 | 12 | ################################################################################ 13 | # You start by importing the necessary modules. 14 | 15 | # General imports 16 | import sunpy.map as mp 17 | import numpy as np 18 | from mayavi import mlab # Necessary for visualisation 19 | 20 | # Module imports 21 | from solarbextrapolation.preprocessors import Preprocessors 22 | from solarbextrapolation.extrapolators import Extrapolators 23 | from solarbextrapolation.map3dclasses import Map3D 24 | from solarbextrapolation.visualisation_functions import visualise 25 | 26 | ########################################################################### 27 | # Preprocessor 28 | # Defining a trivial preprocessor that returns a zeros map for any given input 29 | # map. 30 | class PreZeros(Preprocessors): 31 | def __init__(self, map_magnetogram): 32 | super(PreZeros, self).__init__(map_magnetogram) 33 | 34 | def _preprocessor(self): 35 | # Adding in custom parameters to the meta 36 | self.meta['preprocessor_routine'] = 'Zeros Preprocessor' 37 | 38 | # Creating the trivial zeros map of the same shape as the input map 39 | map_output = mp.Map((np.zeros(self.map_input.data.shape), 40 | self.meta)) 41 | 42 | # Outputting the map. 43 | return map_output 44 | 45 | ########################################################################### 46 | # Make an input map that we will run the preprocessor on. 47 | # This will be changed to using the sample HMI image.
48 | # aMap2D = mp.Map('C://git//solarextrapolation//solarextrapolation//data//example_data_(100x100)__01_hmi.fits') 49 | from solarbextrapolation.example_data_generator import generate_example_data, dummyDataToMap 50 | import astropy.units as u 51 | aMap2D = dummyDataToMap(generate_example_data([ 20, 20 ], u.Quantity([ -10.0, 10.0 ] * u.arcsec), u.Quantity([ -10.0, 10.0 ] * u.arcsec)), u.Quantity([ -10.0, 10.0 ] * u.arcsec), u.Quantity([ -10.0, 10.0 ] * u.arcsec)) 52 | 53 | ########################################################################### 54 | # Instantiate the preprocessor and process the input map. 55 | aPrePro = PreZeros(aMap2D.submap([0, 10]*u.arcsec, [0, 10]*u.arcsec)) 56 | aPreProMap = aPrePro.preprocess() 57 | 58 | 59 | ########################################################################### 60 | # You can plot the preprocessed map using peek. 61 | aPreProMap.peek() 62 | 63 | ########################################################################### 64 | # You can also access the metadata of the preprocessor like any map: 65 | print("preprocessor_routine: " + str(aPreProMap.meta['preprocessor_routine'])) 66 | print("preprocessor_duration: " + str(aPreProMap.meta['preprocessor_duration'])) 67 | 68 | 69 | 70 | 71 | 72 | ########################################################################### 73 | # Extrapolator 74 | # Defining a trivial extrapolator that returns a volume of ones. 75 | class ExtOnes(Extrapolators): 76 | def __init__(self, map_magnetogram, **kwargs): 77 | super(ExtOnes, self).__init__(map_magnetogram, **kwargs) 78 | 79 | def _extrapolation(self): 80 | # Adding in custom parameters to the meta 81 | self.meta['extrapolator_routine'] = 'Ones Extrapolator' 82 | 83 | #arr_4d = np.ones([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[0], self.z, 3]) 84 | arr_4d = np.ones(self.shape.tolist() + [3]) 85 | return Map3D(arr_4d, self.meta) 86 | 87 | ########################################################################### 88 | # Instantiate the extrapolator and extrapolate. 89 | aExt = ExtOnes(aPreProMap, zshape=10) 90 | aMap3D = aExt.extrapolate() 91 | 92 | ########################################################################### 93 | # You can visualise the field using MayaVi. 94 | fig = visualise(aMap3D, 95 | boundary=aPreProMap, 96 | show_boundary_axes=False, 97 | show_volume_axes=False, 98 | debug=False) 99 | mlab.show() 100 | 101 | """ 102 | 103 | # aPreProData = aMap2D.submap([0,10], [0,10]) 104 | 105 | # Some checks: 106 | #aPreProData.data # Should be a 2D zeros array.
107 | #aPreProData.meta 108 | #aPreProData.meta['preprocessor_routine'] 109 | #aPreProData.meta['preprocessor_start_time'] 110 | 111 | ########################################################################### 112 | # Testing an extrapolator 113 | 114 | 115 | # Define trivial extrapolator 116 | class ExtZeros(Extrapolators): 117 | def __init__(self, map_magnetogram, **kwargs): 118 | super(ExtZeros, self).__init__(map_magnetogram, **kwargs) 119 | 120 | def _extrapolation(self): 121 | # Adding in custom parameters to the meta 122 | self.meta['extrapolator_routine'] = 'Zeros Extrapolator' 123 | 124 | arr_4d = np.zeros([self.map_boundary_data.data.shape[0], 125 | self.map_boundary_data.data.shape[0], self.z, 3]) 126 | return Map3D((arr_4d, self.meta)) 127 | 128 | 129 | aExt = ExtZeros( 130 | aPreProData, 131 | filepath='C://Users/Alex/solarextrapolation/solarextrapolation/3Dmap.m3d') 132 | aMap3D = aExt.extrapolate() 133 | 134 | # Some checks: 135 | #aMap3D.data # Should be a 4D zeros array. 136 | #aMap3D.meta 137 | #aMap3D.meta['extrapolator_routine'] 138 | #aMap3D.meta['extrapolator_start_time'] 139 | 140 | # Testing a Map3DCube 141 | 142 | aMapCube = Map3DCube(aMap3D, aMap3D) 143 | aMapCube[0] 144 | aMapCube[0].data 145 | aMapCube[0].meta 146 | aMapCube[1].data 147 | aMapCube[1].meta 148 | """ 149 | -------------------------------------------------------------------------------- /examples/gaussian_example_data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | ========================================= 4 | Generating Example Gaussian Boundary Data 5 | ========================================= 6 | 7 | In this example you will be generating some example Gaussian boundary data, of 8 | the kind used as the boundary input for the potential extrapolator. 9 | """ 10 | 11 | ############################################################################## 12 | # You can start by importing the necessary module components. 13 | 14 | # Module imports 15 | from solarbextrapolation.example_data_generator import generate_example_data, dummyDataToMap 16 | 17 | ############################################################################## 18 | # You also need the ability to convert astropy units. 19 | import astropy.units as u 20 | 21 | ############################################################################## 22 | # You need to define the parameters of the area, including the x and y ranges 23 | # as astropy quantities with angular or distance units and the grid shape. 24 | 25 | # Input parameters: 26 | arr_grid_shape = [ 20, 22 ] # [ y-size, x-size ] 27 | qua_xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec) 28 | qua_yrange = u.Quantity([ -11.0, 11.0 ] * u.arcsec) 29 | 30 | ############################################################################## 31 | # The generated data will consist of a 2D space with 2 Gaussian spots, one 32 | # positive and one negative, on a background of 0.0. 33 | # solarbextrapolation.example_data_generator provides many ways to achieve this, 34 | # including letting it randomly generate the position, magnitude and size of 35 | # each spot/pole. 36 | 37 | # To randomly generate 2 poles simply don't add any pole parameters: 38 | arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange) 39 | # Note: each time you run this, pole positions/magnitudes will change.
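# Note: a minimal sketch for making the random poles above repeatable, assuming
# generate_example_data draws its random pole parameters from numpy.random
# (this is an assumption about the implementation, not a documented API):
#
#     import numpy as np
#     np.random.seed(0)  # seed before generating, so reruns give identical poles
#     arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange)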
40 | 41 | ############################################################################## 42 | # We can now convert this into a SunPy map object: 43 | aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange) 44 | 45 | ############################################################################## 46 | # We can see this map using peek: 47 | aMap.peek() 48 | 49 | ############################################################################## 50 | # To manually position poles, simply build lists of parameters for each pole. 51 | # It's often easiest to use percentage units for location/size, where we compare 52 | # to the map's region. 53 | # arrA0 = [ Position, size, Max Magnitude ] 54 | arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent, 0.2 * u.T ] 55 | arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ] 56 | 57 | # To generate and view: 58 | arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange, arrA0, arrA1) 59 | aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange) 60 | aMap.peek() 61 | 62 | ############################################################################## 63 | # But absolute positioning using the map range units is also possible: 64 | arrA2 = [ u.Quantity([ -6, 6 ] * u.arcsec), 2 * u.arcsec, -0.2 * u.T ] 65 | arrA3 = [ u.Quantity([ 6, -7 ] * u.arcsec), 2 * u.arcsec, 0.2 * u.T ] 66 | 67 | # To generate and view: 68 | arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange, arrA2, arrA3) 69 | aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange) 70 | aMap.peek() 71 | 72 | ############################################################################## 73 | # You can add as many poles as you want: 74 | arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange, arrA0, arrA1, arrA2, arrA3) 75 | aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange) 76 | aMap.peek() 77 | 78 | ############################################################################## 79 | # And being a map you can use all the normal SunPy functions, such as saving 80 | # the map using aMap.save(filepath). 81 | -------------------------------------------------------------------------------- /examples/potential_extrapolation_of_example_data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | ============================ 4 | Generated Data Extrapolation 5 | ============================ 6 | 7 | In this example you will be generating some example data and extrapolating it 8 | using the basic potential extrapolator. 9 | """ 10 | 11 | ############################################################################## 12 | # You can start by importing the necessary module components. 13 | 14 | # Module imports 15 | from solarbextrapolation.map3dclasses import Map3D 16 | #from solarbextrapolation.potential_field_extrapolator import PotentialExtrapolator 17 | from solarbextrapolation.extrapolators import PotentialExtrapolator 18 | from solarbextrapolation.example_data_generator import generate_example_data, dummyDataToMap 19 | from solarbextrapolation.visualisation_functions import visualise 20 | 21 | ############################################################################## 22 | # You also need the ability to convert astropy units and use MayaVi for 23 | # visualisation.
24 | 25 | # General imports 26 | import astropy.units as u 27 | from mayavi import mlab 28 | import numpy as np 29 | 30 | ############################################################################## 31 | # You are going to define a 3D cuboid grid of 20x22x20 with ranges in 32 | # arcseconds; these parameters can be stored in the following lists and astropy 33 | # quantities. 34 | 35 | # Input parameters: 36 | arr_grid_shape = [ 20, 22, 20 ] # [ y-size, x-size, z-size ] 37 | xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec) 38 | yrange = u.Quantity([ -11.0, 11.0 ] * u.arcsec) 39 | zrange = u.Quantity([ 0, 20.0 ] * u.arcsec) 40 | 41 | ############################################################################## 42 | # The generated data will consist of a 2D space with 2 Gaussian spots, one 43 | # positive and one negative, on a background of 0.0. 44 | # solarbextrapolation.example_data_generator provides many ways to achieve this, 45 | # including letting it randomly generate the position, magnitude and size of 46 | # each spot. 47 | # In this case you will manually define the parameters of each spot as a list, 48 | # using percentage units so that the spots will be inside the given ranges of 49 | # any generated data: 50 | 51 | # Manual Pole Details 52 | #arrA# = [ position, size, maximum strength ] 53 | arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent, 0.2 * u.T ] 54 | arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ] 55 | 56 | ############################################################################## 57 | # You generate the data using generate_example_data(...) and create a map with 58 | # this using dummyDataToMap(...). 59 | 60 | # Generate the data and make into a map 61 | arr_data = generate_example_data(arr_grid_shape[0:2], xrange, yrange, arrA0, arrA1) 62 | map_boundary = dummyDataToMap(arr_data, xrange, yrange) 63 | 64 | ############################################################################## 65 | # You can check the resulting generated data by using peek(). 66 | map_boundary.peek() 67 | 68 | ############################################################################## 69 | # You now simply want to extrapolate using this boundary data; this is achieved 70 | # by first creating a potential extrapolator object and then by running the 71 | # extrapolate method on this to return a Map3D object with the resulting vector field. 72 | 73 | # Use potential extrapolator to generate field 74 | aPotExt = PotentialExtrapolator(map_boundary, zshape=arr_grid_shape[2], zrange=zrange) 75 | aMap3D = aPotExt.extrapolate(enable_numba=True) 76 | 77 | # The extrapolation's run time is stored in the meta 78 | floSeconds = np.round(aMap3D.meta['extrapolator_duration'],3) 79 | print('\nextrapolation duration: ' + str(floSeconds) + ' s\n') 80 | 81 | ############################################################################## 82 | # Note that you used enable_numba=True to speed up the computation on systems 83 | # with Anaconda numba installed (a sketch of the numba-disabled call follows).
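# A minimal sketch of the same call with the numba acceleration disabled, for
# timing comparisons. This assumes enable_numba=False falls back to the
# pure-python implementation (the package ships
# potential_field_extrapolator_python.py alongside the numba version); it is
# not needed for normal use:
#
#     aMap3D_plain = aPotExt.extrapolate(enable_numba=False)
#     print(aMap3D_plain.meta['extrapolator_duration'])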
84 | 85 | ############################################################################## 86 | # You can now get a quick and easy visualisation using the 87 | # solarbextrapolation.visualisation_functions.visualise tools: 88 | 89 | # Visualise the 3D vector field 90 | fig = visualise(aMap3D, 91 | boundary=map_boundary, 92 | volume_units=[1.0*u.arcsec, 1.0*u.arcsec, 1.0*u.Mm], 93 | show_boundary_axes=False, 94 | boundary_units=[1.0*u.arcsec, 1.0*u.arcsec], 95 | show_volume_axes=True, 96 | debug=False) 97 | mlab.show() 98 | ############################################################################## 99 | # Note that the parameters here are simply to decide what boundary ranges 100 | # to display. 101 | 102 | -------------------------------------------------------------------------------- /examples/potential_extrapolation_of_hmi_data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | ====================== 4 | Extrapolating HMI Data 5 | ====================== 6 | 7 | In this example you will be downloading boundary data from VSO, extrapolating 8 | using the potential extrapolator and visualising in MayaVi. 9 | """ 10 | 11 | ################################################################################ 12 | # You start by importing the necessary modules. 13 | 14 | # General imports 15 | import numpy as np 16 | import sunpy.map as mp 17 | from sunpy.net import vso 18 | from astropy import units as u 19 | from mayavi import mlab # Necessary for visualisation 20 | import os 21 | 22 | # Module imports 23 | from solarbextrapolation.map3dclasses import Map3D 24 | from solarbextrapolation.extrapolators import PotentialExtrapolator 25 | from solarbextrapolation.visualisation_functions import visualise 26 | 27 | ################################################################################ 28 | # You will retrieve the boundary data from the VSO using the SunPy VSO client. 29 | # In this case we will retrieve an SDO HMI line-of-sight magnetogram that was 30 | # made on the 14th of February 2011, as used in Sun et al (2012). 31 | 32 | # Create a new VSOClient instance 33 | client = vso.VSOClient() 34 | 35 | # Build the query; this can return one item, or a list of them to download, 36 | # matching the given filters. 37 | result_hmi = client.query( 38 | # The following are filters for collecting the desired data. 39 | vso.attrs.Time((2011, 2, 14, 20, 34, 0), (2011, 2, 14, 21, 0, 0)), # Time range. 40 | vso.attrs.Instrument('HMI'), # Helioseismic and Magnetic Imager. 41 | vso.attrs.Physobs('LOS_magnetic_field'), # Physical observables 42 | vso.attrs.Sample(4000 * u.s) # Only take a shot every $var seconds. 43 | # More observables at http://sdac.virtualsolar.org/cgi/show_details?keyword=PHYSOBS 44 | ) 45 | 46 | # Save the results to fits files. (Using Rice compression if possible) 47 | data_hmi = client.get(result_hmi, methods=('URL-FILE_Rice', 'URL-FILE')).wait() 48 | 49 | ################################################################################ 50 | # You may also decide to get the corresponding SDO AIA data showing the EUV 51 | # image at the same time; this can be used to see the flux tubes for comparison 52 | # to the vector field streamlines for visualisation. 53 | 54 | # Query VSO. 55 | result_aia = client.query( 56 | vso.attrs.Time((2011, 2, 14, 20, 34, 0), (2011, 2, 14, 21, 0, 0)), # Time range. 57 | vso.attrs.Instrument('AIA'), # Atmospheric Imaging Assembly.
58 | vso.attrs.Physobs('intensity'), # Physical observables 59 | vso.attrs.Sample(4000 * u.s) # Only take a shot every $var seconds. 60 | # More observables at http://sdac.virtualsolar.org/cgi/show_details?keyword=PHYSOBS 61 | ) 62 | 63 | # Save the results to fits files. (Using Rice compression if possible) 64 | data_aia = client.get(result_aia, methods=('URL-FILE_Rice', 'URL-FILE')).wait() 65 | 66 | ################################################################################ 67 | # You want to crop in solar-x and solar-y to the active region of interest. 68 | # Likewise you want to decide on the altitude range to extrapolate within. 69 | # Extrapolators use astropy quantities for ranges; importantly, these are 70 | # designed to work with either physical length or angular units. Conversion is 71 | # done using the assumption that the boundary data is on the surface of the Sun 72 | # and following the small angle approximation. 73 | # In this case we use angular units (arcsec specifically) for the zrange 74 | # quantity; this is physically meaningless, but gives an easy way to ensure 75 | # your zrange is similar to the other ranges. 76 | # We also want extended solar-x and solar-y ranges for plotting the boundary. 77 | 78 | # Cropping into the active region within the HMI map 79 | xrange = u.Quantity([50, 300] * u.arcsec) 80 | yrange = u.Quantity([-350, -100] * u.arcsec) 81 | zrange = u.Quantity([0, 250] * u.arcsec) 82 | 83 | # Open the map and create a cropped version for the extrapolation. 84 | map_hmi = mp.Map(data_hmi[0]) 85 | map_hmi_cropped = map_hmi.submap(xrange, yrange) 86 | 87 | ################################################################################ 88 | # If your boundary data has a high resolution then you may need to resample to 89 | # ensure it extrapolates within a reasonable timeframe. 90 | 91 | # Resample boundary data map 92 | shape = u.Quantity([20, 20] * u.pixel) 93 | map_hmi_cropped_resampled = map_hmi_cropped.resample(shape, method='linear') 94 | 95 | ################################################################################ 96 | # You can check the resulting generated data by using peek(). 97 | map_hmi_cropped_resampled.peek() 98 | 99 | ################################################################################ 100 | # To speed up repeat usage of this script it will save the extrapolation output; 101 | # you can use os.path.isfile() to check if the file already exists: if it 102 | # doesn't you will extrapolate and create it, otherwise you load it. 103 | 104 | # Only extrapolate if we don't have a saved version 105 | str_vol_filepath = data_hmi[0][0:-5] + '_Bxyz.npy' 106 | if not os.path.isfile(str_vol_filepath): 107 | # Create the potential extrapolator and run the extrapolate method. 108 | aPotExt = PotentialExtrapolator(map_hmi_cropped_resampled, filepath=str_vol_filepath, zshape=20, zrange=zrange) 109 | aMap3D = aPotExt.extrapolate() 110 | # Load the results. 111 | aMap3D = Map3D.load(str_vol_filepath) 112 | #print('\nextrapolation duration: ' + str(np.round(aMap3D.meta['extrapolator_duration'], 3)) + ' s\n') 113 | 114 | 115 | ################################################################################ 116 | # For the purposes of visualisation we will want extended boundary data, not 117 | # just that of the extrapolated region, and at the instrument's full resolution, 118 | # not resampled.
119 | 120 | xrangeextended = u.Quantity([ xrange.value[0] - 50, xrange.value[1] + 50 ] * xrange.unit) 121 | yrangeextended = u.Quantity([ yrange.value[0] - 50, yrange.value[1] + 50 ] * yrange.unit) 122 | 123 | # Open the map and create a cropped version for the visualisation. 124 | map_boundary = mp.Map(data_hmi[0]) 125 | map_boundary_cropped = map_boundary.submap(xrangeextended, yrangeextended) 126 | 127 | ################################################################################ 128 | # You can now get a quick and easy visualisation using the 129 | # solarbextrapolation.visualisation_functions.visualise tools: 130 | 131 | # Visualise the 3D vector field 132 | visualise(aMap3D, boundary=map_boundary_cropped, scale=1.0*u.Mm, boundary_unit=1.0*u.arcsec, show_boundary_axes=False, show_volume_axes=True, debug=False) 133 | mlab.show() 134 | -------------------------------------------------------------------------------- /examples/potential_extrapolation_of_hmi_fits_file.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | =========================== 4 | HMI FITS File Extrapolation 5 | =========================== 6 | 7 | Example of extrapolating from an HMI FITS file using the potential 8 | extrapolator and visualising. 9 | """ 10 | # General imports 11 | import numpy as np 12 | import sunpy.map as mp 13 | from astropy import units as u 14 | from mayavi import mlab 15 | import os 16 | 17 | # Module imports 18 | from solarbextrapolation.map3dclasses import Map3D 19 | from solarbextrapolation.extrapolators import PotentialExtrapolator 20 | from solarbextrapolation.visualisation_functions import visualise 21 | 22 | # Cropping into the active region within the HMI map 23 | str_vol_filepath = 'C:\\git\\solarbextrapolation\\examples\\2011-02-14__20-35-25__02_Bxyz.npy' 24 | xrange = u.Quantity([50, 300] * u.arcsec) 25 | yrange = u.Quantity([-350, -100] * u.arcsec) 26 | zrange = u.Quantity([0, 250] * u.arcsec) 27 | xrangeextended = u.Quantity([xrange.value[0] - 50, xrange.value[1] + 50] * 28 | xrange.unit) 29 | yrangeextended = u.Quantity([yrange.value[0] - 50, yrange.value[1] + 50] * 30 | yrange.unit) 31 | 32 | # Open the map and create a cropped version for the extrapolation. 33 | map_hmi = mp.Map( 34 | 'C:\\git\\solarbextrapolation\\examples\\2011-02-14__20-35-25__01_hmi.fits') 35 | map_hmi_cropped = map_hmi.submap(xrange, yrange) 36 | dimensions = u.Quantity([100, 100] * u.pixel) 37 | map_hmi_cropped_resampled = map_hmi_cropped.resample(dimensions, 38 | method='linear') 39 | 40 | # Open the map and create a cropped version for the visualisation.
41 | #map_boundary = mp.Map('C:\\git\\solarbextrapolation\\examples\\2011-02-14__20-35-25__02_aia.fits')  # For AIA
42 | map_boundary = mp.Map(
43 |     'C:\\git\\solarbextrapolation\\examples\\2011-02-14__20-35-25__01_hmi.fits'
44 | )  # For HMI
45 | 
46 | map_boundary_cropped = map_boundary.submap(xrangeextended, yrangeextended)
47 | 
48 | # Only extrapolate if we don't have a saved version
49 | if not os.path.isfile(str_vol_filepath):
50 |     aPotExt = PotentialExtrapolator(map_hmi_cropped_resampled,
51 |                                     filepath=str_vol_filepath,
52 |                                     zshape=dimensions[0].value,
53 |                                     zrange=zrange)
54 |     aMap3D = aPotExt.extrapolate()
55 | aMap3D = Map3D.load(str_vol_filepath)
56 | print('\nextrapolation duration: ' + str(np.round(aMap3D.meta['extrapolator_duration'], 3)) + ' s\n')
57 | 
58 | # Visualise this
59 | visualise(aMap3D,
60 |           boundary=map_boundary_cropped,
61 |           scale=1.0 * u.Mm,
62 |           boundary_unit=1.0 * u.arcsec,
63 |           show_boundary_axes=False,
64 |           show_volume_axes=True,
65 |           debug=False)
66 | mlab.show()
67 | 
-------------------------------------------------------------------------------- /examples/potential_extrapolation_of_images.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | =====================================
4 | Potential Extrapolation from an Image
5 | =====================================
6 | 
7 | This example is to demonstrate using the potential extrapolator on an image.
8 | It was built for a bit of fun.
9 | """
10 | 
11 | from __future__ import print_function
12 | 
13 | # General imports
14 | from astropy import units as u
15 | from scipy import misc
16 | from mayavi import mlab
17 | import numpy as np
18 | 
19 | # Module imports
20 | from solarbextrapolation.extrapolators import PotentialExtrapolator
21 | from solarbextrapolation.example_data_generator import generate_example_data, dummyDataToMap
22 | from solarbextrapolation.visualisation_functions import visualise
23 | 
24 | # The input parameters:
25 | arr_grid_shape = [ 50, 50, 50 ]  # [ y-size, x-size, z-size ]
26 | xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
27 | yrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
28 | zrange = u.Quantity([ 0, 20.0 ] * u.arcsec)
29 | 
30 | # Manual Pole Details
31 | arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent, 0.2 * u.T ]
32 | arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ]
33 | 
34 | # Generate the data and make into a map
35 | arr_data = generate_example_data(arr_grid_shape[0:2], xrange, yrange, arrA0, arrA1)
36 | print('\n' + str(type(arr_data.dtype)))
37 | print(str(arr_data.shape))
38 | print(str(arr_data.dtype) + '\n')
39 | arr_image = (misc.imread('sunpy_powered_50x50.png')[...,:3] - 127.5) / 1270.0
40 | arr_data = np.zeros(arr_image.shape[:2])
41 | for i in range(0, arr_data.shape[0]):  # Row/Y
42 |     for j in range(0, arr_data.shape[1]):  # Column/X
43 |         arr_data[i,j] = ((arr_image[i,j,0] + arr_image[i,j,1] + arr_image[i,j,2]) / 3.0)
44 | print('\n' + str(type(arr_data.dtype)))
45 | print(str(arr_data.shape))
46 | print(str(arr_data.dtype) + '\n')
47 | 
48 | map_boundary = dummyDataToMap(arr_data, xrange, yrange)
49 | 
50 | # Use potential extrapolator to generate field
51 | aPotExt = PotentialExtrapolator(map_boundary, zshape=arr_grid_shape[2], zrange=zrange)
52 | aMap3D = aPotExt.extrapolate(enable_numba=True)
53 | print('extrapolator_duration:' + str(aMap3D.meta['extrapolator_duration']))
54 | 
55 | 
56 | # Visualise
57 | visualise(aMap3D,
58 |           boundary=map_boundary,
59 |           volume_units=[1.0*u.arcsec, 1.0*u.arcsec, 1.0*u.Mm],
60 |           show_boundary_axes=False,
61 |           boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
62 |           show_volume_axes=True,
63 |           debug=False)
64 | mlab.show()
65 | 
-------------------------------------------------------------------------------- /examples/potential_extrapolation_performance_tests.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | =====================================
4 | Extrapolator Performance Testing
5 | =====================================
6 | 
7 | In this example you will be running the potential field extrapolator both with
8 | numba enabled and disabled over a number of datasets and tabulating the results
9 | into an astropy table.
10 | 
11 | Note: if you don't have numba installed (e.g. from conda) the code will still
12 | work, but the results will not show any speed difference.
13 | """
14 | 
15 | ##############################################################################
16 | # You can start by importing the necessary module components.
17 | 
18 | # Module imports
19 | from solarbextrapolation.extrapolators import PotentialExtrapolator
20 | from solarbextrapolation.example_data_generator import generate_example_data, dummyDataToMap
21 | 
22 | ##############################################################################
23 | # You also need the ability to convert astropy units, and to use numpy arrays
24 | # and astropy tables.
25 | 
26 | # General imports
27 | from astropy import units as u
28 | from astropy.table import Table
29 | import numpy as np
30 | 
31 | ##############################################################################
32 | # You are going to create a series of volume grids with a given shape and then
33 | # attribute arbitrary units to its axes.
34 | lis_grid_shapes = [ [ 50, 50, 50 ] ]
35 | xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
36 | yrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
37 | zrange = u.Quantity([ 0, 20.0 ] * u.arcsec)
38 | 
39 | ##############################################################################
40 | # Note that you could easily choose any grid dimensions:
41 | # e.g. [ [ 100, 100, 200 ] ]
42 | # or add more than one grid shape to the list:
43 | # e.g. [ [ 10, 10, 10 ], [ 50, 50, 50 ], [ 100, 100, 100 ] ]
44 | # to make the test more grid-size agnostic, but this will notably increase
45 | # runtime.
46 | 
47 | ##############################################################################
48 | # You will create an example dataset using Gaussian spots, as shown in the
49 | # Generating Example Data example. We use the parameters:
50 | 
51 | # Manual Pole Details
52 | arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent, 0.2 * u.T ]
53 | arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ]
54 | arrA2 = [ u.Quantity([ 25, 75 ] * u.percent), 10.0 * u.percent, 0.1 * u.T ]
55 | arrA3 = [ u.Quantity([ 75, 25 ] * u.percent), 10.0 * u.percent, -0.1 * u.T ]
56 | 
57 | # Generate the datasets and maps
58 | # lis_maps = []
59 | # lis_extrapolators = []
60 | 
61 | ##############################################################################
62 | # You will create an astropy table to store the runtimes of the extrapolations.
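##############################################################################
# A quick aside before building the table (an illustrative sketch, not part
# of the original example): the two sets of timings below only differ if
# numba is actually importable, so you can check that up front.

try:
    import numba
    print('numba available, version ' + numba.__version__)
except ImportError:
    print('numba not available; enable_numba=True will fall back to pure python')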
63 | 
64 | # A table for storing the data
65 | t = Table(names=('grid size', 'time (min)', 'time (ave)', 'time (std)'), meta={'name': 'times tables'}, dtype=('S24', 'f8', 'f8', 'f8'))
66 | t['time (min)'].unit = u.s
67 | t['time (ave)'].unit = u.s
68 | t['time (std)'].unit = u.s
69 | 
70 | ##############################################################################
71 | # You will store all the datasets to test with in a list.
72 | # In this case the datasets will simply be the various generated example
73 | # boundary data maps for the list of grid sizes, which is simply one example.
74 | lis_datasets = []
75 | for shape in lis_grid_shapes:
76 |     lis_datasets.append([ str(shape), shape[2], zrange,
77 |                           dummyDataToMap(generate_example_data(shape[0:2], xrange, yrange, arrA0, arrA1, arrA2, arrA3), xrange, yrange) ])
78 | 
79 | ##############################################################################
80 | # You may wish to run each test more than once, so you can use a parameter to
81 | # automate this.
82 | int_trials = 1  # The number of times to repeat each extrapolation.
83 | 
84 | ##############################################################################
85 | # You iterate through the extrapolations on each dataset, adding the runtime to
86 | # the table.
87 | for extrapolation in lis_datasets:
88 |     # Setup the extrapolator and table
89 |     aPotExt = PotentialExtrapolator(extrapolation[3], zshape=extrapolation[1], zrange=extrapolation[2])
90 | 
91 |     # List to store the trial
92 |     lis_times = []
93 | 
94 |     # Run the extrapolation without numba for each dataset (map and ranges).
95 |     for i in range(0, int_trials):
96 |         aMap3D = aPotExt.extrapolate(enable_numba=False)
97 |         lis_times.append(aMap3D.meta['extrapolator_duration'])
98 |     t.add_row([extrapolation[0], np.round(np.min(lis_times), 2), np.round(np.average(lis_times), 2), np.round(np.std(lis_times), 2)])
99 | 
100 |     # List to store the trial
101 |     lis_times = []
102 | 
103 |     # Run the extrapolation with numba for each dataset (map and ranges).
104 |     for i in range(0, int_trials):
105 |         aMap3D = aPotExt.extrapolate(enable_numba=True)
106 |         lis_times.append(aMap3D.meta['extrapolator_duration'])
107 |     t.add_row(['(numba)'+extrapolation[0], np.round(np.min(lis_times), 2), np.round(np.average(lis_times), 2), np.round(np.std(lis_times), 2)])
108 | 
109 | ##############################################################################
110 | # You can now see the results in the table.
111 | print(t)
112 | 
-------------------------------------------------------------------------------- /ez_setup.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | """
4 | Setuptools bootstrapping installer.
5 | 
6 | Maintained at https://github.com/pypa/setuptools/tree/bootstrap.
7 | 
8 | Run this script to install or upgrade setuptools.
9 | 
10 | This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details.
11 | """
12 | 
13 | import os
14 | import shutil
15 | import sys
16 | import tempfile
17 | import zipfile
18 | import optparse
19 | import subprocess
20 | import platform
21 | import textwrap
22 | import contextlib
23 | 
24 | from distutils import log
25 | 
26 | try:
27 |     from urllib.request import urlopen
28 | except ImportError:
29 |     from urllib2 import urlopen
30 | 
31 | try:
32 |     from site import USER_SITE
33 | except ImportError:
34 |     USER_SITE = None
35 | 
36 | # 33.1.1 is the last version that supports setuptools self upgrade/installation.
37 | DEFAULT_VERSION = "33.1.1" 38 | DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" 39 | DEFAULT_SAVE_DIR = os.curdir 40 | DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" 41 | 42 | MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' 43 | 44 | log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) 45 | 46 | 47 | def _python_cmd(*args): 48 | """ 49 | Execute a command. 50 | 51 | Return True if the command succeeded. 52 | """ 53 | args = (sys.executable,) + args 54 | return subprocess.call(args) == 0 55 | 56 | 57 | def _install(archive_filename, install_args=()): 58 | """Install Setuptools.""" 59 | with archive_context(archive_filename): 60 | # installing 61 | log.warn('Installing Setuptools') 62 | if not _python_cmd('setup.py', 'install', *install_args): 63 | log.warn('Something went wrong during the installation.') 64 | log.warn('See the error message above.') 65 | # exitcode will be 2 66 | return 2 67 | 68 | 69 | def _build_egg(egg, archive_filename, to_dir): 70 | """Build Setuptools egg.""" 71 | with archive_context(archive_filename): 72 | # building an egg 73 | log.warn('Building a Setuptools egg in %s', to_dir) 74 | _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) 75 | # returning the result 76 | log.warn(egg) 77 | if not os.path.exists(egg): 78 | raise IOError('Could not build the egg.') 79 | 80 | 81 | class ContextualZipFile(zipfile.ZipFile): 82 | 83 | """Supplement ZipFile class to support context manager for Python 2.6.""" 84 | 85 | def __enter__(self): 86 | return self 87 | 88 | def __exit__(self, type, value, traceback): 89 | self.close() 90 | 91 | def __new__(cls, *args, **kwargs): 92 | """Construct a ZipFile or ContextualZipFile as appropriate.""" 93 | if hasattr(zipfile.ZipFile, '__exit__'): 94 | return zipfile.ZipFile(*args, **kwargs) 95 | return super(ContextualZipFile, cls).__new__(cls) 96 | 97 | 98 | @contextlib.contextmanager 99 | def archive_context(filename): 100 | """ 101 | Unzip filename to a temporary directory, set to the cwd. 102 | 103 | The unzipped target is cleaned up after. 
104 | """ 105 | tmpdir = tempfile.mkdtemp() 106 | log.warn('Extracting in %s', tmpdir) 107 | old_wd = os.getcwd() 108 | try: 109 | os.chdir(tmpdir) 110 | try: 111 | with ContextualZipFile(filename) as archive: 112 | archive.extractall() 113 | except zipfile.BadZipfile as err: 114 | if not err.args: 115 | err.args = ('', ) 116 | err.args = err.args + ( 117 | MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), 118 | ) 119 | raise 120 | 121 | # going in the directory 122 | subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) 123 | os.chdir(subdir) 124 | log.warn('Now working in %s', subdir) 125 | yield 126 | 127 | finally: 128 | os.chdir(old_wd) 129 | shutil.rmtree(tmpdir) 130 | 131 | 132 | def _do_download(version, download_base, to_dir, download_delay): 133 | """Download Setuptools.""" 134 | py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) 135 | tp = 'setuptools-{version}-{py_desig}.egg' 136 | egg = os.path.join(to_dir, tp.format(**locals())) 137 | if not os.path.exists(egg): 138 | archive = download_setuptools(version, download_base, 139 | to_dir, download_delay) 140 | _build_egg(egg, archive, to_dir) 141 | sys.path.insert(0, egg) 142 | 143 | # Remove previously-imported pkg_resources if present (see 144 | # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 145 | if 'pkg_resources' in sys.modules: 146 | _unload_pkg_resources() 147 | 148 | import setuptools 149 | setuptools.bootstrap_install_from = egg 150 | 151 | 152 | def use_setuptools( 153 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 154 | to_dir=DEFAULT_SAVE_DIR, download_delay=15): 155 | """ 156 | Ensure that a setuptools version is installed. 157 | 158 | Return None. Raise SystemExit if the requested version 159 | or later cannot be installed. 160 | """ 161 | to_dir = os.path.abspath(to_dir) 162 | 163 | # prior to importing, capture the module state for 164 | # representative modules. 165 | rep_modules = 'pkg_resources', 'setuptools' 166 | imported = set(sys.modules).intersection(rep_modules) 167 | 168 | try: 169 | import pkg_resources 170 | pkg_resources.require("setuptools>=" + version) 171 | # a suitable version is already installed 172 | return 173 | except ImportError: 174 | # pkg_resources not available; setuptools is not installed; download 175 | pass 176 | except pkg_resources.DistributionNotFound: 177 | # no version of setuptools was found; allow download 178 | pass 179 | except pkg_resources.VersionConflict as VC_err: 180 | if imported: 181 | _conflict_bail(VC_err, version) 182 | 183 | # otherwise, unload pkg_resources to allow the downloaded version to 184 | # take precedence. 185 | del pkg_resources 186 | _unload_pkg_resources() 187 | 188 | return _do_download(version, download_base, to_dir, download_delay) 189 | 190 | 191 | def _conflict_bail(VC_err, version): 192 | """ 193 | Setuptools was imported prior to invocation, so it is 194 | unsafe to unload it. Bail out. 195 | """ 196 | conflict_tmpl = textwrap.dedent(""" 197 | The required version of setuptools (>={version}) is not available, 198 | and can't be installed while this script is running. Please 199 | install a more recent version first, using 200 | 'easy_install -U setuptools'. 
201 | 202 | (Currently using {VC_err.args[0]!r}) 203 | """) 204 | msg = conflict_tmpl.format(**locals()) 205 | sys.stderr.write(msg) 206 | sys.exit(2) 207 | 208 | 209 | def _unload_pkg_resources(): 210 | sys.meta_path = [ 211 | importer 212 | for importer in sys.meta_path 213 | if importer.__class__.__module__ != 'pkg_resources.extern' 214 | ] 215 | del_modules = [ 216 | name for name in sys.modules 217 | if name.startswith('pkg_resources') 218 | ] 219 | for mod_name in del_modules: 220 | del sys.modules[mod_name] 221 | 222 | 223 | def _clean_check(cmd, target): 224 | """ 225 | Run the command to download target. 226 | 227 | If the command fails, clean up before re-raising the error. 228 | """ 229 | try: 230 | subprocess.check_call(cmd) 231 | except subprocess.CalledProcessError: 232 | if os.access(target, os.F_OK): 233 | os.unlink(target) 234 | raise 235 | 236 | 237 | def download_file_powershell(url, target): 238 | """ 239 | Download the file at url to target using Powershell. 240 | 241 | Powershell will validate trust. 242 | Raise an exception if the command cannot complete. 243 | """ 244 | target = os.path.abspath(target) 245 | ps_cmd = ( 246 | "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " 247 | "[System.Net.CredentialCache]::DefaultCredentials; " 248 | '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' 249 | % locals() 250 | ) 251 | cmd = [ 252 | 'powershell', 253 | '-Command', 254 | ps_cmd, 255 | ] 256 | _clean_check(cmd, target) 257 | 258 | 259 | def has_powershell(): 260 | """Determine if Powershell is available.""" 261 | if platform.system() != 'Windows': 262 | return False 263 | cmd = ['powershell', '-Command', 'echo test'] 264 | with open(os.path.devnull, 'wb') as devnull: 265 | try: 266 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 267 | except Exception: 268 | return False 269 | return True 270 | download_file_powershell.viable = has_powershell 271 | 272 | 273 | def download_file_curl(url, target): 274 | cmd = ['curl', url, '--location', '--silent', '--output', target] 275 | _clean_check(cmd, target) 276 | 277 | 278 | def has_curl(): 279 | cmd = ['curl', '--version'] 280 | with open(os.path.devnull, 'wb') as devnull: 281 | try: 282 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 283 | except Exception: 284 | return False 285 | return True 286 | download_file_curl.viable = has_curl 287 | 288 | 289 | def download_file_wget(url, target): 290 | cmd = ['wget', url, '--quiet', '--output-document', target] 291 | _clean_check(cmd, target) 292 | 293 | 294 | def has_wget(): 295 | cmd = ['wget', '--version'] 296 | with open(os.path.devnull, 'wb') as devnull: 297 | try: 298 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 299 | except Exception: 300 | return False 301 | return True 302 | download_file_wget.viable = has_wget 303 | 304 | 305 | def download_file_insecure(url, target): 306 | """Use Python to download the file, without connection authentication.""" 307 | src = urlopen(url) 308 | try: 309 | # Read all the data in one block. 310 | data = src.read() 311 | finally: 312 | src.close() 313 | 314 | # Write all the data in one block to avoid creating a partial file. 
315 | with open(target, "wb") as dst: 316 | dst.write(data) 317 | download_file_insecure.viable = lambda: True 318 | 319 | 320 | def get_best_downloader(): 321 | downloaders = ( 322 | download_file_powershell, 323 | download_file_curl, 324 | download_file_wget, 325 | download_file_insecure, 326 | ) 327 | viable_downloaders = (dl for dl in downloaders if dl.viable()) 328 | return next(viable_downloaders, None) 329 | 330 | 331 | def download_setuptools( 332 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 333 | to_dir=DEFAULT_SAVE_DIR, delay=15, 334 | downloader_factory=get_best_downloader): 335 | """ 336 | Download setuptools from a specified location and return its filename. 337 | 338 | `version` should be a valid setuptools version number that is available 339 | as an sdist for download under the `download_base` URL (which should end 340 | with a '/'). `to_dir` is the directory where the egg will be downloaded. 341 | `delay` is the number of seconds to pause before an actual download 342 | attempt. 343 | 344 | ``downloader_factory`` should be a function taking no arguments and 345 | returning a function for downloading a URL to a target. 346 | """ 347 | # making sure we use the absolute path 348 | to_dir = os.path.abspath(to_dir) 349 | zip_name = "setuptools-%s.zip" % version 350 | url = download_base + zip_name 351 | saveto = os.path.join(to_dir, zip_name) 352 | if not os.path.exists(saveto): # Avoid repeated downloads 353 | log.warn("Downloading %s", url) 354 | downloader = downloader_factory() 355 | downloader(url, saveto) 356 | return os.path.realpath(saveto) 357 | 358 | 359 | def _build_install_args(options): 360 | """ 361 | Build the arguments to 'python setup.py install' on the setuptools package. 362 | 363 | Returns list of command line arguments. 
364 | """ 365 | return ['--user'] if options.user_install else [] 366 | 367 | 368 | def _parse_args(): 369 | """Parse the command line for options.""" 370 | parser = optparse.OptionParser() 371 | parser.add_option( 372 | '--user', dest='user_install', action='store_true', default=False, 373 | help='install in user site package') 374 | parser.add_option( 375 | '--download-base', dest='download_base', metavar="URL", 376 | default=DEFAULT_URL, 377 | help='alternative URL from where to download the setuptools package') 378 | parser.add_option( 379 | '--insecure', dest='downloader_factory', action='store_const', 380 | const=lambda: download_file_insecure, default=get_best_downloader, 381 | help='Use internal, non-validating downloader' 382 | ) 383 | parser.add_option( 384 | '--version', help="Specify which version to download", 385 | default=DEFAULT_VERSION, 386 | ) 387 | parser.add_option( 388 | '--to-dir', 389 | help="Directory to save (and re-use) package", 390 | default=DEFAULT_SAVE_DIR, 391 | ) 392 | options, args = parser.parse_args() 393 | # positional arguments are ignored 394 | return options 395 | 396 | 397 | def _download_args(options): 398 | """Return args for download_setuptools function from cmdline args.""" 399 | return dict( 400 | version=options.version, 401 | download_base=options.download_base, 402 | downloader_factory=options.downloader_factory, 403 | to_dir=options.to_dir, 404 | ) 405 | 406 | 407 | def main(): 408 | """Install or upgrade setuptools and EasyInstall.""" 409 | options = _parse_args() 410 | archive = download_setuptools(**_download_args(options)) 411 | return _install(archive, _build_install_args(options)) 412 | 413 | if __name__ == '__main__': 414 | sys.exit(main()) 415 | -------------------------------------------------------------------------------- /licenses/LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Alexander Ian Hamilton 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, this 10 | list of conditions and the following disclaimer in the documentation and/or 11 | other materials provided with the distribution. 12 | * Neither the name of the Astropy Team nor the names of its contributors may be 13 | used to endorse or promote products derived from this software without 14 | specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 20 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 23 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 | 
-------------------------------------------------------------------------------- /licenses/README.rst: --------------------------------------------------------------------------------
1 | Licenses
2 | ========
3 | 
4 | This directory holds license and credit information for the affiliated package,
5 | works the affiliated package is derived from, and/or datasets.
6 | 
-------------------------------------------------------------------------------- /pytest.ini: --------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs = build *.egg-info astropy_helpers
3 | python_files = test_?*.py
4 | addopts = -p no:doctest
-------------------------------------------------------------------------------- /readthedocs.yml: --------------------------------------------------------------------------------
1 | conda:
2 |     file: .rtd-environment.yml
3 | 
4 | python:
5 |     setup_py_install: true
6 | 
-------------------------------------------------------------------------------- /setup.cfg: --------------------------------------------------------------------------------
1 | [build_sphinx]
2 | source-dir = docs
3 | build-dir = docs/_build
4 | all_files = 1
5 | 
6 | [upload_docs]
7 | upload-dir = docs/_build/html
8 | show-response = 1
9 | 
10 | [pytest]
11 | minversion = 2.2
12 | norecursedirs = build docs/_build docs/auto_examples examples
13 | doctest_plus = enabled
14 | 
15 | [ah_bootstrap]
16 | auto_use = True
17 | 
18 | [metadata]
19 | package_name = solarbextrapolation
20 | description = Solar atmosphere magnetic field extrapolation package.
21 | long_description = A package of tools relating to the extrapolation of solar atmosphere magnetic fields using magnetogram boundary data.
22 | author = Alex Hamilton
23 | author_email = a.i.hamilton@se11.qmul.ac.uk
24 | license = BSD
25 | url = http://docs.sunpy.org/projects/solarbextrapolation/
26 | edit_on_github = False
27 | github_project = sunpy/solarbextrapolation
28 | 
-------------------------------------------------------------------------------- /setup.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
3 | 
4 | import glob
5 | import os
6 | import sys
7 | 
8 | import ah_bootstrap
9 | from setuptools import setup
10 | 
11 | # A dirty hack to get around some early import/configuration ambiguities
12 | if sys.version_info[0] >= 3:
13 |     import builtins
14 | else:
15 |     import __builtin__ as builtins
16 | builtins._ASTROPY_SETUP_ = True
17 | 
18 | from astropy_helpers.setup_helpers import (
19 |     register_commands, adjust_compiler, get_debug_option, get_package_info)
20 | from astropy_helpers.git_helpers import get_git_devstr
21 | from astropy_helpers.version_helpers import generate_version_py
22 | 
23 | # -- Read the Docs Setup -------------------------------------------------------
24 | 
25 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
26 | 
27 | if on_rtd:
28 |     os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/solarbextrapolation/'
29 |     os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/solarbextrapolation/'
30 | 
31 | # ------------------------------------------------------------------------------
32 | 
33 | # Get some values from the setup.cfg
34 | try:
35 |     from ConfigParser import ConfigParser
36 | except ImportError:
37 |     from configparser import ConfigParser
38 | conf = ConfigParser()
39 | conf.read(['setup.cfg'])
40 | metadata = 
dict(conf.items('metadata')) 41 | 42 | PACKAGENAME = metadata.get('package_name', 'solarbextrapolation') 43 | DESCRIPTION = metadata.get('description', 'Extrapolation framework for Solar Magnetic Fields') 44 | AUTHOR = metadata.get('author', 'Alex Hamilton, Stuart Mumford, The SunPy Project') 45 | AUTHOR_EMAIL = metadata.get('author_email', 'sunpy@googlegroups.com') 46 | LICENSE = metadata.get('license', 'BSD 2-Clause') 47 | URL = metadata.get('url', 'http://docs.sunpy.org/projects/solarbextrapolation/') 48 | 49 | # Get the long description from the package's docstring 50 | __import__(PACKAGENAME) 51 | package = sys.modules[PACKAGENAME] 52 | LONG_DESCRIPTION = package.__doc__ 53 | 54 | # Store the package name in a built-in variable so it's easy 55 | # to get from other parts of the setup infrastructure 56 | builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME 57 | 58 | # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) 59 | VERSION = '0.1.dev' 60 | 61 | # Indicates if this version is a release version 62 | RELEASE = 'dev' not in VERSION 63 | 64 | if not RELEASE: 65 | VERSION += get_git_devstr(False) 66 | 67 | # Populate the dict of setup command overrides; this should be done before 68 | # invoking any other functionality from distutils since it can potentially 69 | # modify distutils' behavior. 70 | cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) 71 | 72 | # Adjust the compiler in case the default on this platform is to use a 73 | # broken one. 74 | adjust_compiler(PACKAGENAME) 75 | 76 | # Freeze build information in version.py 77 | generate_version_py(PACKAGENAME, VERSION, RELEASE, 78 | get_debug_option(PACKAGENAME)) 79 | 80 | # Treat everything in scripts except README.rst as a script to be installed 81 | scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) 82 | if os.path.basename(fname) != 'README.rst'] 83 | 84 | 85 | # Get configuration information from all of the various subpackages. 86 | # See the docstring for setup_helpers.update_package_files for more 87 | # details. 88 | package_info = get_package_info() 89 | 90 | # Add the project-global data 91 | package_info['package_data'].setdefault(PACKAGENAME, []) 92 | package_info['package_data'][PACKAGENAME].append('data/*') 93 | 94 | # Define entry points for command-line scripts 95 | entry_points = {} 96 | entry_points['console_scripts'] = [ 97 | 'astropy-package-template-example = packagename.example_mod:main', 98 | ] 99 | 100 | # Include all .c files, recursively, including those generated by 101 | # Cython, since we can not do this in MANIFEST.in with a "dynamic" 102 | # directory name. 103 | c_files = [] 104 | for root, dirs, files in os.walk(PACKAGENAME): 105 | for filename in files: 106 | if filename.endswith('.c'): 107 | c_files.append( 108 | os.path.join( 109 | os.path.relpath(root, PACKAGENAME), filename)) 110 | package_info['package_data'][PACKAGENAME].extend(c_files) 111 | 112 | # Note that requires and provides should not be included in the call to 113 | # ``setup``, since these are now deprecated. 
See this link for more details:
114 | # https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
115 | 
116 | setup(name=PACKAGENAME,
117 |       version=VERSION,
118 |       description=DESCRIPTION,
119 |       scripts=scripts,
120 |       install_requires=['astropy',
121 |                         'sunpy'],
122 |       author=AUTHOR,
123 |       author_email=AUTHOR_EMAIL,
124 |       license=LICENSE,
125 |       url=URL,
126 |       long_description=LONG_DESCRIPTION,
127 |       cmdclass=cmdclassd,
128 |       zip_safe=False,
129 |       use_2to3=True,
130 |       entry_points=entry_points,
131 |       **package_info
132 | )
133 | 
-------------------------------------------------------------------------------- /solarbextrapolation/__init__.py: --------------------------------------------------------------------------------
1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 | 
3 | """
4 | This is an Astropy affiliated package.
5 | """
6 | 
7 | # Affiliated packages may add whatever they like to this file, but
8 | # should keep this content at the top.
9 | # ----------------------------------------------------------------------------
10 | from ._astropy_init import *
11 | # ----------------------------------------------------------------------------
12 | 
13 | # For egg_info test builds to pass, put package imports here.
14 | #if not _ASTROPY_SETUP_:
15 | 
-------------------------------------------------------------------------------- /solarbextrapolation/_astropy_init.py: --------------------------------------------------------------------------------
1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 | 
3 | __all__ = ['__version__', '__githash__', 'test']
4 | 
5 | # this indicates whether or not we are in the package's setup.py
6 | try:
7 |     _ASTROPY_SETUP_
8 | except NameError:
9 |     from sys import version_info
10 |     if version_info[0] >= 3:
11 |         import builtins
12 |     else:
13 |         import __builtin__ as builtins
14 |     builtins._ASTROPY_SETUP_ = False
15 | 
16 | try:
17 |     from .version import version as __version__
18 | except ImportError:
19 |     __version__ = ''
20 | try:
21 |     from .version import githash as __githash__
22 | except ImportError:
23 |     __githash__ = ''
24 | 
25 | # set up the test command
26 | def _get_test_runner():
27 |     import os
28 |     from astropy.tests.helper import TestRunner
29 |     return TestRunner(os.path.dirname(__file__))
30 | 
31 | def test(package=None, test_path=None, args=None, plugins=None,
32 |          verbose=False, pastebin=None, remote_data=False, pep8=False,
33 |          pdb=False, coverage=False, open_files=False, **kwargs):
34 |     """
35 |     Run the tests using `py.test`_. A proper set
36 |     of arguments is constructed and passed to `pytest.main`_.
37 | 
38 |     .. _py.test: http://pytest.org/latest/
39 |     .. _pytest.main: http://pytest.org/latest/builtin.html#pytest.main
40 | 
41 |     Parameters
42 |     ----------
43 |     package : str, optional
44 |         The name of a specific package to test, e.g. 'io.fits' or 'utils'.
45 |         If nothing is specified all default tests are run.
46 | 
47 |     test_path : str, optional
48 |         Specify location to test by path. May be a single file or
49 |         directory. Must be specified absolutely or relative to the
50 |         calling directory.
51 | 
52 |     args : str, optional
53 |         Additional arguments to be passed to pytest.main_ in the ``args``
54 |         keyword argument.
55 | 
56 |     plugins : list, optional
57 |         Plugins to be passed to pytest.main_ in the ``plugins`` keyword
58 |         argument.
59 | 
60 |     verbose : bool, optional
61 |         Convenience option to turn on verbose output from py.test_. Passing
62 |         True is the same as specifying ``'-v'`` in ``args``.
63 | 
64 |     pastebin : {'failed','all',None}, optional
65 |         Convenience option for turning on py.test_ pastebin output. Set to
66 |         ``'failed'`` to upload info for failed tests, or ``'all'`` to upload
67 |         info for all tests.
68 | 
69 |     remote_data : bool, optional
70 |         Controls whether to run tests marked with @remote_data. These
71 |         tests use online data and are not run by default. Set to True to
72 |         run these tests.
73 | 
74 |     pep8 : bool, optional
75 |         Turn on PEP8 checking via the `pytest-pep8 plugin
76 |         <http://pypi.python.org/pypi/pytest-pep8>`_ and disable normal
77 |         tests. Same as specifying ``'--pep8 -k pep8'`` in ``args``.
78 | 
79 |     pdb : bool, optional
80 |         Turn on PDB post-mortem analysis for failing tests. Same as
81 |         specifying ``'--pdb'`` in ``args``.
82 | 
83 |     coverage : bool, optional
84 |         Generate a test coverage report. The result will be placed in
85 |         the directory htmlcov.
86 | 
87 |     open_files : bool, optional
88 |         Fail when any tests leave files open. Off by default, because
89 |         this adds extra run time to the test suite. Requires the
90 |         `psutil <https://pypi.python.org/pypi/psutil>`_ package.
91 | 
92 |     parallel : int, optional
93 |         When provided, run the tests in parallel on the specified
94 |         number of CPUs. If parallel is negative, it will use all
95 |         the cores on the machine. Requires the
96 |         `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_ plugin
97 |         installed. Only available when using Astropy 0.3 or later.
98 | 
99 |     kwargs
100 |         Any additional keywords passed into this function will be passed
101 |         on to the astropy test runner. This allows use of test-related
102 |         functionality implemented in later versions of astropy without
103 |         explicitly updating the package template.
104 | 
105 |     """
106 |     test_runner = _get_test_runner()
107 |     return test_runner.run_tests(
108 |         package=package, test_path=test_path, args=args,
109 |         plugins=plugins, verbose=verbose, pastebin=pastebin,
110 |         remote_data=remote_data, pep8=pep8, pdb=pdb,
111 |         coverage=coverage, open_files=open_files, **kwargs)
112 | 
113 | if not _ASTROPY_SETUP_:
114 |     import os
115 |     from warnings import warn
116 |     from astropy import config
117 | 
118 |     # add these here so we only need to cleanup the namespace at the end
119 |     config_dir = None
120 | 
121 |     if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False):
122 |         config_dir = os.path.dirname(__file__)
123 |         config_template = os.path.join(config_dir, __package__ + ".cfg")
124 |         if os.path.isfile(config_template):
125 |             try:
126 |                 config.configuration.update_default_config(
127 |                     __package__, config_dir, version=__version__)
128 |             except TypeError as orig_error:
129 |                 try:
130 |                     config.configuration.update_default_config(
131 |                         __package__, config_dir)
132 |                 except config.configuration.ConfigurationDefaultMissingError as e:
133 |                     wmsg = (e.args[0] + " Cannot install default profile. If you are "
134 |                             "importing from source, this is expected.")
135 |                     warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
136 |                     del e
137 |                 except:
138 |                     raise orig_error
139 | 
-------------------------------------------------------------------------------- /solarbextrapolation/analyticalmodels/__init__.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from .base import AnalyticalModel
4 | 
5 | # This will need to be changed once I have the TDE model within an AnalyticalModel class.
6 | #from .titov_demoulin_equilibrium import titov_demoulin_equilibrium
7 | 
8 | __all__ = ['AnalyticalModel']  #, 'titov_demoulin_equilibrium']
9 | 
-------------------------------------------------------------------------------- /solarbextrapolation/analyticalmodels/base.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Sep 28 19:30:22 2015
4 | 
5 | @author: alex_
6 | """
7 | 
8 | # General Imports
9 | import matplotlib as mpl
10 | mpl.use('TkAgg')  # Force mpl backend not to use qt. Else we have a conflict.
11 | import numpy as np
12 | #import pickle
13 | import time
14 | from datetime import datetime
15 | #from collections import namedtuple
16 | import warnings
17 | import inspect
18 | #from sunpy.sun._constants import physical_constants as con
19 | 
20 | # SunPy imports
21 | import sunpy.map
22 | from sunpy.sun import constants, sun
23 | from sunpy.time import parse_time, is_time
24 | from astropy.table import Table
25 | import astropy.units as u
26 | # from mayavi import mlab
27 | 
28 | # Internal imports
29 | #from solarbextrapolation.utilities import si_this_map
30 | from solarbextrapolation.map3dclasses import Map3D
31 | 
32 | class AnalyticalModel(object):
33 |     """
34 |     Common class for the development of analytical models of magnetic fields.
35 |     Use the models to evaluate the accuracy of an extrapolation routine with
36 |     the figures of merit.
37 |     """
38 |     def __init__(self, **kwargs):
39 |         # Default grid shape and physical ranges for the volume the model covers.
40 |         self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel))  # (x,y,z)
41 |         self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
42 |         self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
43 |         self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))
44 | 
45 |         # Metadata
46 |         self.meta = {'ZNAXIS': 3, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value, 'ZNAXIS3': self.shape[2].value}
47 |         self.meta['analytical_model_notes'] = kwargs.get('notes', '')
48 |         self.meta['BUNIT'] = kwargs.get('bunit', u.T)
49 |         # CRVALn, CDELTn and NAXIS (already in meta) used for storing range in 2D fits files.
50 |         self.filepath = kwargs.get('filepath', None)
51 |         self.routine = kwargs.get('analytical_model_routine', type(self))
52 | 
53 |         # Default 3D magnetic field
54 |         #X,Y,Z = np.zeros(self.shape.value), np.zeros(self.shape.value), np.zeros(self.shape.value)
55 |         npField = np.zeros([3] + self.shape.value.tolist())
56 |         self.field = Map3D(npField, self.meta)
57 | 
58 |         # Default magnetic field on boundary
59 |         magnetogram = np.zeros(self.shape[0:2].value)
60 |         magnetogram_header = {'ZNAXIS': 2, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value}
61 |         self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))
62 | 
63 |     def _generate_field(self, **kwargs):
64 |         """
65 |         The method for running a model to generate the field.
66 |         This is the primary method to be edited in subclasses for specific
67 |         model implementations.
68 |         """
69 |         # Model code goes here.
70 |         arr_4d = np.zeros([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[1], 1, 3])
71 | 
72 |         # Turn the 4D array into a Map3D object.
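        # (Added note, inferred from usage elsewhere in this package rather
        # than stated here: the first two axes are the boundary pixel axes,
        # the third is height, and the last axis holds the three vector
        # components, e.g. field[:, :, 0, 2] is the z-component of the
        # bottom layer.)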
73 |         map_output = Map3D( arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange, xobsrange=self.xrange, yobsrange=self.yrange )
74 | 
75 |         return map_output
76 | 
77 |     def generate(self, **kwargs):
78 |         """
79 |         Method to be called to calculate the vector field and return as a Map3D object.
80 |         Times and saves the extrapolation where applicable.
81 |         """
82 |         # Record the time and duration of the extrapolation.
83 |         dt_start = datetime.now()
84 |         tim_start = time.time()
85 |         arr_output = self._generate_field(**kwargs)
86 |         tim_duration = time.time() - tim_start
87 | 
88 |         # Add the duration and time to the meta/header data.
89 |         arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
90 |         arr_output.meta['extrapolator_duration'] = tim_duration
91 |         arr_output.meta['extrapolator_duration_unit'] = u.s
92 | 
93 |         # Save the Map3D if a filepath has been set. (to avoid losing work)
94 |         if self.filepath:
95 |             arr_output.save(self.filepath)
96 | 
97 |         # Add the output map to the object and return.
98 |         self.map = arr_output
99 |         return arr_output
100 | 
101 |     def to_los_magnetogram(self, **kwargs):
102 |         """
103 |         Calculate the LoS magnetogram as a SunPy map and return.
104 | 
105 |         Generally this will require that you have run generate(self, ``**kwargs``)
106 |         first, so in the base class this is checked, but it is not always the
107 |         case as some models may allow this to be determined without calculating
108 |         the full field.
109 | 
110 |         .. I'm not sure if this is a good default.
111 |         """
112 |         return self.magnetogram
113 | 
114 |     def to_vec_magnetogram(self, **kwargs):
115 |         """
116 |         Calculate the vector field as a SunPy map and return.
117 | 
118 |         Generally this will require that you have run ``generate(self, **kwargs)``
119 |         first, so in the base class this is checked, but it is not always the
120 |         case as some models may allow this to be determined without calculating
121 |         the full field. (I'm not sure if this is a good default.)
122 | """ 123 | return self.magnetogram 124 | 125 | '''if __name__ == '__main__': 126 | # User-specified parameters 127 | tup_shape = ( 20, 20, 20 ) 128 | x_range = ( -80.0, 80 ) * u.Mm 129 | y_range = ( -80.0, 80 ) * u.Mm 130 | z_range = ( 0.0, 120 ) * u.Mm 131 | 132 | # Derived parameters (make SI where applicable) 133 | x_0 = x_range[0].to(u.m).value 134 | Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value 135 | x_size = Dx * tup_shape[0] 136 | y_0 = y_range[0].to(u.m).value 137 | Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value 138 | y_size = Dy * tup_shape[1] 139 | z_0 = z_range[0].to(u.m).value 140 | Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value 141 | z_size = Dy * tup_shape[2] 142 | 143 | 144 | 145 | 146 | # Define the extrapolator as a child of the Extrapolators class 147 | class AnaOnes(AnalyticalModel): 148 | def __init__(self, **kwargs): 149 | super(AnaOnes, self).__init__(**kwargs) 150 | 151 | def _generate_field(self, **kwargs): 152 | # Adding in custom parameters to the metadata 153 | self.meta['analytical_model_routine'] = 'Ones Model' 154 | 155 | # Generate a trivial field and return (X,Y,Z,Vec) 156 | arr_4d = np.ones(self.shape.value.tolist() + [3]) 157 | return Map3D( arr_4d, self.meta ) 158 | 159 | 160 | # Setup an anylitical model 161 | xrange = u.Quantity([ 50, 300] * u.arcsec) 162 | yrange = u.Quantity([-350, -100] * u.arcsec) 163 | zrange = u.Quantity([ 0, 250] * u.arcsec) 164 | 165 | aAnaMod = AnaOnes() 166 | aMap3D = aAnaMod.generate() 167 | 168 | 169 | # Visualise the 3D vector field 170 | from solarbextrapolation.visualisation_functions import visualise 171 | """ 172 | fig = visualise(aMap3D, 173 | show_boundary_axes=False, 174 | boundary_units=[1.0*u.arcsec, 1.0*u.arcsec], 175 | show_volume_axes=True, 176 | debug=False) 177 | """ 178 | fig = visualise(aMap3D, 179 | show_boundary_axes=False, 180 | show_volume_axes=False, 181 | debug=False) 182 | mlab.show() 183 | 184 | 185 | """ 186 | # For B_I field only, to save re-creating this interpolator for every cell. 
187 |     A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000)
188 | 
189 |     field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
190 |     for i in range(0, tup_shape[0]):
191 |         for j in range(0, tup_shape[1]):
192 |             for k in range(0, tup_shape[2]):
193 |                 # Position of this point in space
194 |                 x_pos = x_0 + ( i + 0.5 ) * Dx
195 |                 y_pos = y_0 + ( j + 0.5 ) * Dy
196 |                 z_pos = z_0 + ( k + 0.5 ) * Dz
197 | 
198 |                 #field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
199 |                 #field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
200 |                 #field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
201 |                 field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
202 | 
203 | 
204 | 
205 | 
206 |     map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
207 |     np_boundary_data = field[:,:,0,2].T
208 |     dummyDataToMap(np_boundary_data, x_range, y_range)
209 | 
210 |     #dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
211 |     visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
212 |     """'''
213 | 
-------------------------------------------------------------------------------- /solarbextrapolation/analyticalmodels/titov_demoulin_equilibrium.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Aug 21 00:45:30 2015
4 | 
5 | This code is intended to implement an analytical solution for a flux loop.
6 | The Titov Demoulin Equilibrium is from the paper:
7 |     Basic topology of twisted magnetic configurations in solar flares
8 |     V.S. Titov and P. Démoulin
9 |     1999
10 | 
11 | The magnetic field over the region is created as the sum of 3 components:
12 |     B_I: Field over circular area a due to current I
13 |     B_q: Field created by the active region spots +q and -q that lie on I_0
14 |     B_theta: Field created by the current I_0
15 | With:
16 |     I_0: the line of sub-surface current going along the x-axis.
17 | With parameters:
18 |     d: depth of I_0 below the photosphere.
19 |     L: distances of q_plus and q_minus from the centre of I_0.
20 |     R: radius of the large circle of current I about I_0.
21 | 
22 | Useful numpy functions:
23 |     np.add(x,y), np.subtract(x,y): element-wise addition/subtraction.
24 | 
25 | @author: alex_
26 | """
27 | 
28 | import numpy as np
29 | import math
30 | import scipy
31 | from scipy import interpolate
32 | from astropy import units as u
33 | 
34 | 
35 | # Imports for numba JIT compilation
36 | from numba import double
37 | from numba.decorators import jit, autojit
38 | 
39 | # Module Imports
40 | ###from solarbextrapolation import *
41 | from classes import *
42 | from utilities import *
43 | from example_data_generator import *
44 | from visualisation_functions import *
45 | 
46 | # Universal values.
47 | from scipy.constants import mu_0, pi, au
48 | # mu_0 = 1.256637061 * 10^-6 # T m A^-1
49 | 
50 | 
51 | # Loop Configuration Parameters
52 | qua_TD_L = 50.0*10**6 * u.m  # m # Distance of +q/-q from centre of volume.
53 | qua_TD_d = 50.0*10**6 * u.m  # m # Depth of I_0 below photosphere.
54 | qua_TD_R = 85.0*10**6 * u.m  # m # Radius of circle of current I (makes area a)
55 | qua_TD_q = 100.0 * 10**12 * u.m * u.m * u.T  # T m^2 # ABS Charge of +q and -q.
56 | qua_TD_a = 31.0*10**6 * u.m  # m # Radius of uniform current I.
57 | 
58 | qua_TD_I_0 = -7.0*10**12 * u.A  # A # - 7.0 TA #
59 | flo_TD_li = 1.0 / 2.0  # for uniform distribution of current over toroidal flux tube.
60 | 
61 | # Convert all these into SI units
62 | flo_TD_L = qua_TD_L.to(u.m).value
63 | flo_TD_d = qua_TD_d.to(u.m).value
64 | flo_TD_R = qua_TD_R.to(u.m).value
65 | flo_TD_q = qua_TD_q.value  # This doesn't convert into SI units
66 | flo_TD_a = qua_TD_a.to(u.m).value
67 | flo_TD_I_0 = qua_TD_I_0.to(u.A).value
68 | flo_TD_I = 8.0 * pi * flo_TD_q * flo_TD_L * flo_TD_R * (flo_TD_R**2.0 + flo_TD_L**2)**(-3.0/2.0) / ( mu_0 * (np.log(8.0 * flo_TD_R / flo_TD_a) - (3.0/2.0) + (flo_TD_li / 2.0)) )
69 | 
70 | 
71 | """
72 | TD_L = 50.0*10**6 # m # Distance of +q/-q from centre of volume.
73 | TD_d = 50.0*10**6 # m # Depth of I_0 below photosphere.
74 | TD_R = 85.0*10**6 # m # Radius of circle of current I (makes area a)
75 | TD_q = 100.0*10**12 # T m^2 # ABS Charge of +q and -q.
76 | TD_a = 31.0*10**6 # m # Radius of uniform current I.
77 | 
78 | TD_I_0 = -7.0*10**12 # A # - 7.0 TA #
79 | TD_li = 1.0/2.0 # for uniform distribution of current over toroidal flux tube.
80 | TD_I = 8.0 * pi * TD_q * TD_L * TD_R * (TD_R**2.0 + TD_L**2)**(-3.0/2.0) / ( mu_0 * (np.log(8.0 * TD_R / TD_a) - (3.0/2.0) + (TD_li / 2.0)) ) # 11 # 11000 GA # Equilibrium
81 | 
82 | 
83 | # Unit strings, for filenames
84 | TD_L_unit, TD_d_unit, TD_R_unit, TD_a_unit = 'm', 'm', 'm', 'm'
85 | TD_q_unit = 'Tm2'
86 | TD_I_0_unit = 'A'
87 | """
88 | 
89 | 
90 | ###############################################################################
91 | #                                                                             #
92 | #                             B_theta components                              #
93 | #                                                                             #
94 | ###############################################################################
95 | 
96 | 
97 | # The Heaviside chi(X) function
98 | # Returns:
99 | #   chi(X) = 1 if X > 0
100 | #   chi(X) = 0 otherwise
101 | # (the implementation below simply returns 0.0 for X <= 0).
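# Added usage note (illustrative, not from the original source):
#   chi(2.0) returns 1.0, while chi(0.0) and chi(-1.0) both return 0.0.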
102 | def chi(X, debug = 0):
103 |     if X > 0.0:
104 |         return 1.0
105 |     else:
106 |         return 0.0
107 | 
108 | # Safe Heaviside chi(X) function
109 | # Returns:
110 | #   chi_safe(X, val) = val if X > 0
111 | #   chi_safe(X, val) = 0 otherwise
112 | # This is designed to correct for errors where you have X <= 0 and val = NaN,
113 | # which causes:
114 | #   chi(X) * val = NaN
115 | # rather than the preferred:
116 | #   chi(X) * val = 0.0
117 | def chi_safe(X, val, debug = 0):
118 |     out = 0.0
119 |     if X > 0.0:
120 |         out = val
121 | 
122 |     # Debugging
123 |     if debug > 0:
124 |         print('chi_safe(' + str(X) + '): ' + str(out))
125 |     # Output
126 |     return out
127 | 
128 | # r_perpendicular
129 | def r_perpendicular_scalar(y, z, d, debug = 0):
130 |     # Output
131 |     out = ( y**2.0 + (z + d)**2.0 )**(0.5)
132 | 
133 |     # Debugging
134 |     if debug > 0:
135 |         print('r_perpendicular_scalar: ' + str(out))
136 |     # Output
137 |     return out
138 | 
139 | #
140 | def rho(x, y, z, R, d, debug = 0):
141 |     # Output
142 |     out = ( x**2.0 + (r_perpendicular_scalar(y, z, d, debug - 2) - R)**2.0 )**(0.5)
143 | 
144 |     # Debugging
145 |     if debug > 0:
146 |         print('rho: ' + str(out))
147 |     # Output
148 |     return out
149 | 
150 | # theta_hat function
151 | def theta_hat(y, z, d, debug = 0):
152 |     # r_perp
153 |     r_perp = r_perpendicular_scalar(y, z, d, debug - 2)
154 | 
155 |     # Components
156 |     theta_hat_x = 0.0
157 |     theta_hat_y = - ( ( z + d ) / r_perp )
158 |     theta_hat_z = y / r_perp
159 | 
160 |     # Output
161 |     out = np.array([ theta_hat_x, theta_hat_y, theta_hat_z ])
162 | 
163 |     # Debugging
164 |     if debug > 0:
165 |         print('theta_hat: ' + str(out))
166 |     # Output
167 |     return out
168 | 
169 | 
170 | # Note, independent of x???
171 | def B_theta(x, y, z, a, d, R, I, I_0, debug = 0):
172 |     # Find rho
173 |     rho_val = rho(x, y, z, R, d, debug - 2)
174 | 
175 |     # The parts.
176 |     part_1 = ( mu_0 * I_0 ) / (2.0 * pi )
177 |     part_2a = R**(-2.0)
178 |     part_2b = (2.0 ) / ( a**2.0 )
179 |     part_2c = (I**2.0) / (I_0**2.0)
180 |     part_2d = 1.0 - ((rho_val**2.0)/(a**2.0))
181 |     part_2 = (part_2a + chi_safe(a - rho_val, part_2b * part_2c * part_2d))**(0.5)
182 |     part_3 = (y**2.0 + (z + d)**2.0)**(-0.5)
183 |     part_4 = R**(-1.0)
184 | 
185 |     # Now put it together.
186 |     scalar = part_1 * (part_2 + part_3 - part_4)
187 |     vector = theta_hat(y, z, d)
188 | 
189 |     # Output
190 |     out = np.array([scalar * vector[0], scalar * vector[1], scalar * vector[2]])
191 | 
192 |     # Debugging
193 |     if debug > 0:
194 |         print('B_theta: ' + str(out))
195 |     if debug > 1:
196 |         print('    B_theta part_1:  ' + str(part_1))
197 |         print('    B_theta part_2:  ' + str(part_2))
198 |         print('    B_theta part_2a: ' + str(part_2a))
199 |         print('    B_theta part_2b: ' + str(part_2b))
200 |         print('    B_theta part_2c: ' + str(part_2c))
201 |         print('    B_theta part_2d: ' + str(part_2d))
202 |         print('    B_theta part_3:  ' + str(part_3))
203 |         print('    B_theta part_4:  ' + str(part_4))
204 |         print('    B_theta scalar:  ' + str(scalar))
205 |         print('    B_theta vector:  ' + str(vector) + '\n')
206 |     # Output
207 |     return out
208 | 
209 | 
210 | ###############################################################################
211 | #                                                                             #
212 | #                               B_q components                                #
213 | #                                                                             #
214 | ###############################################################################
215 | 
216 | # sign is either +1.0 or -1.0
217 | def r_plusminus(x, y, z, L, d, sign, debug = 0):
218 |     # Components
219 |     r_x = x - sign * L
220 |     r_y = y * 1.0
221 |     r_z = z + d
222 | 
223 |     # Output the position vector.
224 |     out = np.array([ r_x, r_y, r_z ])
225 | 
226 |     # Debugging
227 |     if debug > 0:
228 |         print('r_plusminus(..., ' + str(sign) + '): ' + str(out))
229 |     # Output
230 |     return out
231 | 
232 | # Returns the B_q vector (numpy array) at given x, y, z.
233 | def B_q(x, y, z, L, d, q, debug = 0):
234 |     # Getting the r+- vectors
235 |     r_plus = r_plusminus(x, y, z, L, d, 1.0, debug - 2)
236 |     r_minus = r_plusminus(x, y, z, L, d, -1.0, debug - 2)
237 | 
238 |     # Get the modulus of these
239 |     mod_r_plus = (r_plus[0]**2.0 + r_plus[1]**2.0 + r_plus[2]**2.0)**(0.5)
240 |     mod_r_minus = (r_minus[0]**2.0 + r_minus[1]**2.0 + r_minus[2]**2.0)**(0.5)
241 | 
242 |     # Get the two fractions from equation (20)
243 |     frac_r_plus = r_plus / (mod_r_plus**3.0)
244 |     frac_r_minus = r_minus / (mod_r_minus**3.0)
245 | 
246 |     # Output B_q vector.
247 |     out = q * np.subtract(frac_r_plus, frac_r_minus)
248 | 
249 |     # Debugging
250 |     if debug > 0:
251 |         print('B_q: ' + str(out))
252 |     if debug > 1:
253 |         print('    B_q r_plus:       ' + str(r_plus))
254 |         print('    B_q r_minus:      ' + str(r_minus))
255 |         print('    B_q mod_r_plus:   ' + str(mod_r_plus))
256 |         print('    B_q mod_r_minus:  ' + str(mod_r_minus))
257 |         print('    B_q frac_r_plus:  ' + str(frac_r_plus))
258 |         print('    B_q frac_r_minus: ' + str(frac_r_minus) + '\n')
259 |     # Output
260 |     return out
261 | 
262 | 
263 | 
264 | 
265 | ###############################################################################
266 | #                                                                             #
267 | #                               B_I components                                #
268 | #                                                                             #
269 | ###############################################################################
270 | 
271 | # Note, we use the scipy complete elliptic integrals of the 1st and 2nd kind
272 | #   scipy.special.ellipk(flo_m) # first kind
273 | #   scipy.special.ellipe(flo_m) # second kind
274 | 
275 | 
276 | # r_perpendicular as a vector
277 | def r_perpendicular_vector(y, z, d, debug = 0):
278 |     # Output
279 |     out = np.array([ 0.0, (z + d)*1.0, y*1.0])
280 | 
281 |     # Debugging
282 |     if debug > 0:
283 |         print('r_perpendicular_vector: ' + str(out))
284 |     # Output
285 |     return out
286 | 
287 | 
288 | def k_func(x, y, z, d, R, debug = 0):
289 |     # Parameters
290 |     r_perp = r_perpendicular_scalar(y, z, d, debug - 2)
291 | 
292 |     # fraction inside root
293 |     frac = (r_perp * R)/((r_perp + R)**2.0 + x**2.0)
294 | 
295 |     # Output
296 |     out = 2 * (frac)**(0.5)
297 | 
298 |     # Debugging
299 |     if debug > 0:
300 |         print('k_func: ' + str(out))
301 |     if debug > 1:
302 |         print('    k_func r_perp: ' + str(r_perp))
303 |         print('    k_func frac:   ' + str(frac))
304 |     # Output
305 |     return out
306 | 
307 | 
308 | def k_a_func(y, z, d, R, a, debug = 0):
309 |     # Parameters
310 |     r_perp = r_perpendicular_scalar(y, z, d, debug - 2)
311 | 
312 |     # fraction inside root
313 |     frac = (r_perp * R)/(4.0 * r_perp * R + a**2.0)
314 | 
315 |     # Output
316 |     out = 2 * (frac)**(0.5)
317 | 
318 |     # Debugging
319 |     if debug > 0:
320 |         print('k_a_func: ' + str(out))
321 |     if debug > 1:
322 |         print('    k_a_func r_perp: ' + str(r_perp))
323 |         print('    k_a_func frac:   ' + str(frac))
324 |     # Output
325 |     return out
326 | 
327 | 
328 | def A_of_k(k, debug = 0):
329 |     # Output
330 |     out = (k**(-1.0))*((2.0 - k**2.0) * scipy.special.ellipk(k))
331 | 
332 |     # Debugging
333 |     if debug > 0:
334 |         print('A_of_k(' + str(k) + '): ' + str(out))
335 |     # Output
336 |     return out
337 | 
338 | def A_prime_of_k(k, debug = 0):
339 |     # Building the parts
340 |     numerator_1 = (2.0 - k**2.0) * scipy.special.ellipe(k)
341 |     numerator_2 = 2.0 * (1.0 - k**2.0) * scipy.special.ellipk(k)
342 |     denominator = k**2.0 * (1.0 - k**2.0)
343 | 
344 |     # Output
345 |     out = (numerator_1 - numerator_2) / denominator
346 | 
347 |     # Debugging
348 |     if debug > 0:
349 |         print('A_prime_of_k: ' + str(out))
350 |     if debug > 1:
351 |         print('  A_prime_of_k numerator_1: ' + str(numerator_1))
352 |         print('  A_prime_of_k numerator_2: ' + str(numerator_2))
353 |         print('  A_prime_of_k denominator: ' + str(denominator))
354 |     # Output
355 |     return out
356 | 
357 | def A_tilde_I_in(k, k_a, r_perp, R, I, debug = 0):
358 |     # Building the parts
359 |     part_1 = (mu_0 * I)/(2.0 * pi)
360 |     part_2 = ((R)/(r_perp))**(0.5)
361 |     part_3 = A_of_k(k_a, debug - 2) + A_prime_of_k(k_a, debug - 2) * (k - k_a)
362 | 
363 |     # Output
364 |     out = part_1 * part_2 * part_3
365 | 
366 |     # Debugging
367 |     if debug > 0:
368 |         print('A_tilde_I_in: ' + str(out))
369 |     if debug > 1:
370 |         print('  A_tilde_I_in part_1: ' + str(part_1))
371 |         print('  A_tilde_I_in part_2: ' + str(part_2))
372 |         print('  A_tilde_I_in part_3: ' + str(part_3))
373 |     # Output
374 |     return out
375 | 
376 | def A_I_ex(k, r_perp, R, I, debug = 0):
377 |     # Building the parts
378 |     part_1 = (mu_0 * I)/(2.0 * pi)
379 |     part_2 = ((R)/(r_perp))**(0.5)
380 |     part_3 = A_of_k(k)
381 | 
382 |     # Output
383 |     out = part_1 * part_2 * part_3
384 | 
385 |     # Debugging
386 |     if debug > 0:
387 |         print('A_I_ex: ' + str(out))
388 |     if debug > 1:
389 |         print('  A_I_ex part_1: ' + str(part_1))
390 |         print('  A_I_ex part_2: ' + str(part_2))
391 |         print('  A_I_ex part_3: ' + str(part_3))
392 |     # Output
393 |     return out
394 | 
395 | def A_I(x, y, z, R, a, d, I, debug = 0):
396 |     # Values
397 |     rho_val = rho(x, y, z, R, d, debug - 2)
398 |     k = k_func(x, y, z, d, R, debug - 2)
399 |     k_a = k_a_func(y, z, d, R, a, debug - 2)
400 |     r_perp = r_perpendicular_scalar(y, z, d, debug - 2)
401 | 
402 |     # Parts
403 |     part_1 = chi_safe(a - rho_val, A_tilde_I_in(k, k_a, r_perp, R, I))
404 |     part_2 = chi_safe(rho_val - a, A_I_ex(k, r_perp, R, I))
405 |     out = part_1 + part_2
406 | 
407 |     # Debugging
408 |     if debug > 0:
409 |         print('A_I: ' + str(out))
410 |     if debug > 1:
411 |         print('  A_I part_1: ' + str(part_1))
412 |         print('  A_I part_2: ' + str(part_2))
413 |     # Output
414 |     return out
415 | 
416 | # Function to return an interpolator object for A_I as a function of r_perp.
417 | def interpolate_A_I_from_r_perp(R, a, d, I, r_perp_max, resolution = 100000, debug = 0):
418 |     # parameters to pass in:
419 |     dr_perp = 1.0 * r_perp_max / resolution
420 | 
421 |     # 1D array of vectors for A_I(r_perp)
422 |     npm_A_I = np.zeros((resolution, 2))
423 | 
424 |     # If we lock x = 0, z = -d then we know r_perp = y
425 |     x = 0.0
426 |     z = - d
427 |     dy = dr_perp
428 |     for i in range(1, resolution): # Can't start at 0.
429 |         y = i * dy
430 |         npm_A_I[i][0] = y
431 |         npm_A_I[i][1] = A_I(x, y, z, R, a, d, I)
432 |     # Make the first row very close to 0.
433 |     npm_A_I[0][0] = 0.001
434 |     npm_A_I[0][1] = A_I(x, npm_A_I[0][0], z, R, a, d, I)
435 | 
436 |     # Make/return the interpolation object.
437 |     interpolator = scipy.interpolate.interp1d(npm_A_I[:,0], npm_A_I[:,1], kind='linear', fill_value=0.0, bounds_error=False)
438 |     return interpolator
439 | 
440 | 
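# A minimal, self-contained sketch of the central-difference scheme used by
# the two derivative helpers below (hypothetical test function and step size;
# not part of the original module):
def _example_central_difference():
    # Differentiate a known smooth function and check against the exact value.
    f, x0, h = np.sin, 0.3, 1.0e-5
    approx = (f(x0 + h) - f(x0 - h)) / (2.0 * h)
    assert abs(approx - np.cos(x0)) < 1.0e-8  # truncation error scales as h**2
    return approx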
441 | # dr_perp should be notably smaller than the grid size in the original 3D space.
442 | def dA_I_dr_perp(r_perp, dr_perp, R, a, d, I, interpolator, debug = 0):
443 |     # Get my 2 values of A_I
444 |     A_Ia = interpolator(r_perp - dr_perp)
445 |     A_Ib = interpolator(r_perp + dr_perp)
446 | 
447 |     # Numerical differentiation
448 |     out = (A_Ib - A_Ia) / (2.0 * dr_perp)
449 | 
450 |     # Debugging
451 |     if debug > 0:
452 |         print('dA_I_dr_perp: ' + str(out))
453 |     if debug > 1:
454 |         print('  dA_I_dr_perp A_Ia: ' + str(A_Ia))
455 |         print('  dA_I_dr_perp A_Ib: ' + str(A_Ib))
456 |     return out
457 | 
458 | 
459 | # The numerical derivative of A_I
460 | def dA_I_dx(x, y, z, R, a, d, I, Dx, debug = 0):
461 |     A_I_x_minus_1 = A_I(x - Dx, y, z, R, a, d, I, debug - 2)
462 |     A_I_x_plus_1 = A_I(x + Dx, y, z, R, a, d, I, debug - 2)
463 | 
464 |     # Out
465 |     out = (A_I_x_plus_1 - A_I_x_minus_1)/(2.0*Dx)
466 | 
467 |     # Debugging
468 |     if debug > 0:
469 |         print('dA_I_dx: ' + str(out))
470 |     if debug > 1:
471 |         print('  dA_I_dx A_I_x_minus_1: ' + str(A_I_x_minus_1))
472 |         print('  dA_I_dx A_I_x_plus_1:  ' + str(A_I_x_plus_1))
473 |     # Output
474 |     return out
475 | 
476 | 
477 | # The resulting B_I function.
478 | def B_I(x, y, z, R, a, d, I, Dx, A_I_r_perp_interpolator, debug = 0):
479 |     # Values
480 |     A_I_val = A_I(x, y, z, R, a, d, I, debug - 2)
481 |     dA_I_dx_val = dA_I_dx(x, y, z, R, a, d, I, Dx, debug - 2)
482 |     r_perp = r_perpendicular_scalar(y, z, d, debug - 2)
483 |     r_perp_vec = r_perpendicular_vector(y, z, d, debug - 2)
484 | 
485 |     # To get dA_I_dr_perp we use interpolation to get A_I(r_perp).
486 |     dA_I_dr_perp_val = dA_I_dr_perp(r_perp, Dx * 0.2, R, a, d, I, A_I_r_perp_interpolator, debug - 2)
487 | 
488 |     # Parts
489 |     part_1 = - dA_I_dx_val * ( r_perp_vec / r_perp )
490 |     # print('1: ' + str(part_1))
491 |     part_2 = np.array([dA_I_dr_perp_val + ( A_I_val / r_perp ), 0, 0])
492 |     # print('2: ' + str(part_2) + '\n')
493 | 
494 |     # Output
495 |     out = np.add(part_1, part_2)
496 | 
497 |     # Debugging
498 |     if debug > 0:
499 |         print('B_I: ' + str(out))
500 |     if debug > 1:
501 |         print('  B_I part_1: ' + str(part_1))
502 |         print('  B_I part_2: ' + str(part_2) + '\n')
503 |     return out
504 | 
505 | 
506 | if __name__ == '__main__':
507 |     # User-specified parameters
508 |     tup_shape = ( 20, 20, 20 )
509 |     x_range = ( -80.0, 80 ) * u.Mm
510 |     y_range = ( -80.0, 80 ) * u.Mm
511 |     z_range = ( 0.0, 120 ) * u.Mm
512 | 
513 |     # Derived parameters (make SI where applicable)
514 |     x_0 = x_range[0].to(u.m).value
515 |     Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
516 |     x_size = Dx * tup_shape[0]
517 |     y_0 = y_range[0].to(u.m).value
518 |     Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
519 |     y_size = Dy * tup_shape[1]
520 |     z_0 = z_range[0].to(u.m).value
521 |     Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
522 |     z_size = Dz * tup_shape[2]
523 | 
524 |     # For B_I field only, to save re-creating this interpolator for every cell.
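    # A_I() needs the elliptic integrals on every evaluation, so it is sampled
    # once along r_perp (at x = 0, z = -d, where r_perp = y) and the resulting
    # interpolated curve is what dA_I_dr_perp() differentiates for every cell.
    # Illustrative use with hypothetical arguments:
    #     interp = interpolate_A_I_from_r_perp(R, a, d, I, r_perp_max, 1000)
    #     interp(0.5 * r_perp_max)   # approximates A_I at that r_perp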
525 | A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000) 526 | 527 | field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) ) 528 | for i in range(0, tup_shape[0]): 529 | for j in range(0, tup_shape[1]): 530 | for k in range(0, tup_shape[2]): 531 | # Position of this point in space 532 | x_pos = x_0 + ( i + 0.5 ) * Dx 533 | y_pos = y_0 + ( j + 0.5 ) * Dy 534 | z_pos = z_0 + ( k + 0.5 ) * Dz 535 | 536 | #field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) 537 | #field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) 538 | #field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator) 539 | field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator) 540 | 541 | 542 | 543 | 544 | map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range ) 545 | np_boundary_data = field[:,:,0,2].T 546 | dummyDataToMap(np_boundary_data, x_range, y_range) 547 | 548 | #dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, } 549 | visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True) 550 | -------------------------------------------------------------------------------- /solarbextrapolation/conftest.py: -------------------------------------------------------------------------------- 1 | # this contains imports plugins that configure py.test for astropy tests. 2 | # by importing them here in conftest.py they are discoverable by py.test 3 | # no matter how it is invoked within the source tree. 4 | 5 | from astropy.tests.pytest_plugins import * 6 | 7 | import matplotlib 8 | matplotlib.use('Agg') 9 | 10 | ## Uncomment the following line to treat all DeprecationWarnings as 11 | ## exceptions 12 | # enable_deprecations_as_exceptions() 13 | 14 | ## Uncomment and customize the following lines to add/remove entries 15 | ## from the list of packages for which version numbers are displayed 16 | ## when running the tests 17 | try: 18 | PYTEST_HEADER_MODULES['Astropy'] = 'astropy' 19 | PYTEST_HEADER_MODULES['SunPy'] = 'sunpy' 20 | PYTEST_HEADER_MODULES['mayavi'] = 'mayavi' 21 | del PYTEST_HEADER_MODULES['h5py'] 22 | except NameError: # needed to support Astropy < 1.0 23 | pass 24 | 25 | ## Uncomment the following lines to display the version number of the 26 | ## package rather than the version number of Astropy in the top line when 27 | ## running the tests. 28 | # import os 29 | # 30 | ## This is to figure out the affiliated package version, rather than 31 | ## using Astropy's 32 | # from . 
import version
33 | #
34 | # try:
35 | #     packagename = os.path.basename(os.path.dirname(__file__))
36 | #     TESTED_VERSIONS[packagename] = version.version
37 | # except NameError:   # Needed to support Astropy <= 1.0.0
38 | #     pass
39 | 
--------------------------------------------------------------------------------
/solarbextrapolation/data/__init__.py:
--------------------------------------------------------------------------------
1 | from ._sample import download_sample_data
2 | 
--------------------------------------------------------------------------------
/solarbextrapolation/data/_sample.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """SunPy sample data files"""
3 | from __future__ import absolute_import
4 | 
5 | from os import remove
6 | import os.path
7 | from zipfile import ZipFile
8 | from urllib.error import URLError
9 | from shutil import move
10 | 
11 | from astropy.utils.data import download_file
12 | 
13 | from sunpy.util.net import url_exists
14 | from sunpy import config
15 | 
16 | __author__ = "Steven Christe"
17 | __email__ = "steven.christe@nasa.gov"
18 | 
19 | 
20 | sampledata_dir = config.get("downloads", "sample_dir")
21 | 
22 | # urls to search for the sample data
23 | _base_urls = (
24 |     'http://data.sunpy.org/solarbextrapolation/sample_data/',
25 |     'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/',
26 |     'https://github.com/ehsteve/sunpy-sample-data/raw/master/')
27 | 
28 | # keys are file shortcuts
29 | # values consist of filename as well as optional file extension if files are
30 | # hosted compressed. This extension is removed after download.
31 | _files = {
32 |     "HMI_2011": ("2011-02-14__20-35-25__01_hmi.fits", ""),
33 |     "AIA_2011": ("2011-02-14__20-35-25__02_aia.fits", ""),
34 |     "Bxyz_2011": ("2011-02-14__20-35-25__03_Bxyz.npy", ""),
35 | }
36 | 
37 | sample_files = {}
38 | for key in _files:
39 |     sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))
40 | 
41 | 
42 | def download_sample_data(progress=True, overwrite=True):
43 |     """
44 |     Download the sample data.
45 | 
46 |     Parameters
47 |     ----------
48 |     progress: bool
49 |         Show a progress bar during download.
50 |     overwrite: bool
51 |         If True, overwrite any previously downloaded sample data.
52 | 
53 |     Returns
54 |     -------
55 |     None
56 |     """
57 |     number_of_files_fetched = 0
58 |     print("Downloading sample files to " + sampledata_dir)
59 |     for file_name in _files.values():
60 |         if not overwrite:
61 |             if os.path.isfile(os.path.join(sampledata_dir,
62 |                                            file_name[0])):
63 |                 number_of_files_fetched += 1
64 |                 continue
65 | 
66 |         for base_url in _base_urls:
67 |             full_file_name = file_name[0] + file_name[1]
68 |             print(full_file_name)
69 |             if url_exists(os.path.join(base_url, full_file_name)):
70 |                 f = download_file(os.path.join(base_url, full_file_name))
71 |                 real_name, ext = os.path.splitext(full_file_name)
72 | 
73 |                 if file_name[1] == '.zip':
74 |                     print("Unpacking: %s" % real_name)
75 |                     with ZipFile(f, 'r') as zip_file:
76 |                         zip_file.extract(real_name, sampledata_dir)
77 |                     remove(f)
78 |                 else:
79 |                     # move files to the data directory
80 |                     move(f, os.path.join(sampledata_dir, file_name[0]))
81 |                 # increment the number of files obtained to check later
82 |                 number_of_files_fetched += 1
83 |                 break
84 | 
85 |     if number_of_files_fetched < len(_files.keys()):
86 |         raise URLError("Could not download all sample files. 
Problem accessing the sample data servers.")
87 | 
--------------------------------------------------------------------------------
/solarbextrapolation/data/sample.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """SunPy sample data files"""
3 | 
4 | import sys
5 | from ._sample import sample_files as _sample_files
6 | import os.path
7 | 
8 | from sunpy import config as _config
9 | _sampledata_dir = _config.get("downloads", "sample_dir")
10 | 
11 | for _key in _sample_files:
12 |     if os.path.isfile(os.path.join(_sampledata_dir, _sample_files[_key])):
13 |         setattr(sys.modules[__name__], _key, os.path.join(_sampledata_dir, _sample_files[_key]))
14 |     else:
15 |         raise ImportError("Sample data file(s) missing. Use solarbextrapolation.data.download_sample_data() to get them.")
16 | 
17 | file_dict = _sample_files
18 | file_list = list(_sample_files.values())
19 | 
20 | __all__ = list(_sample_files.keys()) + ['file_dict', 'file_list']
21 | 
--------------------------------------------------------------------------------
/solarbextrapolation/example_data_generator.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Jul 27 15:38:51 2015
4 | 
5 | Function for creating dummy boundary map data for use with extrapolator
6 | routines.
7 | 
8 | @author: alex_
9 | """
10 | 
11 | import numpy as np
12 | import math
13 | import matplotlib.pyplot as plt
14 | import random
15 | import sunpy.map as mp
16 | import re
17 | from astropy import units as u
18 | from datetime import datetime
19 | 
20 | # Function to generate the grid with Gaussian points.
21 | # Arguments are:
22 | #    - shape: 2-tuple for the x and y dimensions.
23 | #    - *argv: manual parameters for all the spots. Optional: defaults to 2 random spots.
24 | def generate_example_data(shape, xrange, yrange, *argv):
25 |     """
26 |     A function to generate a 2D numpy.array of example data for testing
27 |     extrapolation code.
28 |     The result is a mid-value region with a number of Gaussian spots with
29 |     positive/negative values.
30 |     The Gaussians can be specifically defined, or randomly generated.
31 | 
32 |     Parameters
33 |     ----------
34 | 
35 |     shape : list
36 |         A list of the axis grid sizes, (nx, ny).
37 | 
38 |     xrange : astropy.units.Quantity
39 |         The xrange for the returned dataset.
40 | 
41 |     yrange : astropy.units.Quantity
42 |         The yrange for the returned dataset.
43 | 
44 |     *argv : int or list, optional
45 |         Either the integer number of poles to randomly generate, which
46 |         defaults to 2.
47 |         Otherwise, the user can put in lists of parameters that define a pole.
48 |         Each list contains:
49 |         position : astropy.units.Quantity
50 |             both x and y coordinates as physical or percentage units
51 |         sigma : astropy.units.Quantity
52 |             spot size as physical or percentage units
53 |         max : astropy.units.Quantity
54 |             the maximum spot intensity
55 | 
56 | 
57 |     """
58 |     # If the list is empty then create random data.
59 |     arr_args = []
60 |     if not argv: # If no pole parameters or numbers were given.
61 |         arr_args = [2] # [ random.randrange(1, 6) ]
62 |     else:
63 |         arr_args = list(argv)
64 |     arr_poles = []
65 | 
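    # The two calling conventions, mirroring the __main__ example at the
    # bottom of this file (values illustrative only):
    #     generate_example_data((20, 22), xrange, yrange, 3)     # 3 random poles
    #     pole = [ u.Quantity([25, 25] * u.percent), 10.0 * u.percent, 0.2 * u.T ]
    #     generate_example_data((20, 22), xrange, yrange, pole)  # explicit pole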
66 |     # If we are only given the number, then generate randomly.
67 |     if isinstance( arr_args[0], int ):
68 |         for pole in range(0, arr_args[0]):
69 |             # random parameters in percentage
70 |             sigma = random.uniform(2, 15) * u.percent
71 |             x_pos = random.uniform(2.0 * sigma.value, 100.0 - 2.0 * sigma.value)
72 |             y_pos = random.uniform(2.0 * sigma.value, 100.0 - 2.0 * sigma.value)
73 |             An_max = random.uniform(0.1, 0.2) * ((float(pole % 2) * 2.0) - 1) * u.T # Alternate pos/neg
74 | 
75 |             arrPole = [ u.Quantity([x_pos, y_pos] * u.percent), sigma, An_max ]
76 |             arr_poles.append(arrPole)
77 | 
78 |     else:
79 |         # We are given the hard-coded parameters, so use them.
80 |         arr_poles = arr_args
81 | 
82 |     # Build the empty data array
83 |     arr_data = np.zeros((shape[1], shape[0]))
84 | 
85 |     # Grid pixel shape
86 |     qua_pixel = u.Quantity([ ( xrange[1] - xrange[0] ) / shape[0], ( yrange[1] - yrange[0] ) / shape[1] ])
87 | 
88 |     # Convert percentage positions/sigmas to physical units (units from ranges)
89 |     for pole in range(0, len(arr_poles)):
90 |         if arr_poles[pole][0].unit is u.percent:
91 |             position = u.Quantity([ (arr_poles[pole][0][0].value / 100.0) * (xrange[1] - xrange[0]) + xrange[0],
92 |                                     (arr_poles[pole][0][1].value / 100.0) * (yrange[1] - yrange[0]) + yrange[0] ])
93 |             arr_poles[pole] = [ position, arr_poles[pole][1], arr_poles[pole][2] ]
94 |         if arr_poles[pole][1].unit is u.percent:
95 |             sigma = (arr_poles[pole][1].value / 100.0) * (xrange[1] - xrange[0])
96 |             arr_poles[pole] = [ arr_poles[pole][0], sigma, arr_poles[pole][2] ]
97 | 
98 | 
99 |     # Iterate through the 2D array/matrix.
100 |     for i in range(0, shape[0]):     # Column/X
101 |         for j in range(0, shape[1]): # Row/Y
102 |             # The current position
103 |             floXPrime = i * qua_pixel[0]
104 |             floYPrime = j * qua_pixel[1]
105 | 
106 |             # A variable to store the sum of the magnetic fields for this point.
107 |             flo_value = 0.0
108 | 
109 |             # Add all the contributions.
110 |             for tupPole in arr_poles:
111 |                 # A0 (positive) and A1 (negative) parameters
112 |                 An_max = tupPole[2].value
113 |                 An_x = tupPole[0][0]
114 |                 An_y = tupPole[0][1]
115 |                 An_Dx = floXPrime - An_x + xrange[0]
116 |                 An_Dy = floYPrime - An_y + yrange[0]
117 |                 An_DxSqu = np.power(An_Dx.value, 2.0)
118 |                 An_DySqu = np.power(An_Dy.value, 2.0)
119 |                 An_Sigma = tupPole[1].value
120 | 
121 |                 # So this contribution is calculated and added.
122 |                 flo_An_cont = An_max * math.exp( - ( (An_DxSqu + An_DySqu) / (2 * np.power(An_Sigma, 2.0)) ))
123 |                 flo_value += flo_An_cont
124 | 
125 |             # Now add this to the data array.
126 |             arr_data[j][i] = flo_value
127 | 
128 |     # Now return the 2D numpy array.
129 |     return arr_data
130 | 
131 | # A function that creates a dummy header and returns the input as a sunpy map.
132 | def dummyDataToMap(data, xrange, yrange, **kwargs):
133 |     """
134 |     Basic function for taking generated data and returning a valid sunpy.map.
135 |     """
136 |     # The kwargs
137 |     dic_user_def_meta = kwargs.get('meta', {})
138 | 
139 |     # Create a header dictionary.
140 |     dicHeader = {
141 |         't_obs':    datetime.now().isoformat(),
142 |         'bunit':    'Tesla', #'Gauss',
143 |         'bitpix':   64, #re.search('\\d+', 'float64')[0],#64, # Automatic
144 |         'naxis':    2, # Automatic
145 |         'naxis1':   data.shape[1], # Automatic
146 |         'naxis2':   data.shape[0], # Automatic
147 |         'cdelt1':   (xrange[1].value - xrange[0].value) / data.shape[1], # 0.504295,
148 |         'cdelt2':   (yrange[1].value - yrange[0].value) / data.shape[0],
149 |         'cunit1':   str(xrange.unit), #'arcsec',
150 |         'cunit2':   str(yrange.unit), #'arcsec',
151 |         'crpix1':   data.shape[1] / 2.0 + 0.5, # central x-pixel.
152 |         'crpix2':   data.shape[0] / 2.0 + 0.5, # central y-pixel.
153 |         'rsun_ref': 696000000,
154 |         'dsun_ref': 149597870691,
155 |         'datamax':  data.max(),
156 |         'datamin':  data.min(),
157 |         'datavals': data.shape[0] * data.shape[1],
158 |         'CRVAL1':   (xrange[0].value + xrange[1].value)/2.0, #0.000000,
159 |         'CRVAL2':   (yrange[0].value + yrange[1].value)/2.0
160 |     }
161 | 
162 |     # Add the user defined meta entries
163 |     for key, value in dic_user_def_meta.items():
164 |         dicHeader[key] = value
165 |         #print(str(key) + ': ' + str(value))
166 | 
167 |     # Create and return a sunpy map from the data
168 |     return mp.Map((data, dicHeader))
169 | 
170 | if __name__ == '__main__':
171 |     # Generate an example map
172 |     # The input parameters:
173 |     arr_grid_shape = [ 20, 22 ]         # [ y-size, x-size ]
174 |     qua_xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
175 |     qua_yrange = u.Quantity([ -11.0, 11.0 ] * u.arcsec)
176 | 
177 |     # Manual Pole Details
178 |     #arrA0 = [ u.Quantity([ 1.0, 1.0 ] * u.arcsec), 2.0 * u.arcsec, 0.2 * u.T ]
179 |     arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent,  0.2 * u.T ]
180 |     arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ]
181 | 
182 |     # Generate the data and save to a map
183 |     arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange, arrA0, arrA1)#, arrA0, arrA1)
184 |     #arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange)#, arrA0, arrA1)
185 |     aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange)
186 |     aMap.save('C://fits//temp6.fits')
187 | 
--------------------------------------------------------------------------------
/solarbextrapolation/extrapolators/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from .base import Extrapolators
4 | from .potential_field_extrapolator import PotentialExtrapolator
5 | 
6 | __all__ = ['Extrapolators', 'PotentialExtrapolator']
7 | 
--------------------------------------------------------------------------------
/solarbextrapolation/extrapolators/base.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Sep 28 19:17:31 2015
4 | 
5 | @author: alex_
6 | """
7 | 
8 | # General Imports
9 | import matplotlib as mpl
10 | mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
11 | import numpy as np
12 | import pickle
13 | import time
14 | from datetime import datetime
15 | from collections import namedtuple
16 | import warnings
17 | import inspect
18 | #from sunpy.sun._constants import physical_constants as con
19 | 
20 | # SunPy imports
21 | import sunpy.map
22 | from sunpy.sun import constants, sun
23 | from sunpy.time import parse_time, is_time
24 | from astropy.table import Table
25 | import astropy.units as u
26 | 
27 | # Internal imports
28 | #from solarbextrapolation.utilities import *
29 | from solarbextrapolation.map3dclasses import Map3D
30 | 
31 | class Extrapolators(object):
32 |     """
33 |     Common class for all 3D vector field extrapolation routines.
34 |     Each routine, created by building a subclass, will have wildly varying
35 |     capabilities and input arguments, so this base class has been left
36 |     intentionally minimal.
37 |     The primary method to override is _extrapolation(); the primary method to
38 |     call is extrapolate(), which will both run _extrapolation() and save the
39 |     result if a filepath argument is given.
40 | 
41 |     Parameters
42 |     ----------
43 | 
44 |     map_magnetogram : `sunpy.map.GenericMap`
45 |         The sunpy map containing the boundary magnetogram data.
46 | 
47 |     filepath : `string`
48 |         The optional filepath for automatic saving of extrapolation results.
49 | 
50 |     notes : `string`
51 |         The optional notes regarding this run of the extrapolation routine.
52 | 
53 |     extrapolator_routine : `string`
54 |         The name for the extrapolation routine.
55 | 
56 |     zshape : `int`
57 |         The vertical grid size.
58 | 
59 |     xrange : `astropy.units.Quantity`, optional
60 |         The x edge to edge coordinates. If defined will manually scale the
61 |         boundary data.
62 | 
63 |     yrange : `astropy.units.Quantity`, optional
64 |         The y edge to edge coordinates. If defined will manually scale the
65 |         boundary data.
66 | 
67 |     zrange : `astropy.units.Quantity`
68 |         The vertical edge to edge coordinates for the vertical range.
69 | 
70 |     notes : `string`
71 |         User specified notes that will be added to the metadata.
72 |     """
73 | 
74 |     def __init__(self, map_magnetogram, **kwargs):
75 |         """
76 |         Construct an extrapolator using the given 2D map.
77 |         """
78 |         self.map_boundary_data = map_magnetogram
79 |         self.meta = { 'boundary_1_meta': self.map_boundary_data.meta }
80 |         self.meta['extrapolator_notes'] = kwargs.get('notes', '')
81 | 
82 |         # Normalising the units to SI may possibly be added here.
83 | 
84 |         # Crop the boundary data if required.
85 |         self.xrange = kwargs.get('xrange', self.map_boundary_data.xrange)
86 |         self.yrange = kwargs.get('yrange', self.map_boundary_data.yrange)
87 |         self.map_boundary_data = self.map_boundary_data.submap(self.xrange, self.yrange)
88 |         self.xobsrange = self.map_boundary_data.xrange
89 |         self.yobsrange = self.map_boundary_data.yrange
90 | 
91 |         #print '\n\nHelp for u:'
92 |         #print 'help(u): ' + str(help(u))
93 |         #print '\n\n'
94 |         self.zrange = kwargs.get('zrange', u.Quantity([0.0, 1.0] * u.Mm) )
95 |         self.shape = np.asarray([self.map_boundary_data.data.shape[1],
96 |                                  self.map_boundary_data.data.shape[0],
97 |                                  int(kwargs.get('zshape', 1))])
98 |         self.filepath = kwargs.get('filepath', None)
99 |         self.routine = kwargs.get('extrapolator_routine', type(self))
100 | 
101 | 
102 |     def _angle_to_length(self, arc, **kwargs):
103 |         """
104 |         Approximate a surface length from the observed arc length.
105 |         Uses the small angle approximation.
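        With the small-angle approximation, length = (dsun - rsun) * arc in
        radians; for an observer near 1 AU this makes 1 arcsec correspond to
        roughly 7.2e5 m on the solar surface.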
106 | """ 107 | r = self.map_boundary_data.dsun - self.map_boundary_data.rsun_meters 108 | length = (r * arc.to(u.radian)) 109 | return length.to(u.m, equivalencies=u.dimensionless_angles()) 110 | 111 | def _to_SI(self, **kwargs): 112 | """ 113 | 114 | """ 115 | # Scale the x/y ranges 116 | # Setup the equivilence 117 | obs_distance = self.map_boundary_data.dsun - self.map_boundary_data.rsun_meters 118 | radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ] 119 | 120 | # Extract the maps x/yrange values and convert to length units 121 | #x_range = self.map_boundary_data.xrange 122 | #x_range = ( decompose_ang_len(x_range[0], equivalencies=radian_length), 123 | # decompose_ang_len(x_range[1], equivalencies=radian_length) ) 124 | #x_range = 125 | #y_range = self.map_boundary_data.yrange 126 | """ 127 | x_range = self.map_boundary_data.xrange.to(u.meter, equivalencies=radian_length) 128 | y_range = self.map_boundary_data.yrange.to(u.meter, equivalencies=radian_length) 129 | # Normalise to start at 0.0 130 | x_range = [self.map_boundary_data.xrange[0] - self.map_boundary_data.xrange[0], 131 | self.map_boundary_data.xrange[1] - self.map_boundary_data.xrange[0]] 132 | y_range = [self.map_boundary_data.yrange[0] - self.map_boundary_data.yrange[0], 133 | self.map_boundary_data.yrange[1] - self.map_boundary_data.yrange[0]] 134 | """ 135 | # Scale the magnetic field units 136 | ori_bunit = u.Unit(self.map_boundary_data.meta.get('bunit', 'Tesla')) 137 | scale_factor = ori_bunit.to(u.T) 138 | self.map_boundary_data = self.map_boundary_data * scale_factor 139 | self.map_boundary_data.meta['bunit'] = 'Tesla' 140 | self.meta['boundary_1_meta']['bunit'] = 'Tesla' 141 | 142 | def _extrapolation(self, **kwargs): 143 | """ 144 | The method for running an extrapolation routine. 145 | This is the primary method to be edited in subclasses for specific 146 | extrapolation routine implementations. 147 | """ 148 | # Add some type checking, we want a map object, check for .unit attribute. 149 | # Extrapolation code goes here. 150 | arr_4d = np.zeros([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[1], 1, 3]) 151 | 152 | # Calculate the ranges in each dimension in length units (meters) 153 | x_range = self._angle_to_length(self.xrange) 154 | y_range = self._angle_to_length(self.yrange) 155 | z_range = self.zrange 156 | 157 | # Turn the 4D array into a Map3D object. 158 | map_output = Map3D( arr_4d, self.meta, xrange=x_range, yrange=y_range, zrange=z_range, xobsrange=self.xobsrange, yobsrange=self.yobsrange ) 159 | 160 | return map_output 161 | 162 | def extrapolate(self, **kwargs): 163 | """ 164 | Method to be called to run the extrapolation. 165 | Times and saves the extrapolation where applicable. 166 | """ 167 | # Record the time and duration of the extrapolation. 168 | dt_start = datetime.now() 169 | tim_start = time.time() 170 | arr_output = self._extrapolation(**kwargs) 171 | tim_duration = time.time() - tim_start 172 | 173 | # Add the duration and time to the meta/header data. 174 | arr_output.meta['extrapolator_start_time'] = dt_start.isoformat() 175 | arr_output.meta['extrapolator_duration'] = tim_duration 176 | arr_output.meta['extrapolator_duration_unit'] = u.s 177 | 178 | # Save the Map3D if a filepath has been set. 
(to avoid losing work).
179 |         if self.filepath:
180 |             arr_output.save(self.filepath)
181 |         return arr_output
182 | 
--------------------------------------------------------------------------------
/solarbextrapolation/extrapolators/potential_field_extrapolator.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Jun 12 13:01:54 2015
4 | 
5 | @author: Alex
6 | """
7 | 
8 | import numpy as np
9 | import sunpy.map as mp
10 | import astropy.units as u
11 | 
12 | # Module Imports
13 | #from classes import *
14 | #from solarbextrapolation.utilities import *
15 | from solarbextrapolation.extrapolators import Extrapolators
16 | from solarbextrapolation.utilities import si_this_map
17 | from solarbextrapolation.map3dclasses import Map3D
18 | #from solarbextrapolation.visualisation_functions import visualise
19 | 
20 | 
21 | __all__ = ['PotentialExtrapolator']
22 | 
23 | class PotentialExtrapolator(Extrapolators):
24 |     """
25 |     This is a Green's function method for extrapolating the potential (scalar)
26 |     field above a given magnetogram.
27 |     Equations are from the following book:
28 | 
29 |     | Title: Physics of the Solar Corona
30 |     | Author: T. J. M. Boyd and J. J. Sanderson
31 |     | Publisher: Springer Books and Praxis Publishing
32 |     | ISBN: 978-3-540-30766-2
33 | 
34 |     See chapter 5 on potential fields, which references the paper by
35 |     Takashi Sakurai (1982):
36 |     http://adsabs.harvard.edu/full/1982SoPh...76..301S
37 | 
38 |     """
39 |     def __init__(self, map_magnetogram, **kwargs):
40 |         super(PotentialExtrapolator, self).__init__(map_magnetogram, **kwargs)
41 |         self.meta['extrapolator_routine'] = 'Potential Field Extrapolator'
42 | 
43 |         # Convert the map to SI units. (Add to extrapolator class API???)
44 |         self.map_boundary_data = si_this_map(self.map_boundary_data)
45 | 
46 |         # More specific parameters (Add to extrapolator class API???)
47 |         self.Dx = (self.xrange[1] - self.xrange[0]) / self.shape[0]
48 |         self.Dy = (self.yrange[1] - self.yrange[0]) / self.shape[1]
49 |         self.Dz = (self.zrange[1] - self.zrange[0]) / self.shape[2]
50 | 
51 |     def _extrapolation(self, enable_numba=True, **kwargs):
52 |         """
53 |         Override the primary execution method from the extrapolation class.
54 |         The process is to extrapolate the potential (scalar) field (phi) and
55 |         then use numerical differentiation (gradient) to find the vector field
56 |         (Bxyz).
57 |         """
58 | 
59 |         if enable_numba:
60 |             # Test that numba and the numba'ed extrapolator can be imported.
61 |             try:
62 |                 import numba
63 |                 from .potential_field_extrapolator_numba import phi_extrapolation_numba
64 |             except ImportError:
65 |                 enable_numba = False
66 | 
67 |         phi = self._extrapolate_phi(enable_numba, **kwargs)
68 | 
69 |         if enable_numba:
70 |             from numba.decorators import autojit
71 |             determine_vec = autojit(self._determine_vec)
72 |         else:
73 |             determine_vec = self._determine_vec
74 | 
75 |         npmVecSpace = np.zeros(list(phi.shape)+[3]) # in order XYZC (C = component directions)
76 |         Bxyz = determine_vec(phi, 1, npmVecSpace)
77 | 
78 |         return Map3D(Bxyz, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange)
79 | 
80 |     def _extrapolate_phi(self, enable_numba, debug=False, **kwargs):
81 |         """
82 |         A function to extrapolate the magnetic field above the given boundary.
83 |         Assumes the input B-field boundary data is near normal (the image must
84 |         be near the centre of the HMI data).
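        The potential is assembled as the discrete Green's function sum

            phi(x, y, z) = sum over (x', y') of B_n(x', y') * G_n * Dx * Dy,

        with one monopole, submerged Dz / sqrt(2 * pi) below the surface, per
        boundary pixel. See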
85 |         P183, eq. (5.2.28).
86 |         """
87 |         if debug:
88 |             print("extrapolatePhi({}, {})".format(self.map_boundary_data.data.shape, debug))
89 | 
90 |         # Parameters
91 |         arr_boundary = self.map_boundary_data.data
92 | 
93 | 
94 |         if enable_numba:
95 |             from .potential_field_extrapolator_numba import phi_extrapolation_numba as phi_extrapolation
96 |         else:
97 |             from .potential_field_extrapolator_python import phi_extrapolation_python as phi_extrapolation
98 | 
99 |         return phi_extrapolation(arr_boundary, self.shape, self.Dx.value, self.Dy.value, self.Dz.value)
100 | 
101 | 
102 |     # Make this a static method so it is more efficient to numba
103 |     @staticmethod
104 |     def _determine_vec(phi, D, npmVecSpace):
105 |         """
106 |         Create an empty 3D matrix from the output.
107 |         ATM, for simplicity, I make the same size as the potential field, though the outer 2 layers are all 0.0.
108 |         """
109 |         tupVolShape = npmVecSpace.shape
110 | 
111 |         # For each cell we use data from 2 in each direction, this means we need to reduce the volume by 2 in each direction.
112 |         for k in range(2, tupVolShape[2]-2): # Z - Only done first so I can say when an XY slice has been rendered.
113 |             for j in range(2, tupVolShape[1]-2): # Y
114 |                 for i in range(2, tupVolShape[0]-2): # X
115 |                     npmVecSpace[i,j,k,0]=-(phi[i-2,j,k]-8.0*phi[i-1,j,k]+8.0*phi[i+1,j,k]-phi[i+2,j,k])/(12.0*D)
116 |                     npmVecSpace[i,j,k,1]=-(phi[i,j-2,k]-8.0*phi[i,j-1,k]+8.0*phi[i,j+1,k]-phi[i,j+2,k])/(12.0*D)
117 |                     npmVecSpace[i,j,k,2]=-(phi[i,j,k-2]-8.0*phi[i,j,k-1]+8.0*phi[i,j,k+1]-phi[i,j,k+2])/(12.0*D)
118 | 
119 |         return npmVecSpace
120 | 
121 | 
--------------------------------------------------------------------------------
/solarbextrapolation/extrapolators/potential_field_extrapolator_numba.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Aug 20 12:59:44 2015
4 | 
5 | @author: alex_
6 | """
7 | 
8 | import numpy as np
9 | 
10 | # Imports for numba JIT compilation
11 | from numba import jit
12 | 
13 | 
14 | def phi_extrapolation_numba(boundary, shape, Dx, Dy, Dz):
15 |     """
16 |     Function to extrapolate the scalar magnetic field above the given boundary
17 |     data.
18 |     This implementation uses numba JIT compilation to speed up the process.
19 |     """
20 | 
21 |     # Create the empty numpy volume array.
22 |     D = np.empty((shape[1], shape[0], shape[2]), dtype=np.float64)
23 | 
24 |     D = outer_loop(D, Dx, Dy, Dz, boundary)
25 | 
26 |     return D
27 | 
28 | 
29 | @jit(nopython=True)
30 | def outer_loop(D, Dx, Dy, Dz, boundary):
31 |     shape = D.shape
32 |     # From Sakurai 1982 P306, we submerge the monopole
33 |     z_submerge = Dz / np.sqrt(2.0 * np.pi)
34 |     # Iterate through the 3D space.
35 |     for i in range(0, shape[1]):
36 |         for j in range(0, shape[0]):
37 |             for k in range(0, shape[2]):
38 |                 # Position of point in 3D space
39 |                 x = i * Dx
40 |                 y = j * Dy
41 |                 z = k * Dz
42 | 
43 |                 # Now add this to the 3D grid.
44 |                 D[j, i, k] = inner_loop(shape, Dx, Dy, x, y, z, boundary, z_submerge)
45 |     return D
46 | 
47 | 
48 | @jit(nopython=True)
49 | def inner_loop(shape, Dx, Dy, x, y, z, boundary, z_submerge):
50 |     DxDy = Dx * Dy
51 |     # Variable holding running total for the contributions to point.
52 |     point_phi_sum = 0.0
53 |     # Iterate through the boundary data.
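    # Each boundary pixel acts as a monopole submerged z_submerge below the
    # surface; point_phi_sum accumulates its B_n-weighted Green's function
    # contribution over the whole boundary (the discrete sum of eq. 5.2.28).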
54 | for i_prime in range(0, shape[1]): 55 | for j_prime in range(0, shape[0]): 56 | # Position of contributing point on 2D boundary 57 | xP = i_prime * Dx 58 | yP = j_prime * Dy 59 | 60 | # Find the components for this contribution product 61 | B_n = boundary[j_prime, i_prime] 62 | G_n = Gn_5_2_29(x, y, z, xP, yP, DxDy, z_submerge) 63 | 64 | # Add the contributions 65 | point_phi_sum += B_n * G_n * DxDy 66 | 67 | return point_phi_sum 68 | 69 | 70 | @jit(nopython=True) 71 | def Gn_5_2_29(x, y, z, xP, yP, DxDy_val, z_submerge): 72 | """ 73 | Discrete Greens Function 74 | Extends _Gn_5_2_26 by taking the starting position of each magnetic 75 | monopole as 1/root(2 pi) z grid cells below the surface. (as described 76 | in Sakurai 1982) 77 | This implementation runs using Anaconda numba JIT compilation to speed up 78 | the process. 79 | """ 80 | d_i = x - xP 81 | d_j = y - yP 82 | d_k = z - z_submerge 83 | floModDr = np.sqrt(d_i * d_i + d_j * d_j + d_k * d_k) 84 | 85 | floOut = 1.0 / (2.0 * np.pi * floModDr) 86 | return floOut 87 | -------------------------------------------------------------------------------- /solarbextrapolation/extrapolators/potential_field_extrapolator_python.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Aug 20 12:59:44 2015 4 | 5 | @author: alex_ 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def Gn_5_2_29(x, y, z, xP, yP, DxDy_val, z_submerge): 12 | """ 13 | Discrete Greens Function 14 | Extends _Gn_5_2_26 by taking the starting position of each magnetic 15 | monopole as 1/root(2 pi) z grid cells below the surface. (as described 16 | in Sakurai 1982) 17 | """ 18 | d_i = x - xP 19 | d_j = y - yP 20 | d_k = z - z_submerge 21 | floModDr = np.sqrt(d_i * d_i + d_j * d_j + d_k * d_k) 22 | 23 | floOut = 1.0 / (2.0 * np.pi * floModDr) 24 | return floOut 25 | 26 | 27 | def phi_extrapolation_python(boundary, shape, Dx, Dy, Dz): 28 | """ 29 | Function to extrapolate the scalar magnetic field above the given boundary 30 | data. 31 | This implementation runs in python and so is very slow for larger datasets. 32 | 33 | Parameters 34 | ---------- 35 | boundary : array-like 36 | Magnetogram boundary data 37 | shape : array-like 38 | Dimensions of of the extrapolated volume, (nx,ny,nz) 39 | Dx : `float` 40 | Spacing in x-direction, in units of the boundary map 41 | Dy : `float` 42 | Spacing in y-direction, in units of the boundary map 43 | Dz : `float` 44 | Spacing in z-direction, in chosen units 45 | """ 46 | 47 | # Derived parameters 48 | DxDy = Dx * Dy 49 | 50 | # From Sakurai 1982 P306, we submerge the monopole 51 | z_submerge = Dz / np.sqrt(2.0 * np.pi) 52 | 53 | # Create the empty numpy volume array. 54 | D = np.empty((shape[1], shape[0], shape[2]), dtype=np.float) 55 | 56 | i_prime, j_prime = np.indices((shape[1], shape[0])) 57 | xP = i_prime * Dx 58 | yP = j_prime * Dy 59 | 60 | # Iterate though the 3D space. 61 | for i in range(0, shape[0]): 62 | for j in range(0, shape[1]): 63 | for k in range(0, shape[2]): 64 | # Position of point in 3D space 65 | x = i * Dx 66 | y = j * Dy 67 | z = k * Dz 68 | 69 | # Variable holding running total for the contributions to point. 70 | point_phi_sum = 0.0 71 | 72 | G_n = Gn_5_2_29(x, y, z, xP, yP, DxDy, z_submerge) 73 | 74 | # Now add this to the 3D grid. 
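                # Note: xP/yP hold the coordinates of every boundary pixel at
                # once (via np.indices above), so G_n is a 2D array here and
                # the numba version's explicit double loop over the boundary
                # collapses to a single np.sum.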
75 | D[j, i, k] = np.sum(boundary * G_n * DxDy) 76 | return D 77 | -------------------------------------------------------------------------------- /solarbextrapolation/map3dclasses.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # General Imports 4 | import matplotlib as mpl 5 | mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict. 6 | import numpy as np 7 | import pickle 8 | import time 9 | from datetime import datetime 10 | from collections import namedtuple 11 | import warnings 12 | import inspect 13 | from copy import deepcopy 14 | #from sunpy.sun._constants import physical_constants as con 15 | 16 | # SunPy imports 17 | import sunpy.map 18 | from sunpy.sun import constants, sun 19 | from sunpy.time import parse_time, is_time 20 | from astropy.table import Table 21 | import astropy.units as u 22 | 23 | # Internal imports 24 | from .utilities import * 25 | 26 | __all__ = ["Map3D", "Map3DCube", "Map3DComparer"] 27 | 28 | class Map3D(object): 29 | """ 30 | A basic data structure for holding a 3D numpy array of floats or 3-float 31 | vectors and metadata. 32 | The structure can be saved/loaded (using pickle ATM). 33 | 34 | Parameters 35 | ---------- 36 | 37 | data : `numpy.array` 38 | The numpy array containing the numerical data. 39 | meta : `dictionary` 40 | The container for additional information about the data in this object. 41 | Where: 42 | * x/y/zrange: the max/min spacial positions along the given axis. 43 | * x/yobsrange: the observational data range, often in arcsec. 44 | * cdelt1/2/3: the size of each pixel in each axis. 45 | * unit1/2/3: the spacial units in each axis. 46 | * naxis1/2/3: the number of pixels in each axis. 47 | 48 | """ 49 | def __init__(self, data, meta, **kwargs): 50 | self.data = data 51 | self.meta = meta 52 | self.xrange = kwargs.get('xrange', [ 0, data.shape[1] ] * u.pixel) 53 | self.yrange = kwargs.get('yrange', [ 0, data.shape[0] ] * u.pixel) 54 | self.zrange = kwargs.get('zrange', [ 0, data.shape[2] ] * u.pixel) 55 | self.xobsrange = kwargs.get('xobsrange', self.xrange) 56 | self.yobsrange = kwargs.get('yobsrange', self.yrange) 57 | 58 | # Add some general properties to the metadata dictionary 59 | self.meta['xrange'] = self.xrange 60 | self.meta['yrange'] = self.yrange 61 | self.meta['zrange'] = self.zrange 62 | self.meta['cdelt1'] = ((self.xrange[1] - self.xrange[0]) / self.data.shape[1]).value 63 | self.meta['cdelt2'] = ((self.yrange[1] - self.yrange[0]) / self.data.shape[0]).value 64 | self.meta['cdelt3'] = ((self.zrange[1] - self.zrange[0]) / self.data.shape[2]).value 65 | # Note: should be reversed to fortran array indexing 66 | self.meta['cunit1'] = self.xrange.unit 67 | self.meta['cunit2'] = self.yrange.unit 68 | self.meta['cunit3'] = self.zrange.unit 69 | self.meta['naxis1'] = self.data.shape[1] 70 | self.meta['naxis2'] = self.data.shape[0] 71 | self.meta['naxis3'] = self.data.shape[2] 72 | if kwargs.get('date_obs', False): 73 | self.meta['date-obs'] = kwargs.get('date_obs') 74 | self.meta['rsun_ref'] = kwargs.get('rsun_ref', constants.radius.value) 75 | if kwargs.get('dsun_obs', False): 76 | self.meta['dsun_obs'] = kwargs.get('dsun_obs') 77 | if kwargs.get('bunit', False): 78 | self.meta['bunit'] = kwargs.get('bunit') 79 | 80 | # For alignment with the boundary data 81 | self.meta['xobsrange'] = self.xobsrange 82 | self.meta['yobsrange'] = self.yobsrange 83 | 84 | 85 | @property 86 | def is_scalar(self, **kwargs): 87 | """ 88 | Returns true if 
data is a volume of scalar values (3D array) or false 89 | if it is a volume of vector values (4D array). 90 | """ 91 | return (True if self.data.ndim is 3 else False) 92 | 93 | @property 94 | def units(self, **kwargs): 95 | """ 96 | Image coordinate units along the x, y and z axes (cunit1/2/3). 97 | """ 98 | 99 | # Define a triple, a named tuple object for returning values 100 | Triple = namedtuple('Triple', 'x y z') 101 | 102 | return Triple(u.Unit(self.meta.get('cunit1', 'pix')), 103 | u.Unit(self.meta.get('cunit2', 'pix')), 104 | u.Unit(self.meta.get('cunit3', 'pix'))) 105 | 106 | @property 107 | def scale(self, **kwargs): 108 | """ 109 | Image scale along the x, y and z axes in units/pixel (cdelt1/2/3) 110 | """ 111 | # Define a triple, a named tuple object for returning values 112 | from collections import namedtuple 113 | Triple = namedtuple('Triple', 'x y z') 114 | 115 | ''' 116 | return Triple(u.Unit(self.meta.get('cdelt1', 'arcsec')), 117 | u.Unit(self.meta.get('cdelt1', 'arcsec')), 118 | u.Unit(self.meta.get('cdelt2', 'arcsec'))) 119 | ''' 120 | return Triple(self.meta.get('cdelt1', 1.) * self.units.x / u.pixel, 121 | self.meta.get('cdelt2', 1.) * self.units.y / u.pixel, 122 | self.meta.get('cdelt3', 1.) * self.units.z / u.pixel) 123 | 124 | @property 125 | def rsun_meters(self, **kwargs): 126 | """Radius of the sun in meters""" 127 | return u.Quantity(self.meta.get('rsun_ref', constants.radius), 'meter') 128 | 129 | @property 130 | def date(self, **kwargs): 131 | """Image observation time""" 132 | time = parse_time(self.meta.get('date-obs', 'now')) 133 | if time is None: 134 | warnings.warn("Missing metadata for observation time. Using current time.", Warning) 135 | return parse_time(time) 136 | 137 | @property 138 | def dsun(self, **kwargs): 139 | """ 140 | The observer distance from the Sun. 141 | """ 142 | dsun = self.meta.get('dsun_obs', None) 143 | 144 | if dsun is None: 145 | warnings.warn("Missing metadata for Sun-spacecraft separation: assuming Sun-Earth distance", 146 | Warning) 147 | dsun = sun.sunearth_distance(self.date).to(u.m) 148 | 149 | return u.Quantity(dsun, 'm') 150 | 151 | # #### I/O routines #### # 152 | @classmethod 153 | def load(self, filepath, **kwargs): 154 | """ 155 | Load a Map3D instance using pickle. 156 | """ 157 | loaded = pickle.load( open( filepath, "rb" ) ) 158 | return loaded 159 | 160 | def save(self, filepath, filetype='auto', **kwargs): 161 | """ 162 | Saves the Map3D object to a file. 163 | 164 | Currently uses Python pickle. 165 | https://docs.python.org/2/library/pickle.html 166 | In the future support will be added for saving to other formats. 167 | 168 | Parameters 169 | ---------- 170 | filepath : string 171 | Location to save file to. 172 | 173 | filetype : string 174 | 'auto' or any supported file extension 175 | """ 176 | #io.write_file(filepath, self.data, self.meta, filetype=filetype, 177 | # **kwargs) 178 | pickle.dump(self, open( filepath, "wb" ), **kwargs) 179 | 180 | 181 | from sunpy.util import expand_list 182 | 183 | 184 | class Map3DCube: 185 | """ 186 | A basic data structure for holding a list of Map3D objects. 187 | """ 188 | def __init__(self, *args, **kwargs): 189 | 190 | # Hack to get around Python 2.x not backporting PEP 3102. 
191 |         #sortby = kwargs.pop('sortby', 'date')
192 |         #derotate = kwargs.pop('derotate', False)
193 | 
194 |         self.maps = expand_list(args)
195 | 
196 |         for m in self.maps:
197 |             if not isinstance(m, Map3D):
198 |                 raise ValueError(
199 |                     'Map3DCube expects pre-constructed Map3D objects.')
200 | 
201 |     def __getitem__(self, key, **kwargs):
202 |         """
203 |         Overriding indexing operation. If the key results in a single map,
204 |         then a map object is returned. This allows functions like enumerate to
205 |         work. Otherwise, a mapcube is returned.
206 |         """
207 |         if isinstance(self.maps[key], Map3D):
208 |             return self.maps[key]
209 |         else:
210 |             return Map3DCube(self.maps[key])
211 | 
212 |     def __len__(self, **kwargs):
213 |         """
214 |         Return the number of maps in a mapcube.
215 |         """
216 |         return len(self.maps)
217 | 
218 | 
219 |     def all_maps_same_shape(self, **kwargs):
220 |         """
221 |         Tests if all the 3D maps have the same shape.
222 |         """
223 |         return np.all([m.data.shape == self.maps[0].data.shape for m in self.maps])
224 | 
225 | 
226 | class Map3DComparer(object):
227 |     """
228 |     | Class for comparison of vector fields.
229 |     | There are two classifications of test:
230 |     |  * **Mono**: returns a value for a given vector field. Can be normalised to the benchmark field.
231 |     |  * **Binary**: requires comparison between two vector fields.
232 |     | By default:
233 |     |  * The benchmark field is the first/original vector field. This is used as the baseline for comparison. This can be changed using the ``benchmark=n`` kwarg.
234 |     |  * Normalise will be set to False.
235 |     | Individual tests can be run and return results for immediate viewing (using astropy.table).
236 |     | Likewise, compare_all can be used to run the whole series of tests.
237 |     | Note: all vector fields must be of the same shape.
238 | 
239 |     """
240 |     def __init__(self, map3D, *args, **kwargs):
241 |         # Use all the user parameters
242 |         self.maps_list = [map3D] + expand_list(args)
243 |         self.benchmark = kwargs.get('benchmark', 0) # Defaults to the first vector field in the list
244 |         self.normalise = kwargs.get('normalise', False)
245 | 
246 |         # The table to store the test results
247 |         self.results = Table(names=('extrapolator routine', 'extrapolation duration', 'fig of merit 1'), meta={'name': '3D field comparison table'}, dtype=('S24', 'f8', 'f8'))
248 |         self.results['extrapolation duration'].unit = u.s
249 | 
250 |         # An empty table for the results:
251 |         #N = len(self.maps_list)
252 |         #t1, t2, t3, t4, t5, t6, t7 = [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N
253 |         #self.results = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})
254 |         #self.results_normalised = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})
255 | 
256 |         # Ensure that the input maps are all the same type and shape.
257 |         for m in self.maps_list:
258 |             # Check that this is a Map3D object.
259 |             if not isinstance(m, Map3D):
260 |                 raise ValueError(
261 |                     'Map3DComparer expects pre-constructed map3D objects.')
262 | 
263 |             # Compare the shape of this Map3D to the first in the Map3D list.
264 |             if not m.data.shape == self.maps_list[0].data.shape:
265 |                 raise ValueError(
266 |                     'Map3DComparer expects map3D objects with identical dimensions.')
267 | 
268 | 
269 |     def _normalise(self):
270 |         """
271 |         Return the normalised table.
272 | """ 273 | # Get the benchmark extrapolation result. 274 | row_benchmark = self.results[self.benchmark] 275 | 276 | # Create a copy of the table 277 | tbl_output = deepcopy(self.results) 278 | 279 | for row in tbl_output: 280 | for val, val_benchmark in zip(row, row_benchmark): 281 | # If the value is a float then normalise. 282 | if type(val) == np.float64 or type(val) == np.float32 or type(val) == np.float16: 283 | val = val / val_benchmark 284 | 285 | 286 | 287 | def L_infin_norm(map_field, benchmark, **kwargs): 288 | """ 289 | l-infinity norm of the vector field. 290 | For vector field :math:`\bfx` this would be: 291 | 292 | .. math:: 293 | 294 | \| \mathbf{x} \| \infty = \sqrt[\infty]{\Sigma_i x_i^\infty} \approx \text{max}(|x_i|) 295 | 296 | (the malue of the maximum component) 297 | 298 | From: https://rorasa.wordpress.com/2012/05/13/l0-norm-l1-norm-l2-norm-l-infinity-norm/ 299 | """ 300 | 301 | # Placeholder for the maximum value. 302 | output = - 10.0**15 303 | 304 | # Iterate through the volume 305 | ni, nj, nk, D = map_field.shape 306 | for i in range(0, ni): 307 | for j in range(0, nj): 308 | for k in range(0, nk): 309 | # Get the sum of the components 310 | component_sum = 0.0 311 | for component in map_field[i][j][k]: 312 | component_sum += component 313 | 314 | # If this is bigger then the current max value. 315 | if output < component_sum: 316 | output = component_sum 317 | 318 | # Output 319 | return output 320 | 321 | def compare_all(self, **kwargs): 322 | """ 323 | Compare all of the given vector fields and return the results as an 324 | astropy.table. 325 | """ 326 | #num_tests = 1 327 | #num_maps = len(self.maps) 328 | #arr_data = np.zeros([num_tests, num_maps]) 329 | 330 | # For each given 3D field, run all the tests and add a row to the table. 331 | for map3D in self.maps: 332 | # Get the data 333 | arr_data = map3D.data 334 | 335 | # Store the results from each test for this field. 336 | lis_results = [ map3D.meta.get('extrapolator_routine', 'Unknown Routine'), 337 | map3D.meta.get( 'extrapolator_duration', 0.0 ) ] 338 | 339 | # Run through all the tests and append results to the list. 340 | lis_results.append(self.L_infin_norm(arr_data)) 341 | 342 | # Now add the results to the table. 343 | self.results.add_row(lis_results) 344 | 345 | 346 | if self.normalise: 347 | self.results_normalised 348 | else: 349 | self.results 350 | -------------------------------------------------------------------------------- /solarbextrapolation/mayavi_seed_streamlines.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | This module contains a custom streamlining class derived from the MayaVi2 4 | streamlining class, modified to accept an array of seed points for visulaisation 5 | using mayavi. 6 | 7 | .. warning:: 8 | The documentation for this class cannot be built on Read The Docs, it is possible to build it locally. 9 | 10 | You can use this class thus: 11 | 12 | Create a new Streamline instance and add it to a pipeline 13 | """ 14 | 15 | import numpy as np 16 | from tvtk.api import tvtk 17 | from traits.api import Instance, TraitPrefixList, Trait, Array 18 | 19 | import mayavi 20 | from mayavi.modules.streamline import Streamline 21 | 22 | __all__ = ['SeedStreamline'] 23 | 24 | class SeedStreamline(Streamline): 25 | """ 26 | This class is a modification of the mayavi Streamline class that accepts 27 | an array of seed points as a input rather than a widget. 
28 | 29 | Examples 30 | -------- 31 | Create a new Streamline instance and add it to a pipeline 32 | 33 | >>> from solarbextrapolation.mayavi_seed_streamlines import SeedStreamline 34 | >>> import numpy as np 35 | >>> seeds = [[1, 2, 5], [3, 4, 5]] 36 | >>> field_lines = SeedStreamline(seed_points = np.array(seeds)) #doctest: +SKIP 37 | >>> myvectorfield.add_child(field_lines) #doctest: +SKIP 38 | """ 39 | 40 | seed_points = Array(allow_none=False) 41 | seed = Instance(tvtk.PolyData, args=()) 42 | update_mode = Trait('interactive', TraitPrefixList(['interactive', 43 | 'semi-interactive', 44 | 'non-interactive']), 45 | desc='the speed at which the poly data is updated') 46 | 47 | def setup_pipeline(self): 48 | """Override this method so that it *creates* the tvtk 49 | pipeline. 50 | 51 | This method is invoked when the object is initialized via 52 | `__init__`. Note that at the time this method is called, the 53 | tvtk data pipeline will *not* yet be setup. So upstream data 54 | will not be available. The idea is that you simply create the 55 | basic objects and setup those parts of the pipeline not 56 | dependent on upstream sources and filters. You should also 57 | set the `actors` attribute up at this point. 58 | """ 59 | # Create and setup the default objects. 60 | self.seed = tvtk.PolyData(points=self.seed_points) 61 | self.stream_tracer = tvtk.StreamTracer(maximum_propagation=2000, 62 | integration_direction='backward', 63 | compute_vorticity=False, 64 | integrator_type='runge_kutta4', 65 | ) 66 | self.ribbon_filter = tvtk.RibbonFilter() 67 | self.tube_filter = tvtk.TubeFilter() 68 | 69 | self.actor = mayavi.components.actor.Actor() 70 | # Setup the actor suitably for this module. 71 | self.actor.property.line_width = 2.0 72 | 73 | def update_pipeline(self): 74 | """Override this method so that it *updates* the tvtk pipeline 75 | when data upstream is known to have changed. 76 | 77 | This method is invoked (automatically) when any of the inputs 78 | sends a `pipeline_changed` event. 79 | """ 80 | mm = self.module_manager 81 | if mm is None: 82 | return 83 | 84 | src = mm.source 85 | self.stream_tracer.input = src.outputs[0] 86 | #self.seed.inputs = [src] 87 | 88 | # Setup the radius/width of the tube/ribbon filters based on 89 | # given input. 90 | if self._first: 91 | b = src.outputs[0].bounds 92 | l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])] 93 | length = np.sqrt(l[0]*l[0] + l[1]*l[1] + l[2]*l[2]) 94 | self.ribbon_filter.width = length*0.0075 95 | self.tube_filter.radius = length*0.0075 96 | self._first = False 97 | 98 | self._streamline_type_changed(self.streamline_type) 99 | # Set the LUT for the mapper. 100 | self.actor.set_lut(mm.scalar_lut_manager.lut) 101 | 102 | self.pipeline_changed = True 103 | 104 | def _seed_points_changed(self, old, new): 105 | self.seed = tvtk.PolyData(points=self.seed_points) 106 | 107 | def _stream_tracer_changed(self, old, new): 108 | if old is not None: 109 | old.on_trait_change(self.render, remove=True) 110 | seed = self.seed 111 | if seed is not None: 112 | new.source = seed 113 | new.on_trait_change(self.render) 114 | mm = self.module_manager 115 | if mm is not None: 116 | new.input = mm.source.outputs[0] 117 | 118 | # A default output so there are no pipeline errors. The 119 | # update_pipeline call corrects this if needed. 
120 |             self.outputs = [new.output]
121 | 
122 |         self.update_pipeline()
123 | 
124 |     def _seed_changed(self, old, new):
125 |         st = self.stream_tracer
126 |         if st is not None:
127 |             st.source = new #.poly_data
128 |         #self._change_components(old, new)
129 | 
--------------------------------------------------------------------------------
/solarbextrapolation/preprocessors/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from .base import Preprocessors
4 | 
5 | __all__ = ['Preprocessors']
6 | 
--------------------------------------------------------------------------------
/solarbextrapolation/preprocessors/base.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Sep 28 19:18:58 2015
4 | 
5 | @author: alex_
6 | """
7 | 
8 | # General Imports
9 | import matplotlib as mpl
10 | mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
11 | import numpy as np
12 | import pickle
13 | import time
14 | from datetime import datetime
15 | from collections import namedtuple
16 | import warnings
17 | import inspect
18 | #from sunpy.sun._constants import physical_constants as con
19 | 
20 | # SunPy imports
21 | import sunpy.map
22 | from sunpy.sun import constants, sun
23 | from sunpy.time import parse_time, is_time
24 | from astropy.table import Table
25 | import astropy.units as u
26 | 
27 | # Internal imports
28 | #from solarbextrapolation.utilities import *
29 | 
30 | class Preprocessors(object):
31 |     """
32 |     A common class for all 2D pre-processing routines, tools used to pre-process
33 |     the 2D sunpy map data for use in extrapolations.
34 |     Usage can include basic filters for noise/contrast or algorithms to
35 |     compensate for extrapolator assumptions, such as the force-free assumption
36 |     that is assumed in many extrapolations but isn't true in the photosphere,
37 |     where magnetogram observations are generally taken.
38 | 
39 |     Parameters
40 |     ----------
41 | 
42 |     map_data : `sunpy.map.GenericMap`
43 |         The sunpy map containing the data to be processed.
44 |     filepath : `string`
45 |         The optional filepath for automatic saving of preprocessed results.
46 |     notes : `string`
47 |         User specified notes that will be added to the metadata.
48 |     """
49 |     def __init__(self, map_data, **kwargs):
50 |         """
51 |         Method for creating a preprocessor object, using a sunpy map.
52 |         """
53 |         # Add some type checking, we want a map object, check for .unit attribute.
54 |         self.map_input = map_data
55 |         self.routine = kwargs.get('preprocessor_routine', type(self))
56 |         self.meta = self.map_input.meta
57 |         self.meta['preprocessor_notes'] = kwargs.get('notes', '')
58 |         self.meta['preprocessor_routine'] = self.routine
59 |         self.filepath = kwargs.get('filepath', None)
60 | 
61 |     def _preprocessor(self, **kwargs):
62 |         """
63 |         Method running the preprocessing routine and returning a sunpy map.
64 |         For traceability this should add entries into the metadata that
65 |         include any parameters used for the given run.
66 |         """
67 |         map_output = sunpy.map.Map(self.map_input.data, self.meta)
68 |         return map_output
69 | 
70 |     def preprocess(self, **kwargs):
71 | 
72 |         """
73 |         Method to be called to run the preprocessor.
74 |         Times the process and saves output where applicable.
75 |         """
76 |         dt_start = datetime.now()
77 |         tim_start = time.time()
78 |         map_output = self._preprocessor()
79 |         tim_duration = time.time() - tim_start
80 | 
81 |         map_output.meta['preprocessor_start_time'] = dt_start.isoformat()
82 |         map_output.meta['preprocessor_duration'] = tim_duration
83 | 
84 |         if self.filepath:
85 |             map_output.save(self.filepath)
86 |         return map_output
87 | 
--------------------------------------------------------------------------------
/solarbextrapolation/temp_gradient.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Nov 20 19:49:49 2014
4 | 
5 | @author: Alex
6 | """
7 | 
8 | import numpy as np
9 | import time
10 | 
11 | 
12 | # A function that takes the 3D magnetic scalar potential field and returns the 3D vector field.
13 | def determineVec_FORTRAN(phi, D = 1, debug = False):
14 |     # Time this function.
15 |     start = time.time()
16 | 
17 |     # Create an empty 3D matrix for the output.
18 |     # ATM, for simplicity, I make it the same size as the potential field, though the outer 2 layers are all 0.0.
19 |     tupVolShape = phi.shape
20 |     npmVecSpace = np.zeros((tupVolShape[0], tupVolShape[1], tupVolShape[2], 3)) # in order XYZC (C = component directions)
21 | 
22 |     # For each cell we use data from 2 cells in each direction, so we need to shrink the computed volume by 2 cells on every side.
23 |     for k in range(2, tupVolShape[2]-2): # Z - Only done first so I can say when an XY slice has been rendered.
24 |         for j in range(2, tupVolShape[1]-2): # Y
25 |             for i in range(2, tupVolShape[0]-2): # X
26 |                 # Fourth-order central differences: B = -grad(phi), accurate to O(D**4).
27 |                 npmVecSpace[i,j,k,0]=-(phi[i-2,j,k]-8.0*phi[i-1,j,k]+8.0*phi[i+1,j,k]-phi[i+2,j,k])/(12.0*D)
28 |                 npmVecSpace[i,j,k,1]=-(phi[i,j-2,k]-8.0*phi[i,j-1,k]+8.0*phi[i,j+1,k]-phi[i,j+2,k])/(12.0*D)
29 |                 npmVecSpace[i,j,k,2]=-(phi[i,j,k-2]-8.0*phi[i,j,k-1]+8.0*phi[i,j,k+1]-phi[i,j,k+2])/(12.0*D)
30 |         if debug and k % max(int(tupVolShape[2]*0.1), 1) == 0:
31 |             print('(Bx,By,Bz) calculated for layer ' + str(k) + '.')
32 | 
33 |     # Print the time elapsed in debug mode.
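    # (NB: the pure-Python triple loop above is slow for large volumes; the
    #  __main__ block below cross-checks a small case against np.gradient.)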
34 |     if debug:
35 |         finish = time.time()
36 |         print('determineVec_FORTRAN time ' + str(tupVolShape) + ': ' + str(time.strftime("%H:%M:%S", time.gmtime(finish - start))))
37 |     return npmVecSpace
38 | 
39 | ############
40 | #
41 | # Examples
42 | #
43 | #######
44 | if __name__ == '__main__':
45 |     a3DArray = np.random.rand(5,5,5)
46 |     # Make this array structured:
47 |     delta = 0.5
48 |     for k in range(0,a3DArray.shape[0]):
49 |         for j in range(0,a3DArray.shape[1]):
50 |             for i in range(0,a3DArray.shape[2]):
51 |                 # Increase in all directions
52 |                 a3DArray[i,j,k] = i * delta + j * delta + k * delta
53 |                 # Increase in 2D
54 |                 #a3DArray[i,j,k] = i * delta + j * delta
55 |                 # Increase in 1D
56 |                 #a3DArray[i,j,k] = i * delta
57 | 
58 |     # Uncomment below to make it a random number volume again
59 |     #a3DArray = np.random.rand(5,5,5)
60 | 
61 |     # Print the scalar volume array
62 |     print('\n\n')
63 |     print(a3DArray)
64 | 
65 |     # Print the centre cell vector from my function (expect [-0.5, -0.5, -0.5] for the structured array above)
66 |     print('\n\n')
67 |     a4DArray = determineVec_FORTRAN(a3DArray)
68 |     print(a4DArray[2,2,2])
69 | 
70 |     # Print the centre cell vector from the numpy gradient function
71 |     b4DArray = np.gradient(a3DArray,1)
72 |     print('[ ' + str(b4DArray[0][2,2,2]) + ', ' + str(b4DArray[1][2,2,2]) + ', ' + str(b4DArray[2][2,2,2]) + ']')
73 |     b4DArray = np.gradient(a3DArray,2)
74 |     print('[ ' + str(b4DArray[0][2,2,2]) + ', ' + str(b4DArray[1][2,2,2]) + ', ' + str(b4DArray[2][2,2,2]) + ']')
75 |     b4DArray = np.gradient(a3DArray,3)
76 |     print('[ ' + str(b4DArray[0][2,2,2]) + ', ' + str(b4DArray[1][2,2,2]) + ', ' + str(b4DArray[2][2,2,2]) + ']')
77 |     b4DArray = np.gradient(a3DArray,4)
78 |     print('[ ' + str(b4DArray[0][2,2,2]) + ', ' + str(b4DArray[1][2,2,2]) + ', ' + str(b4DArray[2][2,2,2]) + ']')
79 |     b4DArray = np.gradient(a3DArray,5)
80 |     print('[ ' + str(b4DArray[0][2,2,2]) + ', ' + str(b4DArray[1][2,2,2]) + ', ' + str(b4DArray[2][2,2,2]) + ']')
81 | 
82 | 
--------------------------------------------------------------------------------
/solarbextrapolation/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 | """
3 | This package contains the affiliated package tests.
4 | """
5 | 
--------------------------------------------------------------------------------
/solarbextrapolation/tests/coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source = {packagename}
3 | omit =
4 |    {packagename}/_astropy_init*
5 |    {packagename}/conftest*
6 |    {packagename}/cython_version*
7 |    {packagename}/setup_package*
8 |    {packagename}/*/setup_package*
9 |    {packagename}/*/*/setup_package*
10 |    {packagename}/tests/*
11 |    {packagename}/*/tests/*
12 |    {packagename}/*/*/tests/*
13 |    {packagename}/version*
14 | 
15 | [report]
16 | exclude_lines =
17 |    # Have to re-enable the standard pragma
18 |    pragma: no cover
19 | 
20 |    # Don't complain about packages we have installed
21 |    except ImportError
22 | 
23 |    # Don't complain if tests don't hit assertions
24 |    raise AssertionError
25 |    raise NotImplementedError
26 | 
27 |    # Don't complain about script hooks
28 |    def main\(.*\):
29 | 
30 |    # Ignore branches that don't pertain to this version of Python
31 |    pragma: py{ignore_python_version}
32 | 
--------------------------------------------------------------------------------
/solarbextrapolation/tests/setup_package.py:
--------------------------------------------------------------------------------
1 | def get_package_data():
2 |     return {
3 |         _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc']}
--------------------------------------------------------------------------------
/solarbextrapolation/tests/test_scripts.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Tests for the solarbextrapolation classes:
4 | 
5 | Map3D, Preprocessors, Extrapolators and PotentialExtrapolator.
6 | """
7 | 
8 | import pytest
9 | import tempfile
10 | import pkg_resources
11 | 
12 | import numpy as np
13 | import os
14 | from astropy import units as u
15 | 
16 | import sunpy.map
17 | 
18 | #from ..classes import *
19 | from solarbextrapolation.map3dclasses import Map3D  #, Map3DCube
20 | from solarbextrapolation.preprocessors import Preprocessors
21 | from solarbextrapolation.extrapolators import Extrapolators, PotentialExtrapolator
22 | 
23 | 
24 | # Tests for the Map3D class
25 | 
26 | def test_create_scalar_Map3d():
27 |     aNumpyArray = np.zeros((2,2,2))
28 |     aMetaDict = { 'file': 'test Map3D object'}
29 |     aMap3D = Map3D(aNumpyArray, aMetaDict)
30 |     assert aMap3D.is_scalar
31 | 
32 | @pytest.fixture
33 | def test_create_vector_Map3d():
34 |     aNumpyArray = np.zeros((2,2,2,2))
35 |     aMetaDict = { 'file': 'test Map3D object'}
36 |     aMap3D = Map3D(aNumpyArray, aMetaDict)
37 |     assert not aMap3D.is_scalar
38 |     return aMap3D
39 | 
40 | @pytest.fixture
41 | def test_save_Map3d(test_create_vector_Map3d):
42 |     afilename = tempfile.NamedTemporaryFile(suffix='np').name
43 |     test_create_vector_Map3d.save(afilename)
44 |     assert os.path.isfile(afilename)
45 |     return afilename
46 | 
47 | def test_load_Map3d(test_save_Map3d):
48 |     aMap3D = Map3D.load(test_save_Map3d)
49 |     # Compare the returned data array
50 |     assert (aMap3D.data == np.zeros((2,2,2,2))).all()
51 | 
52 | 
53 | # Tests for the preprocessor class
54 | 
55 | @pytest.fixture
56 | def test_create_preprocessor():
57 |     aNumpyArray = np.zeros((2,2))
58 |     aMetaDict = { 'file': 'test SunPy Map object'}
59 |     aMap2D = sunpy.map.Map(aNumpyArray, aMetaDict)
60 |     aPreprocessor = Preprocessors(aMap2D)
61 |     return aPreprocessor
62 | 
63 | def test_preprocessor_preprocess_method(test_create_preprocessor):
64 |     test_create_preprocessor.preprocess()
65 | 
66 | @pytest.fixture
67 | def test_create_extrapolator():
68 |     aNumpyArray = np.zeros((2,2))
69 |     aMetaDict = { 'file': 'test SunPy Map object'}
70 |     aMap2D = sunpy.map.Map(aNumpyArray, aMetaDict)
71 |     aExtrapolator = Extrapolators(aMap2D)
72 |     return aExtrapolator
73 | 
74 | def test_extrapolator_extrapolate_method(test_create_extrapolator):
75 |     test_create_extrapolator.extrapolate()
76 | 
77 | def test_create_and_run_preprocessor_subclass():
78 |     # Define the preprocessor as a child of the Preprocessors class
79 |     class PreZeros(Preprocessors):
80 |         def __init__(self, map_magnetogram):
81 |             super(PreZeros, self).__init__(map_magnetogram)
82 | 
83 |         def _preprocessor(self):
84 |             # Adding in custom parameters to the meta
85 |             self.meta['preprocessor_routine'] = 'Zeros Preprocessor'
86 | 
87 |             # Creating the trivial zeros map of the shape of the input map
88 |             map_output = sunpy.map.Map((np.zeros(self.map_input.data.shape),
89 |                                         self.meta))
90 | 
91 |             # Outputting the map.
92 |             return map_output
93 | 
94 |     # Instantiate the new child class
95 |     aNumpyArray = np.zeros((2,2))
96 |     aMetaDict = { 'file': 'test Map object'}
97 |     aMap2D = sunpy.map.Map(aNumpyArray, aMetaDict)
98 |     aPrePro = PreZeros(aMap2D.submap([0,10]*u.arcsec, [0,10]*u.arcsec))
99 |     aPreProData = aPrePro.preprocess()
100 | 
101 | 
102 | # Tests for the extrapolator class
103 | 
104 | def test_create_and_run_extrapolator_subclass():
105 |     # Define the extrapolator as a child of the Extrapolators class
106 |     class ExtZeros(Extrapolators):
107 |         def __init__(self, map_magnetogram, **kwargs):
108 |             super(ExtZeros, self).__init__(map_magnetogram, **kwargs)
109 |             # This is a test-fixing hack: TODO: FIXME
110 |             self.z = 1
111 | 
112 |         def _extrapolation(self):
113 |             # Adding in custom parameters to the meta
114 |             self.meta['extrapolator_routine'] = 'Zeros Extrapolator'
115 | 
116 |             arr_4d = np.zeros([self.map_boundary_data.data.shape[0], self.map_boundary_data.data.shape[1], self.z, 3])
117 |             return Map3D( arr_4d, self.meta )
118 | 
119 |     # Instantiate the new child class
120 |     afilename = tempfile.NamedTemporaryFile(suffix='np').name
121 |     aNumpyArray = np.zeros((2,2))
122 |     aMetaDict = { 'file': 'test SunPy Map object'}
123 |     aMap2D = sunpy.map.Map(aNumpyArray, aMetaDict)
124 |     aExt = ExtZeros(aMap2D, filepath=afilename)
125 |     aMap3D = aExt.extrapolate()
126 |     assert os.path.isfile(afilename)
127 | 
128 | # Tests for the extrapolators.PotentialExtrapolator class
129 | 
130 | def test_potential_extrapolator_subclass():
131 |     # Parameters for the extrapolator
132 |     xrange = u.Quantity([50, 300] * u.arcsec)
133 |     yrange = u.Quantity([-350, -100] * u.arcsec)
134 |     zrange = u.Quantity([0, 250] * u.arcsec)
135 |     shape = u.Quantity([5, 5, 5] * u.pixel)
136 | 
137 |     # Load HMI map (from fits file), then submap and resample.
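    # (assumes the sample HMI fits file is shipped as package data under
    #  solarbextrapolation/data)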
138 |     hmi_filename = pkg_resources.resource_filename('solarbextrapolation',
139 |                                                     'data/sdo-hmi_2011-02-14_20-34-12.fits')
140 |     map_boundary = sunpy.map.Map(hmi_filename)
141 |     map_boundary = map_boundary.submap(xrange, yrange).resample(shape[0:2], method='linear')
142 | 
143 |     # Extrapolate using python native code
144 |     aPotExt = PotentialExtrapolator(map_boundary, zshape=shape[2].value, zrange=zrange)
145 |     aMap3D = aPotExt.extrapolate(enable_numba=False)
146 | 
147 |     # Extrapolate using numba
148 |     aMap3D = aPotExt.extrapolate()
149 | 
--------------------------------------------------------------------------------
/solarbextrapolation/utilities.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from astropy import units as u
4 | import numpy as np
5 | import sunpy.map as mp
6 | from copy import deepcopy
7 | 
8 | __all__ = ["decompose_ang_len", "si_this_map_OLD", "si_this_map"]
9 | 
10 | 
11 | def decompose_ang_len(qua_input, **kwargs):
12 |     """
13 |     Function to help decompose quantities that have an equivalence between angles
14 |     and length, such as photospheric observational angles and object sizes.
15 |     The function uses an equivalence to convert into either length or angle
16 |     physical_type units.
17 | 
18 |     Parameters
19 |     ----------
20 | 
21 |     qua_input : `astropy.units.quantity.Quantity`
22 |         The quantity you wish decomposed.
23 | 
24 |     working_units : `astropy.units.quantity.Quantity`, optional
25 |         Unit that will be used for internal working and the returned quantity.
26 |         Ensure that it is of the correct physical type, angle or length.
27 | 
28 |     equivalencies : astropy equivalence, optional
29 |         Equivalence used to relate the length and angle units.
30 | 
31 |     """
32 |     # Parameters
33 |     working_units = kwargs.get('working_units', u.m) * 1.0
34 |     equivalence = kwargs.get('equivalencies', u.dimensionless_angles())
35 | 
36 |     # Do nothing if the input is dimensionless.
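    # (e.g. a bare u.Quantity(3.0) is simply returned here via .decompose())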
37 |     if qua_input.unit == u.Quantity(1.0).unit:
38 |         return qua_input.decompose()
39 |     else:
40 |         # Components of the quantity
41 |         value = qua_input.value
42 |         length_unit = 0.0 * u.m
43 |         length_exponent = 0.0
44 |         angle_unit = 0.0 * u.radian
45 |         angle_exponent = 0.0
46 | 
47 |         # If we have at least 1 base, populate from the first base
48 |         if len(qua_input.unit.bases) > 0:
49 |             if qua_input.unit.bases[0].physical_type == u.m.physical_type:
50 |                 length_unit = 1.0 * qua_input.unit.bases[0]
51 |                 length_exponent = qua_input.unit.powers[0]
52 | 
53 |                 # Convert to SI (meter here)
54 |                 length_unit = length_unit.to(u.m)
55 |             elif qua_input.unit.bases[0].physical_type == u.radian.physical_type:
56 |                 angle_unit = 1.0 * qua_input.unit.bases[0]
57 |                 angle_exponent = qua_input.unit.powers[0]
58 | 
59 |                 # Convert to SI (radian here)
60 |                 angle_unit = angle_unit.to(u.radian)
61 | 
62 |         # If we have 2 bases, populate from the second base
63 |         if len(qua_input.unit.bases) > 1:
64 |             if qua_input.unit.bases[1].physical_type == u.m.physical_type:
65 |                 length_unit = 1.0 * qua_input.unit.bases[1]
66 |                 length_exponent = qua_input.unit.powers[1]
67 | 
68 |                 # Convert to SI (meter here)
69 |                 length_unit = length_unit.to(u.m)
70 |             elif qua_input.unit.bases[1].physical_type == u.radian.physical_type:
71 |                 angle_unit = 1.0 * qua_input.unit.bases[1]
72 |                 angle_exponent = qua_input.unit.powers[1]
73 | 
74 |                 # Convert to SI (radian here)
75 |                 angle_unit = angle_unit.to(u.radian)
76 | 
77 |         # Convert the incompatible base to the working units using the equivalence
78 |         if working_units.unit.physical_type == u.m.physical_type:
79 |             angle_unit = angle_unit.to(working_units, equivalencies=equivalence)
80 | 
81 |             # Strip out the units, so the output doesn't have squared length units
82 |             #angle_unit = angle_unit.value # Kept in case it causes bugs
83 |         elif working_units.unit.physical_type == u.radian.physical_type:
84 |             length_unit = length_unit.to(working_units, equivalencies=equivalence)
85 | 
86 |             # Strip out the units, so the output doesn't have squared length units
87 |             #length_unit = length_unit.value # Kept in case it causes bugs
88 |         # The quantity to return
89 |         quantity = value * length_unit ** length_exponent * angle_unit ** angle_exponent
90 |         # Change to the working unit if not dimensionless
91 |         if quantity.unit.physical_type != (u.m / u.m).decompose().physical_type:
92 |             quantity = quantity.to(working_units)
93 |         return quantity.decompose()
94 | 
95 | 
96 | def si_this_map_OLD(map):
97 |     """
98 |     Basic function to create a deep copy of a map but with all units in SI.
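
    Note: kept for reference; `si_this_map` below also converts the x/y-axis
    metadata to SI. A minimal usage sketch, assuming `aMap` carries a 'bunit'
    metadata entry convertible to Tesla (e.g. Gauss):

    >>> aMap_si = si_this_map_OLD(aMap) #doctest: +SKIP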
99 |     """
100 |     # Find out the value units and convert this and data to SI
101 |     units = 1.0 * u.Unit(map.meta['bunit']).to(u.Tesla) * u.Tesla
102 |     data = deepcopy(map.data) * units.value
103 | 
104 |     # ATM I don't convert the x-axis and y-axis to SI
105 | 
106 |     # Modify the map header to reflect all these changes
107 |     meta = deepcopy(map.meta)
108 |     meta['bunit'] = units.unit
109 |     meta['datamax'] = data.max()
110 |     meta['datamin'] = data.min()
111 |     #meta['cdelt1'] = 0.504295 # Following modified if we convert x/y-axes
112 |     #meta['cdelt2'] = 0.504295
113 |     #meta['cunit1'] = 'arcsec'
114 |     #meta['cunit2'] = 'arcsec'
115 |     #meta['crpix1'] = data.shape[1] / 2.0 + 0.5, # central x-pixel
116 |     #meta['crpix2'] = data.shape[0] / 2.0 + 0.5, # central y-pixel
117 |     #meta['CRVAL1'] = 0.000000
118 |     #meta['CRVAL2'] = 0.000000
119 | 
120 |     # Return the modified map
121 |     return mp.Map((data, meta))
122 | 
123 | 
124 | def si_this_map(map):
125 |     """
126 |     Basic function to create a deep copy of a map but with all units in SI.
127 |     """
128 |     # Find out the value units and convert this and data to SI
129 |     units = 1.0 * u.Unit(map.meta['bunit']).to(u.Tesla) * u.Tesla
130 |     data = deepcopy(map.data) * units.value
131 | 
132 |     # Setup the arc to length equivalence
133 |     obs_distance = map.dsun - map.rsun_meters
134 |     radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ]
135 | 
136 |     # Convert the x-axis and y-axis to SI
137 |     cdelt1 = (float(map.meta['cdelt1']) * u.Unit(map.meta['cunit1'])).to(u.meter, equivalencies=radian_length)
138 |     cdelt2 = (float(map.meta['cdelt2']) * u.Unit(map.meta['cunit2'])).to(u.meter, equivalencies=radian_length)
139 |     crpix1 = (float(map.meta['crpix1']) * u.Unit(map.meta['cunit1'])).to(u.meter, equivalencies=radian_length)
140 |     crpix2 = (float(map.meta['crpix2']) * u.Unit(map.meta['cunit2'])).to(u.meter, equivalencies=radian_length)
141 | 
142 |     # Modify the map header to reflect all these changes
143 |     meta = deepcopy(map.meta)
144 |     meta['bunit'] = 'Tesla' #units.unit
145 |     meta['datamax'] = data.max()
146 |     meta['datamin'] = data.min()
147 |     # Following modified if we convert x/y-axes
148 |     meta['cdelt1'] = str(cdelt1.value)
149 |     meta['cdelt2'] = str(cdelt2.value)
150 |     meta['cunit1'] = str(cdelt1.unit)
151 |     meta['cunit2'] = str(cdelt2.unit)
152 |     meta['crpix1'] = str(crpix1.value)
153 |     meta['crpix2'] = str(crpix2.value)
154 |     #meta['CRVAL1'] = 0.000000 # Reference data coordinates
155 |     #meta['CRVAL2'] = 0.000000
156 | 
157 |     # Return the modified map
158 |     return mp.Map((data, meta))
159 | 
--------------------------------------------------------------------------------
/solarbextrapolation/visualisation_functions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Jul 27 14:39:00 2015
4 | 
5 | @author: alex_
6 | """
7 | 
8 | 
9 | # Universal Imports
10 | import numpy as np
11 | import sunpy.map as mp
12 | from astropy import units as u
13 | 
14 | # Visualisation Imports
15 | from mayavi import mlab
16 | from solarbextrapolation.mayavi_seed_streamlines import SeedStreamline, Streamline
17 | from mayavi.tools.sources import vector_field
18 | 
19 | # Module Imports
20 | #from classes import *
21 | #from solarbextrapolation.map3dclasses import Map3D
22 | from solarbextrapolation.utilities import decompose_ang_len
23 | 
24 | def visualise(aMap3D, **kwargs):
25 |     """
26 |     Basic function for visualising a vector field from an extrapolator.
27 |     General usage involves passing the boundary map and volume vector field;
28 |     these are then aligned and plotted in mayavi.
29 |     The vector field will be represented by streamlines generated from the
30 |     given (or otherwise default) seed points.
31 |     The boundary data should be rendered in appropriate colours for the given
32 |     map data.
33 | 
34 |     Parameters
35 |     ----------
36 | 
37 |     aMap3D : Map3D
38 |         The 3D vector field from the extrapolator.
39 | 
40 |     debug : boolean, optional
41 |         If set, turns on debugging output.
42 | 
43 |     seeds : numpy.array, optional
44 |         If set, provides a list of manual seed points in the 3D vector field.
45 | 
46 |     boundary : sunpy.map, optional
47 |         If set, provides the 2D map to place in the visualisation at the base of
48 |         the volume.
49 | 
50 |     unit_length : `astropy.units.quantity.Quantity`, optional
51 |         If set, provides the length of one unit in MayaVi for scaling maps.
52 | 
53 |     boundary_unit : `astropy.units.quantity.Quantity`, optional
54 |         If set, provides a single unit for the x/y-axes of the boundary map.
55 | 
56 |     boundary_units : list, optional
57 |         If set, provides a list of units for the x/y-axes of the boundary map.
58 | 
59 |     volume_unit : `astropy.units.quantity.Quantity`, optional
60 |         If set, provides a single unit for the x/y/z-axes of the 3D vector field.
61 | 
62 |     volume_units : list, optional
63 |         If set, provides a list of units for the x/y/z-axes of the 3D vector field.
64 | 
65 |     show_boundary_axes : boolean, optional
66 |         If set, enables the display of the boundary map axes.
67 | 
68 |     show_volume_axes : boolean, optional
69 |         If set, enables the display of the 3D vector field axes.
70 | 
71 |     """
72 | 
73 |     # Optional parameters
74 |     boo_debug = kwargs.get('debug', False)
75 |     np_seeds = kwargs.get('seeds', None)
76 |     boundary = kwargs.get('boundary', None)
77 |     mayavi_unit_length = kwargs.get('unit_length', 1.0 * u.Mm) * 1.0
78 |     boundary_unit = kwargs.get('boundary_unit', mayavi_unit_length) * 1.0
79 |     boundary_units = kwargs.get('boundary_units', [ boundary_unit, boundary_unit, boundary_unit ])
80 |     volume_unit = kwargs.get('volume_unit', mayavi_unit_length) * 1.0
81 |     volume_units = kwargs.get('volume_units', [ volume_unit, volume_unit, volume_unit ])
82 |     show_boundary_axes = kwargs.get('show_boundary_axes', True)
83 |     show_volume_axes = kwargs.get('show_volume_axes', True)
84 | 
85 |     # Setup the arc to length equivalence
86 |     obs_distance = aMap3D.dsun - aMap3D.rsun_meters
87 |     radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ]
88 | 
89 |     # Slice (scale) the fields to make the vectors usable in mayavi.
90 |     int_slice_scale = 1
91 |     print("shape: " + str(aMap3D.data.shape))
92 |     npm_3d_sliced = aMap3D.data[::int_slice_scale,::int_slice_scale,::int_slice_scale,:]
93 | 
94 |     # Plot the main vector field (volume).
95 |     fig = mlab.figure()
96 | 
97 |     # Make 3D coords for every point in the 3D grid.
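    # (The angular observation ranges are converted to lengths, scaled into
    #  Mayavi units, and np.mgrid's complex step counts (n*1j) then sample
    #  each scaled range at n evenly spaced points.)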
98 |     x_range = u.Quantity([ decompose_ang_len(aMap3D.xobsrange[0], equivalencies=radian_length),
99 |                            decompose_ang_len(aMap3D.xobsrange[1], equivalencies=radian_length) ])
100 |     y_range = u.Quantity([ decompose_ang_len(aMap3D.yobsrange[0], equivalencies=radian_length),
101 |                            decompose_ang_len(aMap3D.yobsrange[1], equivalencies=radian_length) ])
102 |     #z_range = aMap3D.zrange.to(u.meter, equivalencies=radian_length)
103 |     z_range = u.Quantity([ decompose_ang_len(aMap3D.zrange[0], equivalencies=radian_length),
104 |                            decompose_ang_len(aMap3D.zrange[1], equivalencies=radian_length) ])
105 |     x_range_scaled = (x_range/mayavi_unit_length).decompose().value
106 |     y_range_scaled = (y_range/mayavi_unit_length).decompose().value
107 |     z_range_scaled = (z_range/mayavi_unit_length).decompose().value
108 |     X, Y, Z = np.mgrid[x_range_scaled[0]:x_range_scaled[1]:npm_3d_sliced.shape[0]*1j,
109 |                        y_range_scaled[0]:y_range_scaled[1]:npm_3d_sliced.shape[1]*1j,
110 |                        z_range_scaled[0]:z_range_scaled[1]:npm_3d_sliced.shape[2]*1j]
111 |     vec_field = vector_field(X, Y, Z, npm_3d_sliced[:,:,:,0], npm_3d_sliced[:,:,:,1], npm_3d_sliced[:,:,:,2],
112 |                              name='Magnetic Vector Field', figure=fig)
113 |     vec_field_mag = mlab.pipeline.extract_vector_norm(vec_field, name="Magnetic Field Magnitude")
114 | 
115 |     # Place a small outline around the data cube
116 |     mlab.outline()
117 | 
118 |     if show_volume_axes:
119 |         # Label axes
120 |         axes = mlab.axes()
121 |         x_range_axis = decompose_ang_len((x_range/volume_units[0]).decompose(), equivalencies=radian_length, working_units=volume_units[0]) #(x_range/volume_units[0]).decompose()
122 |         y_range_axis = decompose_ang_len((y_range/volume_units[1]).decompose(), equivalencies=radian_length, working_units=volume_units[1]) #(y_range/volume_units[1]).decompose()
123 |         z_range_axis = decompose_ang_len((z_range/volume_units[2]).decompose(), equivalencies=radian_length, working_units=volume_units[2]) #(z_range/volume_units[2]).decompose()
124 |         if boo_debug:
125 |             print('\n\n')
126 |             print('x_range: ' + str(x_range))
127 |             print('y_range: ' + str(y_range))
128 |             print('z_range: ' + str(z_range))
129 |             print('\n\n')
130 |             print('x_range_axis: ' + str(x_range_axis))
131 |             print('y_range_axis: ' + str(y_range_axis))
132 |             print('z_range_axis: ' + str(z_range_axis))
133 |             print('\n\n')
134 |             print('x_range_axis[0]: ' + str(x_range_axis[0]))
135 |             print('y_range_axis[0]: ' + str(y_range_axis[0]))
136 |             print('z_range_axis[0]: ' + str(z_range_axis[0]))
137 | 
138 |             print('\nx_range_axis[0].value: ' + str(x_range_axis[0].value))
139 |             print('x_range_axis[0].unit: ' + str(x_range_axis[0].unit))
140 |             print('type(x_range_axis[0].unit): ' + str(type(x_range_axis[0].unit)))
141 | 
142 |         axes.axes.ranges = np.array([ x_range_axis[0], x_range_axis[1], y_range_axis[0], y_range_axis[1], z_range_axis[0], z_range_axis[1]])
143 |         axes.axes.use_ranges = True
144 |         #axes.axes.ranges = np.array([ 0.0, 10.0, 0.0, 10.0, z_range_axis[0], z_range_axis[1]])
145 |         axes.axes.x_label = 'Solar X (' + unit_label(volume_units[0]) + ')'
146 |         axes.axes.y_label = 'Solar Y (' + unit_label(volume_units[1]) + ')'
147 |         axes.axes.z_label = 'Z (' + unit_label(volume_units[2]) + ')'
148 | 
149 |     # Plot the seed points
150 |     if np_seeds is None:
151 |         # Generate a plane for the streamline seed points
152 |         streamline = Streamline()
153 |         vec_field_mag.add_child(streamline)
154 |         streamline.stream_tracer.integration_direction = 'both'
155 |         streamline.seed.widget = streamline.seed.widget_list[2]
156 |         streamline.seed.widget.resolution = 10
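        # (widget_list[2] is taken here to be the plane seed widget; the
        #  commented lines below would hide the interactive widget)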
157 |         #streamline.seed.widget.enabled = False
158 |         #streamline.seed.widget.interactor = None
159 | 
160 |         # Some necessary points within the volume
161 |         z = (0.15 * (z_range_scaled[1] - z_range_scaled[0])) + z_range_scaled[0]
162 |         x_mid = (x_range_scaled[0] + x_range_scaled[1])/2.0
163 |         y_mid = (y_range_scaled[0] + y_range_scaled[1])/2.0
164 | 
165 |         # Orientate, position and scale the plane
166 |         streamline.seed.widget.normal_to_z_axis = True
167 |         streamline.seed.widget.center = np.array([ x_mid, y_mid, z])
168 |         streamline.seed.widget.point1 = np.array([ x_range_scaled[1], y_range_scaled[0], z])
169 |         streamline.seed.widget.point2 = np.array([ x_range_scaled[0], y_range_scaled[1], z])
170 |         streamline.seed.widget.origin = np.array([ x_range_scaled[0], y_range_scaled[0], z])
171 | 
172 |         # Update the render
173 |         scene = fig.scene
174 |         scene.render()
175 |     else:
176 |         points = mlab.points3d(np_seeds[:,0], np_seeds[:,1], np_seeds[:,2])
177 |         # Make the points smaller
178 |         points.glyph.glyph.scale_factor = 10.0 #mayavi_scale
179 |         # Make the points blue
180 |         points.actor.property.color = (0.2,0,1)
181 |         # Create the custom streamline object
182 |         streamline = SeedStreamline(seed_points=np_seeds)
183 | 
184 |         # Add the streamline object to the plot and make it use the magnetic field data,
185 |         # by adding it as a child of the field we created earlier.
186 |         # We add it to the magnitude field (which is in itself a child of bfield)
187 |         # so that it picks up the scalar values and colours the lines.
188 |         vec_field_mag.add_child(streamline)
189 | 
190 |         # Adjust some of the streamline appearance parameters
191 |         streamline.module_manager.scalar_lut_manager.lut_mode = 'winter' #'Greys'
192 |         streamline.stream_tracer.integration_direction = 'both'
193 |         streamline.stream_tracer.maximum_propagation = 500.0
194 |         streamline.update_pipeline() # This doesn't seem to work ATM
195 | 
196 | 
197 |     # Add the boundary data 2D map
198 |     if boundary:
199 |         #x_range = boundary.xrange.to(u.meter, equivalencies=radian_length)
200 |         x_range = u.Quantity([ decompose_ang_len(boundary.xrange[0], equivalencies=radian_length),
201 |                                decompose_ang_len(boundary.xrange[1], equivalencies=radian_length) ])
202 |         if boo_debug: print('\nboundary: x_range: ' + str(x_range))
203 |         #y_range = boundary.yrange.to(u.meter, equivalencies=radian_length)
204 |         y_range = u.Quantity([ decompose_ang_len(boundary.yrange[0], equivalencies=radian_length),
205 |                                decompose_ang_len(boundary.yrange[1], equivalencies=radian_length) ])
206 |         x_range_scaled = (x_range/mayavi_unit_length).decompose().value
207 |         if boo_debug: print('\nboundary: x_range_scaled: ' + str(x_range_scaled))
208 |         y_range_scaled = (y_range/mayavi_unit_length).decompose().value
209 | 
210 |         # Create explicit points in 3D space
211 |         X, Y = np.mgrid[x_range_scaled[0]:x_range_scaled[1]:boundary.data.shape[0]*1j,
212 |                         y_range_scaled[0]:y_range_scaled[1]:boundary.data.shape[1]*1j]
213 | 
214 |         # Plot and add to the current figure
215 |         img_boundary = mlab.pipeline.array2d_source(X, Y, boundary.data, figure=fig)
216 |         img_boundary = mlab.pipeline.image_actor(img_boundary, figure=fig)
217 | 
218 |         # Color the image according to the data
219 |         mayavi_ct = boundary.plot_settings['cmap'](range(255))
220 |         img_boundary.module_manager.scalar_lut_manager.lut.table = mayavi_ct*255
221 | 
222 |         # Legend details
223 |         img_boundary.module_manager.scalar_lut_manager.show_legend = True #module_manager2.scalar_lut_manager.show_legend = True
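        # (the position below is in normalised viewport coordinates,
        #  anchoring the colour bar near the lower left of the window)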
224 |         img_boundary.module_manager.scalar_lut_manager.scalar_bar_representation.position = np.array([ 0.1, 0.1 ])
225 | 
226 |         # Place a small outline around the data cube
227 |         mlab.outline()
228 | 
229 |         # Show the axes if selected
230 |         if show_boundary_axes:
231 |             axes = mlab.axes()
232 | 
233 |             # Get the ranges of the boundary and scale to the selected units
234 |             x_range = boundary.xrange.to(boundary_units[0].unit, equivalencies=radian_length)
235 |             y_range = boundary.yrange.to(boundary_units[1].unit, equivalencies=radian_length)
236 |             x_range_scaled = (x_range/boundary_units[0]).decompose().value
237 |             y_range_scaled = (y_range/boundary_units[1]).decompose().value
238 | 
239 |             # Update the ranges manually to use custom units for the boundary
240 |             axes.axes.ranges = np.array([ x_range_scaled[0], x_range_scaled[1], y_range_scaled[0], y_range_scaled[1], 0, 0])
241 |             axes.axes.use_ranges = True
242 |             axes.axes.x_label = 'Solar X (' + unit_label(boundary_units[0]) + ')'
243 |             axes.axes.y_label = 'Solar Y (' + unit_label(boundary_units[1]) + ')'
244 | 
245 |     return fig
246 | 
247 | 
248 | def unit_label(quantity):
249 |     """
250 |     Small function to return just the unit string if the quantity's value is
251 |     1.0, or the full quantity string otherwise.
252 |     """
253 |     if quantity.value == 1.0:
254 |         return str(quantity.unit)
255 |     return str(quantity)
256 | 
257 | 
--------------------------------------------------------------------------------