├── calcs
│   ├── __init__.py
│   ├── euler
│   │   ├── __init__.py
│   │   ├── LICENSE.md
│   │   ├── synthetic_test.py
│   │   ├── README.md
│   │   ├── plot_functions.py
│   │   ├── euler_python.py
│   │   └── estimates_statistics.py
│   ├── igrf
│   │   ├── __init__.py
│   │   ├── pyIGRF_acknowledgements.md
│   │   ├── pyIGRF_LICENCE.txt
│   │   └── SHC_files
│   │       └── IGRF3.SHC
│   ├── worms
│   │   ├── __init__.py
│   │   ├── BSDWorms_COPYRIGHT.txt
│   │   ├── FourierDomainGrid.py
│   │   └── FftUtils.py
│   ├── GridData_no_pandas.py
│   ├── PSplot.py
│   ├── ConvolutionFilter.py
│   └── PCAICA.py
├── ArcGIS_Pro
│   ├── __init__.py
│   ├── GeophysicalProcessor.TiltAngle.pyt.xml
│   ├── GeophysicalProcessor.ReductionToPole.pyt.xml
│   ├── GeophysicalProcessor.RemoveRegionalTrend.pyt.xml
│   ├── GeophysicalProcessor.VerticalIntegration.pyt.xml
│   ├── GeophysicalProcessor.TotalHorizontalGradient.pyt.xml
│   ├── GeophysicalProcessor.pyt.xml
│   ├── GeophysicalProcessor.AutomaticGainControl.pyt.xml
│   ├── GeophysicalProcessor.AnalyticSignal.pyt.xml
│   ├── GeophysicalProcessor.DirectionalButterworthBandPass.pyt.xml
│   ├── GeophysicalProcessor.LowPassFilter.pyt.xml
│   ├── GeophysicalProcessor.ComputeDerivative.pyt.xml
│   ├── GeophysicalProcessor.UpwardContinuation.pyt.xml
│   ├── GeophysicalProcessor.DownwardContinuation.pyt.xml
│   ├── GeophysicalProcessor.HighPassFilter.pyt.xml
│   └── GeophysicalProcessor.BandPassFilter.pyt.xml
├── requirements.txt
├── dialog.png
├── icon.png
├── .gitattributes
├── resources.qrc
├── repository.xml
├── scripts
│   ├── compile-strings.sh
│   ├── run-env-linux.sh
│   └── update-strings.sh
├── i18n
│   └── af.ts
├── plugins.xml
├── help
│   ├── source
│   │   ├── index.rst
│   │   └── conf.py
│   ├── make.bat
│   └── Makefile
├── .github
│   └── dependabot.yml
├── README.txt
├── LICENSE
├── __init__.py
├── README.html
├── SGTool_dockwidget.py
├── pb_tool.cfg
├── .gitignore
├── plugin_upload.py
├── metadata.txt
├── resources.py
├── Makefile
└── pylintrc
/calcs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/calcs/euler/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/calcs/igrf/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/calcs/worms/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | scikit-learn
3 |
--------------------------------------------------------------------------------
/dialog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swaxi/SGTool/HEAD/dialog.png
--------------------------------------------------------------------------------
/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swaxi/SGTool/HEAD/icon.png
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/resources.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | icon.png
4 |
5 |
6 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.TiltAngle.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20250829 17023200 1.0 TRUE
3 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.ReductionToPole.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20251003 18001600 1.0 TRUE
3 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.RemoveRegionalTrend.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20250829 17172000 1.0 TRUE
3 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.VerticalIntegration.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20250829 17075000 1.0 TRUE
3 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.TotalHorizontalGradient.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20250906 12555400 1.0 TRUE
3 |
--------------------------------------------------------------------------------
/repository.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 3.0.0
4 | https://github.com/swaxi/SGTool/archive/refs/heads/main.zip
5 |
6 |
7 |
--------------------------------------------------------------------------------
/scripts/compile-strings.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | LRELEASE=$1
3 | LOCALES=$2
4 |
5 |
6 | for LOCALE in ${LOCALES}
7 | do
8 | echo "Processing: ${LOCALE}.ts"
9 | # Note we don't use pylupdate with qt .pro file approach as it is flakey
10 | # about what is made available.
11 | $LRELEASE i18n/${LOCALE}.ts
12 | done
13 |
--------------------------------------------------------------------------------
/i18n/af.ts:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | @default
5 |
6 |
7 | Good morning
8 | Goeie more
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Simple Potential Field Processing
5 | https://github.com/swaxi/SGTool
6 | 3.34.0
7 | main.zip
8 | Mark Jessell
9 | https://github.com/swaxi/SGTool/archive/refs/heads/main.zip
10 |
11 |
12 |
--------------------------------------------------------------------------------
/help/source/index.rst:
--------------------------------------------------------------------------------
1 | .. SGTool documentation master file, created by
2 | sphinx-quickstart on Sun Feb 12 17:11:03 2012.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to SGTool's documentation!
7 | ============================================
8 |
9 | Contents:
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 |
14 | Indices and tables
15 | ==================
16 |
17 | * :ref:`genindex`
18 | * :ref:`modindex`
19 | * :ref:`search`
20 |
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "github-actions" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 | 20250829 16022300 1.0 TRUE 20250829 171303 c:\program files\arcgis\pro\Resources\Help\gp GeophysicalProcessor ArcToolbox Toolbox
3 |
--------------------------------------------------------------------------------
/scripts/run-env-linux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | QGIS_PREFIX_PATH=/usr/local/qgis-2.0
4 | if [ -n "$1" ]; then
5 | QGIS_PREFIX_PATH=$1
6 | fi
7 |
8 | echo ${QGIS_PREFIX_PATH}
9 |
10 |
11 | export QGIS_PREFIX_PATH=${QGIS_PREFIX_PATH}
12 | export QGIS_PATH=${QGIS_PREFIX_PATH}
13 | export LD_LIBRARY_PATH=${QGIS_PREFIX_PATH}/lib
14 | export PYTHONPATH=${QGIS_PREFIX_PATH}/share/qgis/python:${QGIS_PREFIX_PATH}/share/qgis/python/plugins:${PYTHONPATH}
15 |
16 | echo "QGIS PATH: $QGIS_PREFIX_PATH"
17 | export QGIS_DEBUG=0
18 | export QGIS_LOG_FILE=/tmp/inasafe/realtime/logs/qgis.log
19 |
20 | export PATH=${QGIS_PREFIX_PATH}/bin:$PATH
21 |
22 | echo "This script is intended to be sourced to set up your shell to"
23 | echo "use a QGIS 2.0 built in $QGIS_PREFIX_PATH"
24 | echo
25 | echo "To use it do:"
26 | echo "source $BASH_SOURCE /your/optional/install/path"
27 | echo
28 | echo "Then use the make file supplied here e.g. make guitest"
29 |
--------------------------------------------------------------------------------
/calcs/igrf/pyIGRF_acknowledgements.md:
--------------------------------------------------------------------------------
1 | pyIGRF is partially based on ChaosMagPy by Clemens Kloss (DTU Space, Denmark)
2 |
3 | ChaosMagPy can be found at: https://doi.org/10.5281/zenodo.3352398
4 |
5 | and is based on work by the following authors:
6 |
7 | Finlay, C.C., Olsen, N., Kotsiaros, S., Gillet, N. and Toeffner-Clausen, L. (2016),
8 | Recent geomagnetic secular variation from Swarm and ground observatories
9 | as estimated in the CHAOS-6 geomagnetic field model Earth Planets Space,
10 | Vol 68, 112. doi: 10.1186/s40623-016-0486-1
11 |
12 | Some of the functions were taken from the 2019 IAGA Summer School examples at
13 | https://github.com/MagneticEarth/IAGA_SummerSchool2019/
14 |
15 | This was written by:
16 | - David J. Kerridge (British Geological Survey, UK)
17 |
18 | with contributions from:
19 | - William Brown (British Geological Survey, UK)
20 | - Grace Cox (British Geological Survey, UK)
21 | - Ashley R.A. Smith (University of Edinburgh, UK)
22 |
23 |
--------------------------------------------------------------------------------
/README.txt:
--------------------------------------------------------------------------------
1 | Plugin Builder Results
2 |
3 | Your plugin SGTool was created in:
4 | C:/Users/00073294/Dropbox/WAXI4/gis/SGTool\sgtool
5 |
6 | Your QGIS plugin directory is located at:
7 | C:/Users/00073294/AppData/Roaming/QGIS/QGIS3/profiles/default/python/plugins
8 |
9 | What's Next:
10 |
11 | * Copy the entire directory containing your new plugin to the QGIS plugin
12 | directory
13 |
14 | * Compile the resources file using pyrcc5
15 |
16 | * Run the tests (``make test``)
17 |
18 | * Test the plugin by enabling it in the QGIS plugin manager
19 |
20 | * Customize it by editing the implementation file: ``SGTool.py``
21 |
22 | * Create your own custom icon, replacing the default icon.png
23 |
24 | * Modify your user interface by opening SGTool_dockwidget_base.ui in Qt Designer
25 |
26 | * You can use the Makefile to compile your Ui and resource files when
27 | you make changes. This requires GNU make (gmake)
28 |
29 | For more information, see the PyQGIS Developer Cookbook at:
30 | http://www.qgis.org/pyqgis-cookbook/index.html
31 |
32 | (C) 2011-2018 GeoApt LLC - geoapt.com
33 |
--------------------------------------------------------------------------------
/calcs/GridData_no_pandas.py:
--------------------------------------------------------------------------------
1 | import processing
2 |
3 |
4 | class QGISGridData:
5 | def __init__(self, iface):
6 | self.iface = iface
7 |
8 | def launch_idw_dialog(self, input, zcolumn, cell_size, mask):
9 | """
10 | Launch the v.surf.idw dialog from the Processing Toolbox.
11 | """
12 | pre_filled_params = {
13 | "input": input,
14 | "column": zcolumn,
15 | "GRASS_REGION_CELLSIZE_PARAMETER": cell_size, # cell size from sgtoosl dialog
16 | }
17 | alg_id = "grass7:v.surf.idw"
18 |
19 | processing.execAlgorithmDialog(alg_id, pre_filled_params)
20 |
21 | def launch_multi_bspline_dialog(self, input, zcolumn, cell_size, mask):
22 |
23 | # Set up the parameters you want pre-filled
24 | pre_filled_params = {
25 | "SHAPES": input, # Reference to your input layer
26 | "FIELD": zcolumn, # Z-value field
27 | "TARGET_USER_SIZE": cell_size, # cell size
28 | }
29 |
30 | alg_id = "sagang:multilevelbspline"
31 | processing.execAlgorithmDialog(alg_id, pre_filled_params)
32 |
--------------------------------------------------------------------------------
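A hypothetical usage sketch for the class above, assuming iface is the QgsInterface instance QGIS hands to the plugin and point_layer is a loaded point layer with a numeric "z" field (both names are illustrative, not part of this file):

gridder = QGISGridData(iface)
# Opens the GRASS v.surf.idw Processing dialog with input, field and cell size pre-filled.
gridder.launch_idw_dialog(point_layer, "z", cell_size=250.0, mask=None)
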
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 markjessell
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/calcs/igrf/pyIGRF_LICENCE.txt:
--------------------------------------------------------------------------------
1 | License
2 | =======
3 |
4 | MIT License
5 |
6 | Copyright (c) 2024 Ciaran Beggan
7 |
8 | Permission is hereby granted, free of charge, to any person obtaining a copy
9 | of this software and associated documentation files (the "Software"), to deal
10 | in the Software without restriction, including without limitation the rights
11 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | copies of the Software, and to permit persons to whom the Software is
13 | furnished to do so, subject to the following conditions:
14 |
15 | The above copyright notice and this permission notice shall be included in all
16 | copies or substantial portions of the Software.
17 |
18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | SOFTWARE.
25 |
--------------------------------------------------------------------------------
/calcs/worms/BSDWorms_COPYRIGHT.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013-2021, Franklin G. Horowitz
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without modification,
5 | are permitted provided that the following conditions are met:
6 |
7 | Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 |
--------------------------------------------------------------------------------
/calcs/euler/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright (c) 2019 Felipe F. Melo and Valéria C.F. Barbosa.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice,
8 | this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above copyright notice,
10 | this list of conditions and the following disclaimer in the documentation
11 | and/or other materials provided with the distribution.
12 | * Neither the names of the copyright holders nor the names of any contributors
13 | may be used to endorse or promote products derived from this software
14 | without specific prior written permission.
15 |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/scripts/update-strings.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | LOCALES=$*
3 |
4 | # Get newest .py files so we don't update strings unnecessarily
5 |
6 | CHANGED_FILES=0
7 | PYTHON_FILES=`find . -regex ".*\(ui\|py\)$" -type f`
8 | for PYTHON_FILE in $PYTHON_FILES
9 | do
10 | CHANGED=$(stat -c %Y $PYTHON_FILE)
11 | if [ ${CHANGED} -gt ${CHANGED_FILES} ]
12 | then
13 | CHANGED_FILES=${CHANGED}
14 | fi
15 | done
16 |
17 | # Qt translation stuff
18 | # for .ts file
19 | UPDATE=false
20 | for LOCALE in ${LOCALES}
21 | do
22 | TRANSLATION_FILE="i18n/$LOCALE.ts"
23 | if [ ! -f ${TRANSLATION_FILE} ]
24 | then
25 | # Force translation string collection as we have a new language file
26 | touch ${TRANSLATION_FILE}
27 | UPDATE=true
28 | break
29 | fi
30 |
31 | MODIFICATION_TIME=$(stat -c %Y ${TRANSLATION_FILE})
32 | if [ ${CHANGED_FILES} -gt ${MODIFICATION_TIME} ]
33 | then
34 | # Force translation string collection as a .py file has been updated
35 | UPDATE=true
36 | break
37 | fi
38 | done
39 |
40 | if [ ${UPDATE} == true ]
41 | # retrieve all python files
42 | then
43 | echo ${PYTHON_FILES}
44 | # update .ts
45 | echo "Please provide translations by editing the translation files below:"
46 | for LOCALE in ${LOCALES}
47 | do
48 | echo "i18n/"${LOCALE}".ts"
49 | # Note we don't use pylupdate with qt .pro file approach as it is flakey
50 | # about what is made available.
51 | pylupdate4 -noobsolete ${PYTHON_FILES} -ts i18n/${LOCALE}.ts
52 | done
53 | else
54 | echo "No need to edit any translation files (.ts) because no python files"
55 | echo "has been updated since the last update translation. "
56 | fi
57 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | /***************************************************************************
4 | SGTool
5 | A QGIS plugin
6 | Simple Potential Field Processing
7 | Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
8 | -------------------
9 | begin : 2024-11-17
10 | copyright : (C) 2024 by Mark Jessell
11 | email : mark.jessell@uwa.edu.au
12 | git sha : $Format:%H$
13 | ***************************************************************************/
14 |
15 | /***************************************************************************
16 | * *
17 | * This program is free software; you can redistribute it and/or modify *
18 | * it under the terms of the GNU General Public License as published by *
19 | * the Free Software Foundation; either version 2 of the License, or *
20 | * (at your option) any later version. *
21 | * *
22 | ***************************************************************************/
23 | This script initializes the plugin, making it known to QGIS.
24 | """
25 |
26 |
27 | # noinspection PyPep8Naming
28 | def classFactory(iface): # pylint: disable=invalid-name
29 | """Load SGTool class from file SGTool.
30 |
31 | :param iface: A QGIS interface instance.
32 | :type iface: QgsInterface
33 | """
34 | #
35 | from .SGTool import SGTool
36 |
37 | return SGTool(iface)
38 |
--------------------------------------------------------------------------------
/README.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Plugin Builder Results
4 |
5 | Congratulations! You just built a plugin for QGIS!
6 |
7 |
8 | Your plugin SGTool was created in:
9 | C:/Users/00073294/Dropbox/WAXI4/gis/SGTool\sgtool
10 |
11 | Your QGIS plugin directory is located at:
12 | C:/Users/00073294/AppData/Roaming/QGIS/QGIS3/profiles/default/python/plugins
13 |
14 | What's Next
15 |
16 | In your plugin directory, compile the resources file using pyrcc5 (simply run make if you have automake, or use pb_tool)
17 | Test the generated sources using make test (or run tests from your IDE)
18 | Copy the entire directory containing your new plugin to the QGIS plugin directory (see Notes below)
19 | Test the plugin by enabling it in the QGIS plugin manager
20 | Customize it by editing the implementation file SGTool.py
21 | Create your own custom icon, replacing the default icon.png
22 | Modify your user interface by opening SGTool_dockwidget_base.ui in Qt Designer
23 |
24 | Notes:
25 |
26 | You can use the Makefile to compile and deploy when you
27 | make changes. This requires GNU make (gmake). The Makefile is ready to use, however you
28 | will have to edit it to add additional Python source files, dialogs, and translations.
29 | You can also use pb_tool to compile and deploy your plugin. Tweak the pb_tool.cfg file included with your plugin as you add files. Install pb_tool using
30 | pip or easy_install. See http://loc8.cc/pb_tool for more information.
31 |
32 |
33 |
34 |
35 | For information on writing PyQGIS code, see http://loc8.cc/pyqgis_resources for a list of resources.
36 |
37 |
38 |
39 | ©2011-2018 GeoApt LLC - geoapt.com
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/SGTool_dockwidget.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | /***************************************************************************
4 | SGToolDockWidget
5 | A QGIS plugin
6 | Simple Potential Field Processing
7 | Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
8 | -------------------
9 | begin : 2024-11-17
10 | git sha : $Format:%H$
11 | copyright : (C) 2024 by Mark Jessell
12 | email : mark.jessell@uwa.edu.au
13 | ***************************************************************************/
14 |
15 | /***************************************************************************
16 | * *
17 | * This program is free software; you can redistribute it and/or modify *
18 | * it under the terms of the GNU General Public License as published by *
19 | * the Free Software Foundation; either version 2 of the License, or *
20 | * (at your option) any later version. *
21 | * *
22 | ***************************************************************************/
23 | """
24 |
25 | import os
26 |
27 | from qgis.PyQt import QtGui, QtWidgets, uic
28 | from qgis.PyQt.QtCore import pyqtSignal
29 |
30 | FORM_CLASS, _ = uic.loadUiType(
31 | os.path.join(os.path.dirname(__file__), "SGTool_dockwidget_base.ui")
32 | )
33 |
34 |
35 | class SGToolDockWidget(QtWidgets.QDockWidget, FORM_CLASS):
36 |
37 | closingPlugin = pyqtSignal()
38 |
39 | def __init__(self, parent=None):
40 | """Constructor."""
41 | super(SGToolDockWidget, self).__init__(parent)
42 | # Set up the user interface from Designer.
43 | # After setupUI you can access any designer object by doing
44 | # self., and you can use autoconnect slots - see
45 | # http://doc.qt.io/qt-5/designer-using-a-ui-file.html
46 | # #widgets-and-dialogs-with-auto-connect
47 | self.setupUi(self)
48 |
49 | def closeEvent(self, event):
50 | self.closingPlugin.emit()
51 | event.accept()
52 |
--------------------------------------------------------------------------------
/pb_tool.cfg:
--------------------------------------------------------------------------------
1 | #/***************************************************************************
2 | # SGTool
3 | #
4 | # Configuration file for plugin builder tool (pb_tool)
5 | # Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
6 | # -------------------
7 | # begin : 2024-11-17
8 | # copyright : (C) 2024 by Mark Jessell
9 | # email : mark.jessell@uwa.edu.au
10 | # ***************************************************************************/
11 | #
12 | #/***************************************************************************
13 | # * *
14 | # * This program is free software; you can redistribute it and/or modify *
15 | # * it under the terms of the GNU General Public License as published by *
16 | # * the Free Software Foundation; either version 2 of the License, or *
17 | # * (at your option) any later version. *
18 | # * *
19 | # ***************************************************************************/
20 | #
21 | #
22 | # You can install pb_tool using:
23 | # pip install http://geoapt.net/files/pb_tool.zip
24 | #
25 | # Consider doing your development (and install of pb_tool) in a virtualenv.
26 | #
27 | # For details on setting up and using pb_tool, see:
28 | # http://g-sherman.github.io/plugin_build_tool/
29 | #
30 | # Issues and pull requests here:
31 | #        https://github.com/g-sherman/plugin_build_tool
32 | #
33 | # Sane defaults for your plugin generated by the Plugin Builder are
34 | # already set below.
35 | #
36 | # As you add Python source files and UI files to your plugin, add
37 | # them to the appropriate [files] section below.
38 |
39 | [plugin]
40 | # Name of the plugin. This is the name of the directory that will
41 | # be created in .qgis2/python/plugins
42 | name: SGTool
43 |
44 | # Full path to where you want your plugin directory copied. If empty,
45 | # the QGIS default path will be used. Don't include the plugin name in
46 | # the path.
47 | plugin_path:
48 |
49 | [files]
50 | # Python files that should be deployed with the plugin
51 | python_files: __init__.py SGTool.py SGTool_dockwidget.py
52 |
53 | # The main dialog file that is loaded (not compiled)
54 | main_dialog: SGTool_dockwidget_base.ui
55 |
56 | # Other ui files for dialogs you create (these will be compiled)
57 | compiled_ui_files:
58 |
59 | # Resource file(s) that will be compiled
60 | resource_files: resources.qrc
61 |
62 | # Other files required for the plugin
63 | extras: metadata.txt icon.png
64 |
65 | # Other directories to be deployed with the plugin.
66 | # These must be subdirectories under the plugin directory
67 | extra_dirs:
68 |
69 | # ISO code(s) for any locales (translations), separated by spaces.
70 | # Corresponding .ts files must exist in the i18n directory
71 | locales:
72 |
73 | [help]
74 | # the built help directory that should be deployed with the plugin
75 | dir: help/build/html
76 | # the name of the directory to target in the deployed plugin
77 | target: help
78 |
79 |
80 |
81 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.AutomaticGainControl.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | 20250820
6 | 15495500
7 | 1.0
8 | TRUE
9 | 20250820
10 | 16055000
11 |
12 | 150000000
13 | 5000
14 |
15 | ItemDescription
16 |
17 |
18 | c:\program files\arcgis\pro\Resources\Help\gp
19 |
20 |
21 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input geophysical raster for AGC normalization processing.</SPAN></P></DIV></DIV>
22 |
23 |
24 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Size of the moving window for local amplitude normalization. Larger windows provide more smoothing.</SPAN></P></DIV></DIV>
25 |
26 |
27 |
28 | Apply AGC normalization to highlight low amplitude features and balance survey data
29 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Automatic Gain Control (AGC) normalizes local amplitude variations to highlight subtle features that might be masked by stronger anomalies. Each pixel is normalized by the local RMS amplitude within a moving window.</SPAN></P><P><SPAN>Applications:</SPAN></P><P><SPAN>• Highlighting weak anomalies in the presence of strong ones</SPAN></P><P><SPAN>• Balancing amplitude variations across surveys</SPAN></P><P><SPAN>• Enhancing subtle geological features</SPAN></P><P><SPAN>• Data preparation for interpretation</SPAN></P></DIV></DIV>
30 |
31 |
32 |
33 | Automatic Gain Control
34 |
35 |
36 | automatic gain control
37 | AGC
38 | amplitude normalization
39 | enhancement
40 | weak anomalies
41 | amplitude balancing
42 |
43 |
44 |
45 |
46 |
47 | ArcToolbox Tool
48 |
49 |
50 |
51 |
52 |
53 |
54 | 20250820
55 |
--------------------------------------------------------------------------------
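As a companion to the AGC description above, a minimal NumPy/SciPy sketch of moving-window RMS normalization; the function name, window size and epsilon guard are illustrative assumptions, not SGTool's actual implementation:

import numpy as np
from scipy.ndimage import uniform_filter

def agc_sketch(grid, window=11, eps=1e-10):
    # Local RMS amplitude over a window x window neighbourhood
    # (mean of squares clamped at zero against float rounding).
    local_rms = np.sqrt(np.maximum(uniform_filter(grid**2, size=window), 0.0))
    # Divide each cell by its local RMS; eps guards near-flat regions.
    return grid / (local_rms + eps)
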
/calcs/worms/FourierDomainGrid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class FourierDomainGrid(object):
4 | """Deal with Fourier Domain entities and Operators.
5 |
6 | Usage example (and Doctest)
7 |
8 | >>> foo = FourierDomainGrid()
9 | >>> assert isinstance(foo,FourierDomainGrid)
10 |
11 | >>> assert (foo.spatial_grid == None)
12 |
13 | >>> grid = np.zeros((512,512),dtype=complex)
14 | >>> foo.setSpatialGrid(grid)
15 | >>> assert np.allclose(foo.spatial_grid,grid)
16 | >>> assert foo.spatial_grid.dtype == np.dtype(complex)
17 |
18 | >>> foo.setHatGrid(grid)
19 | >>> assert np.allclose(foo.hat_grid,grid)
20 | >>> assert foo.hat_grid.dtype == np.dtype(complex)
21 |
22 | # Testing that the FFT of a delta function at the origin is 1 in the transform domain
23 | # The origin of coordinates from the viewpoint of the FFT is [0,0]
24 | >>> foo.spatial_grid[0,0] = 1.+0.j
25 | >>> foo.setHatGrid(foo.simpleFFT(foo.spatial_grid))
26 | >>> assert np.allclose(foo.hat_grid,(1.+0j))
27 | >>> assert np.allclose(foo.simpleIFFT(foo.hat_grid),foo.spatial_grid)
28 |
29 | # Testing that the IFT of a delta function at the origin is 1 in the REAL domain
30 | # The origin of coordinates from the viewpoint of the IFT is [0,0]
31 | >>> hat_grid = np.zeros((512,512),dtype=complex)
32 | >>> hat_grid[0,0] = (1.+0.j)
33 | >>> foo.setSpatialGrid(foo.simpleIFFT(hat_grid))
34 | >>> assert np.allclose(foo.spatial_grid,(1.+0j)/(512.*512.))
35 | >>> assert np.allclose(foo.simpleFFT(foo.spatial_grid),hat_grid)
36 |
37 | >>> foo.buildWavenumbers(grid)
38 | >>> assert foo.grid_shape == (512,512)
39 | >>> assert np.allclose(foo.grid_x_len, 512)
40 | >>> assert np.allclose(foo.grid_y_len, 512)
41 | >>> assert np.allclose(foo.kx[1], 1./512.)
42 | >>> assert np.allclose(foo.ky[1], 1./512.)
43 | >>> assert np.allclose(max(foo.kx), 0.5- (1./512.))
44 | >>> assert np.allclose(min(foo.kx), -0.5)
45 | >>> assert foo.kx[0] == 0.0
46 |     >>> assert foo.ky[0] == 0.0
47 | >>> assert len(foo.kx) == 512
48 | >>> assert len(foo.ky) == 512
49 | """
50 |
51 | def __init__(self,dx=1.0,dy=1.0):
52 | self.spatial_grid = None
53 | self.dx = dx
54 | self.dy = dy
55 |
56 | def buildWavenumbers(self,grid):
57 | ''' Get kx and ky based on size of 2d input grid
58 | '''
59 | self.grid_shape = np.shape(grid)
60 |         # np.shape returns a (rows, columns) tuple
61 | self.grid_x_len = self.grid_shape[1]
62 | self.grid_y_len = self.grid_shape[0]
63 |         # fftfreq returns the DFT sample frequencies
64 | self.kx = np.fft.fftfreq(self.grid_x_len, d=self.dx)
65 | self.ky = np.fft.fftfreq(self.grid_y_len, d=self.dy)
66 |
67 | def setSpatialGrid(self,grid):
68 | """Setter for spatial_grid
69 | """
70 | self.spatial_grid = grid
71 |
72 | def setHatGrid(self,grid):
73 | """Setter for hat_grid (wavenumber domain)
74 | """
75 | self.hat_grid = grid
76 |
77 | def simpleFFT(self,spatial_grid):
78 | """ Perform a simple FFT without pre-conditioning
79 | Input: complex; Output: complex
80 | """
81 | return np.fft.fft2(spatial_grid)
82 |
83 | def simpleIFFT(self,hat_grid):
84 | """ Perform a simple inverse FFT without pre-conditioning
85 | Input: complex; Output: complex
86 | """
87 | return np.fft.ifft2(hat_grid)
88 |
89 | if __name__ == '__main__':
90 | import doctest
91 | doctest.testmod()
--------------------------------------------------------------------------------
/calcs/euler/synthetic_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Synthetic test 1
3 |
4 | A Python program to compute the Synthetic test 1
5 | Distinct SIs and strong nonlinear magnetic base level
6 |
7 | This code is released from the paper:
8 | Reliable Euler deconvolution estimates throughout the
9 | vertical derivatives of the total-field anomaly
10 |
11 | The program is under the conditions terms in the file README.txt
12 |
13 | authors: Felipe F. Melo and Valeria C.F. Barbosa, 2019
14 | email: felipe146@hotmail.com, valcris@on.br
15 | """
16 |
17 | """
18 | Input:
19 |
20 | input/synthetic_data.dat - 2d-array with "n" rows by 4 columns:
21 | x-coordinate, y-coordinate, z-coordinate, anomaly. Where "n" rows
22 | correspond to the size of the data.
23 |
24 | Parameters:
25 |
26 | Size of the moving data window:
27 | winsize - an odd integer number.
28 | Ex.: for a moving data window of 5 x 5 grid points -> winsize = 5
29 |
30 | Percentage of the solutions that will be kept:
31 | filt - a float number ranging from 0.0 to 1.0.
32 | Ex.: to keep 10% of the solutions -> filt = 0.1
33 |
34 | Structural indices used:
35 | SI_vet - an array that can store any of the four SIs.
36 | Ex.: to test only the SI = 1 -> SI_vet = [1]
37 | to test the four SIs -> SI_vet = [0.01,1,2,3]
38 |
39 | The areas to compute the statistics about the mean of the northing,
40 | easting and depth estimates:
41 | area_cla - array defining the four vertices of a polygon
42 | [south,north,west,east]
43 | """
44 |
45 | import numpy as np
46 | import plot_functions as plt_fc
47 | import euler_python as euler
48 | import estimates_statistics as est_stats
49 |
50 | # Input data
51 | data_input=np.loadtxt('input/synthetic_data.dat')
52 | shape = (120, 140)
53 | area = [0, 24000, 0, 28000]
54 | xi=data_input[:,0]
55 | yi=data_input[:,1]
56 | zi=data_input[:,2]
57 | data=data_input[:,3]
58 |
59 | '''
60 | Plot input data - Figure 2d
61 | '''
62 | plt_fc.plot_input_data(data,xi,yi,zi,shape)
63 | '''
64 | These are the two parameters of our methodology for Euler deconvolution:
65 | window size and the percentage of solutions to keep
66 | '''
67 | #moving data window size
68 | winsize=7
69 | #percentage of the solutions that will be kept
70 | filt=0.1
71 | #empty array for multiple SIs
72 | est_classic=[]
73 | #Define below the SIs to be tested
74 | SI_vet=[0.001,1,2,3]
75 | '''
76 | Euler deconvolution for multiple SIs
77 | '''
78 | for SI in (SI_vet):
79 | classic = euler.euler_deconv(data,xi,yi,zi,shape,area,SI,winsize,
80 | filt)
81 | est_classic.append(classic)
82 | #Here finishes Euler deconvolution
83 | '''
84 | Plot Figures 4 and 7 - Selected depth and base level estimates for all SIs
85 | '''
86 | plt_fc.plot_classic(data,est_classic,xi,yi,zi,shape)
87 | '''
88 | Areas used to get the statistics - Defined after the classic plot
89 | south,north,west,east
90 | '''
91 | area_cla0=[0.,25000,24000,28000]
92 | area_cla1=[9200,25000,15000,20000]
93 | area_cla2=[14000,18000,5000,10000]
94 | area_cla3=[5000,8000,5000,8000]
95 |
96 | est_stats.classic(est_classic,area_cla0,SI_vet,'classic_plt0')
97 | est_stats.classic(est_classic,area_cla1,SI_vet,'classic_plt1')
98 | est_stats.classic(est_classic,area_cla2,SI_vet,'classic_plt2')
99 | est_stats.classic(est_classic,area_cla3,SI_vet,'classic_plt3')
100 |
101 |
102 | """
103 | To save the estimates in a txt file:
104 |
105 | #convert the list to an array
106 | output=np.asarray(est_classic)
107 | #save the estimates in distinct files according to the SI
108 | for i in range(len(SI_vet)):
109 | np.savetxt("estimates_SI_" + str(i) + ".txt",output[i],delimiter=",")
110 | """
111 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.AnalyticSignal.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input magnetic or gravity raster dataset for analytic signal calculation.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Buffer zone size for edge effect reduction during derivative calculations.</SPAN></P></DIV></DIV>
24 |
25 |
26 |
27 | Calculate analytic signal amplitude for source location and edge detection
28 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>The analytic signal amplitude is calculated as sqrt(dx² + dy² + dz²) where dx, dy, dz are the first derivatives in x, y, and z directions. This transformation produces anomalies that are symmetric and directly over their causative sources.</SPAN></P><P><SPAN>Advantages:</SPAN></P><P><SPAN>• Independent of magnetic field direction</SPAN></P><P><SPAN>• Independent of source magnetization direction</SPAN></P><P><SPAN>• Peaks directly over source edges and contacts</SPAN></P><P><SPAN>• Always positive values</SPAN></P></DIV></DIV>
29 |
30 |
31 |
32 | Analytic Signal
33 |
34 | The analytic signal amplitude transforms geophysical data to locate sources and edges independent of magnetic field direction and source magnetization. Results are always positive.
35 | Source location, edge detection, and structural interpretation in magnetic and gravity surveys.
36 | Nabighian (1972, 1984), Roest et al. (1992)
37 |
38 | analytic signal
39 | AS
40 | amplitude
41 | source location
42 | edge detection
43 | magnetic
44 | gravity
45 | derivatives
46 |
47 | Works best with magnetic data but can be applied to gravity data. May amplify noise - consider filtering input data first. Results are always positive values.
48 |
49 |
50 |
51 |
52 | ArcToolbox Tool
53 |
54 |
55 |
56 |
57 |
58 |
59 | 20250820
60 |
61 |
--------------------------------------------------------------------------------
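The sqrt(dx² + dy² + dz²) formula above can be sketched in a few lines of NumPy: horizontal derivatives by finite differences, and the vertical derivative by multiplication with |k| in the wavenumber domain. This is a minimal sketch under those assumptions (no edge buffering, illustrative names), not SGTool's code:

import numpy as np

def analytic_signal_sketch(grid, dx=1.0, dy=1.0):
    # Horizontal derivatives by central differences (rows = y/north, cols = x/east).
    gy, gx = np.gradient(grid, dy, dx)
    # Vertical derivative: multiply by |k| (radians per metre) in the wavenumber domain.
    ky = 2 * np.pi * np.fft.fftfreq(grid.shape[0], d=dy)
    kx = 2 * np.pi * np.fft.fftfreq(grid.shape[1], d=dx)
    KY, KX = np.meshgrid(ky, kx, indexing="ij")
    gz = np.real(np.fft.ifft2(np.fft.fft2(grid) * np.hypot(KY, KX)))
    return np.sqrt(gx**2 + gy**2 + gz**2)
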
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | .DS_Store
162 |
163 |
--------------------------------------------------------------------------------
/plugin_upload.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """This script uploads a plugin package to the plugin repository.
4 | Authors: A. Pasotti, V. Picavet
5 | git sha : $TemplateVCSFormat
6 | """
7 |
8 | import sys
9 | import getpass
10 | import xmlrpc.client
11 | from optparse import OptionParser
12 |
13 |
14 |
15 | # Configuration
16 | PROTOCOL = 'https'
17 | SERVER = 'plugins.qgis.org'
18 | PORT = '443'
19 | ENDPOINT = '/plugins/RPC2/'
20 | VERBOSE = False
21 |
22 |
23 | def main(parameters, arguments):
24 | """Main entry point.
25 |
26 | :param parameters: Command line parameters.
27 | :param arguments: Command line arguments.
28 | """
29 | address = "{protocol}://{username}:{password}@{server}:{port}{endpoint}".format(
30 | protocol=PROTOCOL,
31 | username=parameters.username,
32 | password=parameters.password,
33 | server=parameters.server,
34 | port=parameters.port,
35 | endpoint=ENDPOINT)
36 | print("Connecting to: %s" % hide_password(address))
37 |
38 | server = xmlrpc.client.ServerProxy(address, verbose=VERBOSE)
39 |
40 | try:
41 | with open(arguments[0], 'rb') as handle:
42 | plugin_id, version_id = server.plugin.upload(
43 | xmlrpc.client.Binary(handle.read()))
44 | print("Plugin ID: %s" % plugin_id)
45 | print("Version ID: %s" % version_id)
46 | except xmlrpc.client.ProtocolError as err:
47 | print("A protocol error occurred")
48 | print("URL: %s" % hide_password(err.url, 0))
49 | print("HTTP/HTTPS headers: %s" % err.headers)
50 | print("Error code: %d" % err.errcode)
51 | print("Error message: %s" % err.errmsg)
52 | except xmlrpc.client.Fault as err:
53 | print("A fault occurred")
54 | print("Fault code: %d" % err.faultCode)
55 | print("Fault string: %s" % err.faultString)
56 |
57 |
58 | def hide_password(url, start=6):
59 | """Returns the http url with password part replaced with '*'.
60 |
61 | :param url: URL to upload the plugin to.
62 | :type url: str
63 |
64 | :param start: Position of start of password.
65 | :type start: int
66 | """
67 | start_position = url.find(':', start) + 1
68 | end_position = url.find('@')
69 | return "%s%s%s" % (
70 | url[:start_position],
71 | '*' * (end_position - start_position),
72 | url[end_position:])
73 |
74 |
75 | if __name__ == "__main__":
76 | parser = OptionParser(usage="%prog [options] plugin.zip")
77 | parser.add_option(
78 | "-w", "--password", dest="password",
79 | help="Password for plugin site", metavar="******")
80 | parser.add_option(
81 | "-u", "--username", dest="username",
82 | help="Username of plugin site", metavar="user")
83 | parser.add_option(
84 | "-p", "--port", dest="port",
85 | help="Server port to connect to", metavar="80")
86 | parser.add_option(
87 | "-s", "--server", dest="server",
88 | help="Specify server name", metavar="plugins.qgis.org")
89 | options, args = parser.parse_args()
90 | if len(args) != 1:
91 | print("Please specify zip file.\n")
92 | parser.print_help()
93 | sys.exit(1)
94 | if not options.server:
95 | options.server = SERVER
96 | if not options.port:
97 | options.port = PORT
98 | if not options.username:
99 | # interactive mode
100 | username = getpass.getuser()
101 | print("Please enter user name [%s] :" % username, end=' ')
102 |
103 | res = input()
104 | if res != "":
105 | options.username = res
106 | else:
107 | options.username = username
108 | if not options.password:
109 | # interactive mode
110 | options.password = getpass.getpass()
111 | main(options, args)
112 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.DirectionalButterworthBandPass.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | 20250820
6 | 15495500
7 | 1.0
8 | TRUE
9 | 20250820
10 | 16055000
11 |
12 | 150000000
13 | 5000
14 |
15 | ItemDescription
16 |
17 |
18 | c:\program files\arcgis\pro\Resources\Help\gp
19 |
20 |
21 |
22 |
23 |
24 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Primary direction to emphasize (0-360 degrees, 0=North, clockwise positive).</SPAN></P></DIV></DIV>
25 |
26 |
27 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Angular width of directional filter. Wider values include more orientations.</SPAN></P></DIV></DIV>
28 |
29 |
30 |
31 |
32 |
33 | Combined directional and band-pass filtering with Butterworth transitions
34 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Combines band-pass and directional filtering to enhance linear features in specific orientations within defined wavelength ranges. Uses Butterworth filter design for smooth frequency transitions.</SPAN></P><P><SPAN>Excellent for: Fault and dyke mapping, structural trend analysis, linear anomaly enhancement.</SPAN></P></DIV></DIV>
35 |
36 |
37 |
38 | Directional Butterworth Band Pass
39 |
40 |
41 | directional filter
42 | butterworth
43 | band pass
44 | structural
45 | linear features
46 | orientation
47 |
48 |
49 |
50 |
51 |
52 | ArcToolbox Tool
53 |
54 |
55 |
56 |
57 |
58 |
59 | 20250820
60 |
--------------------------------------------------------------------------------
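A minimal NumPy sketch of the combination the description above outlines: a Butterworth band-pass in radial wavenumber multiplied by a directional taper about a centre azimuth. The cosine-squared taper and all parameter names are assumptions for illustration, not SGTool's implementation:

import numpy as np

def directional_bp_sketch(grid, dx, dy, short_wl, long_wl, azimuth_deg, width_deg, order=4):
    ky = 2 * np.pi * np.fft.fftfreq(grid.shape[0], d=dy)
    kx = 2 * np.pi * np.fft.fftfreq(grid.shape[1], d=dx)
    KY, KX = np.meshgrid(ky, kx, indexing="ij")
    k = np.hypot(KY, KX)
    k[0, 0] = 1e-12                                 # avoid division by zero at DC
    k_lo, k_hi = 2 * np.pi / long_wl, 2 * np.pi / short_wl
    band = 1 / (1 + (k_lo / k) ** (2 * order))      # Butterworth high-pass edge
    band /= 1 + (k / k_hi) ** (2 * order)           # Butterworth low-pass edge
    # Directional taper: cosine-squared about the centre azimuth, folded to
    # 180-degree symmetry because real-valued filters treat opposite directions alike.
    theta = np.arctan2(KX, KY)                      # azimuth from north, clockwise
    dtheta = np.angle(np.exp(2j * (theta - np.radians(azimuth_deg)))) / 2
    taper = np.cos(np.clip(dtheta / np.radians(width_deg), -1, 1) * np.pi / 2) ** 2
    return np.real(np.fft.ifft2(np.fft.fft2(grid) * band * taper))
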
/ArcGIS_Pro/GeophysicalProcessor.LowPassFilter.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input geophysical raster dataset for low-pass filtering.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Wavelengths shorter than this will be removed (noise removal). Typical values: 2000-10000 meters.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Transition zone width to reduce ringing artifacts. Start with cutoff value and adjust as needed.</SPAN></P></DIV></DIV>
27 |
28 |
29 |
30 |
31 | Remove short wavelength noise to enhance regional trends
32 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Low-pass filtering removes short-wavelength noise while preserving longer-wavelength features. This smooths data and enhances regional geological trends.</SPAN></P><P><SPAN>Applications:</SPAN></P><P><SPAN>• Noise reduction and data smoothing</SPAN></P><P><SPAN>• Regional trend enhancement</SPAN></P><P><SPAN>• Data preparation for interpretation</SPAN></P><P><SPAN>• Cross-survey leveling</SPAN></P><P><SPAN>Similar to upward continuation but provides more control over frequency response.</SPAN></P></DIV></DIV>
33 |
34 |
35 |
36 | Low Pass Filter
37 |
38 | Low-pass filtering removes short-wavelength noise while preserving regional trends. Provides controlled data smoothing and noise reduction.
39 | Noise reduction, data smoothing, and regional trend enhancement in geophysical surveys.
40 | Standard Fourier domain filtering methods
41 |
42 | low pass
43 | LP
44 | noise reduction
45 | smoothing
46 | regional
47 | filtering
48 | wavelength
49 | geophysics
50 |
51 | Choose cutoff wavelength based on noise characteristics and desired smoothing level. Too aggressive filtering may remove geological signal.
52 |
53 |
54 |
55 |
56 | ArcToolbox Tool
57 |
58 |
59 |
60 |
61 |
62 |
63 | 20250820
64 |
65 |
--------------------------------------------------------------------------------
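A minimal NumPy sketch of a low-pass with the cutoff-wavelength and transition-width parameters described above, using a cosine roll-off across the transition band (the mapping of transition width to wavenumber is an assumption for illustration):

import numpy as np

def low_pass_sketch(grid, dx, dy, cutoff_wl, transition_wl):
    ky = np.fft.fftfreq(grid.shape[0], d=dy)
    kx = np.fft.fftfreq(grid.shape[1], d=dx)
    k = np.hypot(*np.meshgrid(ky, kx, indexing="ij"))   # cycles per metre
    k_pass = 1.0 / cutoff_wl                            # fully passed below this
    k_stop = k_pass + 1.0 / transition_wl               # fully removed above this
    frac = np.clip((k - k_pass) / (k_stop - k_pass), 0.0, 1.0)
    taper = 0.5 * (1 + np.cos(np.pi * frac))            # cosine roll-off in between
    return np.real(np.fft.ifft2(np.fft.fft2(grid) * taper))

A matching high-pass is the complement: subtract the low-pass result from the input, or apply 1 minus the taper.
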
/ArcGIS_Pro/GeophysicalProcessor.ComputeDerivative.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input magnetic or gravity raster dataset for upward continuation processing.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Height above current observation level. Larger values provide more smoothing and noise reduction.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Buffer zone size for edge effect reduction. Recommend 20% of grid dimensions.</SPAN></P></DIV></DIV>
27 |
28 |
29 |
30 | Upward continue magnetic/gravity data to higher observation levels for noise reduction and regional enhancement
31 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Upward continuation simulates the magnetic or gravity field at a higher observation level. This process attenuates high-frequency noise and short-wavelength anomalies while preserving regional trends. The tool applies an exponential decay filter exp(-k*height) in the frequency domain.</SPAN></P><P><SPAN>Common applications:</SPAN></P><P><SPAN>• Noise reduction in geophysical surveys</SPAN></P><P><SPAN>• Regional/residual separation</SPAN></P><P><SPAN>• Preparation for derivative calculations</SPAN></P><P><SPAN>• Cross-survey leveling and merging</SPAN></P></DIV></DIV>
32 |
33 |
34 |
35 | Upward Continuation
36 |
37 | Upward continuation simulates magnetic or gravity field measurements at higher observation levels. This frequency domain technique attenuates high-frequency noise while preserving regional trends.
38 | Noise reduction, regional enhancement, and data preparation for geophysical interpretation workflows.
39 | Based on potential field theory (Blakely, 1995 - Potential Theory in Gravity and Magnetic Applications)
40 |
41 | upward continuation
42 | magnetic
43 | gravity
44 | noise reduction
45 | regional
46 | filtering
47 | geophysics
48 |
49 | Requires gridded magnetic or gravity data. Large continuation heights may over-smooth data. Buffer size should be approximately 20% of grid dimensions for optimal edge effect reduction.
50 |
51 |
52 |
53 |
54 | ArcToolbox Tool
55 |
56 |
57 |
58 |
59 |
60 |
61 | 20250820
62 |
63 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.UpwardContinuation.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input magnetic or gravity raster dataset for upward continuation processing.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Height above current observation level. Larger values provide more smoothing and noise reduction.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Buffer zone size for edge effect reduction. Recommend 20% of grid dimensions.</SPAN></P></DIV></DIV>
27 |
28 |
29 |
30 | Upward continue magnetic/gravity data to higher observation levels for noise reduction and regional enhancement
31 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Upward continuation simulates the magnetic or gravity field at a higher observation level. This process attenuates high-frequency noise and short-wavelength anomalies while preserving regional trends. The tool applies an exponential decay filter exp(-k*height) in the frequency domain.</SPAN></P><P><SPAN>Common applications:</SPAN></P><P><SPAN>• Noise reduction in geophysical surveys</SPAN></P><P><SPAN>• Regional/residual separation</SPAN></P><P><SPAN>• Preparation for derivative calculations</SPAN></P><P><SPAN>• Cross-survey leveling and merging</SPAN></P></DIV></DIV>
32 |
33 |
34 |
35 | Upward Continuation
36 |
37 | Upward continuation simulates magnetic or gravity field measurements at higher observation levels. This frequency domain technique attenuates high-frequency noise while preserving regional trends.
38 | Noise reduction, regional enhancement, and data preparation for geophysical interpretation workflows.
39 | Based on potential field theory (Blakely, 1995 - Potential Theory in Gravity and Magnetic Applications)
40 |
41 | upward continuation
42 | magnetic
43 | gravity
44 | noise reduction
45 | regional
46 | filtering
47 | geophysics
48 |
49 | Requires gridded magnetic or gravity data. Large continuation heights may over-smooth data. Buffer size should be approximately 20% of grid dimensions for optimal edge effect reduction.
50 |
51 |
52 |
53 |
54 | ArcToolbox Tool
55 |
56 |
57 |
58 |
59 |
60 |
61 | 20250820
62 |
63 |
--------------------------------------------------------------------------------
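
A note on the exp(-k*height) filter described in the tool metadata above: as a
minimal sketch of the idea (not SGTool's actual implementation, which also pads
the grid with a buffer to suppress edge effects), upward continuation of a
regularly spaced grid can be written with NumPy as:

    import numpy as np

    def upward_continue(grid, dx, dy, height):
        """Continue a potential-field grid upward by `height` (same units as dx, dy)."""
        ny, nx = grid.shape
        # Radial wavenumber |k| (radians per unit distance) for each FFT cell
        kx = 2 * np.pi * np.fft.fftfreq(nx, d=dx)
        ky = 2 * np.pi * np.fft.fftfreq(ny, d=dy)
        k = np.sqrt(kx[None, :] ** 2 + ky[:, None] ** 2)
        # exp(-|k|*height) decays fastest at high wavenumbers, so short
        # wavelengths (noise) are attenuated most
        return np.real(np.fft.ifft2(np.fft.fft2(grid) * np.exp(-k * height)))
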
/ArcGIS_Pro/GeophysicalProcessor.DownwardContinuation.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input magnetic or gravity raster dataset for downward continuation processing.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Downward continuation distance. Use small values as this process amplifies noise significantly.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Buffer zone size for edge effect reduction.</SPAN></P></DIV></DIV>
27 |
28 |
29 |
30 | Downward continue magnetic/gravity data to enhance shallow sources (use with caution - amplifies noise)
31 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Downward continuation enhances high-frequency signals and shallow anomalies by simulating the field at a lower observation level. This process is inherently unstable and dramatically amplifies noise.</SPAN></P><P><SPAN>WARNING: Use very small continuation heights (typically <200m).</SPAN></P><P><SPAN>Applications:</SPAN></P><P><SPAN>• Enhance shallow geological features</SPAN></P><P><SPAN>• Increase resolution of near-surface anomalies</SPAN></P><P><SPAN>• Should only be applied to high-quality, low-noise data</SPAN></P></DIV></DIV>
32 |
33 |
34 |
35 | Downward Continuation
36 |
37 | Downward continuation enhances high-frequency signals by simulating field measurements at lower observation levels. This unstable process dramatically amplifies noise.
38 | Enhancement of shallow geological features and near-surface anomaly resolution in high-quality geophysical data.
39 | Based on potential field theory with stabilization techniques
40 |
41 | downward continuation
42 | magnetic
43 | gravity
44 | enhancement
45 | shallow sources
46 | high frequency
47 | geophysics
48 |
49 | WARNING: Amplifies noise significantly. Use only on high-quality, low-noise data with small continuation heights (<200m). Results may be unstable for noisy datasets.
50 |
51 |
52 |
53 |
54 | ArcToolbox Tool
55 |
56 |
57 |
58 |
59 |
60 |
61 | 20250820
62 |
--------------------------------------------------------------------------------
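
The warning above follows directly from the filter: downward continuation
multiplies the spectrum by exp(+|k|*height), so high-wavenumber noise grows
exponentially. A minimal sketch (the hard cap on |k| is one simple illustrative
stabilization, not necessarily the technique this tool uses):

    import numpy as np

    def downward_continue(grid, dx, dy, height, k_max=None):
        """Naive downward continuation; the gain exp(+|k|*height) amplifies noise."""
        ny, nx = grid.shape
        kx = 2 * np.pi * np.fft.fftfreq(nx, d=dx)
        ky = 2 * np.pi * np.fft.fftfreq(ny, d=dy)
        k = np.sqrt(kx[None, :] ** 2 + ky[:, None] ** 2)
        gain = np.exp(k * height)
        if k_max is not None:
            gain[k > k_max] = 0.0  # crude low-pass cap to limit noise blow-up
        return np.real(np.fft.ifft2(np.fft.fft2(grid) * gain))
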
/ArcGIS_Pro/GeophysicalProcessor.HighPassFilter.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input geophysical raster dataset for high-pass filtering.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Wavelengths longer than this will be removed (regional removal). Typical values: 10000-50000 meters.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Transition zone width used to reduce ringing artifacts. Start with the cutoff value and adjust as needed.</SPAN></P></DIV></DIV>
27 |
28 |
29 |
30 |
31 | Remove long wavelength regional trends to enhance local anomalies
32 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>High-pass filtering removes long-wavelength regional trends while preserving shorter-wavelength anomalies. This enhances local geological features and removes broad regional effects.</SPAN></P><P><SPAN>Applications:</SPAN></P><P><SPAN>• Regional/residual separation</SPAN></P><P><SPAN>• Enhancement of local anomalies</SPAN></P><P><SPAN>• Removal of survey-scale trends</SPAN></P><P><SPAN>• Preparation for derivative calculations</SPAN></P><P><SPAN>Transition width creates smooth cutoffs to reduce ringing artifacts.</SPAN></P></DIV></DIV>
33 |
34 |
35 |
36 | High Pass Filter
37 |
38 | High-pass filtering removes long-wavelength regional trends while preserving local anomalies. Essential for regional/residual separation in geophysical surveys.
39 | Regional trend removal and enhancement of local geological features in magnetic and gravity data.
40 | Standard Fourier domain filtering methods
41 |
42 | high pass
43 | HP
44 | regional removal
45 | filtering
46 | residual
47 | local anomalies
48 | wavelength
49 | geophysics
50 |
51 | Choose cutoff wavelength based on regional geology and survey scale. Transition width reduces ringing but may blur boundaries. Test parameters on data subsets.
52 |
53 |
54 |
55 |
56 | ArcToolbox Tool
57 |
58 |
59 |
60 |
61 |
62 |
63 | 20250820
64 |
--------------------------------------------------------------------------------
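
A minimal sketch of a high-pass filter with a cosine-tapered transition, in the
spirit of the description above (how SGTool parameterizes the transition width
is an assumption here; the taper below ramps over wavelengths between
cutoff_wavelength + transition_width and cutoff_wavelength):

    import numpy as np

    def high_pass(grid, dx, dy, cutoff_wavelength, transition_width):
        """Remove wavelengths longer than cutoff_wavelength (metres)."""
        ny, nx = grid.shape
        kx = np.fft.fftfreq(nx, d=dx)  # wavenumbers in cycles per metre
        ky = np.fft.fftfreq(ny, d=dy)
        k = np.sqrt(kx[None, :] ** 2 + ky[:, None] ** 2)
        k_cut = 1.0 / cutoff_wavelength
        k_lo = 1.0 / (cutoff_wavelength + transition_width)  # taper start
        gain = np.ones_like(k)
        gain[k <= k_lo] = 0.0
        band = (k > k_lo) & (k < k_cut)
        # Raised-cosine ramp from 0 at k_lo to 1 at k_cut reduces ringing
        gain[band] = 0.5 * (1.0 - np.cos(np.pi * (k[band] - k_lo) / (k_cut - k_lo)))
        return np.real(np.fft.ifft2(np.fft.fft2(grid) * gain))
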
/help/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
10 | if NOT "%PAPER%" == "" (
11 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
12 | )
13 |
14 | if "%1" == "" goto help
15 |
16 | if "%1" == "help" (
17 | :help
18 | echo.Please use `make ^<target^>` where ^<target^> is one of
19 | echo. html to make standalone HTML files
20 | echo. dirhtml to make HTML files named index.html in directories
21 | echo. singlehtml to make a single large HTML file
22 | echo. pickle to make pickle files
23 | echo. json to make JSON files
24 | echo. htmlhelp to make HTML files and a HTML help project
25 | echo. qthelp to make HTML files and a qthelp project
26 | echo. devhelp to make HTML files and a Devhelp project
27 | echo. epub to make an epub
28 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
29 | echo. text to make text files
30 | echo. man to make manual pages
31 | echo. changes to make an overview over all changed/added/deprecated items
32 | echo. linkcheck to check all external links for integrity
33 | echo. doctest to run all doctests embedded in the documentation if enabled
34 | goto end
35 | )
36 |
37 | if "%1" == "clean" (
38 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
39 | del /q /s %BUILDDIR%\*
40 | goto end
41 | )
42 |
43 | if "%1" == "html" (
44 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
45 | echo.
46 | echo.Build finished. The HTML pages are in %BUILDDIR%/html.
47 | goto end
48 | )
49 |
50 | if "%1" == "dirhtml" (
51 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
52 | echo.
53 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
54 | goto end
55 | )
56 |
57 | if "%1" == "singlehtml" (
58 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
59 | echo.
60 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
61 | goto end
62 | )
63 |
64 | if "%1" == "pickle" (
65 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
66 | echo.
67 | echo.Build finished; now you can process the pickle files.
68 | goto end
69 | )
70 |
71 | if "%1" == "json" (
72 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
73 | echo.
74 | echo.Build finished; now you can process the JSON files.
75 | goto end
76 | )
77 |
78 | if "%1" == "htmlhelp" (
79 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
80 | echo.
81 | echo.Build finished; now you can run HTML Help Workshop with the ^
82 | .hhp project file in %BUILDDIR%/htmlhelp.
83 | goto end
84 | )
85 |
86 | if "%1" == "qthelp" (
87 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
88 | echo.
89 | echo.Build finished; now you can run "qcollectiongenerator" with the ^
90 | .qhcp project file in %BUILDDIR%/qthelp, like this:
91 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\template_class.qhcp
92 | echo.To view the help file:
93 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\template_class.qhc
94 | goto end
95 | )
96 |
97 | if "%1" == "devhelp" (
98 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
99 | echo.
100 | echo.Build finished.
101 | goto end
102 | )
103 |
104 | if "%1" == "epub" (
105 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
106 | echo.
107 | echo.Build finished. The epub file is in %BUILDDIR%/epub.
108 | goto end
109 | )
110 |
111 | if "%1" == "latex" (
112 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
113 | echo.
114 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
115 | goto end
116 | )
117 |
118 | if "%1" == "text" (
119 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
120 | echo.
121 | echo.Build finished. The text files are in %BUILDDIR%/text.
122 | goto end
123 | )
124 |
125 | if "%1" == "man" (
126 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
127 | echo.
128 | echo.Build finished. The manual pages are in %BUILDDIR%/man.
129 | goto end
130 | )
131 |
132 | if "%1" == "changes" (
133 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
134 | echo.
135 | echo.The overview file is in %BUILDDIR%/changes.
136 | goto end
137 | )
138 |
139 | if "%1" == "linkcheck" (
140 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
141 | echo.
142 | echo.Link check complete; look for any errors in the above output ^
143 | or in %BUILDDIR%/linkcheck/output.txt.
144 | goto end
145 | )
146 |
147 | if "%1" == "doctest" (
148 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
149 | echo.
150 | echo.Testing of doctests in the sources finished, look at the ^
151 | results in %BUILDDIR%/doctest/output.txt.
152 | goto end
153 | )
154 |
155 | :end
156 |
--------------------------------------------------------------------------------
/help/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
14 |
15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
16 |
17 | help:
18 | @echo "Please use \`make ' where is one of"
19 | @echo " html to make standalone HTML files"
20 | @echo " dirhtml to make HTML files named index.html in directories"
21 | @echo " singlehtml to make a single large HTML file"
22 | @echo " pickle to make pickle files"
23 | @echo " json to make JSON files"
24 | @echo " htmlhelp to make HTML files and a HTML help project"
25 | @echo " qthelp to make HTML files and a qthelp project"
26 | @echo " devhelp to make HTML files and a Devhelp project"
27 | @echo " epub to make an epub"
28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
29 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
30 | @echo " text to make text files"
31 | @echo " man to make manual pages"
32 | @echo " changes to make an overview of all changed/added/deprecated items"
33 | @echo " linkcheck to check all external links for integrity"
34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
35 |
36 | clean:
37 | -rm -rf $(BUILDDIR)/*
38 |
39 | html:
40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
41 | @echo
42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
43 |
44 | dirhtml:
45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
48 |
49 | singlehtml:
50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
51 | @echo
52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
53 |
54 | pickle:
55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
56 | @echo
57 | @echo "Build finished; now you can process the pickle files."
58 |
59 | json:
60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
61 | @echo
62 | @echo "Build finished; now you can process the JSON files."
63 |
64 | htmlhelp:
65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
66 | @echo
67 | @echo "Build finished; now you can run HTML Help Workshop with the" \
68 | ".hhp project file in $(BUILDDIR)/htmlhelp."
69 |
70 | qthelp:
71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
72 | @echo
73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/template_class.qhcp"
76 | @echo "To view the help file:"
77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/template_class.qhc"
78 |
79 | devhelp:
80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
81 | @echo
82 | @echo "Build finished."
83 | @echo "To view the help file:"
84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/template_class"
85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/template_class"
86 | @echo "# devhelp"
87 |
88 | epub:
89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
90 | @echo
91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
92 |
93 | latex:
94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
95 | @echo
96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
98 | "(use \`make latexpdf' here to do that automatically)."
99 |
100 | latexpdf:
101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
102 | @echo "Running LaTeX files through pdflatex..."
103 | make -C $(BUILDDIR)/latex all-pdf
104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
105 |
106 | text:
107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
108 | @echo
109 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
110 |
111 | man:
112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
113 | @echo
114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
115 |
116 | changes:
117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
118 | @echo
119 | @echo "The overview file is in $(BUILDDIR)/changes."
120 |
121 | linkcheck:
122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
123 | @echo
124 | @echo "Link check complete; look for any errors in the above output " \
125 | "or in $(BUILDDIR)/linkcheck/output.txt."
126 |
127 | doctest:
128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
129 | @echo "Testing of doctests in the sources finished, look at the " \
130 | "results in $(BUILDDIR)/doctest/output.txt."
131 |
--------------------------------------------------------------------------------
/ArcGIS_Pro/GeophysicalProcessor.BandPassFilter.pyt.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 20250820
5 | 15495500
6 | 1.0
7 | TRUE
8 | 20250820
9 | 16055000
10 |
11 | 150000000
12 | 5000
13 |
14 | ItemDescription
15 |
16 |
17 | c:\program files\arcgis\pro\Resources\Help\gp
18 |
19 |
20 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Input geophysical raster dataset for band-pass filtering.</SPAN></P></DIV></DIV>
21 |
22 |
23 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Wavelengths longer than this will be attenuated (regional removal). Typical values: 20000-100000 meters.</SPAN></P></DIV></DIV>
24 |
25 |
26 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Wavelengths shorter than this will be attenuated (noise removal). Typical values: 1000-10000 meters.</SPAN></P></DIV></DIV>
27 |
28 |
29 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Transition width for high-pass component to reduce ringing artifacts.</SPAN></P></DIV></DIV>
30 |
31 |
32 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Transition width for low-pass component to reduce ringing artifacts.</SPAN></P></DIV></DIV>
33 |
34 |
35 |
36 |
37 | Isolate anomalies within specific wavelength ranges by removing regional trends and noise
38 | <DIV STYLE="text-align:Left;"><DIV><P><SPAN>Band-pass filtering isolates geophysical anomalies within a specific wavelength range by removing both long-wavelength regional trends and short-wavelength noise. The filter uses smooth transitions to minimize ringing artifacts.</SPAN></P><P><SPAN>Filter design:</SPAN></P><P><SPAN>• Low cutoff removes longer wavelengths (regional trends)</SPAN></P><P><SPAN>• High cutoff removes shorter wavelengths (noise, shallow sources)</SPAN></P><P><SPAN>• Transition widths create smooth cutoffs to reduce ringing</SPAN></P><P><SPAN>Choose cutoff wavelengths based on target source dimensions and survey characteristics.</SPAN></P></DIV></DIV>
39 |
40 |
41 |
42 | Band Pass Filter
43 |
44 | Band-pass filtering isolates geophysical anomalies within specific wavelength ranges by removing regional trends and noise using frequency domain techniques.
45 | Isolation of anomalies within specific wavelength ranges for targeted geophysical interpretation.
46 | Standard Fourier domain filtering methods
47 |
48 | band pass
49 | BP
50 | filtering
51 | wavelength
52 | frequency domain
53 | regional removal
54 | noise reduction
55 | geophysics
56 |
57 | Cutoff wavelengths should be chosen based on target source dimensions. Very narrow bands may amplify noise. Test different parameters on subsets first.
58 |
59 |
60 |
61 |
62 | ArcToolbox Tool
63 |
64 |
65 |
66 |
67 |
68 |
69 | 20250820
70 |
--------------------------------------------------------------------------------
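
Conceptually, the band-pass response described above is the product of a
high-pass gain at the long-wavelength cutoff and a low-pass gain at the
short-wavelength cutoff. A minimal sketch with hard cutoffs (parameter names
are illustrative; in practice both edges should be tapered, as in the
high-pass sketch above, to suppress ringing):

    import numpy as np

    def band_pass(grid, dx, dy, long_cutoff, short_cutoff):
        """Keep wavelengths between short_cutoff and long_cutoff (metres)."""
        ny, nx = grid.shape
        kx = np.fft.fftfreq(nx, d=dx)
        ky = np.fft.fftfreq(ny, d=dy)
        k = np.sqrt(kx[None, :] ** 2 + ky[:, None] ** 2)
        # Pass band in wavenumber terms: 1/long_cutoff <= |k| <= 1/short_cutoff
        gain = ((k >= 1.0 / long_cutoff) & (k <= 1.0 / short_cutoff)).astype(float)
        return np.real(np.fft.ifft2(np.fft.fft2(grid) * gain))
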
/calcs/euler/README.md:
--------------------------------------------------------------------------------
1 | # Reliable Euler deconvolution estimates throughout the vertical derivatives of the total-field anomaly
2 |
3 | by
4 | Felipe F. Melo and Valéria C.F. Barbosa
5 |
6 | ## About
7 |
8 | This paper has been published in the journal *Computers & Geosciences*. `Melo, F.F., Barbosa, V.C.F., 2020. Reliable Euler deconvolution estimates throughout the vertical derivatives of the total-field anomaly. Computers & Geosciences, v. 138, p. 104436. https://doi.org/10.1016/j.cageo.2020.104436`.
9 |
10 | This repository contains the source code to perform the first synthetic test presented in the paper. It includes the module `euler_python.py`, the synthetic data `synthetic_data.dat` of the first test, and the scripts `synthetic_test.py`, `estimates_statistics.py` and `plot_functions.py` that generate the results of the synthetic test related to our methodology.
11 |
12 | The *euler_python* program is compatible with both the Python 2.7 and Python 3.7 programming languages.
13 |
14 | ## Abstract
15 |
16 | We propose a novel methodology to select reliable Euler deconvolution estimates throughout the vertical derivatives of the total-field anomaly, grounded on the capability of this quantity to locate anomalies due to its higher signal decay with distance. In applying Euler deconvolution to a small moving-data window, we compute the standard deviation of the vertical derivatives of the total-field anomaly for each data window. Then, we define the reliable source-location estimates as those estimates that are obtained by using the data windows with the largest standard deviations of the vertical derivatives of the total-field anomaly. For all tentative values of the structural index (SI), the reliable estimates with tight clustering define the correct SI and the mean of these estimates defines the source position. We compared our methodology to select reliable Euler source-position estimates with two available methodologies in the literature based on the rejection criteria of data amplitude and of depth uncertainty. We conducted tests on synthetic noise-corrupted data to investigate the sensitivity of our method to deal with the presence of: i) an additive nonlinear background that simulates a regional field; ii) interfering anomalies with distinct amplitudes; iii) weak-, mid-, and strong-interfering anomalies; and iv) distinct noise levels. Most of the tests in the sensitivity analysis show that our methodology to select reliable Euler source-position estimates yielded better interpretation of the simulated magnetic sources than the methodology based on the rejection criteria of data amplitude and of depth uncertainty. The only exception was the tests with high noise levels, in which the reliable Euler estimates selected either by our method or by the rejection criteria yielded poor interpretations. Applications to a real aeromagnetic survey from southern Brazil interpreted an isolated plug intrusion over the Anitápolis anomaly and a swarm of shallow-seated dikes with northwest-southeast trend over the Paranaguá Terrane.
17 |
18 | ## Content
19 |
20 | - euler_python.py:
21 |     General Python module containing the functions to compute the derivatives and
22 | Euler deconvolution.
23 |
24 | - synthetic_test.py:
25 | Python script to generate the synthetic results. The script loads the total-field
26 | anomaly of a synthetic model from the file "synthetic_data.dat" and computes the
27 | Euler deconvolution using the functions in "euler_python.py". The results are
28 | generated using the functions "plot_functions.py" for the plots and
29 | "estimates_statistics.py" to compute the statistics of the data.
30 |
31 | - plot_functions.py:
32 | Python script to generate the figures 2d, 4 and 7 of the first synthetic test.
33 |
34 | - estimates_statistics.py:
35 | Python script to compute the mean of the northing, easting and depth estimates.
36 |
37 | Test data:
38 |
39 | - synthetic_data.dat:
40 |     Synthetic total-field anomaly data generated using the Python package
41 | "Fatiando a Terra": http://fatiando.org/. This data is an example used
42 | in the current publication and shown in figure 2d.
43 |
44 | ## Getting the code
45 |
46 | You can download a copy of all the files in this repository by cloning the
47 | [git](https://git-scm.com/) repository:
48 |
49 | git clone https://github.com/ffigura/Euler-deconvolution-python.git
50 |
51 | or [download a zip archive](https://github.com/ffigura/Euler-deconvolution-python/archive/master.zip).
52 |
53 |
54 | ## Dependencies
55 |
56 | The Python program for Euler deconvolution "euler_python.py" and the scripts "synthetic_test.py"
57 | and "estimates_statistics.py" require the Python package "numpy", and the script "plot_functions.py"
58 | requires the Python packages "numpy" and "matplotlib".
59 | The easiest way to get Python and all libraries installed is through the Anaconda Python
60 | distribution (https://www.anaconda.com/distribution/). After installing Anaconda, install the libraries
61 | by running the following command in your terminal:
62 |
63 | conda install numpy matplotlib
64 |
65 | The program for Euler deconvolution "euler_python.py" and the additional codes "synthetic_test.py",
66 | "plot_functions.py" and "estimates_statistics.py" are compatible with both Python 2.7 and 3.7.
67 |
68 | ## Reproducing the results
69 |
70 | The results and figures (2d, 4 and 7) for the synthetic test are reproducible from the folder `/test_4_sources`.
71 | Running the code `synthetic_test.py` will reproduce the results of our methodology. For more information
72 | read the file `README.MD` or `README.txt` in the folder `/code`.
73 |
74 |
75 | ## License
76 |
77 | All source code is made available under a BSD 3-clause license. You can freely
78 | use and modify the code, without warranty, so long as you provide attribution
79 | to the authors. See `LICENSE.md` for the full license text.
80 |
81 | The manuscript text is not open source. The authors reserve the rights to the
82 | article content, which has been published in the journal
83 | *Computers & Geosciences*.
84 |
--------------------------------------------------------------------------------
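
A minimal sketch of the selection idea summarized in the abstract above (not
the authors' implementation in euler_python.py; the keep_fraction parameter is
hypothetical): rank the moving-data windows by the standard deviation of the
vertical derivative of the total-field anomaly and keep the Euler estimates
from the windows with the largest values.

    import numpy as np

    def select_reliable(estimates, window_std, keep_fraction=0.1):
        """Keep Euler estimates from windows with the largest std of dT/dz.

        estimates  : (N, 3) array of (northing, easting, depth), one per window
        window_std : (N,) std of the vertical derivative in each window
        """
        n_keep = max(1, int(keep_fraction * len(window_std)))
        best = np.argsort(window_std)[-n_keep:]  # windows with largest std
        return estimates[best]
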
/metadata.txt:
--------------------------------------------------------------------------------
1 | # This file contains metadata for your plugin.
2 |
3 | # This file should be included when you package your plugin. # Mandatory items:
4 |
5 | [general]
6 | name=SGTool
7 | qgisMinimumVersion=3.24
8 | qgisMaximumVersion=4.99
9 | description=Simple Potential Field Processing
10 | version=0.3.00
11 | supportsQt6=True
12 | author=Mark Jessell
13 | email=mark.jessell@uwa.edu.au
14 |
15 | about=Simple Potential Field Processing using FFT grid filters, e.g. RTP, 1VD, Band Pass, Analytic Signal, Upward Continuation, worms etc.
16 |
17 | tracker=https://github.com/swaxi/SGTool/issues
18 | repository=https://github.com/swaxi/SGTool
19 | # End of mandatory metadata
20 |
21 | # Recommended items:
22 |
23 | hasProcessingProvider=no
24 | # Uncomment the following line and add your changelog:
25 | changelog=0.3.00
26 | * Add Spatial stats and convolutions to ArcGIS Pro toolbox
27 | * Move ArcGIS Pro files to their own directory
28 | * Update IGRF to allow up to 2030
29 | * Move Euler and IGRF to calcs directory
30 | * Compatibility with both QGIS4/QT6 and QGIS3/QT5
31 | * Add RGB triplets as LUT definition for RGB to greyscale convertor
32 | 0.2.18
33 | * Fix (again) clipping of csv worms
34 | * Fix bug in Filling FFT NaNs in NS direction (Thanks Feargal!)
35 | 0.2.17
36 | * Fix bug saving grid boundary filename
37 | * Add ArcPro Toolbox support (very Beta!!)
38 | * Remove all QGIS library calls from calc engines
39 | * Fix bug in worms spatial filtering
40 | * Force processed ers output files to have tif suffix
41 | * Check for recognised CRS before processing
42 | 0.2.16
43 | * Add Euler Deconvolution derived from https://github.com/ffigura/Euler-deconvolution-python
44 | * Add Noddy grid (grv & mag) import
45 | 0.2.15 Use cosine rolloff for high/low and bandpass filters to reduce ringing
46 | * Change Remove Regional to 1st or 2nd order polynomial
47 | * New grids use stdev scaling of grid to better highlight features
48 | 0.2.14 Don't try to load LINE_ID or LINE codes if they don't exist in WTMM
49 | * added PCA & ICA calculations for multiband grids
50 | * Speed up Grass-like relief calc
51 | * added DAT format points import based on ASEG-GDF2 standard (tested against example datasets only)
52 | * TA demoted to needing RTP_E first in GUI
53 | * save all CSV/XYZ/DAT imports as shapefiles
54 | * fix import matplotlib bug and test for LINE_ID for points in WTMM code
55 | 0.2.13 Remove scikit-image functions as they are hard to install on macos
56 | 0.2.12 Fix loading of RGB tiffs on Linux
57 | * warn instead of autoload of libraries
58 | * refactor rte so it works like other FFT
59 | * recalc buffers so their sizes are products of powers of 2, 3 or 5 (should make large grid calcs faster)
60 | * added tooltips to explain dotpoint
61 | * added plugins.xml to allow direct access via plugin manager
62 | * added 1D circular windowed spatial stats functions
63 | * added DTM curvature classifier
64 | * added Butterworth filter and scaler to DC filter
65 | * added Wavelet Transform Modulus Maxima calcs extracted from grids or for XYZ data (beta!!)
66 | * fix azimuth direction for relief shading
67 | * replace bicubic spline dialog with multilevel bicubic spline dialog (from sagang)
68 | * remove +1x -1y offset from Gaussian filter output
69 | 0.2.11 Remove path insertion in worms code
70 | * add relief shading option (based on Grass algorithm)
71 | * generalise a bit epsg parsing for GRD
72 | * remove degree test for DirCos filter
73 | * Fix problem with toWkt() on Linux and maybe macs
74 | * Improved connectivity of worm polylines
75 | * Another attempt to fix RTP
76 | * Fix missing buffer for THG calc
77 | * Another attempt to fix RTE
78 | * Delete xml file when updating a grid
79 | * Autocheck boxes when parameter changed
80 | 0.2.10 Bug fixes and GUI Update
81 | * GUI now split over 5 tabs
82 | * Add tool to produce outline polygon from a grid
83 | * Improved null handling in Normalise Grids tool
84 | * Remove unused joblib import
85 | 0.2.9 Docked widget now adds in tabified mode if another widget already there
86 | * Reformulated RTP & RTE codes
87 | * Add grid outline tool
88 | * Add extra Utils tab to declutter GUI (for now!)
89 | 0.2.8 Added Threshold to NaN tool
90 | * New conversion of worms to polyline shapefile option
91 | * New grid normalisation tool
92 | * New threshold to NaN tool
93 | * Sun shading for 90 zenith reset to 88 as this can blow up otherwise
94 | * Better handling of irregular grids by using linear infill of NaNs
95 | * Checkboxes labels defined directly
96 | * GUI rearranged so gradient tools all together
97 | * Vertical Integration now uses user-defined buffer
98 | 0.2.7 Add worming and improve installation of plugin on PC & Mac
99 | 0.2.6 Add RGB conversion to grayscale
100 | 0.2.5 2024-12-11
101 | * add convolution filtering
102 | * add simple gridding from csv and xyz format files
103 | * change to pyIGRF for RTP_E calcs
104 | 0.2.3 2024-11-26
105 | * Bug fixes to Directional Cleaning
106 | * Change of behaviour: existing layer now overwritten in QGIS
107 | * Loading of GRD format optimised and now massively quicker
108 | * Check for unlikely lengths (>100) with geographic grids
109 | 0.2.2 2024-11-25: Initial upload to plugin repository.
110 |
111 |
112 | # Tags are comma separated with spaces allowed
113 | tags=python, geophysics, gravity, magnetics, filters
114 |
115 | homepage=https://github.com/swaxi/SGTool
116 | category=Plugins
117 | icon=icon.png
118 | # experimental flag
119 | experimental=False
120 |
121 | # deprecated flag (applies to the whole plugin, not just a single version)
122 | deprecated=False
123 |
124 | # Since QGIS 3.8, a comma separated list of plugins to be installed
125 | # (or upgraded) can be specified.
126 | # Check the documentation for more information.
127 | # plugin_dependencies=
128 |
129 | # Category of the plugin: Raster, Vector, Database or Web
130 | # category=
131 |
132 | # If the plugin can run on QGIS Server.
133 | server=False
134 |
135 |
--------------------------------------------------------------------------------
/calcs/igrf/SHC_files/IGRF3.SHC:
--------------------------------------------------------------------------------
1 | # 3rd Generation International Geomagnetic Reference Field Schmidt semi-normalised spherical harmonic coefficients, degree n=1,13, Released 1981
2 | # in units nanoTesla for IGRF and definitive DGRF main-field models (degree n=1,8 nanoTesla/year for secular variation (SV))
3 | # see http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html for detailed description
4 | 1 13 5 2 1 1965.0 1985.0
5 | 1965.0 1970.0 1975.0 1980.0 1985.0
6 | 1 0 -30334 -30220 -30100 -29988 -29876.0
7 | 1 1 -2119 -2068 -2013 -1957 -1900.5
8 | 1 1 5776 5737 5675 5606 5526.5
9 | 2 0 -1662 -1781 -1902 -1997 -2088.5
10 | 2 1 2997 3000 3010 3028 3044.0
11 | 2 1 -2016 -2047 -2067 -2129 -2192.5
12 | 2 2 1594 1611 1632 1662 1697.0
13 | 2 2 114 25 -68 -199 -325.0
14 | 3 0 1297 1287 1276 1279 1279.0
15 | 3 1 -2038 -2091 -2144 -2181 -2213.5
16 | 3 1 -404 -366 -333 -335 -334.0
17 | 3 2 1292 1278 1260 1251 1247.5
18 | 3 2 240 251 262 271 284.5
19 | 3 3 856 838 830 833 838.0
20 | 3 3 -165 -196 -223 -252 -291.5
21 | 4 0 957 952 946 938 931.0
22 | 4 1 804 800 791 783 776.0
23 | 4 1 148 167 191 212 235.0
24 | 4 2 479 461 438 398 357.0
25 | 4 2 -269 -266 -265 -257 -249.0
26 | 4 3 -390 -395 -405 -419 -428.0
27 | 4 3 13 26 39 53 67.5
28 | 4 4 252 234 216 199 174.0
29 | 4 4 -269 -279 -288 -298 -296.0
30 | 5 0 -219 -216 -218 -219 -211.5
31 | 5 1 358 359 356 357 359.0
32 | 5 1 19 26 31 46 55.0
33 | 5 2 254 262 264 261 257.0
34 | 5 2 128 139 148 149 147.0
35 | 5 3 -31 -42 -59 -74 -90.5
36 | 5 3 -126 -139 -152 -150 -150.0
37 | 5 4 -157 -160 -159 -162 -161.0
38 | 5 4 -97 -91 -83 -78 -71.5
39 | 5 5 -62 -56 -49 -48 -41.0
40 | 5 5 81 83 88 92 102.5
41 | 6 0 45 43 45 49 51.0
42 | 6 1 61 64 66 65 65.0
43 | 6 1 -11 -12 -13 -15 -17.5
44 | 6 2 8 15 28 42 59.0
45 | 6 2 100 100 99 93 86.0
46 | 6 3 -228 -212 -198 -192 -188.0
47 | 6 3 68 72 75 71 71.0
48 | 6 4 4 2 1 4 8.0
49 | 6 4 -32 -37 -41 -43 -51.0
50 | 6 5 1 3 6 14 15.5
51 | 6 5 -8 -6 -4 -2 0.5
52 | 6 6 -111 -112 -111 -108 -108.5
53 | 6 6 -7 1 11 17 17.0
54 | 7 0 75 72 71 70 65.0
55 | 7 1 -57 -57 -56 -59 -63.0
56 | 7 1 -61 -70 -77 -83 -85.0
57 | 7 2 4 1 1 2 4.0
58 | 7 2 -27 -27 -26 -28 -26.0
59 | 7 3 13 14 16 20 22.5
60 | 7 3 -2 -4 -5 -5 -4.0
61 | 7 4 -26 -22 -14 -13 -5.0
62 | 7 4 6 8 10 16 23.0
63 | 7 5 -6 -2 0 1 1.5
64 | 7 5 26 23 22 18 15.5
65 | 7 6 13 13 12 11 11.5
66 | 7 6 -23 -23 -23 -23 -23.5
67 | 7 7 1 -2 -5 -2 -2.0
68 | 7 7 -12 -11 -12 -10 -4.5
69 | 8 0 13 14 14 20 24.0
70 | 8 1 5 6 6 7 6.0
71 | 8 1 7 7 6 7 6.5
72 | 8 2 -4 -2 -1 1 -0.5
73 | 8 2 -12 -15 -16 -18 -21.5
74 | 8 3 -14 -13 -12 -11 -9.5
75 | 8 3 9 6 4 4 4.0
76 | 8 4 0 -3 -8 -7 -11.0
77 | 8 4 -16 -17 -19 -22 -26.0
78 | 8 5 8 5 4 4 3.0
79 | 8 5 4 6 6 9 10.0
80 | 8 6 -1 0 0 3 6.5
81 | 8 6 24 21 18 16 17.0
82 | 8 7 11 11 10 7 5.5
83 | 8 7 -3 -6 -10 -13 -18.5
84 | 8 8 4 3 1 -1 5.0
85 | 8 8 -17 -16 -17 -15 -11.0
86 | 9 0 8 8 7 6 6
87 | 9 1 10 10 10 11 11
88 | 9 1 -22 -21 -21 -21 -21
89 | 9 2 2 2 2 2 2
90 | 9 2 15 16 16 16 16
91 | 9 3 -13 -12 -12 -12 -12
92 | 9 3 7 6 7 9 9
93 | 9 4 10 10 10 9 9
94 | 9 4 -4 -4 -4 -5 -5
95 | 9 5 -1 -1 -1 -3 -3
96 | 9 5 -5 -5 -5 -7 -7
97 | 9 6 -1 0 -1 -1 -1
98 | 9 6 10 10 10 9 9
99 | 9 7 5 3 4 7 7
100 | 9 7 10 11 11 10 10
101 | 9 8 1 1 1 1 1
102 | 9 8 -4 -2 -3 -6 -6
103 | 9 9 -2 -1 -2 -5 -5
104 | 9 9 1 1 1 2 2
105 | 10 0 -2 -3 -3 -3 -3
106 | 10 1 -3 -3 -3 -4 -4
107 | 10 1 2 1 1 1 1
108 | 10 2 2 2 2 2 2
109 | 10 2 1 1 1 1 1
110 | 10 3 -5 -5 -5 -5 -5
111 | 10 3 2 3 3 2 2
112 | 10 4 -2 -1 -2 -2 -2
113 | 10 4 6 4 4 5 5
114 | 10 5 4 6 5 5 5
115 | 10 5 -4 -4 -4 -4 -4
116 | 10 6 4 4 4 3 3
117 | 10 6 0 0 -1 -1 -1
118 | 10 7 0 1 1 1 1
119 | 10 7 -2 -1 -1 -2 -2
120 | 10 8 2 0 0 2 2
121 | 10 8 3 3 3 4 4
122 | 10 9 2 3 3 3 3
123 | 10 9 0 1 1 -1 -1
124 | 10 10 0 -1 -1 0 0
125 | 10 10 -6 -4 -5 -6 -6
126 | 11 0 0 0 0 0 0
127 | 11 1 0 0 0 0 0
128 | 11 1 0 0 0 0 0
129 | 11 2 0 0 0 0 0
130 | 11 2 0 0 0 0 0
131 | 11 3 0 0 0 0 0
132 | 11 3 0 0 0 0 0
133 | 11 4 0 0 0 0 0
134 | 11 4 0 0 0 0 0
135 | 11 5 0 0 0 0 0
136 | 11 5 0 0 0 0 0
137 | 11 6 0 0 0 0 0
138 | 11 6 0 0 0 0 0
139 | 11 7 0 0 0 0 0
140 | 11 7 0 0 0 0 0
141 | 11 8 0 0 0 0 0
142 | 11 8 0 0 0 0 0
143 | 11 9 0 0 0 0 0
144 | 11 9 0 0 0 0 0
145 | 11 10 0 0 0 0 0
146 | 11 10 0 0 0 0 0
147 | 11 11 0 0 0 0 0
148 | 11 11 0 0 0 0 0
149 | 12 0 0 0 0 0 0
150 | 12 1 0 0 0 0 0
151 | 12 1 0 0 0 0 0
152 | 12 2 0 0 0 0 0
153 | 12 2 0 0 0 0 0
154 | 12 3 0 0 0 0 0
155 | 12 3 0 0 0 0 0
156 | 12 4 0 0 0 0 0
157 | 12 4 0 0 0 0 0
158 | 12 5 0 0 0 0 0
159 | 12 5 0 0 0 0 0
160 | 12 6 0 0 0 0 0
161 | 12 6 0 0 0 0 0
162 | 12 7 0 0 0 0 0
163 | 12 7 0 0 0 0 0
164 | 12 8 0 0 0 0 0
165 | 12 8 0 0 0 0 0
166 | 12 9 0 0 0 0 0
167 | 12 9 0 0 0 0 0
168 | 12 10 0 0 0 0 0
169 | 12 10 0 0 0 0 0
170 | 12 11 0 0 0 0 0
171 | 12 11 0 0 0 0 0
172 | 12 12 0 0 0 0 0
173 | 12 12 0 0 0 0 0
174 | 13 0 0 0 0 0 0
175 | 13 1 0 0 0 0 0
176 | 13 1 0 0 0 0 0
177 | 13 2 0 0 0 0 0
178 | 13 2 0 0 0 0 0
179 | 13 3 0 0 0 0 0
180 | 13 3 0 0 0 0 0
181 | 13 4 0 0 0 0 0
182 | 13 4 0 0 0 0 0
183 | 13 5 0 0 0 0 0
184 | 13 5 0 0 0 0 0
185 | 13 6 0 0 0 0 0
186 | 13 6 0 0 0 0 0
187 | 13 7 0 0 0 0 0
188 | 13 7 0 0 0 0 0
189 | 13 8 0 0 0 0 0
190 | 13 8 0 0 0 0 0
191 | 13 9 0 0 0 0 0
192 | 13 9 0 0 0 0 0
193 | 13 10 0 0 0 0 0
194 | 13 10 0 0 0 0 0
195 | 13 11 0 0 0 0 0
196 | 13 11 0 0 0 0 0
197 | 13 12 0 0 0 0 0
198 | 13 12 0 0 0 0 0
199 | 13 13 0 0 0 0 0
200 | 13 13 0 0 0 0 0
201 |
--------------------------------------------------------------------------------
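
A minimal sketch for reading a coefficient block like the one above, assuming
the layout shown ('#' lines are comments, the second non-comment line lists the
epochs, and each following row holds one Gauss coefficient term as n, m and one
value per epoch; g and h terms share the same n, m on consecutive rows):

    import numpy as np

    with open("calcs/igrf/SHC_files/IGRF3.SHC") as f:
        lines = [ln for ln in f if not ln.startswith("#") and ln.strip()]
    epochs = np.array(lines[1].split(), dtype=float)  # 1965.0 ... 1985.0
    table = np.loadtxt(lines[2:])                     # one coefficient row per line
    n = table[:, 0].astype(int)
    m = table[:, 1].astype(int)
    coeffs = table[:, 2:]                             # one column per epoch
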
/resources.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Resource object code
4 | #
5 | # Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from qgis.PyQt import QtCore
10 |
11 | qt_resource_data = b"\
12 | \x00\x00\x04\xfe\
13 | \x89\
14 | \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
15 | \x00\x00\x17\x00\x00\x00\x18\x08\x03\x00\x00\x00\x26\xa2\x96\x47\
16 | \x00\x00\x00\x15\x74\x45\x58\x74\x43\x72\x65\x61\x74\x69\x6f\x6e\
17 | \x20\x54\x69\x6d\x65\x00\x07\xe7\x05\x1f\x10\x29\x2c\xb8\xb6\xfa\
18 | \x2b\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xe8\x0b\x12\x10\x34\x1d\
19 | \x7f\x09\xa1\x14\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x18\
20 | \x00\x00\x2e\x18\x01\x2a\xaa\x27\x20\x00\x00\x03\x00\x50\x4c\x54\
21 | \x45\x00\x00\x00\x10\x10\x10\x18\x18\x18\x29\x29\x29\x31\x31\x31\
22 | \x39\x39\x39\x42\x42\x42\x5a\x5a\x5a\x63\x63\x63\x6b\x6b\x6b\x7b\
23 | \x7b\x7b\xad\xa5\x94\xb5\xad\xa5\xbd\x9c\x7b\xd6\xc6\xa5\xef\xc6\
24 | \x73\xef\xce\x9c\xf7\xad\x5a\xf7\xbd\x42\xf7\xf7\xf7\xff\x5a\x00\
25 | \xff\x63\x00\xff\x63\x08\xff\x63\x10\xff\x6b\x00\xff\x6b\x08\xff\
26 | \x6b\x18\xff\x73\x00\xff\x73\x21\xff\x7b\x00\xff\x7b\x08\xff\x7b\
27 | \x39\xff\x84\x00\xff\x84\x08\xff\x84\x39\xff\x84\x42\xff\x8c\x00\
28 | \xff\x8c\x10\xff\x8c\x39\xff\x94\x00\xff\x94\x52\xff\x9c\x00\xff\
29 | \x9c\x08\xff\x9c\x18\xff\x9c\x42\xff\x9c\x5a\xff\xa5\x00\xff\xa5\
30 | \x4a\xff\xad\x00\xff\xad\x21\xff\xad\x31\xff\xad\x5a\xff\xad\x6b\
31 | \xff\xad\x73\xff\xad\x7b\xff\xb5\x18\xff\xb5\x31\xff\xb5\x39\xff\
32 | \xb5\x42\xff\xb5\x84\xff\xbd\x4a\xff\xc6\x73\xff\xc6\x84\xff\xc6\
33 | \x8c\xff\xc6\x94\xff\xce\x7b\xff\xce\xad\xff\xd6\x73\xff\xd6\x7b\
34 | \xff\xd6\x9c\xff\xd6\xad\xff\xd6\xb5\xff\xd6\xbd\xff\xde\xc6\xff\
35 | \xe7\xb5\xff\xe7\xd6\xff\xe7\xde\xff\xef\xc6\xff\xef\xce\xff\xef\
36 | \xd6\xff\xef\xe7\xff\xf7\xe7\xff\xf7\xef\xff\xff\xf7\xff\xff\xff\
37 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
38 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
39 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
40 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
41 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
42 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
43 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
44 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
45 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
46 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
47 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
48 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
49 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
50 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
51 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
52 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
53 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
54 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
55 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
56 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
57 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
58 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
59 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
60 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
61 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
62 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
63 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
64 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
65 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
66 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
67 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
68 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
69 | \xff\xf8\x6d\x0b\x5a\x00\x00\x00\x55\x74\x52\x4e\x53\xff\xff\xff\
70 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
71 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
72 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
73 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
74 | \xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
75 | \xff\x00\xb0\xa1\x07\xcf\x00\x00\x01\x0f\x49\x44\x41\x54\x78\xda\
76 | \x6d\xd1\x89\x4e\x83\x40\x10\x80\xe1\xf5\x08\x5a\xbc\x05\xca\x2a\
77 | \xc5\xae\x15\xed\xa1\x22\x2a\xe2\x7d\x57\x6c\xc4\x36\x15\xe6\xfd\
78 | \x9f\xa5\x33\x53\xd4\xc5\xf8\x07\x92\xe5\x0b\x99\xec\x82\x00\x80\
79 | \xe2\xab\xf8\xe8\xbf\xf6\x41\x4f\xe0\xbd\xba\x19\x45\x51\x78\x39\
80 | \x2c\xf2\x51\xc5\x8d\xf9\xb5\x28\xc4\xae\xee\xba\xa7\x37\xd7\xc3\
81 | \xd2\x2d\x43\x88\xb9\xf5\x30\xec\x52\xed\x76\x6f\x54\xba\x29\x30\
82 | \xe3\x99\x11\x0b\xee\x4b\x9f\x25\x17\xcb\x3d\x46\x2a\x9f\xba\xc1\
83 | \x5e\x3b\x43\xe5\x95\x7a\x60\xb7\x66\xf8\xc9\x3c\x0c\x90\x15\x5e\
84 | \x6a\x37\xe5\xf7\x6b\xec\x2b\x41\xa0\xd8\x7d\x2c\x7d\x4c\x71\x9f\
85 | \x3c\x68\x43\x29\x42\x72\x0f\xcb\xd0\x2d\x73\x61\x71\xe9\xed\x64\
86 | \xdf\x47\x15\x1e\x17\xf3\xb9\xe8\x53\x00\x9c\xfb\xa5\x4a\x19\xc3\
87 | \xb7\x63\x47\x9e\x47\x03\xa5\x94\x17\xa0\x7b\xb6\x43\x28\x84\x73\
88 | \x00\x15\x87\xb8\xf4\xd6\x1f\x7f\x92\x34\xc7\x71\xea\x63\xc8\x75\
89 | \x6f\x39\x98\x8d\x65\xd0\xc9\x34\xdf\x62\xb4\xeb\x49\xd1\x74\x73\
90 | \xcd\x13\x66\xfb\x16\xa0\x71\xac\xcf\xf9\x9c\x7a\x02\x85\xfb\xa2\
91 | \x3b\x34\x88\xf7\x70\x31\xae\xec\x07\xde\x69\xfa\xe0\xf7\xff\xfe\
92 | \xe4\x6e\x37\x07\xf0\x8f\x6b\x4d\x00\xe4\xb6\x73\xb8\x5e\xaf\xaf\
93 | \x7e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
94 | "
95 |
96 | qt_resource_name = b"\
97 | \x00\x07\
98 | \x07\x3b\xe0\xb3\
99 | \x00\x70\
100 | \x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
101 | \x00\x06\
102 | \x05\x7c\xb6\x5c\
103 | \x00\x53\
104 | \x00\x47\x00\x54\x00\x6f\x00\x6f\x00\x6c\
105 | \x00\x08\
106 | \x0a\x61\x5a\xa7\
107 | \x00\x69\
108 | \x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
109 | "
110 |
111 | qt_resource_struct_v1 = b"\
112 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
113 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
114 | \x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
115 | \x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
116 | "
117 |
118 | qt_resource_struct_v2 = b"\
119 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
120 | \x00\x00\x00\x00\x00\x00\x00\x00\
121 | \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
122 | \x00\x00\x00\x00\x00\x00\x00\x00\
123 | \x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
124 | \x00\x00\x00\x00\x00\x00\x00\x00\
125 | \x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
126 | \x00\x00\x01\x93\x3e\x79\x30\x42\
127 | "
128 |
129 | qt_version = [int(v) for v in QtCore.qVersion().split(".")]
130 | if qt_version < [5, 8, 0]:
131 | rcc_version = 1
132 | qt_resource_struct = qt_resource_struct_v1
133 | else:
134 | rcc_version = 2
135 | qt_resource_struct = qt_resource_struct_v2
136 |
137 |
138 | def qInitResources():
139 | QtCore.qRegisterResourceData(
140 | rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
141 | )
142 |
143 |
144 | def qCleanupResources():
145 | QtCore.qUnregisterResourceData(
146 | rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
147 | )
148 |
149 |
150 | qInitResources()
151 |
--------------------------------------------------------------------------------
/help/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # SGTool documentation build configuration file, created by
4 | # sphinx-quickstart on Sun Feb 12 17:11:03 2012.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys, os
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #sys.path.insert(0, os.path.abspath('.'))
20 |
21 | # -- General configuration -----------------------------------------------------
22 |
23 | # If your documentation needs a minimal Sphinx version, state it here.
24 | #needs_sphinx = '1.0'
25 |
26 | # Add any Sphinx extension module names here, as strings. They can be extensions
27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 | extensions = ['sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode']
29 |
30 | # Add any paths that contain templates here, relative to this directory.
31 | templates_path = ['_templates']
32 |
33 | # The suffix of source filenames.
34 | source_suffix = '.rst'
35 |
36 | # The encoding of source files.
37 | #source_encoding = 'utf-8-sig'
38 |
39 | # The master toctree document.
40 | master_doc = 'index'
41 |
42 | # General information about the project.
43 | project = u'SGTool'
44 | copyright = u'2013, Mark Jessell'
45 |
46 | # The version info for the project you're documenting, acts as replacement for
47 | # |version| and |release|, also used in various other places throughout the
48 | # built documents.
49 | #
50 | # The short X.Y version.
51 | version = '0.1'
52 | # The full version, including alpha/beta/rc tags.
53 | release = '0.1'
54 |
55 | # The language for content autogenerated by Sphinx. Refer to documentation
56 | # for a list of supported languages.
57 | #language = None
58 |
59 | # There are two options for replacing |today|: either, you set today to some
60 | # non-false value, then it is used:
61 | #today = ''
62 | # Else, today_fmt is used as the format for a strftime call.
63 | #today_fmt = '%B %d, %Y'
64 |
65 | # List of patterns, relative to source directory, that match files and
66 | # directories to ignore when looking for source files.
67 | exclude_patterns = []
68 |
69 | # The reST default role (used for this markup: `text`) to use for all documents.
70 | #default_role = None
71 |
72 | # If true, '()' will be appended to :func: etc. cross-reference text.
73 | #add_function_parentheses = True
74 |
75 | # If true, the current module name will be prepended to all description
76 | # unit titles (such as .. function::).
77 | #add_TemplateModuleNames = True
78 |
79 | # If true, sectionauthor and moduleauthor directives will be shown in the
80 | # output. They are ignored by default.
81 | #show_authors = False
82 |
83 | # The name of the Pygments (syntax highlighting) style to use.
84 | pygments_style = 'sphinx'
85 |
86 | # A list of ignored prefixes for module index sorting.
87 | #modindex_common_prefix = []
88 |
89 |
90 | # -- Options for HTML output ---------------------------------------------------
91 |
92 | # The theme to use for HTML and HTML Help pages. See the documentation for
93 | # a list of builtin themes.
94 | html_theme = 'default'
95 |
96 | # Theme options are theme-specific and customize the look and feel of a theme
97 | # further. For a list of options available for each theme, see the
98 | # documentation.
99 | #html_theme_options = {}
100 |
101 | # Add any paths that contain custom themes here, relative to this directory.
102 | #html_theme_path = []
103 |
104 | # The name for this set of Sphinx documents. If None, it defaults to
105 | # " v documentation".
106 | #html_title = None
107 |
108 | # A shorter title for the navigation bar. Default is the same as html_title.
109 | #html_short_title = None
110 |
111 | # The name of an image file (relative to this directory) to place at the top
112 | # of the sidebar.
113 | #html_logo = None
114 |
115 | # The name of an image file (within the static path) to use as favicon of the
116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
117 | # pixels large.
118 | #html_favicon = None
119 |
120 | # Add any paths that contain custom static files (such as style sheets) here,
121 | # relative to this directory. They are copied after the builtin static files,
122 | # so a file named "default.css" will overwrite the builtin "default.css".
123 | html_static_path = ['_static']
124 |
125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
126 | # using the given strftime format.
127 | #html_last_updated_fmt = '%b %d, %Y'
128 |
129 | # If true, SmartyPants will be used to convert quotes and dashes to
130 | # typographically correct entities.
131 | #html_use_smartypants = True
132 |
133 | # Custom sidebar templates, maps document names to template names.
134 | #html_sidebars = {}
135 |
136 | # Additional templates that should be rendered to pages, maps page names to
137 | # template names.
138 | #html_additional_pages = {}
139 |
140 | # If false, no module index is generated.
141 | #html_domain_indices = True
142 |
143 | # If false, no index is generated.
144 | #html_use_index = True
145 |
146 | # If true, the index is split into individual pages for each letter.
147 | #html_split_index = False
148 |
149 | # If true, links to the reST sources are added to the pages.
150 | #html_show_sourcelink = True
151 |
152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
153 | #html_show_sphinx = True
154 |
155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
156 | #html_show_copyright = True
157 |
158 | # If true, an OpenSearch description file will be output, and all pages will
159 | # contain a <link> tag referring to it. The value of this option must be the
160 | # base URL from which the finished HTML is served.
161 | #html_use_opensearch = ''
162 |
163 | # This is the file name suffix for HTML files (e.g. ".xhtml").
164 | #html_file_suffix = None
165 |
166 | # Output file base name for HTML help builder.
167 | htmlhelp_basename = 'TemplateClassdoc'
168 |
169 |
170 | # -- Options for LaTeX output --------------------------------------------------
171 |
172 | # The paper size ('letter' or 'a4').
173 | #latex_paper_size = 'letter'
174 |
175 | # The font size ('10pt', '11pt' or '12pt').
176 | #latex_font_size = '10pt'
177 |
178 | # Grouping the document tree into LaTeX files. List of tuples
179 | # (source start file, target name, title, author, documentclass [howto/manual]).
180 | latex_documents = [
181 | ('index', 'SGTool.tex', u'SGTool Documentation',
182 | u'Mark Jessell', 'manual'),
183 | ]
184 |
185 | # The name of an image file (relative to this directory) to place at the top of
186 | # the title page.
187 | #latex_logo = None
188 |
189 | # For "manual" documents, if this is true, then toplevel headings are parts,
190 | # not chapters.
191 | #latex_use_parts = False
192 |
193 | # If true, show page references after internal links.
194 | #latex_show_pagerefs = False
195 |
196 | # If true, show URL addresses after external links.
197 | #latex_show_urls = False
198 |
199 | # Additional stuff for the LaTeX preamble.
200 | #latex_preamble = ''
201 |
202 | # Documents to append as an appendix to all manuals.
203 | #latex_appendices = []
204 |
205 | # If false, no module index is generated.
206 | #latex_domain_indices = True
207 |
208 |
209 | # -- Options for manual page output --------------------------------------------
210 |
211 | # One entry per manual page. List of tuples
212 | # (source start file, name, description, authors, manual section).
213 | man_pages = [
214 | ('index', 'TemplateClass', u'SGTool Documentation',
215 | [u'Mark Jessell'], 1)
216 | ]
217 |
--------------------------------------------------------------------------------
/calcs/PSplot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class PowerSpectrumDock:
5 | def __init__(self, grid, gridname, dx, dy, x, y):
6 | """
7 | Initialize the PowerSpectrumDock class to display plots in a docked widget.
8 |
9 | Parameters:
10 | dock_widget (QDockWidget): The docked widget to display the plot.
11 | grid (numpy.ndarray): The 2D grid data.
12 | dx (float): Grid spacing in the x-direction.
13 | dy (float): Grid spacing in the y-direction.
14 | """
15 | self.grid = grid
16 | self.dx = dx
17 | self.dy = dy
18 | self.gridname = gridname
19 | self.x = y
20 | self.y = x
21 |
22 | def plot_grid_and_power_spectrum(self):
23 | """
24 | Plot the grid and its radially averaged power spectrum.
25 | """
26 | import matplotlib.pyplot as plt
27 |
28 | # Compute and plot radially averaged power spectrum
29 | self.grid = np.nan_to_num(self.grid, nan=0.0)
30 |
31 | kx, ky, pds = self.power_density_spectra(
32 | self.x, self.y, self.grid, self.grid.shape
33 | )
34 | wavenumbers, power_spectrum = self.radial_average_spectrum(kx, ky, pds)
35 | valid_indices = ~np.isinf(wavenumbers) & ~np.isnan(
36 | wavenumbers
37 | ) # Exclude invalid wavelengths
38 | wavenumbers = wavenumbers[valid_indices]
39 | power_spectrum = power_spectrum[valid_indices]
40 |
41 | # Create a standalone figure
42 | fig, (ax_image, ax_spectrum) = plt.subplots(1, 2, figsize=(12, 6))
43 |
44 | # Plot the grid with the correct aspect ratio
45 | # Compute 5th and 95th percentiles
46 | vmin, vmax = np.percentile(self.grid, [5, 95])
47 | ax_image.imshow(
48 | np.flipud(self.grid),
49 | cmap="viridis",
50 | origin="lower",
51 | vmin=vmin,
52 | vmax=vmax,
53 | aspect=self.dy / self.dx,
54 | )
55 | ax_image.set_title(self.gridname)
56 | ax_image.set_xlabel("X")
57 | ax_image.set_ylabel("Y")
58 |
59 |         # Plot the power spectrum: ln(power) against a linear wavenumber axis
60 | if len(wavenumbers) > 0 and len(power_spectrum) > 0:
61 | ax_spectrum.plot(
62 | wavenumbers * self.dx, np.log(power_spectrum), linestyle="-"
63 | )
64 | ax_spectrum.set_title("Radially Averaged Power Spectrum")
65 | ax_spectrum.set_xlabel("Wavenumber (linear scale)")
66 | ax_spectrum.set_ylabel("ln(Power)")
67 | ax_spectrum.grid(True)
68 | else:
69 | ax_spectrum.text(
70 | 0.5,
71 | 0.5,
72 | "No data",
73 | transform=ax_spectrum.transAxes,
74 | ha="center",
75 | va="center",
76 | fontsize=12,
77 | )
78 |
79 | # Display the plot
80 | plt.tight_layout()
81 | plt.show()
82 |
83 | def power_density_spectra(self, x, y, data, shape):
84 | """
85 | Calculates the Power Density Spectra of a 2D gridded potential field
86 | through the FFT:
87 |
88 | .. math::
89 |
90 |             \Phi_{\Delta T}(k_x, k_y) = \left| F\left\{\Delta T\right\}(k_x, k_y) \right|^2
91 |
92 | .. note:: Requires gridded data.
93 |
94 | .. note:: x, y, z and height should be in meters.
95 |
96 | Parameters:
97 |
98 | * x, y : 1D-arrays
99 | The x and y coordinates of the grid points
100 | * data : 1D-array
101 | The potential field at the grid points
102 | * shape : tuple = (nx, ny)
103 | The shape of the grid
104 |
105 | Returns:
106 |
107 | * kx, ky : 2D-arrays
108 | The wavenumbers of each Power Density Spectra point
109 | * pds : 2D-array
110 | The Power Density Spectra of the data
111 | """
112 | kx, ky = self._fftfreqs(x, y, shape, shape)
113 | pds = abs(np.fft.fft2(np.reshape(data, shape))) ** 2
114 | return kx, ky, pds
115 |
116 | def radial_average_spectrum(self, kx, ky, pds, max_radius=None, ring_width=None):
117 | """
118 | Calculates the average of the Power Density Spectra points that falls
119 | inside concentric rings built around the origin of the wavenumber
120 | coordinate system with constant width.
121 |
122 | The width of the rings and the inner radius of the biggest ring can be
123 | changed by setting the optional parameters ring_width and max_radius,
124 | respectively.
125 |
126 | .. note:: To calculate the radially averaged power density spectra
127 | use the outputs of the function power_density_spectra as
128 | input of this one.
129 |
130 | Parameters:
131 |
132 |         * kx, ky : 2D-arrays
133 |             The wavenumbers of each Power Density Spectra point,
134 |             as returned by power_density_spectra
135 |         * pds : 2D-array
136 |             The Power Density Spectra of the data,
137 |             as returned by power_density_spectra
138 | * max_radius : float (optional)
139 | Inner radius of the biggest ring.
140 | By default it's set as the minimum of kx.max() and ky.max().
141 | Making it smaller leaves points outside of the averaging,
142 | and making it bigger includes points nearer to the boundaries.
143 | * ring_width : float (optional)
144 | Width of the rings.
145 |             By default it's set as the largest value of :math:`\Delta k_x` and
146 |             :math:`\Delta k_y`, these being the sample spacings of the kx and
147 |             ky arrays.
148 | Making it bigger gives more populated averages, and
149 | making it smaller lowers the ammount of points per ring
150 | (use it carefully).
151 |
152 | Returns:
153 |
154 | * k_radial : 1D-array
155 | Wavenumbers of each Radially Averaged Power Spectrum point.
156 | Also, the inner radius of the rings.
157 | * pds_radial : 1D array
158 | Radially Averaged Power Spectrum
159 | """
160 | nx, ny = pds.shape
161 | if max_radius is None:
162 | max_radius = min(kx.max(), ky.max())
163 | if ring_width is None:
164 | ring_width = max(kx[1, 0], ky[0, 1])
165 | k = np.sqrt(kx**2 + ky**2)
166 | pds_radial = []
167 | k_radial = []
168 | radius_i = -1
169 | while True:
170 | radius_i += 1
171 | if radius_i * ring_width > max_radius:
172 | break
173 | else:
174 | if radius_i == 0:
175 | inside = k <= 0.5 * ring_width
176 | else:
177 | inside = np.logical_and(
178 | k > (radius_i - 0.5) * ring_width,
179 | k <= (radius_i + 0.5) * ring_width,
180 | )
181 | pds_radial.append(pds[inside].mean())
182 | k_radial.append(radius_i * ring_width)
183 | return np.array(k_radial), np.array(pds_radial)
184 |
185 | def _pad_data(self, data, shape):
186 | n = self._nextpow2(np.max(shape))
187 | nx, ny = shape
188 | padx = (n - nx) // 2
189 | pady = (n - ny) // 2
190 | padded = np.pad(data.reshape(shape), ((padx, padx), (pady, pady)), mode="edge")
191 | return padded, padx, pady
192 |
193 | def _nextpow2(self, i):
194 | buf = np.ceil(np.log(i) / np.log(2))
195 | return int(2**buf)
196 |
197 | def _fftfreqs(self, x, y, shape, padshape):
198 | """
199 | Get two 2D-arrays with the wave numbers in the x and y directions.
200 | """
201 | nx, ny = shape
202 | dx = (x.max() - x.min()) / (nx - 1)
203 | fx = 2 * np.pi * np.fft.fftfreq(padshape[0], dx)
204 | dy = (y.max() - y.min()) / (ny - 1)
205 | fy = 2 * np.pi * np.fft.fftfreq(padshape[1], dy)
206 | return np.meshgrid(fy, fx)[::-1]
207 |
--------------------------------------------------------------------------------
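
A minimal usage sketch for the spectral routines above (illustrative only: `obj` stands in for an instance of the surrounding class, whose name and constructor lie outside this excerpt; the grid values are synthetic):

# Hypothetical example -- `obj` is assumed to be an instance of the class
# that defines power_density_spectra and radial_average_spectrum above.
import numpy as np

ny, nx = 64, 64
x = np.linspace(0.0, 6300.0, nx)              # metres, per the docstring note
y = np.linspace(0.0, 6300.0, ny)
X, Y = np.meshgrid(x, y)
data = np.sin(X / 500.0) + np.cos(Y / 800.0)  # synthetic potential field

kx, ky, pds = obj.power_density_spectra(X.ravel(), Y.ravel(),
                                        data.ravel(), (ny, nx))
k_radial, pds_radial = obj.radial_average_spectrum(kx, ky, pds)
# ln(pds_radial) against k_radial is what the plotting method above displays
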
/calcs/euler/plot_functions.py:
--------------------------------------------------------------------------------
1 | """
2 | Plot functions
3 |
4 | A Python program to plot the total-field anomaly and the estimates
5 | in the classic plot layout.
6 | 
7 | This code plots Figures 2d, 4 and 7 into the folder 'figures'.
8 |
9 | This code is released from the paper:
10 | Reliable Euler deconvolution estimates throughout the
11 | vertical derivatives of the total-field anomaly
12 |
13 | The program is distributed under the terms and conditions in the file README.txt
14 |
15 | authors: Felipe F. Melo and Valeria C.F. Barbosa, 2019
16 | email: felipe146@hotmail.com, valcris@on.br
17 | """
18 |
19 | import numpy as np
20 | import matplotlib.pylab as plt
21 | import matplotlib.patches as patches
22 |
23 | #######################################################################
24 |
25 | def plot_input_data(data,xi,yi,zi,shape):
26 |
27 | '''
28 | Plot the input data - Figure 2d
29 | '''
30 |
31 | fig=plt.figure(figsize=(5, 4))
32 |
33 | rect0 = patches.Rectangle((26,0),2,24,linewidth=1,edgecolor='black',
34 | facecolor='none',linestyle='-',zorder=2)
35 | rect1 = patches.Rectangle((16.850,10),0.3,24,linewidth=1,
36 | edgecolor='black',facecolor='none',
37 | linestyle='-',zorder=2)
38 | rect2 = patches.Rectangle((6.850,15.850),0.3,0.3,linewidth=1,
39 | edgecolor='black',facecolor='none',
40 | linestyle='-',zorder=2)
41 | rect3 = patches.Rectangle((6.800,6.800),0.4,0.4,linewidth=1,
42 | edgecolor='black',facecolor='none',
43 | linestyle='-',zorder=2)
44 |
45 | im=plt.contourf(yi.reshape(shape)/1000.,xi.reshape(shape)/1000.,
46 | data.reshape(shape), 30, cmap='jet')
47 | ax = plt.gca()
48 | ax.set_ylabel('Northing (km)', fontsize = 14)
49 | ax.set_xlabel('Easting (km)', fontsize = 14)
50 | ax.tick_params(labelsize=13)
51 | cbar=fig.colorbar(im,pad=0.01,shrink=1)
52 | cbar.set_label('nT',labelpad=-21,y=-0.03, rotation=0,fontsize=13)
53 | ax.add_patch(rect0)
54 | ax.add_patch(rect1)
55 | ax.add_patch(rect2)
56 | ax.add_patch(rect3)
57 | cbar.ax.tick_params(labelsize=13)
58 | ax.set_xlim(np.min(yi/1000.),np.max(yi/1000.))
59 | ax.set_ylim(np.min(xi/1000.),np.max(xi/1000.))
60 | plt.text(22.5,5,'P0',color='k', size='large')
61 | plt.text(13.5,14,'P1',color='k', size='large')
62 | plt.text(3,16,'P2',color='k', size='large')
63 | plt.text(3,5,'P3',color='k', size='large')
64 |
65 | plt.savefig('figures/FIG2d.png', bbox_inches='tight', dpi = 600)
66 | plt.close('all')
67 |
68 | return
69 |
70 |
71 | def plot_classic(data,est_classic,xi,yi,zi,shape):
72 | '''
73 | Classic plot of the depth and base level estimates for all SIs
74 | '''
75 |
76 | '''
77 | Figure 4 source-position (depth) estimates
78 | '''
79 | vet_title=["(a)","(b)","(c)","(d)"]
80 |
81 |
82 |     minz = 0.
83 |     maxz = 2.
84 | levelsz = np.linspace(minz,maxz,11)
85 |
86 | #depth plots
87 | plt.figure(figsize=(12, 8.5))
88 |     for i in range(4):
89 |
90 | plt.subplot(2,2,i+1)
91 | plt.title(vet_title[i], fontsize = 14, loc='center',y=-0.27)
92 | plt.contourf(yi.reshape(shape)/1000.,xi.reshape(shape)/1000.,
93 | data.reshape(shape), 30, cmap='gray')
94 | ax = plt.gca()
95 | ax.set_ylabel('Northing (km)', fontsize = 14)
96 | ax.set_xlabel('Easting (km)', fontsize = 14)
97 | ax.tick_params(labelsize=13)
98 | scat=plt.scatter(est_classic[i][:,1]/1000.,
99 | est_classic[i][:,0]/1000.,s=40,
100 | c=(est_classic[i][:,2]/1000.),
101 | cmap='terrain_r',vmin=minz,vmax=maxz,
102 | edgecolors='k')
103 | cbar=plt.colorbar(scat,ticks=levelsz,pad=0.01,shrink=1,
104 | format='%0.1f')
105 |         cbar.set_label(r'$\^z_o$ (km)',labelpad=-18,y=-0.03, rotation=0,
106 | fontsize=13)
107 | cbar.ax.tick_params(labelsize=13)
108 | ax.set_xlim(np.min(yi/1000.),np.max(yi/1000.))
109 | ax.set_ylim(np.min(xi/1000.),np.max(xi/1000.))
110 | ax.set_xticks([0,5,10,15,20,25])
111 | plt.text(22.5,5,'P0',color='w', size='large')
112 | plt.text(13.5,14,'P1',color='w', size='large')
113 | plt.text(3,16,'P2',color='w', size='large')
114 | plt.text(3,5,'P3',color='w', size='large')
115 |
116 | plt.subplots_adjust(wspace=0.15,hspace=0.32)
117 |
118 | plt.savefig('figures/FIG4.png',bbox_inches='tight', dpi = 600)
119 | plt.close('all')
120 |
121 | '''
122 |     Figure 7 base-level estimates
123 | '''
124 | plt.figure(figsize=(12, 8.5))
125 |     for i in range(4):
126 |
127 | if i == 0:
128 | #base level estimates for SI = 0 have higher amplitude
129 |             minb = -70
130 |             maxb = 20
131 | levelsb = np.linspace(minb,maxb,7)
132 |
133 | plt.subplot(2,2,i+1)
134 | plt.title(vet_title[i], fontsize = 14, loc='center',
135 | y=-0.27)
136 | plt.contourf(yi.reshape(shape)/1000.,xi.reshape(shape)/1000.,
137 | data.reshape(shape), 30, cmap='gray')
138 | ax=plt.gca()
139 | ax.set_ylabel('Northing (km)', fontsize = 14)
140 | ax.set_xlabel('Easting (km)', fontsize = 14)
141 | ax.tick_params(labelsize=13)
142 | scat=plt.scatter(est_classic[i][:,1]/1000.,
143 | est_classic[i][:,0]/1000.,s=40,
144 | c=(est_classic[i][:,3]/1000.),
145 | cmap='jet',vmin=minb,
146 | vmax=maxb,edgecolors='k')
147 | cbar=plt.colorbar(scat,ticks=levelsb,pad=0.01,shrink=1,
148 | format='%d')
149 |             cbar.set_label(r'$\^b$ (nT)',labelpad=-18,y=-0.03, rotation=0,
150 | fontsize=13)
151 | cbar.ax.tick_params(labelsize=13)
152 | ax.set_xlim(np.min(yi/1000.),np.max(yi/1000.))
153 | ax.set_ylim(np.min(xi/1000.),np.max(xi/1000.))
154 | ax.set_xticks([0,5,10,15,20,25])
155 | plt.text(22.5,5,'P0',color='w', size='large')
156 | plt.text(13.5,14,'P1',color='w', size='large')
157 | plt.text(3,16,'P2',color='w', size='large')
158 | plt.text(3,5,'P3',color='w', size='large')
159 | plt.text(25,25,'x10$^{3}$',color='k',size='medium')
160 |
161 | else:
162 |             minb = -30
163 |             maxb = 210
164 | levelsb = np.linspace(minb,maxb,7)
165 |
166 | plt.subplot(2,2,i+1)
167 | plt.title(vet_title[i], fontsize = 14, loc='center',
168 | y=-0.27)
169 | plt.contourf(yi.reshape(shape)/1000.,xi.reshape(shape)/1000.,
170 | data.reshape(shape), 30, cmap='gray')
171 | ax=plt.gca()
172 | ax.set_ylabel('Northing (km)', fontsize = 14)
173 | ax.set_xlabel('Easting (km)', fontsize = 14)
174 | ax.tick_params(labelsize=13)
175 | scat=plt.scatter(est_classic[i][:,1]/1000.,
176 | est_classic[i][:,0]/1000.,s=40,
177 | c=(est_classic[i][:,3]),cmap='jet',
178 | vmin=minb, vmax=maxb,edgecolors='k')
179 | cbar=plt.colorbar(scat,ticks=levelsb,pad=0.01,shrink=1,
180 | format='%d')
181 |             cbar.set_label(r'$\^b$ (nT)',labelpad=-18,y=-0.03, rotation=0,
182 | fontsize=13)
183 | cbar.ax.tick_params(labelsize=13)
184 | ax.set_xlim(np.min(yi/1000.),np.max(yi/1000.))
185 | ax.set_ylim(np.min(xi/1000.),np.max(xi/1000.))
186 | ax.set_xticks([0,5,10,15,20,25])
187 | plt.text(22.5,5,'P0',color='w', size='large')
188 | plt.text(13.5,14,'P1',color='w', size='large')
189 | plt.text(3,16,'P2',color='w', size='large')
190 | plt.text(3,5,'P3',color='w', size='large')
191 |
192 | plt.subplots_adjust(wspace=0.15,hspace=0.32)
193 |
194 | plt.savefig('figures/FIG7.png',bbox_inches='tight', dpi = 600)
195 | plt.close('all')
196 |
197 | return
--------------------------------------------------------------------------------
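
Both plotting functions above save PNGs into a `figures/` directory via plt.savefig, so that directory must exist before they are called. A minimal driver sketch (illustrative only: the grid sizes and stand-in data are assumptions; just the call signature of plot_input_data comes from the module):

import os
import numpy as np
import plot_functions  # assumes calcs/euler is on sys.path

os.makedirs("figures", exist_ok=True)   # savefig targets figures/FIG*.png

shape = (100, 100)
X, Y = np.meshgrid(np.linspace(0, 28e3, shape[0]),
                   np.linspace(0, 24e3, shape[1]), indexing="ij")
xi, yi = X.ravel(), Y.ravel()
zi = np.full_like(xi, -150.0)           # observation height (m)
data = np.random.randn(xi.size)         # stand-in anomaly (nT)

plot_functions.plot_input_data(data, xi, yi, zi, shape)  # writes figures/FIG2d.png
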
/Makefile:
--------------------------------------------------------------------------------
1 | #/***************************************************************************
2 | # SGTool
3 | #
4 | # Simple Potential Field Processing
5 | # -------------------
6 | # begin : 2024-11-17
7 | # git sha : $Format:%H$
8 | # copyright : (C) 2024 by Mark Jessell
9 | # email : mark.jessell@uwa.edu.au
10 | # ***************************************************************************/
11 | #
12 | #/***************************************************************************
13 | # * *
14 | # * This program is free software; you can redistribute it and/or modify *
15 | # * it under the terms of the GNU General Public License as published by *
16 | # * the Free Software Foundation; either version 2 of the License, or *
17 | # * (at your option) any later version. *
18 | # * *
19 | # ***************************************************************************/
20 |
21 | #################################################
22 | # Edit the following to match your sources lists
23 | #################################################
24 |
25 |
26 | #Add iso code for any locales you want to support here (space separated)
27 | # default is no locales
28 | # LOCALES = af
29 | LOCALES =
30 |
31 | # If locales are enabled, set the name of the lrelease binary on your system. If
32 | # you have trouble compiling the translations, you may have to specify the full path to
33 | # lrelease
34 | #LRELEASE = lrelease
35 | #LRELEASE = lrelease-qt4
36 |
37 |
38 | # translation
39 | SOURCES = \
40 | __init__.py \
41 | SGTool.py SGTool_dockwidget.py
42 |
43 | PLUGINNAME = SGTool
44 |
45 | PY_FILES = \
46 | __init__.py \
47 | SGTool.py SGTool_dockwidget.py
48 |
49 | UI_FILES = SGTool_dockwidget_base.ui
50 |
51 | EXTRAS = metadata.txt icon.png
52 |
53 | EXTRA_DIRS =
54 |
55 | COMPILED_RESOURCE_FILES = resources.py
56 |
57 | PEP8EXCLUDE=pydev,resources.py,conf.py,third_party,ui
58 |
59 | # QGISDIR points to the location where your plugin should be installed.
60 | # This varies by platform, relative to your HOME directory:
61 | # * Linux:
62 | # .local/share/QGIS/QGIS3/profiles/default/python/plugins/
63 | # * Mac OS X:
64 | # Library/Application Support/QGIS/QGIS3/profiles/default/python/plugins
65 | # * Windows:
66 | #    AppData\Roaming\QGIS\QGIS3\profiles\default\python\plugins
67 | 
68 | QGISDIR=AppData/Roaming/QGIS/QGIS3/profiles/default/python/plugins
69 |
70 | #################################################
71 | # Normally you would not need to edit below here
72 | #################################################
73 |
74 | HELP = help/build/html
75 |
76 | PLUGIN_UPLOAD = $(CURDIR)/plugin_upload.py
77 |
78 | RESOURCE_SRC=$(shell grep '^ *<file' resources.qrc | sed 's@</file>@@g;s/.*>//g' | tr '\n' ' ')
79 |
80 | .PHONY: default
81 | default:
82 | @echo While you can use make to build and deploy your plugin, pb_tool
83 | @echo is a much better solution.
84 | @echo A Python script, pb_tool provides platform independent management of
85 | @echo your plugins and runs anywhere.
86 | @echo You can install pb_tool using: pip install pb_tool
87 | @echo See https://g-sherman.github.io/plugin_build_tool/ for info.
88 |
89 | compile: $(COMPILED_RESOURCE_FILES)
90 |
91 | %.py : %.qrc $(RESOURCE_SRC)
92 | pyrcc5 -o $*.py $<
93 |
94 | %.qm : %.ts
95 | $(LRELEASE) $<
96 |
97 | test: compile transcompile
98 | @echo
99 | @echo "----------------------"
100 | @echo "Regression Test Suite"
101 | @echo "----------------------"
102 |
103 | @# Preceding dash means that make will continue in case of errors
104 | @-export PYTHONPATH=`pwd`:$(PYTHONPATH); \
105 | export QGIS_DEBUG=0; \
106 | export QGIS_LOG_FILE=/dev/null; \
107 | nosetests -v --with-id --with-coverage --cover-package=. \
108 | 3>&1 1>&2 2>&3 3>&- || true
109 | @echo "----------------------"
110 | 	@echo "If you get a 'no module named qgis.core' error, try sourcing"
111 | @echo "the helper script we have provided first then run make test."
112 | @echo "e.g. source run-env-linux.sh ; make test"
113 | @echo "----------------------"
114 |
115 | deploy: compile doc transcompile
116 | @echo
117 | @echo "------------------------------------------"
118 | 	@echo "Deploying plugin to your QGIS plugin directory."
119 | @echo "------------------------------------------"
120 | # The deploy target only works on unix like operating system where
121 | # the Python plugin directory is located at:
122 | # $HOME/$(QGISDIR)/python/plugins
123 | mkdir -p $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
124 | cp -vf $(PY_FILES) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
125 | cp -vf $(UI_FILES) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
126 | cp -vf $(COMPILED_RESOURCE_FILES) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
127 | cp -vf $(EXTRAS) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
128 | cp -vfr i18n $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
129 | cp -vfr $(HELP) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)/help
130 | # Copy extra directories if any
131 | 	$(foreach EXTRA_DIR,$(EXTRA_DIRS), cp -R $(EXTRA_DIR) $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)/;)
132 |
133 |
134 | # The dclean target removes compiled python files from plugin directory
135 | # also deletes any .git entry
136 | dclean:
137 | @echo
138 | @echo "-----------------------------------"
139 | @echo "Removing any compiled python files."
140 | @echo "-----------------------------------"
141 | find $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME) -iname "*.pyc" -delete
142 | find $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME) -iname ".git" -prune -exec rm -Rf {} \;
143 |
144 |
145 | derase:
146 | @echo
147 | @echo "-------------------------"
148 | @echo "Removing deployed plugin."
149 | @echo "-------------------------"
150 | rm -Rf $(HOME)/$(QGISDIR)/python/plugins/$(PLUGINNAME)
151 |
152 | zip: deploy dclean
153 | @echo
154 | @echo "---------------------------"
155 | @echo "Creating plugin zip bundle."
156 | @echo "---------------------------"
157 | # The zip target deploys the plugin and creates a zip file with the deployed
158 | # content. You can then upload the zip file on http://plugins.qgis.org
159 | rm -f $(PLUGINNAME).zip
160 | cd $(HOME)/$(QGISDIR)/python/plugins; zip -9r $(CURDIR)/$(PLUGINNAME).zip $(PLUGINNAME)
161 |
162 | package: compile
163 | # Create a zip package of the plugin named $(PLUGINNAME).zip.
164 | # This requires use of git (your plugin development directory must be a
165 | # git repository).
166 | # To use, pass a valid commit or tag as follows:
167 | # make package VERSION=Version_0.3.2
168 | @echo
169 | @echo "------------------------------------"
170 | @echo "Exporting plugin to zip package. "
171 | @echo "------------------------------------"
172 | rm -f $(PLUGINNAME).zip
173 | git archive --prefix=$(PLUGINNAME)/ -o $(PLUGINNAME).zip $(VERSION)
174 | echo "Created package: $(PLUGINNAME).zip"
175 |
176 | upload: zip
177 | @echo
178 | @echo "-------------------------------------"
179 | @echo "Uploading plugin to QGIS Plugin repo."
180 | @echo "-------------------------------------"
181 | $(PLUGIN_UPLOAD) $(PLUGINNAME).zip
182 |
183 | transup:
184 | @echo
185 | @echo "------------------------------------------------"
186 | @echo "Updating translation files with any new strings."
187 | @echo "------------------------------------------------"
188 | @chmod +x scripts/update-strings.sh
189 | @scripts/update-strings.sh $(LOCALES)
190 |
191 | transcompile:
192 | @echo
193 | @echo "----------------------------------------"
194 | 	@echo "Compiling translation files to .qm files."
195 | @echo "----------------------------------------"
196 | @chmod +x scripts/compile-strings.sh
197 | @scripts/compile-strings.sh $(LRELEASE) $(LOCALES)
198 |
199 | transclean:
200 | @echo
201 | @echo "------------------------------------"
202 | @echo "Removing compiled translation files."
203 | @echo "------------------------------------"
204 | rm -f i18n/*.qm
205 |
206 | clean:
207 | @echo
208 | @echo "------------------------------------"
209 | @echo "Removing uic and rcc generated files"
210 | @echo "------------------------------------"
211 | rm $(COMPILED_UI_FILES) $(COMPILED_RESOURCE_FILES)
212 |
213 | doc:
214 | @echo
215 | @echo "------------------------------------"
216 | @echo "Building documentation using sphinx."
217 | @echo "------------------------------------"
218 | cd help; make html
219 |
220 | pylint:
221 | @echo
222 | @echo "-----------------"
223 | @echo "Pylint violations"
224 | @echo "-----------------"
225 | @pylint --reports=n --rcfile=pylintrc . || true
226 | @echo
227 | @echo "----------------------"
228 | @echo "If you get a 'no module named qgis.core' error, try sourcing"
229 | @echo "the helper script we have provided first then run make pylint."
230 | @echo "e.g. source run-env-linux.sh ; make pylint"
231 | @echo "----------------------"
232 |
233 |
234 | # Run pep8 style checking
235 | #http://pypi.python.org/pypi/pep8
236 | pep8:
237 | @echo
238 | @echo "-----------"
239 | @echo "PEP8 issues"
240 | @echo "-----------"
241 | @pep8 --repeat --ignore=E203,E121,E122,E123,E124,E125,E126,E127,E128 --exclude $(PEP8EXCLUDE) . || true
242 | @echo "-----------"
243 | @echo "Ignored in PEP8 check:"
244 | @echo $(PEP8EXCLUDE)
245 |
--------------------------------------------------------------------------------
/pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 |
3 | # Specify a configuration file.
4 | #rcfile=
5 |
6 | # Python code to execute, usually for sys.path manipulation such as
7 | # pygtk.require().
8 | #init-hook=
9 |
10 | # Profiled execution.
11 | profile=no
12 |
13 | # Add files or directories to the blacklist. They should be base names, not
14 | # paths.
15 | ignore=CVS
16 |
17 | # Pickle collected data for later comparisons.
18 | persistent=yes
19 |
20 | # List of plugins (as comma separated values of python modules names) to load,
21 | # usually to register additional checkers.
22 | load-plugins=
23 |
24 |
25 | [MESSAGES CONTROL]
26 |
27 | # Enable the message, report, category or checker with the given id(s). You can
28 | # either give multiple identifier separated by comma (,) or put this option
29 | # multiple time. See also the "--disable" option for examples.
30 | #enable=
31 |
32 | # Disable the message, report, category or checker with the given id(s). You
33 | # can either give multiple identifiers separated by comma (,) or put this
34 | # option multiple times (only on the command line, not in the configuration
35 | # file where it should appear only once).You can also use "--disable=all" to
36 | # disable everything first and then reenable specific checks. For example, if
37 | # you want to run only the similarities checker, you can use "--disable=all
38 | # --enable=similarities". If you want to run only the classes checker, but have
39 | # no Warning level messages displayed, use"--disable=all --enable=classes
40 | # --disable=W"
41 | # see http://stackoverflow.com/questions/21487025/pylint-locally-defined-disables-still-give-warnings-how-to-suppress-them
42 | disable=locally-disabled,C0103
43 |
44 |
45 | [REPORTS]
46 |
47 | # Set the output format. Available formats are text, parseable, colorized, msvs
48 | # (visual studio) and html. You can also give a reporter class, eg
49 | # mypackage.mymodule.MyReporterClass.
50 | output-format=text
51 |
52 | # Put messages in a separate file for each module / package specified on the
53 | # command line instead of printing them on stdout. Reports (if any) will be
54 | # written in a file name "pylint_global.[txt|html]".
55 | files-output=no
56 |
57 | # Tells whether to display a full report or only the messages
58 | reports=yes
59 |
60 | # Python expression which should return a note less than 10 (10 is the highest
61 | # note). You have access to the variables error, warning, refactor, convention
62 | # and statement, which contain the number of messages in each category and the
63 | # total number of statements analyzed. This is used by the global evaluation report
64 | # (RP0004).
65 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
66 |
67 | # Add a comment according to your evaluation note. This is used by the global
68 | # evaluation report (RP0004).
69 | comment=no
70 |
71 | # Template used to display messages. This is a python new-style format string
72 | # used to format the message information. See doc for all details
73 | #msg-template=
74 |
75 |
76 | [BASIC]
77 |
78 | # Required attributes for module, separated by a comma
79 | required-attributes=
80 |
81 | # List of builtins function names that should not be used, separated by a comma
82 | bad-functions=map,filter,apply,input
83 |
84 | # Regular expression which should only match correct module names
85 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
86 |
87 | # Regular expression which should only match correct module level names
88 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
89 |
90 | # Regular expression which should only match correct class names
91 | class-rgx=[A-Z_][a-zA-Z0-9]+$
92 |
93 | # Regular expression which should only match correct function names
94 | function-rgx=[a-z_][a-z0-9_]{2,30}$
95 |
96 | # Regular expression which should only match correct method names
97 | method-rgx=[a-z_][a-z0-9_]{2,30}$
98 |
99 | # Regular expression which should only match correct instance attribute names
100 | attr-rgx=[a-z_][a-z0-9_]{2,30}$
101 |
102 | # Regular expression which should only match correct argument names
103 | argument-rgx=[a-z_][a-z0-9_]{2,30}$
104 |
105 | # Regular expression which should only match correct variable names
106 | variable-rgx=[a-z_][a-z0-9_]{2,30}$
107 |
108 | # Regular expression which should only match correct attribute names in class
109 | # bodies
110 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
111 |
112 | # Regular expression which should only match correct list comprehension /
113 | # generator expression variable names
114 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
115 |
116 | # Good variable names which should always be accepted, separated by a comma
117 | good-names=i,j,k,ex,Run,_
118 |
119 | # Bad variable names which should always be refused, separated by a comma
120 | bad-names=foo,bar,baz,toto,tutu,tata
121 |
122 | # Regular expression which should only match function or class names that do
123 | # not require a docstring.
124 | no-docstring-rgx=__.*__
125 |
126 | # Minimum line length for functions/classes that require docstrings, shorter
127 | # ones are exempt.
128 | docstring-min-length=-1
129 |
130 |
131 | [MISCELLANEOUS]
132 |
133 | # List of note tags to take in consideration, separated by a comma.
134 | notes=FIXME,XXX,TODO
135 |
136 |
137 | [TYPECHECK]
138 |
139 | # Tells whether missing members accessed in mixin class should be ignored. A
140 | # mixin class is detected if its name ends with "mixin" (case insensitive).
141 | ignore-mixin-members=yes
142 |
143 | # List of classes names for which member attributes should not be checked
144 | # (useful for classes with attributes dynamically set).
145 | ignored-classes=SQLObject
146 |
147 | # When zope mode is activated, add a predefined set of Zope acquired attributes
148 | # to generated-members.
149 | zope=no
150 |
151 | # List of members which are set dynamically and missed by pylint inference
152 | # system, and so shouldn't trigger E0201 when accessed. Python regular
153 | # expressions are accepted.
154 | generated-members=REQUEST,acl_users,aq_parent
155 |
156 |
157 | [VARIABLES]
158 |
159 | # Tells whether we should check for unused import in __init__ files.
160 | init-import=no
161 |
162 | # A regular expression matching the beginning of the name of dummy variables
163 | # (i.e. not used).
164 | dummy-variables-rgx=_$|dummy
165 |
166 | # List of additional names supposed to be defined in builtins. Remember that
167 | # you should avoid to define new builtins when possible.
168 | additional-builtins=
169 |
170 |
171 | [FORMAT]
172 |
173 | # Maximum number of characters on a single line.
174 | max-line-length=80
175 |
176 | # Regexp for a line that is allowed to be longer than the limit.
177 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$
178 |
179 | # Allow the body of an if to be on the same line as the test if there is no
180 | # else.
181 | single-line-if-stmt=no
182 |
183 | # List of optional constructs for which whitespace checking is disabled
184 | no-space-check=trailing-comma,dict-separator
185 |
186 | # Maximum number of lines in a module
187 | max-module-lines=1000
188 |
189 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
190 | # tab).
191 | indent-string=' '
192 |
193 |
194 | [SIMILARITIES]
195 |
196 | # Minimum lines number of a similarity.
197 | min-similarity-lines=4
198 |
199 | # Ignore comments when computing similarities.
200 | ignore-comments=yes
201 |
202 | # Ignore docstrings when computing similarities.
203 | ignore-docstrings=yes
204 |
205 | # Ignore imports when computing similarities.
206 | ignore-imports=no
207 |
208 |
209 | [IMPORTS]
210 |
211 | # Deprecated modules which should not be used, separated by a comma
212 | deprecated-modules=regsub,TERMIOS,Bastion,rexec
213 |
214 | # Create a graph of every (i.e. internal and external) dependencies in the
215 | # given file (report RP0402 must not be disabled)
216 | import-graph=
217 |
218 | # Create a graph of external dependencies in the given file (report RP0402 must
219 | # not be disabled)
220 | ext-import-graph=
221 |
222 | # Create a graph of internal dependencies in the given file (report RP0402 must
223 | # not be disabled)
224 | int-import-graph=
225 |
226 |
227 | [DESIGN]
228 |
229 | # Maximum number of arguments for function / method
230 | max-args=5
231 |
232 | # Argument names that match this expression will be ignored. Default to name
233 | # with leading underscore
234 | ignored-argument-names=_.*
235 |
236 | # Maximum number of locals for function / method body
237 | max-locals=15
238 |
239 | # Maximum number of return / yield for function / method body
240 | max-returns=6
241 |
242 | # Maximum number of branch for function / method body
243 | max-branches=12
244 |
245 | # Maximum number of statements in function / method body
246 | max-statements=50
247 |
248 | # Maximum number of parents for a class (see R0901).
249 | max-parents=7
250 |
251 | # Maximum number of attributes for a class (see R0902).
252 | max-attributes=7
253 |
254 | # Minimum number of public methods for a class (see R0903).
255 | min-public-methods=2
256 |
257 | # Maximum number of public methods for a class (see R0904).
258 | max-public-methods=20
259 |
260 |
261 | [CLASSES]
262 |
263 | # List of interface methods to ignore, separated by a comma. This is used for
264 | # instance to not check methods defines in Zope's Interface base class.
265 | ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
266 |
267 | # List of method names used to declare (i.e. assign) instance attributes.
268 | defining-attr-methods=__init__,__new__,setUp
269 |
270 | # List of valid names for the first argument in a class method.
271 | valid-classmethod-first-arg=cls
272 |
273 | # List of valid names for the first argument in a metaclass class method.
274 | valid-metaclass-classmethod-first-arg=mcs
275 |
276 |
277 | [EXCEPTIONS]
278 |
279 | # Exceptions that will emit a warning when being caught. Defaults to
280 | # "Exception"
281 | overgeneral-exceptions=Exception
282 |
--------------------------------------------------------------------------------
/calcs/euler/euler_python.py:
--------------------------------------------------------------------------------
1 | """
2 | Euler deconvolution
3 |
4 | A Python program to perform Euler deconvolution on gridded data.
5 |
6 | This code is released from the paper:
7 | Reliable Euler deconvolution estimates throughout the
8 | vertical derivatives of the total-field anomaly
9 |
10 | The program is distributed under the terms and conditions in the file README.txt
11 |
12 | authors: Felipe F. Melo and Valeria C.F. Barbosa, 2019
13 | email: felipe146@hotmail.com, valcris@on.br
14 | """
15 |
16 | import numpy as np
17 |
18 |
19 | def fft_pad_data(data, mode="edge"):
20 | """
21 |     Pad data and compute the coefficients in the Fourier domain.
22 |     The data is padded until it reaches the length of the next higher
23 |     power of two, and the pad values are the values of the edge.
24 |
25 | Parameters:
26 |
27 | * data: 2d-array
28 | the input data set - gridded
29 |
30 | Returns:
31 |
32 | * fpdat: 2d-array
33 | the coefficients of the data in Fourier domain
34 | * mask: 2d-array
35 | Location of padding points -
36 | {True: data points.
37 | False: padded points.}
38 | """
39 | n_points = int(2 ** (np.ceil(np.log(np.max(data.shape)) / np.log(2))))
40 | nx, ny = data.shape
41 |
42 | padx = (n_points - nx) // 2
43 | pady = (n_points - ny) // 2
44 | padded_data = np.pad(data, ((padx, padx), (pady, pady)), mode)
45 |
46 | mask = np.zeros_like(padded_data, dtype=bool)
47 | mask[padx : padx + nx, pady : pady + ny] = True
48 | fpdat = np.fft.fft2(padded_data)
49 | return fpdat, mask
50 |
51 |
52 | def ifft_unpad_data(data_p, mask, shape_dat):
53 | """
54 | Computes the inverse Fourier Transform of a padded array and mask
55 | the data to the original shape.
56 |
57 | Parameters:
58 |
59 | * data_p: 2d-array
60 | Array with the padded data.
61 | * mask: 2d-array
62 | Location of padding points -
63 |         {True: points to be kept.
64 |         False: points to be removed.}
65 |     * shape_dat: tuple = (ny, nx)
66 | The number of data points in each direction before padding.
67 |
68 | Returns:
69 |
70 | * data: 2d-array
71 | The unpadded data in space-domain.
72 | """
73 | ifft_data = np.real(np.fft.ifft2(data_p))
74 | data = ifft_data[mask]
75 | return np.reshape(data, shape_dat)
76 |
77 |
78 | def fft_wavenumbers(x, y, shape, padshape):
79 | """
80 | Computes the wavenumbers 2d-arrays
81 |
82 | Parameters:
83 |
84 | * x,y: 2d-array
85 | grid of the coordinates.
86 | * shape: tuple = (ny, nx)
87 | the number of data points in each direction before padding.
88 | * padshape: tuple = (ny, nx)
89 | the number of data points in each direction after padding.
90 |
91 | Returns:
92 |
93 | * u,v: 2d-array
94 | wavenumbers in each direction
95 | """
96 |
97 | nx, ny = shape
98 | dx = (x.max() - x.min()) / (nx - 1)
99 | u = 2 * np.pi * np.fft.fftfreq(padshape[0], dx)
100 | dy = (y.max() - y.min()) / (ny - 1)
101 | v = 2 * np.pi * np.fft.fftfreq(padshape[1], dy)
102 | return np.meshgrid(v, u)[::-1]
103 |
104 |
105 | def deriv(data, shape, area):
106 | """
107 | Compute the first derivative of a potential field
108 | in Fourier domain in the x, y and z directions.
109 |
110 | Parameters:
111 |
112 | * data: 2d-array
113 | the input data set - gridded
114 | * shape : tuple = (nx, ny)
115 | the shape of the grid
116 | * area : list
117 | the area of the input data - [south, north, west, east]
118 |
119 | Returns:
120 |
121 | * derivx, derivy, derivz : 2D-array
122 | derivatives in x-, y- and z-directions
123 | """
124 |
125 | anom_FFT, mask = fft_pad_data(data)
126 |
127 | nx, ny = shape
128 | xa, xb, ya, yb = area
129 | xs = np.linspace(xa, xb, nx)
130 | ys = np.linspace(ya, yb, ny)
131 | Y, X = np.meshgrid(ys, xs)
132 |
133 | u, v = fft_wavenumbers(X, Y, data.shape, anom_FFT.shape)
134 |
135 | derivx_ft = anom_FFT * (u * 1j)
136 | derivy_ft = anom_FFT * (v * 1j)
137 | derivz_ft = anom_FFT * np.sqrt(u**2 + v**2)
138 | derivx = ifft_unpad_data(derivx_ft, mask, data.shape)
139 | derivy = ifft_unpad_data(derivy_ft, mask, data.shape)
140 | derivz = ifft_unpad_data(derivz_ft, mask, data.shape)
141 |
142 | return derivx, derivy, derivz
143 |
144 |
145 | def moving_window(data, dx, dy, dz, xi, yi, zi, windowSize):
146 | """
147 | Moving data window that selects the data, derivatives and coordinates
148 |     to solve the system of Euler deconvolution.
149 |     For a 2d-array, the window runs from left to right and top to bottom.
150 |     The window moves one step per iteration.
151 |
152 | Parameters:
153 |
154 | * data : 2d-array
155 | the input data set - gridded
156 | * dx, dy, dz : 2d-array
157 | derivatives in x-, y- and z-directions
158 | * xi, yi, zi : 2d-array
159 | grid of coordinates in x-, y- and z-directions
160 | * windowSize : tuple (x,y)
161 | size of the window - equal in both directions
162 |
163 | Returns:
164 |
165 | * data : 2d-array
166 | windowed input data set
167 | * dx, dy, dz : 2d-array
168 | windowed derivatives in x-, y- and z-directions
169 | * xi, yi, zi : 2d-array
170 | windowed grid of coordinates in x-, y- and z-directions
171 | """
172 | for y in range(0, data.shape[0]):
173 | for x in range(0, data.shape[1]):
174 | # yield the current window
175 | yield (
176 | x,
177 | y,
178 | data[y : y + windowSize[1], x : x + windowSize[0]],
179 | dx[y : y + windowSize[1], x : x + windowSize[0]],
180 | dy[y : y + windowSize[1], x : x + windowSize[0]],
181 | dz[y : y + windowSize[1], x : x + windowSize[0]],
182 | xi[y : y + windowSize[1], x : x + windowSize[0]],
183 | yi[y : y + windowSize[1], x : x + windowSize[0]],
184 | zi[y : y + windowSize[1], x : x + windowSize[0]],
185 | )
186 |
187 |
188 | def euler_deconv(data, xi, yi, zi, shape, area, SI, windowSize, filt):
189 | """
190 | Euler deconvolution - solves the system of equations
191 | for each moving data window
192 |
193 | Parameters:
194 |
195 | * data : 1d-array
196 | the input data set
197 | * xi, yi, zi : 1d-array
198 | grid of coordinates in x-, y- and z-directions
199 | * shape : tuple = (nx, ny)
200 | the shape of the grid
201 | * area : list
202 | the area of the input data - [south, north, west, east]
203 | * SI : int
204 | structural index - 0, 1, 2 or 3
205 |     * windowSize : int
206 |         size of the square moving window (equal in both directions)
207 | * filt : float
208 |         percentage of the solutions that will be kept
209 |
210 | Returns:
211 |
212 |     * classic_est : 2d-array
213 |         x, y, z and base-level best estimates kept after selecting a
214 |         percentage of the solutions (the full array of solutions with
215 |         their standard deviations is assembled internally, but only
216 |         the filtered estimates are returned)
217 | """
218 | data = data.reshape(shape)
219 | dx, dy, dz = deriv(data, shape, area)
220 |
221 | xi = xi.reshape(shape)
222 | yi = yi.reshape(shape)
223 | zi = zi.reshape(shape)
224 |
225 | delta = windowSize // 2
226 | estx = np.zeros_like(data)
227 | esty = np.zeros_like(data)
228 | estz = np.zeros_like(data)
229 | estb = np.zeros_like(data)
230 | stdzmat = np.zeros_like(data)
231 |
232 | # run the moving data window and perform the computations
233 | for east, south, windata, windx, windy, windz, winx, winy, winz in moving_window(
234 | data, dx, dy, dz, xi, yi, zi, (windowSize, windowSize)
235 | ):
236 | # to keep the same size of the window throughout the grid
237 | if windata.shape[0] != windowSize or windata.shape[1] != windowSize:
238 | continue
239 | # system of equations on Euler deconvolution
240 | A = np.zeros((windowSize * windowSize, 4))
241 | A[:, 0] = windx.ravel()
242 | A[:, 1] = windy.ravel()
243 | A[:, 2] = windz.ravel()
244 | A[:, 3] = SI * np.ones_like(winx.ravel())
245 |
246 |         # assemble the right-hand side of the Euler system
247 | vety = (
248 | windx.ravel() * winx.ravel()
249 | + windy.ravel() * winy.ravel()
250 | + windz.ravel() * winz.ravel()
251 | + SI * windata.ravel()
252 | )
253 |         # compute the least-squares estimates: p = inv(A^T A) A^T y
254 | ATA = np.linalg.inv(np.dot(A.T, A))
255 | ATy = np.dot(A.T, vety)
256 | p = np.dot(ATA, ATy)
257 |
258 |         # sample standard deviation of the z derivative over the window
259 | stdz = np.sqrt(
260 | np.sum(abs(A[:, 2] - A[:, 2].mean()) ** 2) / (len(A[:, 2]) - 1.0)
261 | )
262 |
263 | estx[south + windowSize // 2][east + windowSize // 2] = p[0]
264 | esty[south + windowSize // 2][east + windowSize // 2] = p[1]
265 | estz[south + windowSize // 2][east + windowSize // 2] = p[2]
266 | estb[south + windowSize // 2][east + windowSize // 2] = p[3]
267 | stdzmat[south + windowSize // 2][east + windowSize // 2] = stdz
268 |
269 | # get rid of zeros in the border
270 | estx = estx[delta:-delta, delta:-delta]
271 | esty = esty[delta:-delta, delta:-delta]
272 | estz = estz[delta:-delta, delta:-delta]
273 | estb = estb[delta:-delta, delta:-delta]
274 | stdzmat = stdzmat[delta:-delta, delta:-delta]
275 | xi = xi[delta:-delta, delta:-delta]
276 | yi = yi[delta:-delta, delta:-delta]
277 | # group the solutions for the classic plot
278 | classic = np.stack(
279 | (estx.ravel(), esty.ravel(), estz.ravel(), estb.ravel(), stdzmat.ravel()),
280 | axis=-1,
281 | )
282 | # sort the solutions according to the std of df/dz and filter a percentage
283 | classic_est = np.array(sorted(classic, key=lambda l: l[-1], reverse=True))[
284 | : int(len(classic) * filt), :-1
285 | ]
286 | return classic_est
287 |
--------------------------------------------------------------------------------
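
A hedged end-to-end sketch of euler_deconv on a synthetic anomaly. Only the call signature and the [south, north, west, east] area convention come from the module above; the toy source, grid extent, structural index and filter fraction are illustrative assumptions:

import numpy as np
import euler_python  # assumes calcs/euler is on sys.path

shape = (64, 64)                      # (nx, ny)
area = [0.0, 6300.0, 0.0, 6300.0]     # [south, north, west, east] in metres
xs = np.linspace(area[0], area[1], shape[0])
ys = np.linspace(area[2], area[3], shape[1])
Y, X = np.meshgrid(ys, xs)
Z = np.full_like(X, -150.0)           # observation height

# toy anomaly from a source buried roughly 1 km below the grid centre
r = np.sqrt((X - 3150.0) ** 2 + (Y - 3150.0) ** 2 + (Z + 1000.0) ** 2)
data = 1.0e6 / r

estimates = euler_python.euler_deconv(
    data.ravel(), X.ravel(), Y.ravel(), Z.ravel(),
    shape, area, SI=3, windowSize=7, filt=0.1,
)
# columns: x, y, z and base level of the best 10% of the window solutions
print(estimates.shape, estimates[:, 2].mean())
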
/calcs/ConvolutionFilter.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class ConvolutionFilter:
5 | def __init__(self, grid):
6 | """
7 | Initialize the ConvolutionFilter with a grid.
8 |
9 | :param grid: 2D numpy array representing the input grid
10 | """
11 | self.grid = np.array(grid, dtype=float)
12 | self.padded_grid = None
13 |
14 | def apply_padding(self, pad_width):
15 | """
16 | Apply reflective padding to the grid.
17 |
18 | :param pad_width: Width of the padding
19 | :return: Padded grid
20 | """
21 | self.padded_grid = np.pad(self.grid, pad_width, mode="reflect")
22 | return self.padded_grid
23 |
24 | def nan_convolution(self, kernel, mode="reflect"):
25 | """
26 | Perform convolution while handling NaN values.
27 |
28 | :param kernel: Convolution kernel
29 | :param mode: Padding mode (default is 'reflect')
30 | :return: Convolved grid with NaN handling
31 | """
32 | from scipy.ndimage import convolve
33 |
34 | # Create a mask for non-NaN values
35 | valid_mask = ~np.isnan(self.grid)
36 |
37 | # Replace NaNs with 0 for convolution
38 | grid_filled = np.nan_to_num(self.grid, nan=0.0)
39 |
40 | # For scipy.ndimage.convolve, we don't need to manually flip the kernel
41 | # Instead, we just need to ensure the kernel is properly centered
42 |
43 | # Convolve the filled grid and the valid mask - using default origin=0
44 | # This will center the kernel properly
45 | convolved_values = convolve(grid_filled, kernel, mode=mode)
46 | valid_counts = convolve(valid_mask.astype(float), kernel, mode=mode)
47 |
48 | # Avoid division by zero
49 | valid_counts[valid_counts == 0] = np.nan
50 |
51 | # Calculate the mean of valid values
52 | return convolved_values / valid_counts
53 |
54 | def mean_filter(self, n):
55 | """
56 | Apply mean filter while handling NaN values.
57 |
58 | :param n: Size of the kernel (n x n)
59 | :return: Filtered grid
60 | """
61 | kernel = np.ones((n, n), dtype=float)
62 | return self.nan_convolution(kernel)
63 |
64 | def median_filter(self, n):
65 | """
66 | Apply median filter while handling NaN values.
67 |
68 | :param n: Size of the kernel (n x n)
69 | :return: Filtered grid
70 | """
71 |
72 | # Use a sliding window approach with NaN handling
73 | def nanmedian(values):
74 | return np.nan if np.isnan(values).all() else np.nanmedian(values)
75 |
76 | output = np.zeros_like(self.grid, dtype=float)
77 | pad_width = n // 2
78 | padded_grid = np.pad(
79 | self.grid, pad_width, mode="constant", constant_values=np.nan
80 | )
81 |
82 | for i in range(output.shape[0]):
83 | for j in range(output.shape[1]):
84 | window = padded_grid[i : i + n, j : j + n]
85 | output[i, j] = nanmedian(window)
86 |
87 | return output
88 |
89 | def gaussian_filter(self, sigma):
90 | """
91 | Apply Gaussian filter while handling NaN values.
92 |
93 | :param sigma: Standard deviation for Gaussian kernel
94 | :return: Filtered grid
95 | """
96 | # Create a Gaussian kernel
97 | size = int(2 * np.ceil(2 * sigma) + 1)
98 |
99 | # Ensure size is odd (required for proper centering)
100 | if size % 2 == 0:
101 | size += 1
102 |
103 | # Use meshgrid to create properly centered coordinates
104 | half_size = size // 2
105 | x = np.arange(-half_size, half_size + 1)
106 | y = np.arange(-half_size, half_size + 1)
107 | X, Y = np.meshgrid(x, y)
108 |
109 | # Create 2D Gaussian kernel directly
110 | gaussian_kernel = np.exp(-(X**2 + Y**2) / (2 * sigma**2))
111 | gaussian_kernel /= gaussian_kernel.sum()
112 |
113 |         # Sanity check: the kernel must be odd in both dimensions
114 | if gaussian_kernel.shape[0] % 2 == 0 or gaussian_kernel.shape[1] % 2 == 0:
115 | raise ValueError("Kernel dimensions must be odd for proper centering")
116 |
117 | return self.nan_convolution(gaussian_kernel)
118 |
119 | def directional_filter(self, direction, n=3):
120 | """
121 | Apply directional filter (NE, N, NW, W, SW, S, SE, E).
122 |
123 | :param direction: Direction of the filter ('NE', 'N', 'NW', 'W', 'SW', 'S', 'SE', 'E')
124 | :param n: Size of the kernel (n x n, default is 3x3)
125 | :return: Filtered grid
126 | """
127 | from scipy.ndimage import convolve
128 |
129 | direction_kernels = {
130 | "N": np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]),
131 | "S": np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]]),
132 | "E": np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]),
133 | "W": np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]]),
134 | "NE": np.array([[0, -1, -1], [1, 0, -1], [1, 1, 0]]),
135 | "NW": np.array([[-1, -1, 0], [-1, 0, 1], [0, 1, 1]]),
136 | "SE": np.array([[0, 1, 1], [-1, 0, 1], [-1, -1, 0]]),
137 | "SW": np.array([[1, 1, 0], [1, 0, -1], [0, -1, -1]]),
138 | }
139 |
140 | if direction not in direction_kernels:
141 | raise ValueError(
142 | f"Invalid direction '{direction}'. Must be one of {list(direction_kernels.keys())}."
143 | )
144 |
145 | kernel = direction_kernels[direction]
146 | kernel_size = kernel.shape[0]
147 |
148 | if kernel_size != n:
149 | kernel = np.pad(
150 | kernel,
151 | ((n - kernel_size) // 2, (n - kernel_size) // 2),
152 | mode="constant",
153 | )
154 |
155 | return convolve(self.grid, kernel, mode="reflect")
156 |
157 | def sun_shading_filter(self, elevation, sun_alt=45.0, sun_az=315.0, resolution=1.0):
158 | """
159 | Compute relief shading for a digital elevation model.
160 |
161 | Parameters:
162 | elevation (numpy.ndarray): 2D array of elevation data (DEM).
163 | sun_alt (float): Sun altitude in degrees (default is 45.0).
164 | sun_az (float): Sun azimuth in degrees clockwise from north (default is 315.0).
165 | resolution (float): Resolution of the grid (default is 1.0).
166 |
167 | Returns:
168 | numpy.ndarray: 2D array of relief shading values.
169 | """
170 |
171 | # sometimes doesn't like 90 so offset a bit
172 | if sun_alt == 90.0:
173 | sun_alt = 88.0
174 |
175 | # Convert sun altitude and azimuth to radians
176 | sun_alt_rad = np.radians(sun_alt)
177 | sun_az_rad = np.radians(sun_az)
178 |
179 | # Compute light source vector
180 | sun_vec = np.array(
181 | [
182 | np.cos(sun_alt_rad) * np.sin(sun_az_rad), # x component
183 | np.cos(sun_alt_rad) * np.cos(sun_az_rad), # y component
184 | np.sin(sun_alt_rad), # z component
185 | ]
186 | )
187 |
188 | # Calculate gradients using finite differences
189 | dzdx = (np.roll(elevation, -1, axis=1) - np.roll(elevation, 1, axis=1)) / (
190 | 2 * resolution
191 | )
192 | dzdy = (np.roll(elevation, -1, axis=0) - np.roll(elevation, 1, axis=0)) / (
193 | 2 * resolution
194 | )
195 |
196 | # Compute normal vectors
197 | norm_x = -dzdx
198 | norm_y = -dzdy
199 | norm_z = 1.0
200 |
201 | # Normalize the normal vectors
202 | norm_length = np.sqrt(norm_x**2 + norm_y**2 + norm_z**2)
203 | norm_x /= norm_length
204 | norm_y /= norm_length
205 | norm_z /= norm_length
206 |
207 | # Dot product with sun vector
208 | shading = norm_x * sun_vec[0] + norm_y * sun_vec[1] + norm_z * sun_vec[2]
209 |
210 | # Clamp shading values to range [0, 1]
211 | # shading = np.clip(shading, 0, 1)
212 |
213 | return shading
214 |
215 | def sun_shading_filter_grass(
216 | self,
217 | elevation,
218 | resolution_ns,
219 | resolution_ew,
220 | altitude=30.0,
221 | azimuth=270.0,
222 | scale=1.0,
223 | zscale=1.0,
224 | ):
225 | """
226 | Vectorized implementation of the GRASS r.relief algorithm for shaded relief.
227 | Much faster than the original version with identical results.
228 |
229 | Parameters:
230 | -----------
231 | elevation : np.ndarray
232 | Input elevation data (2D array)
233 | resolution_ns : float
234 | North-south resolution (vertical) in same units as elevation
235 | resolution_ew : float
236 | East-west resolution (horizontal) in same units as elevation
237 | altitude : float
238 | Sun altitude in degrees above horizon (default: 30)
239 | azimuth : float
240 | Sun azimuth in degrees east of north (default: 270)
241 | scale : float
242 | Scale factor for converting meters to elevation units (default: 1.0)
243 | zscale : float
244 | Factor for exaggerating relief (default: 1.0)
245 |
246 | Returns:
247 | --------
248 | np.ndarray
249 | Shaded relief array with values 0-255
250 | """
251 | from scipy import ndimage
252 |
253 | # Convert angles to radians
254 | degrees_to_radians = np.pi / 180.0
255 | altitude_rad = altitude * degrees_to_radians
256 | # Correct azimuth to East (GRASS convention)
257 | azimuth_rad = (azimuth + 90.0) * degrees_to_radians
258 |
259 | # Calculate distances (following GRASS implementation)
260 | H = resolution_ew * 4 * scale / zscale # horizontal run for gradient
261 | V = resolution_ns * 4 * scale / zscale # vertical run for gradient
262 |
263 | # Pad the elevation array to handle edges
264 | elev_padded = np.pad(elevation, pad_width=1, mode="edge")
265 |
266 | # Create convolution kernels for gradient calculation (matching GRASS implementation)
267 | dx_kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) / H
268 | dy_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) / V
269 |
270 | # Calculate gradients using convolution
271 | dx = ndimage.convolve(elev_padded, dx_kernel)[1:-1, 1:-1]
272 | dy = ndimage.convolve(elev_padded, dy_kernel)[1:-1, 1:-1]
273 |
274 | # Calculate slope
275 | slope = np.pi / 2.0 - np.arctan(np.sqrt(dx**2 + dy**2))
276 |
277 | # Calculate aspect (GRASS implementation)
278 | aspect = np.arctan2(dy, dx)
279 |
280 | # Handle special cases for aspect
281 | mask_zero = (dx == 0) & (dy == 0)
282 | aspect[mask_zero] = degrees_to_radians
283 |
284 | # Calculate shaded relief
285 | cang = np.sin(altitude_rad) * np.sin(slope) + np.cos(altitude_rad) * np.cos(
286 | slope
287 | ) * np.cos(azimuth_rad - aspect)
288 |
289 | # Scale to 0-255 range
290 | output = 255.0 * cang
291 |
292 | # Handle NaN values in the input
293 | if np.any(np.isnan(elevation)):
294 | # Create a mask for 3x3 windows containing NaN values
295 | nan_mask = np.isnan(elev_padded)
296 | # Use maximum filter to expand the mask by one pixel in all directions
297 | expanded_nan_mask = ndimage.maximum_filter(nan_mask, size=3)
298 | # Apply to our output, removing the padding
299 | output[expanded_nan_mask[1:-1, 1:-1]] = np.nan
300 |
301 | return output
302 |
--------------------------------------------------------------------------------
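
A minimal usage sketch for ConvolutionFilter (illustrative only: the grid, the NaN strip and all parameter values are assumptions; the method names and signatures come from the class above):

import numpy as np
from ConvolutionFilter import ConvolutionFilter  # assumes calcs/ is on sys.path

grid = np.random.randn(100, 100)
grid[10:12, 40:60] = np.nan              # simulate a strip of missing data

cf = ConvolutionFilter(grid)
smoothed = cf.mean_filter(5)             # NaN-aware 5x5 mean
blurred = cf.gaussian_filter(2.0)        # NaN-aware Gaussian, sigma = 2
edges_ne = cf.directional_filter("NE")   # note: NaNs propagate here

# shaded relief of the smoothed surface, sun at 45 deg altitude from the NW
shade = cf.sun_shading_filter(smoothed, sun_alt=45.0, sun_az=315.0,
                              resolution=25.0)
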
/calcs/euler/estimates_statistics.py:
--------------------------------------------------------------------------------
1 | """
2 | Enhanced Statistics Module
3 |
4 | A Python program to compute statistics of Euler deconvolution estimates
5 | both globally and within grid windows.
6 |
7 | This enhanced version includes:
8 | 1. Original classic function for global statistics
9 | 2. New window_stats function for grid-based local statistics
10 |
11 | authors: Felipe F. Melo and Valeria C.F. Barbosa, 2019 (original)
12 | Enhanced with window-based analysis
13 | """
14 |
15 | import numpy as np
16 | import os
17 |
18 |
19 | def classic(est_classic, area_plt, SI_vet, name, path):
20 | """
21 | Original function - computes global statistics for the entire grid
22 | """
23 | results = []
24 |
25 | for i in range(len(est_classic)):
26 |
27 | estimates = np.stack(
28 | (
29 | est_classic[i][:, 0],
30 | est_classic[i][:, 1],
31 | est_classic[i][:, 2],
32 | est_classic[i][:, 3],
33 | ),
34 | axis=-1,
35 | )
36 |
37 | masked = np.ma.array(
38 | estimates,
39 | mask=np.repeat(estimates[:, 0] <= area_plt[0], estimates.shape[1]),
40 | )
41 | masked = np.ma.array(
42 | masked, mask=np.repeat(masked[:, 0] >= area_plt[1], estimates.shape[1])
43 | )
44 | masked = np.ma.array(
45 | masked, mask=np.repeat(masked[:, 1] <= area_plt[2], estimates.shape[1])
46 | )
47 | masked = np.ma.array(
48 | masked, mask=np.repeat(masked[:, 1] >= area_plt[3], estimates.shape[1])
49 | )
50 |
51 | meanx = np.mean(masked[:, 0] / 1000.0)
52 | meany = np.mean(masked[:, 1] / 1000.0)
53 | meanz = np.mean(masked[:, 2] / 1000.0)
54 | results.append([SI_vet[i], meanx, meany, meanz])
55 |
56 |     output = np.array(results)
57 | np.savetxt(
58 | path + "/" + str(name) + ".txt",
59 | output,
60 | fmt="%.3f",
61 | header="SI, mean x, mean y, mean z",
62 | comments="",
63 | )
64 | return
65 |
66 |
67 | def window_stats(
68 | est_classic,
69 | area_plt,
70 | SI_vet,
71 | name,
72 | path,
73 | data_shape,
74 | window_size,
75 | detailed_stats=True,
76 | ):
77 | """
78 | Compute statistics for each window across the grid
79 |
80 | Parameters:
81 | * est_classic: list of arrays with estimates for each SI
82 | * area_plt: [south, north, west, east] - area bounds
83 | * SI_vet: list of structural indices
84 | * name: base name for output files
85 | * path: output directory path
86 | * data_shape: (rows, cols) - shape of original grid
87 | * window_size: size of analysis windows
88 | * detailed_stats: if True, saves detailed statistics; if False, saves only means
89 |
90 | Returns:
91 | * Dictionary with statistics for each SI
92 | """
93 |
94 | # Calculate grid parameters
95 | rows, cols = data_shape
96 | south, north, west, east = area_plt
97 |
98 | # Calculate coordinate ranges
99 | x_range = east - west
100 | y_range = north - south
101 |
102 | # Calculate number of windows that fit in each direction
103 | n_windows_x = cols // window_size
104 | n_windows_y = rows // window_size
105 |
106 | # Calculate actual window size in coordinate units
107 | window_size_x = x_range / n_windows_x
108 | window_size_y = y_range / n_windows_y
109 |
110 | print(f"Grid analysis: {n_windows_y} x {n_windows_x} windows")
111 | print(f"Window size: {window_size_x:.2f} x {window_size_y:.2f} coordinate units")
112 |
113 | # Process each SI
114 | all_results = {}
115 |
116 | for si_idx, SI in enumerate(SI_vet):
117 | estimates = est_classic[si_idx]
118 |
119 | # Create arrays to store window statistics
120 | window_results = []
121 |
122 | # Process each window
123 | for row in range(n_windows_y):
124 | for col in range(n_windows_x):
125 | # Calculate window bounds
126 | win_west = west + col * window_size_x
127 | win_east = west + (col + 1) * window_size_x
128 | win_south = south + row * window_size_y
129 | win_north = south + (row + 1) * window_size_y
130 |
131 | # Window center coordinates
132 | win_center_x = (win_west + win_east) / 2.0
133 | win_center_y = (win_south + win_north) / 2.0
134 |
135 | # Filter estimates within this window
136 | in_window = (
137 | (estimates[:, 1] >= win_west)
138 | & (estimates[:, 1] < win_east)
139 | & (estimates[:, 0] >= win_south)
140 | & (estimates[:, 0] < win_north)
141 | )
142 |
143 | window_estimates = estimates[in_window]
144 | n_points = len(window_estimates)
145 |
146 | if n_points > 0:
147 | # Only save windows with estimates
148 | # Calculate statistics
149 | mean_x = np.mean(window_estimates[:, 0])
150 | mean_y = np.mean(window_estimates[:, 1])
151 | mean_z = np.mean(window_estimates[:, 2])
152 | mean_base = np.mean(window_estimates[:, 3])
153 |
154 | if detailed_stats:
155 | # Always calculate all stats, use 0 for single points
156 | if n_points > 1:
157 | std_x = np.std(window_estimates[:, 0])
158 | std_y = np.std(window_estimates[:, 1])
159 | std_z = np.std(window_estimates[:, 2])
160 | std_base = np.std(window_estimates[:, 3])
161 | min_z = np.min(window_estimates[:, 2])
162 | max_z = np.max(window_estimates[:, 2])
163 | else:
164 | # Single point - no standard deviation possible
165 | std_x = 0.0
166 | std_y = 0.0
167 | std_z = 0.0
168 | std_base = 0.0
169 | min_z = mean_z
170 | max_z = mean_z
171 |
172 | window_results.append(
173 | [
174 | row,
175 | col,
176 | win_center_x,
177 | win_center_y,
178 | n_points,
179 | mean_x,
180 | mean_y,
181 | mean_z / 1000.0,
182 | mean_base,
183 | std_x,
184 | std_y,
185 | std_z / 1000.0,
186 | std_base,
187 | min_z / 1000.0,
188 | max_z / 1000.0,
189 | ]
190 | )
191 | else:
192 | window_results.append(
193 | [
194 | row,
195 | col,
196 | win_center_x,
197 | win_center_y,
198 | n_points,
199 | mean_x,
200 | mean_y,
201 | mean_z / 1000.0,
202 | mean_base,
203 | ]
204 | )
205 | # Skip empty windows - don't append anything
206 |
207 | # Convert to numpy array
208 | window_results = np.array(window_results)
209 | all_results[SI] = window_results
210 |
211 | # Save results for this SI
212 | if detailed_stats:
213 | header = (
214 | "window_row, window_col, center_x, center_y, n_estimates, "
215 |             "mean_x, mean_y, mean_depth_km, mean_base_level, "
216 | "std_x, std_y, std_depth_km, std_base_level, "
217 | "min_depth_km, max_depth_km"
218 | )
219 | else:
220 | header = (
221 | "window_row, window_col, center_x, center_y, n_estimates, "
222 |             "mean_x, mean_y, mean_depth_km, mean_base_level"
223 | )
224 | SI_name = SI
225 | if SI_name == 0.001:
226 | SI_name = 0
227 |
228 | output_filename = f"{path}/{name}_window_stats_SI_{SI_name}.txt"
229 | np.savetxt(
230 | output_filename,
231 | window_results,
232 | fmt="%.6f",
233 | header=header,
234 | comments="",
235 | delimiter=",",
236 | )
237 |
238 | print(f"Saved window statistics for SI={SI} to {output_filename}")
239 |
240 | # Create summary file with statistics across all windows
241 | summary_results = []
242 | for si_idx, SI in enumerate(SI_vet):
243 | window_data = all_results[SI]
244 | valid_windows = window_data[window_data[:, 4] > 0] # Windows with estimates
245 |
246 | if len(valid_windows) > 0:
247 | total_estimates = np.sum(valid_windows[:, 4])
248 | mean_depth = np.nanmean(valid_windows[:, 7]) # mean depth across windows
249 | std_depth = np.nanstd(valid_windows[:, 7]) # std of window means
250 | n_windows_with_data = len(valid_windows)
251 |
252 | summary_results.append(
253 | [
254 | SI,
255 | n_windows_with_data,
256 | n_windows_x * n_windows_y,
257 | total_estimates,
258 | mean_depth,
259 | std_depth,
260 | ]
261 | )
262 | else:
263 | summary_results.append(
264 | [SI, 0, n_windows_x * n_windows_y, 0, np.nan, np.nan]
265 | )
266 |
267 | summary_output = np.array(summary_results)
268 | summary_filename = f"{path}/{name}_window_summary.txt"
269 | np.savetxt(
270 | summary_filename,
271 | summary_output,
272 | fmt="%.6f",
273 | header="SI, windows_with_data, total_windows, total_estimates, mean_depth_km, std_depth_km",
274 | comments="",
275 | delimiter=",",
276 | )
277 |
278 | print(f"Saved window summary to {summary_filename}")
279 | return all_results
280 |
281 |
282 | def enhanced_analysis(
283 | est_classic, area_plt, SI_vet, name, path, data_shape, window_size
284 | ):
285 | """
286 | Wrapper function that performs both global and window-based analysis
287 |
288 | Parameters:
289 | * est_classic: list of arrays with estimates for each SI
290 | * area_plt: [south, north, west, east] - area bounds
291 | * SI_vet: list of structural indices
292 | * name: base name for output files
293 | * path: output directory path
294 | * data_shape: (rows, cols) - shape of original grid
295 | * window_size: size of analysis windows
296 | """
297 |
298 | print("=== Enhanced Euler Deconvolution Analysis ===")
299 |
300 | # Run original global analysis
301 | print("\n1. Computing global statistics...")
302 | classic(est_classic, area_plt, SI_vet, name, path)
303 | print("Global statistics saved.")
304 |
305 | # Run new window-based analysis
306 | print("\n2. Computing window-based statistics...")
307 | window_results = window_stats(
308 | est_classic,
309 | area_plt,
310 | SI_vet,
311 | name,
312 | path,
313 | data_shape,
314 | window_size,
315 | detailed_stats=True,
316 | )
317 |
318 | print("\n=== Analysis Complete ===")
319 | print(f"Results saved to: {path}")
320 |     print("Files created:")
321 | print(f" - {name}.txt (global statistics)")
322 | print(f" - {name}_window_stats_SI_*.txt (window statistics for each SI)")
323 | print(f" - {name}_window_summary.txt (summary across all windows)")
324 |
325 | return window_results
326 |
--------------------------------------------------------------------------------
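
A minimal usage sketch for enhanced_analysis() above. The input shapes are
assumptions inferred from the function body: est_classic is a list (one entry
per structural index) of (n, 4) arrays whose columns are [x, y, depth, base
level] with depth in metres; area_plt is [south, north, west, east]; the name
and path are placeholders.

    import numpy as np

    rng = np.random.default_rng(0)
    SI_vet = [0.001, 1, 2, 3]  # structural indices; 0.001 stands in for 0
    # Synthetic Euler estimates: columns assumed to be [x, y, depth_m, base_level]
    est_classic = [rng.uniform(0.0, 1000.0, (500, 4)) for _ in SI_vet]
    area_plt = [0.0, 1000.0, 0.0, 1000.0]  # [south, north, west, east]

    results = enhanced_analysis(
        est_classic,
        area_plt,
        SI_vet,
        name="synthetic",
        path="/tmp",
        data_shape=(200, 200),  # rows, cols of the original grid
        window_size=50,         # assumed to be in grid cells
    )

This writes the global statistics file plus one window-statistics file per SI
and a cross-window summary into path, and returns the per-SI window arrays.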
/calcs/PCAICA.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from osgeo import gdal
3 | import os
4 |
5 |
6 | class PCAICA:
7 | def __init__(self, grid):
8 | """
9 |         Initialize the PCAICA class:
10 |
11 | :param grid: 2D numpy array representing the input grid
12 | """
13 | self.grid = np.array(grid, dtype=float)
14 |
15 | def pca_with_nans(self, input_raster_path, output_raster_path, n_components=None):
16 | """
17 | Perform PCA on a raster with NaN values and save as a multiband GeoTIFF
18 |
19 | Parameters:
20 | input_raster_path (str): Path to the input GeoTIFF
21 | output_raster_path (str): Path to save the output multiband GeoTIFF with PCA components
22 | n_components (int): Number of principal components to calculate (defaults to all possible)
23 |
24 | Returns:
25 | tuple: (components, explained_variance_ratio)
26 | """
27 | from sklearn.decomposition import PCA
28 | from sklearn.preprocessing import StandardScaler
29 |
30 |         # Optional: load the raster via PyQGIS for layer validation (disabled):
31 |         # raster_layer = QgsRasterLayer(input_raster_path, "Input Raster")
32 |         # if not raster_layer.isValid():
33 |         #     raise ValueError(f"Failed to load raster layer: {input_raster_path}")
34 |
35 | # Also open with GDAL for data access
36 | ds = gdal.Open(input_raster_path)
37 | if ds is None:
38 | raise ValueError(f"Failed to open raster with GDAL: {input_raster_path}")
39 |
40 | # Get raster dimensions
41 | width = ds.RasterXSize
42 | height = ds.RasterYSize
43 | bands = ds.RasterCount
44 | if bands == 1:
45 | print("Only one band found, PCA not possible")
46 | return None, None
47 |
48 | # If n_components is not specified, use all possible components
49 | if n_components is None or n_components == 0:
50 | n_components = bands
51 | elif n_components > bands:
52 |             print(
53 |                 f"Warning: Requested {n_components} components but only {bands} bands available. Using {bands} components."
54 |             )
55 |             n_components = bands
56 |
57 | # Create a 3D numpy array to hold all band data
58 | data_array = np.zeros((bands, height, width))
59 |
60 |         # Read all bands into the array (nodata is assumed to be stored as NaN)
61 | for b in range(bands):
62 | band = ds.GetRasterBand(b + 1)
63 | data_array[b] = band.ReadAsArray()
64 |
65 | # Reshape for PCA (bands as features, pixels as samples)
66 | # Transpose from (bands, height, width) to (height*width, bands)
67 | X = data_array.reshape(bands, -1).T
68 |
69 | # Find valid pixels (no NaN in any band)
70 | valid_mask = ~np.isnan(X).any(axis=1)
71 | X_valid = X[valid_mask]
72 |
73 | # If there are no valid pixels, raise an error
74 | if X_valid.shape[0] == 0:
75 | raise ValueError(
76 | "No valid pixels (all pixels contain NaN values in at least one band)"
77 | )
78 |
79 | # Standardize the data (important for PCA)
80 | scaler = StandardScaler()
81 | X_scaled = scaler.fit_transform(X_valid)
82 |
83 | # Perform PCA
84 | pca = PCA(n_components=n_components)
85 | pca_result = pca.fit_transform(X_scaled)
86 |
87 | # Create output array with same shape as input but with n_components bands
88 | output_shape = (n_components, height, width)
89 | pca_full = np.full(output_shape, np.nan)
90 |
91 | # Map the valid pixels back to their original positions
92 | valid_indices = np.where(valid_mask)[0]
93 | for i in range(n_components):
94 | flat_band = np.full(height * width, np.nan)
95 | flat_band[valid_indices] = pca_result[:, i]
96 | pca_full[i] = flat_band.reshape(height, width)
97 |
98 | # Create output directory if it doesn't exist
99 | output_dir = os.path.dirname(output_raster_path)
100 | if output_dir and not os.path.exists(output_dir):
101 | os.makedirs(output_dir)
102 |
103 | # Create the output multiband GeoTIFF
104 | driver = gdal.GetDriverByName("GTiff")
105 | out_ds = driver.Create(
106 | output_raster_path,
107 | width,
108 | height,
109 | n_components, # Number of bands = number of components
110 | gdal.GDT_Float32,
111 | )
112 |
113 | # Copy projection and geotransform from input
114 | out_ds.SetProjection(ds.GetProjection())
115 | out_ds.SetGeoTransform(ds.GetGeoTransform())
116 |
117 | # Write each PCA component as a separate band
118 | for i in range(n_components):
119 | out_band = out_ds.GetRasterBand(i + 1)
120 | out_band.WriteArray(pca_full[i])
121 | out_band.SetNoDataValue(np.nan)
122 |
123 | # Set band description
124 | variance_pct = pca.explained_variance_ratio_[i] * 100
125 | out_band.SetDescription(f"PC{i+1} ({variance_pct:.2f}%)")
126 |
127 |         # Write component metadata as dataset metadata items
128 | metadata = {
129 | "EXPLAINED_VARIANCE": ",".join(
130 | [f"{v:.6f}" for v in pca.explained_variance_]
131 | ),
132 | "EXPLAINED_VARIANCE_RATIO": ",".join(
133 | [f"{v:.6f}" for v in pca.explained_variance_ratio_]
134 | ),
135 | "LOADINGS": ";".join(
136 | [
137 | ",".join([f"{v:.6f}" for v in component])
138 | for component in pca.components_
139 | ]
140 | ),
141 | }
142 |
143 | for key, value in metadata.items():
144 | out_ds.SetMetadataItem(key, value)
145 |
146 | # Close datasets
147 | out_ds = None
148 | ds = None
149 |
150 | # Print information about the PCA
151 | print("PCA Summary:")
152 | print(f"Input: {input_raster_path} ({bands} bands)")
153 | print(f"Output: {output_raster_path} ({n_components} components)")
154 | print("\nExplained variance ratio by component:")
155 | for i, var in enumerate(pca.explained_variance_ratio_):
156 | print(f"PC{i+1}: {var:.4f} ({var*100:.2f}%)")
157 | print(
158 | f"\nCumulative explained variance: {np.sum(pca.explained_variance_ratio_)*100:.2f}%"
159 | )
160 |
161 | return pca.components_, pca.explained_variance_ratio_
162 |
163 | def ica_with_nans(
164 | self, input_raster_path, output_raster_path, n_components=None, random_state=42
165 | ):
166 | """
167 | Perform ICA on a raster with NaN values and save as a multiband GeoTIFF
168 |
169 | Parameters:
170 | input_raster_path (str): Path to the input GeoTIFF
171 | output_raster_path (str): Path to save the output multiband GeoTIFF with ICA components
172 | n_components (int): Number of independent components to calculate (defaults to all possible)
173 | random_state (int): Random seed for reproducibility
174 |
175 | Returns:
176 | tuple: (mixing_matrix, unmixing_matrix)
177 | """
178 | from sklearn.decomposition import FastICA
179 | from sklearn.preprocessing import StandardScaler
180 |
181 |         # Optional: load the raster via PyQGIS for layer validation (disabled):
182 |         # raster_layer = QgsRasterLayer(input_raster_path, "Input Raster")
183 |         # if not raster_layer.isValid():
184 |         #     raise ValueError(f"Failed to load raster layer: {input_raster_path}")
185 |
186 | # Also open with GDAL for data access
187 | ds = gdal.Open(input_raster_path)
188 | if ds is None:
189 | raise ValueError(f"Failed to open raster with GDAL: {input_raster_path}")
190 |
191 | # Get raster dimensions
192 | width = ds.RasterXSize
193 | height = ds.RasterYSize
194 | bands = ds.RasterCount
195 | if bands == 1:
196 |             print("Only one band found, ICA not possible")
197 | return None, None
198 |
199 | # If n_components is not specified, use all possible components
200 | if n_components is None or n_components == 0:
201 | n_components = bands
202 | elif n_components > bands:
203 |             print(
204 |                 f"Warning: Requested {n_components} components but only {bands} bands available. Using {bands} components."
205 |             )
206 |             n_components = bands
207 |
208 | # Create a 3D numpy array to hold all band data
209 | data_array = np.zeros((bands, height, width))
210 |
211 |         # Read all bands into the array (nodata is assumed to be stored as NaN)
212 | for b in range(bands):
213 | band = ds.GetRasterBand(b + 1)
214 | data_array[b] = band.ReadAsArray()
215 |
216 | # Reshape for ICA (bands as features, pixels as samples)
217 | # Transpose from (bands, height, width) to (height*width, bands)
218 | X = data_array.reshape(bands, -1).T
219 |
220 | # Find valid pixels (no NaN in any band)
221 | valid_mask = ~np.isnan(X).any(axis=1)
222 | X_valid = X[valid_mask]
223 |
224 | # If there are no valid pixels, raise an error
225 | if X_valid.shape[0] == 0:
226 | raise ValueError(
227 | "No valid pixels (all pixels contain NaN values in at least one band)"
228 | )
229 |
230 | # Standardize the data (important for ICA)
231 | scaler = StandardScaler()
232 | X_scaled = scaler.fit_transform(X_valid)
233 |
234 | # Perform ICA
235 | ica = FastICA(
236 | n_components=n_components,
237 | random_state=random_state,
238 | max_iter=1000,
239 | tol=0.0001,
240 | )
241 | ica_result = ica.fit_transform(X_scaled)
242 |
243 | # Create output array with same shape as input but with n_components bands
244 | output_shape = (n_components, height, width)
245 | ica_full = np.full(output_shape, np.nan)
246 |
247 | # Map the valid pixels back to their original positions
248 | valid_indices = np.where(valid_mask)[0]
249 | for i in range(n_components):
250 | flat_band = np.full(height * width, np.nan)
251 | flat_band[valid_indices] = ica_result[:, i]
252 | ica_full[i] = flat_band.reshape(height, width)
253 |
254 | # Create output directory if it doesn't exist
255 | output_dir = os.path.dirname(output_raster_path)
256 | if output_dir and not os.path.exists(output_dir):
257 | os.makedirs(output_dir)
258 |
259 | # Create the output multiband GeoTIFF
260 | driver = gdal.GetDriverByName("GTiff")
261 | out_ds = driver.Create(
262 | output_raster_path,
263 | width,
264 | height,
265 | n_components, # Number of bands = number of components
266 | gdal.GDT_Float32,
267 | )
268 |
269 | # Copy projection and geotransform from input
270 | out_ds.SetProjection(ds.GetProjection())
271 | out_ds.SetGeoTransform(ds.GetGeoTransform())
272 |
273 | # Write each ICA component as a separate band
274 | for i in range(n_components):
275 | out_band = out_ds.GetRasterBand(i + 1)
276 | out_band.WriteArray(ica_full[i])
277 | out_band.SetNoDataValue(np.nan)
278 |
279 | # Set band description
280 | out_band.SetDescription(f"IC{i+1}")
281 |
282 |         # Write component metadata as dataset metadata items
283 | metadata = {
284 | "MIXING_MATRIX": ";".join(
285 | [",".join([f"{v:.6f}" for v in row]) for row in ica.mixing_]
286 | ),
287 | "UNMIXING_MATRIX": ";".join(
288 | [",".join([f"{v:.6f}" for v in row]) for row in ica.components_]
289 | ),
290 | }
291 |
292 | for key, value in metadata.items():
293 | out_ds.SetMetadataItem(key, value)
294 |
295 | # Close datasets
296 | out_ds = None
297 | ds = None
298 |
299 | # Print information about the ICA
300 | print("ICA Summary:")
301 | print(f"Input: {input_raster_path} ({bands} bands)")
302 | print(f"Output: {output_raster_path} ({n_components} components)")
303 | print("\nIndependent components have been extracted.")
304 | print("Note: Unlike PCA, ICA components are not ordered by importance.")
305 |
306 | return ica.mixing_, ica.components_
307 |
--------------------------------------------------------------------------------
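
A minimal usage sketch for PCAICA above. The raster paths are placeholders,
and note that the grid passed to the constructor is stored but not used by
pca_with_nans() or ica_with_nans(), which read their input from disk.

    import numpy as np
    from osgeo import gdal
    from calcs.PCAICA import PCAICA

    proc = PCAICA(grid=np.zeros((1, 1)))  # constructor grid is unused by these methods

    # Run PCA on a multiband GeoTIFF; components land in a new multiband file
    components, evr = proc.pca_with_nans("stack.tif", "stack_pca.tif", n_components=3)

    # The loadings written as dataset metadata can be parsed back like this:
    ds = gdal.Open("stack_pca.tif")
    loadings = np.array(
        [[float(v) for v in row.split(",")]
         for row in ds.GetMetadataItem("LOADINGS").split(";")]
    )
    ds = None  # close the dataset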
/calcs/worms/FftUtils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 | """
4 | @file FftUtils.py
5 | @brief Utilities for Fourier transforms
6 |
7 | @package libtim.fft
8 | @brief Utilities for Fourier transforms
9 | @author Tim van Werkhoven (werkhoven@strw.leidenuniv.nl)
10 | @copyright Creative Commons Attribution-Share Alike license versions 3.0 or higher, see http://creativecommons.org/licenses/by-sa/3.0/
11 | @date 20120403
12 |
13 | Package for some utilities for Fourier transforms
14 |
15 | Modified by Frank Horowitz from the package libtim found at https://github.com/tvwerkhoven/libtim-py.git
16 | """
17 | # Python 2 compatibility imports, retained for reference (not needed under Python 3):
18 | # from __future__ import division; from future.builtins import zip
19 |
20 | # ============================================================================
21 | # Import libraries here
22 | # ============================================================================
23 |
24 | import numpy as np
25 | from collections.abc import Iterable
26 | from math import ceil, floor
27 |
28 | # ============================================================================
29 | # Defines
30 | # ============================================================================
31 |
32 | # ============================================================================
33 | # Routines
34 | # ============================================================================
35 |
36 |
37 | def mk_apod_mask(
38 | masksz,
39 | apodpos=None,
40 | apodsz=None,
41 | shape="rect",
42 | wsize=-0.3,
43 | apod_f=lambda x: 0.5 * (1.0 - np.cos(np.pi * x)),
44 | ):
45 | """
46 | Generate apodisation mask with custom size, shape, edge.
47 |
48 |     The output mask array will be **masksz** big, while the actual apodisation mask will be **apodsz** big. The position of the mask is given with **apodpos**.
49 |
50 | **apodpos** defaults to the center, **apodsz** defaults to **masksz**
51 |
52 |     **apodpos**, **apodsz** and **wsize** can each be given either as a fraction (if < 0) or as an absolute number of pixels (if > 0). If given as an int or float, the result will be square; if given as tuples, the size can differ per dimension.
53 |
54 | If **apodpos** or **apodsz** are fractional, they are relative to **masksz**. Fractional **wsize** is relative to **apodsz**.
55 |
56 | **apod_f** is the windowing function used. It can be a string (see list below), or a lambda function. In the latter case it should take one float coordinate between 1 and 0 as input and return the value of the window at that position.
57 |
58 | Some apodisation functions (for **apod_f**):
59 | - 'Hann': lambda x: 0.5 * (1.0 - np.cos(np.pi*x))
60 | - 'Hamming': lambda x: 0.54 - 0.46 *np.cos(np.pi*x)
61 | - '(Co)sine' window: lambda x: np.sin(np.pi*x*0.5)
62 | - 'Lanczos': lambda x: np.sinc(x-1.0)
63 |
64 | @param [in] masksz Size of the output array containing the apodisation mask
65 | @param [in] apodpos Position of the apodisation mask
66 | @param [in] apodsz Size of the apodisation mask
67 | @param [in] shape Apodisation mask shape, 'rect' or 'circular'
68 | @param [in] wsize Size of the apodisation window, i.e. the transition region to go from 0 to 1.
69 | @param [in] apod_f Apodisation function to use. Can be lambda function
70 | """
71 |
72 | # Check apodpos and apodsz, if not set, use defaults
73 |     if apodpos is None:
74 |         apodpos = tuple((np.r_[masksz] - 1.0) / 2.0)
75 |     if apodsz is None:
76 |         apodsz = masksz
77 |
78 | apod_func = lambda x: x
79 | if hasattr(apod_f, "__call__"):
80 | apod_func = apod_f
81 | else:
82 | try:
83 | apod_f = apod_f.lower()
84 | if apod_f[:4] == "hann":
85 | apod_func = lambda x: 0.5 * (1.0 - np.cos(np.pi * x))
86 | elif apod_f[:4] == "hamm":
87 | apod_func = lambda x: 0.54 - 0.46 * np.cos(np.pi * x)
88 | elif apod_f[:3] == "cos" or apod_f[:3] == "sin":
89 | apod_func = lambda x: np.sin(np.pi * x * 0.5)
90 | elif apod_f[:4] == "lanc":
91 | apod_func = lambda x: np.sinc(x - 1.0)
92 |             else:
93 |                 raise ValueError(f"apod_f '{apod_f}' not supported!")
94 |         except ValueError:
95 |             raise
96 |         except Exception:
97 |             raise ValueError("apod_f should be a string or callable!")
98 |
99 | # Mask size should be iterable (like a list or tuple)
100 | if not isinstance(masksz, Iterable):
101 |         raise TypeError("masksz should be iterable")
102 | if min(masksz) < 1:
103 | raise ValueError("All mask size dimensions should be >= 1")
104 |
105 |     # Only the first 4 letters of the shape name are significant.
106 |     try:
107 |         shape = shape[:4]
108 |     except TypeError:
109 |         raise ValueError("shape should be a string!")
110 |
111 | # Check if shape is legal
112 | if shape not in ("rect", "circ"):
113 |         raise ValueError("shape should be 'rect' or 'circ'")
114 |
115 |     # Check if apodpos, apodsz and wsize are legal. They should either be a
116 |     # scalar (i.e. non-iterable) or the same length as masksz (which is iterable). Also, if apodpos, apodsz or wsize is just one int or float, repeat it for each dimension.
117 |     if isinstance(apodpos, Iterable) and len(apodpos) != len(masksz):
118 |         raise TypeError(
119 |             "apodpos should be either 1 element per dimension or 1 in total."
120 |         )
121 | elif not isinstance(apodpos, Iterable):
122 | apodpos = (apodpos,) * len(masksz)
123 |
124 | if isinstance(apodsz, Iterable) and len(apodsz) != len(masksz):
125 | raise TypeError(
126 |             "apodsz should be either 1 element per dimension or 1 in total."
127 | )
128 | elif not isinstance(apodsz, Iterable):
129 | apodsz = (apodsz,) * len(masksz)
130 |
131 | if isinstance(wsize, Iterable) and len(wsize) != len(masksz):
132 | raise TypeError(
133 |             "wsize should be either 1 element per dimension or 1 in total."
134 | )
135 | elif not isinstance(wsize, Iterable):
136 | wsize = (wsize,) * len(masksz)
137 |
138 |     # If apodpos, apodsz or wsize are fractional, calculate the absolute size.
139 | if min(apodpos) < 0:
140 | apodpos *= -np.r_[masksz]
141 | if min(apodsz) < 0:
142 | apodsz *= -np.r_[masksz]
143 | if min(wsize) < 0:
144 | wsize *= -np.r_[apodsz]
145 |
146 | # Generate base mask, which are (x,y) coordinates around the center
147 | mask = np.indices(masksz, dtype=float)
148 |
149 |     # Center the mask around apodpos for any number of dimensions
150 | for maski, posi in zip(mask, apodpos):
151 | maski -= posi
152 |
153 |     # If the mask shape is circular, calculate the radial distance from
154 |     # the center
155 | if shape == "circ":
156 | mask = np.array([np.sum(mask**2.0, 0) ** 0.5])
157 |
158 |     # Scale the pixels such that there is only a band going from 1 to 0 between (apodsz - wsize) and apodsz
159 | for maski, szi, wszi in zip(mask, apodsz, wsize):
160 | # First take the negative absolute value of the mask, such that 0 is at the origin and the value goes down outward from where the mask should be.
161 | maski[:] = -np.abs(maski)
162 | # Next, add the radius of the apodisation mask size to the values, such that the outside edge of the requested mask is exactly zero.
163 | # TODO Should this be (szi-1)/2 or (szi)/2?
164 | maski += (szi) / 2.0
165 | # Now divide the mask by the windowing area inside the apod. mask, such that the inner edge of the mask is 1.
166 | if wszi != 0:
167 | maski /= wszi / 2.0
168 | else:
169 |             maski /= 0.001  # zero-width window: approximate a hard edge
170 | # Store masks for inside and outside the mask area
171 | inmask = maski > 1
172 | outmask = maski <= 0
173 | # Apply function to all data
174 | maski[:] = apod_func(maski[:])
175 | # Now everything higher than 1 is inside the mask, and smaller than 0 is outside the mask. Clip these values to (0,1)
176 | maski[inmask] = 1
177 | maski[outmask] = 0
178 |
179 | # Apply apodisation function to all elements, and multiply
180 | if shape == "rect":
181 | return np.prod(mask, 0)
182 | elif shape == "circ":
183 | return mask[0]
184 |
185 |
186 | def descramble(data, direction=1):
187 | """
188 | (de)scramble **data**, usually used for Fourier transform.
189 |
190 | 'Scrambling' data means to swap around quadrant 1 with 3 and 2 with 4 in
191 | a data matrix. The effect is that the zero frequency is no longer at
192 | **data[0,0]** but in the middle of the matrix
193 |
194 | @param [in] data Data to (de)scramble
195 | @param [in] direction 1: scramble, -1: descramble
196 | @return (de)scrambled data
197 | """
198 |
199 | if direction == 1:
200 | return np.fft.fftshift(data)
201 | else:
202 | return np.fft.ifftshift(data)
203 |
204 |
205 | def embed_data(indata, big_shape=(None, None), pad_size=None):
206 | """
207 | Embed **indata** in a zero-filled rectangular array of shape big_shape.
208 |
209 | For apodization purposes, a padding of size pad_size will surround the embedded image.
210 |
211 | To prevent wrapping artifacts in Fourier analysis, this function can embed data in a zero-filled rectangular array of a larger size.
212 |
213 | @param [in] indata Data to embed
214 | @param [in] big_shape Size of embedding matrix
215 | @param [in] pad_size width of apodization pad, which will be filled with indata edge values
216 | @return embedded data, and x/y-direction slicing objects
217 | """
218 | if big_shape == (None, None):
219 | raise ValueError("Need to specify size of returned array!")
220 | if big_shape[0] < indata.shape[0]:
221 | raise ValueError("Returned Y direction must be at least as big as indata's.")
222 | if big_shape[1] < indata.shape[1]:
223 | raise ValueError("Returned X direction must be at least as big as indata's.")
224 |
225 | s = big_shape
226 |
227 | # Generate empty array
228 | retdat = np.zeros(s, dtype=indata.dtype)
229 |
230 |     inmean = np.mean(indata)  # removed so the zero-filled surround matches the data's DC level
231 |
232 |     # These slices denote the central region where indata will go
233 | x_pad_2 = s[1] - indata.shape[1]
234 | x_pad = floor(x_pad_2 / 2.0)
235 | slice_x = slice(x_pad, x_pad + indata.shape[1], 1)
236 |     if pad_size is not None:
237 | pad_slice_x = slice(slice_x.start - pad_size, slice_x.stop + pad_size)
238 |
239 | y_pad_2 = s[0] - indata.shape[0]
240 | y_pad = floor(y_pad_2 / 2.0)
241 | slice_y = slice(y_pad, y_pad + indata.shape[0], 1)
242 |     if pad_size is not None:
243 | pad_slice_y = slice(slice_y.start - pad_size, slice_y.stop + pad_size)
244 |
245 | #
246 | # Insert the data and return it
247 |     if pad_size is None:
248 | retdat[slice_y, slice_x] = indata - inmean
249 | else:
250 | retdat[pad_slice_y, pad_slice_x] = np.pad(
251 | indata - inmean, pad_width=pad_size, mode="edge"
252 | )
253 | return retdat, slice_y, slice_x
254 |
255 |
256 | def embed_data_old(indata, direction=1, scale=2):
257 | """
258 | Embed **indata** in a zero-filled rectangular array **scale** times as big as **indata**.
259 |
260 | To prevent wrapping artifacts in Fourier analysis, this function can embed data in a zero-filled rectangular array of twice the size.
261 |
262 | If **direction** = 1, **indata** will be embedded, if **direction** = -1, it will be dis-embedded.
263 |
264 | @param [in] indata Data to embed
265 | @param [in] direction 1: embed, -1: dis-embed
266 | @param [in] scale Size of embedding matrix wrt **indata**
267 | @return (dis)-embedded data, either scale*indata.shape or 1/scale*indata.shape
268 | """
269 |
270 | s = np.r_[indata.shape]
271 |
272 | if direction == 1:
273 | # Generate empty array
274 | retdat = np.zeros(np.r_[s] * scale, dtype=indata.dtype)
275 |         # These slices denote the central region where indata will go
276 |         slice0 = slice(
277 |             retdat.shape[0] // 2 - floor(s[0] / 2.0),
278 |             retdat.shape[0] // 2 + ceil(s[0] / 2.0),
279 |         )
280 |         slice1 = slice(
281 |             retdat.shape[1] // 2 - floor(s[1] / 2.0),
282 |             retdat.shape[1] // 2 + ceil(s[1] / 2.0),
283 |         )
284 |
285 | #
286 | # Insert the data and return it
287 | retdat[slice0, slice1] = indata
288 | return retdat, slice0, slice1
289 | else:
290 | # These slices give the central area of the data
291 |         slice0 = slice(
292 |             s[0] // 2 - floor(s[0] / (2.0 * scale)),
293 |             s[0] // 2 + ceil(s[0] / (2.0 * scale)),
294 |         )
295 |         slice1 = slice(
296 |             s[1] // 2 - floor(s[1] / (2.0 * scale)),
297 |             s[1] // 2 + ceil(s[1] / (2.0 * scale)),
298 |         )
299 |
300 | # Slice out the center and return it
301 | return indata[slice0, slice1]
302 |
--------------------------------------------------------------------------------
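
A minimal sketch tying the FftUtils helpers together, assuming the module is
importable as shown; the grid values and sizes are synthetic placeholders. It
embeds a grid in a larger zero-filled array (to limit Fourier wraparound),
tapers it with a Hann apodisation mask, and slices the original region back
out, restoring the mean that embed_data() removed.

    import numpy as np
    from calcs.worms.FftUtils import mk_apod_mask, embed_data

    data = np.random.default_rng(1).normal(size=(100, 120))

    big_shape = (256, 256)  # power-of-two embedding for FFT efficiency
    embedded, slice_y, slice_x = embed_data(data, big_shape=big_shape, pad_size=16)

    # Rectangular Hann taper centred in big_shape (wsize < 0 means fractional width)
    apod = mk_apod_mask(big_shape, shape="rect", wsize=-0.3, apod_f="hann")
    spectrum = np.fft.fft2(embedded * apod)

    # ... apply a transfer function to `spectrum` here ...

    filtered = np.fft.ifft2(spectrum).real[slice_y, slice_x] + data.mean()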