├── pytomo3d ├── __init__.py ├── utils │ ├── __init__.py │ ├── io.py │ └── download.py ├── window │ ├── tests │ │ ├── __init__.py │ │ ├── user_module_example.py │ │ ├── test_window_io.py │ │ ├── test_window_weights.py │ │ └── test_window.py │ ├── __init__.py │ ├── io.py │ └── utils.py ├── station │ ├── __init__.py │ ├── tests │ │ ├── test_station.py │ │ └── test_generate_adjoint_stations.py │ ├── extract_staxml_info.py │ ├── utils.py │ └── generate_adjoint_stations.py ├── signal │ ├── __init__.py │ ├── tests │ │ ├── test_compare_trace.py │ │ ├── test_rotate_utils.py │ │ └── test_process.py │ ├── compare_trace.py │ └── rotate_utils.py ├── source │ ├── __init__.py │ ├── tests │ │ └── test_append_cmtsolution.py │ ├── source_weights.py │ └── append_cmtsolution.py └── adjoint │ ├── __init__.py │ ├── tests │ ├── test_utils.py │ ├── test_io.py │ └── test_adjoint_source.py │ ├── io.py │ ├── plot_util.py │ ├── utils.py │ ├── sum_adjoint.py │ └── adjoint_source.py ├── scripts └── window_merge_tool │ ├── example_run │ ├── merge_winfile.py │ └── example_run.bash │ ├── generate_json_files │ └── generate_window_merge_parfile.py │ ├── util.py │ └── merge_winfile.py ├── docs ├── source │ ├── modules.rst │ ├── pytomo3d.rst │ ├── install.rst │ ├── index.rst │ ├── intro.rst │ ├── pytomo3d.signal.rst │ ├── pytomo3d.adjoint.rst │ ├── pytomo3d.window.rst │ └── tutorial.rst └── Makefile ├── tests ├── data │ ├── raw │ │ ├── BW.RJOB.obs.mseed │ │ ├── IU.KBL.obs.mseed │ │ └── IU.KBL.syn.mseed │ ├── proc │ │ ├── IU.KBL.obs.proc.mseed │ │ └── IU.KBL.syn.proc.mseed │ ├── adjoint │ │ ├── waveform.adjoint.config.yaml │ │ ├── cc_traveltime.adjoint.config.yaml │ │ └── multitaper.adjoint.config.yaml │ ├── quakeml │ │ ├── C201009031635A.inv │ │ └── C201009031635A.xml │ ├── window │ │ ├── windows.fake.json │ │ ├── 27_60.BHZ.config.yaml │ │ ├── measurements.fake.json │ │ └── IU.KBL..BHR.window.json │ └── stations │ │ └── stations.fake.json └── test_code_formatting.py ├── requirements.txt ├── README.md 
├── .gitignore ├── .travis.yml ├── setup.py ├── INSTALL.md └── LICENSE /pytomo3d/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pytomo3d/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pytomo3d/window/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/window_merge_tool/example_run/merge_winfile.py: -------------------------------------------------------------------------------- 1 | ../merge_winfile.py -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | pytomo3d 2 | ======== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | pytomo3d 8 | -------------------------------------------------------------------------------- /tests/data/raw/BW.RJOB.obs.mseed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/computational-seismology/pytomo3d/HEAD/tests/data/raw/BW.RJOB.obs.mseed -------------------------------------------------------------------------------- /tests/data/raw/IU.KBL.obs.mseed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/computational-seismology/pytomo3d/HEAD/tests/data/raw/IU.KBL.obs.mseed -------------------------------------------------------------------------------- /tests/data/raw/IU.KBL.syn.mseed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/computational-seismology/pytomo3d/HEAD/tests/data/raw/IU.KBL.syn.mseed -------------------------------------------------------------------------------- /tests/data/proc/IU.KBL.obs.proc.mseed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/computational-seismology/pytomo3d/HEAD/tests/data/proc/IU.KBL.obs.proc.mseed -------------------------------------------------------------------------------- /tests/data/proc/IU.KBL.syn.proc.mseed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/computational-seismology/pytomo3d/HEAD/tests/data/proc/IU.KBL.syn.proc.mseed -------------------------------------------------------------------------------- /scripts/window_merge_tool/example_run/example_run.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python merge_winfile.py \ 4 | -f ./window_merge.dir.json \ 5 | -v 6 | -------------------------------------------------------------------------------- 
/pytomo3d/station/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | from .extract_staxml_info import extract_staxml_info # NOQA 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | git+git://github.com/wjlei1990/pyflex.git@devel 2 | git+git://github.com/wjlei1990/spaceweight.git@master 3 | git+git://github.com/chukren/pyadjoint.git@dev -------------------------------------------------------------------------------- /tests/data/adjoint/waveform.adjoint.config.yaml: -------------------------------------------------------------------------------- 1 | adj_src_type: "waveform_misfit" 2 | 3 | # min and max period(unit: second) 4 | min_period: 27.0 5 | max_period: 60.0 6 | 7 | # adjoint config parameter 8 | taper_type: 'hann' 9 | taper_percentage: 0.15 10 | -------------------------------------------------------------------------------- /tests/data/adjoint/cc_traveltime.adjoint.config.yaml: -------------------------------------------------------------------------------- 1 | adj_src_type: "cc_traveltime_misfit" 2 | 3 | # min and max period of seismograms 4 | min_period: 27.0 5 | max_period: 60.0 6 | 7 | # config parameters 8 | taper_type: 'hann' 9 | taper_percentage: 0.15 10 | 11 | measure_type: 'dt' 12 | dt_sigma_min: 1.0 13 | dlna_sigma_min: 0.5 14 | -------------------------------------------------------------------------------- /docs/source/pytomo3d.rst: -------------------------------------------------------------------------------- 1 | pytomo3d package 2 | ================ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | pytomo3d.adjoint 10 | pytomo3d.signal 11 | pytomo3d.window 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. 
automodule:: pytomo3d 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /pytomo3d/signal/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | :copyright: 5 | Wenjie Lei (lei@princeton.edu), 2016 6 | :license: 7 | GNU General Public License, Version 3 8 | (http://www.gnu.org/copyleft/gpl.html) 9 | """ 10 | 11 | from __future__ import (absolute_import, division, print_function) 12 | 13 | from .process import process_stream # NOQA 14 | -------------------------------------------------------------------------------- /pytomo3d/source/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | :copyright: 5 | Wenjie Lei (lei@princeton.edu), 2016 6 | :license: 7 | GNU General Public License, Version 3 8 | (http://www.gnu.org/copyleft/gpl.html) 9 | """ 10 | 11 | from __future__ import (absolute_import, division, print_function) 12 | 13 | from .append_cmtsolution import append_cmt_to_catalog # NOQA 14 | -------------------------------------------------------------------------------- /pytomo3d/window/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | :copyright: 5 | Wenjie Lei (lei@princeton.edu), 2016 6 | :license: 7 | GNU General Public License, Version 3 8 | (http://www.gnu.org/copyleft/gpl.html) 9 | """ 10 | 11 | from __future__ import (absolute_import, division, print_function) 12 | 13 | from .window import window_on_stream, window_on_trace # NOQA 14 | -------------------------------------------------------------------------------- /tests/data/quakeml/C201009031635A.inv: -------------------------------------------------------------------------------- 1 | PDE 2010 9 
3 16 35 47.80 -43.5300 171.8100 12.0 6.4 7.3 SOUTH ISLAND, NEW ZEALAND 2 | event name: C201009031635A 3 | time shift: 10.1000 4 | half duration: 7.5000 5 | latitude: -43.5600 6 | longitude: 172.1200 7 | depth: 12.0000 8 | Mrr: 3.850000e+25 9 | Mtt: 8.000000e+23 10 | Mpp: -3.920000e+25 11 | Mrt: 4.910000e+25 12 | Mrp: 1.000000e+24 13 | Mtp: -3.600000e+26 14 | -------------------------------------------------------------------------------- /docs/source/install.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | Pytomo3d has dependency on ``obspy`` (version 0.10.2), ``pyflex`` and ``pyadjoint``. Please 5 | install those packages first. To detailed instruction, check `INSTALL.md`_. 6 | 7 | .. _INSTALL.md: https://github.com/wjlei1990/pytomo3d/blob/master/INSTALL.md 8 | 9 | 1. Install from source(recommended):: 10 | 11 | git clone https://github.com/wjlei1990/pytomo3d 12 | cd pytomo3d 13 | pip install -v -e . 14 | 15 | 2. Install from pip:: 16 | 17 | pip install pytomo3d 18 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. pytomo3d documentation master file, created by 2 | sphinx-quickstart on Mon Feb 22 13:52:20 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to pytomo3d's documentation! 7 | ==================================== 8 | 9 | **Contents:** 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | intro 15 | install 16 | tutorial 17 | 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | 26 | -------------------------------------------------------------------------------- /tests/data/adjoint/multitaper.adjoint.config.yaml: -------------------------------------------------------------------------------- 1 | adj_src_type: "multitaper_misfit" 2 | 3 | # min and max period(unit: second) 4 | min_period: 27.0 5 | max_period: 60.0 6 | 7 | # adjoint config parameter 8 | lnpt: 15 9 | transfunc_waterlevel: 1.0E-10 10 | water_threshold: 0.02 11 | ipower_costaper: 10 12 | min_cycle_in_window: 3 13 | taper_percentage: 0.3 14 | mt_nw: 4.0 15 | num_taper: 5 16 | phase_step: 1.5 17 | dt_fac: 2.0 18 | err_fac: 2.5 19 | dt_max_scale: 3.5 20 | measure_type: 'dt' 21 | dt_sigma_min: 1.0 22 | dlna_sigma_min: 0.5 23 | taper_type: 'hann' 24 | use_cc_error: True 25 | use_mt_error: False 26 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | :copyright: 5 | Wenjie Lei (lei@princeton.edu), 2016 6 | :license: 7 | GNU General Public License, Version 3 8 | (http://www.gnu.org/copyleft/gpl.html) 9 | """ 10 | 11 | from __future__ import (absolute_import, division, print_function) # NOQA 12 | 13 | from .adjoint_source import calculate_adjsrc_on_stream # NOQA 14 | from .adjoint_source import calculate_and_process_adjsrc_on_stream # NOQA 15 | from .adjoint_source import calculate_adjsrc_on_trace # NOQA 16 | from .adjoint_source import measure_adjoint_on_stream # NOQA 17 | -------------------------------------------------------------------------------- /docs/source/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | 
================================ 3 | 4 | This is the Python version of seismic data processing tools. It contains modules for signal processing, window selection and adjoint source construction. 5 | 6 | 1. *Signal Processing* 7 | It is a signal processing workflow defined for adjoint tomography, but it should work for most seismology cases. It uses obspy functions. 8 | 9 | 2. *Window selection* 10 | Select windows on observed and synthetic seismograms. This module uses pyflex. 11 | 12 | 3. *Adjoint source constructor* 13 | Construct adjoint sources based on observed data, synthetic data and the selected windows. It uses pyadjoint. 14 | -------------------------------------------------------------------------------- /docs/source/pytomo3d.signal.rst: -------------------------------------------------------------------------------- 1 | pytomo3d.signal package 2 | ======================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | pytomo3d.signal.process module 8 | ------------------------------ 9 | 10 | .. automodule:: pytomo3d.signal.process 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | pytomo3d.signal.rotate module 16 | ----------------------------- 17 | 18 | .. automodule:: pytomo3d.signal.rotate 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: pytomo3d.signal 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/source/pytomo3d.adjoint.rst: -------------------------------------------------------------------------------- 1 | pytomo3d.adjoint package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | pytomo3d.adjoint.adjsrc module 8 | ------------------------------ 9 | 10 | ..
automodule:: pytomo3d.adjoint.adjsrc 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | pytomo3d.adjoint.plot_util module 16 | --------------------------------- 17 | 18 | .. automodule:: pytomo3d.adjoint.plot_util 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: pytomo3d.adjoint 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/source/pytomo3d.window.rst: -------------------------------------------------------------------------------- 1 | pytomo3d.window package 2 | ======================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | pytomo3d.window.window module 8 | ----------------------------- 9 | 10 | .. automodule:: pytomo3d.window.window 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | pytomo3d.window.write_window module 16 | ----------------------------------- 17 | 18 | .. automodule:: pytomo3d.window.write_window 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: pytomo3d.window 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pytomo3d 2 | [![Build Status](https://travis-ci.org/wjlei1990/pytomo3d.svg?branch=master)](https://travis-ci.org/wjlei1990/pytomo3d/branches) 3 | [![DOI](https://zenodo.org/badge/22621/wjlei1990/pypaw.svg)](https://zenodo.org/badge/latestdoi/22621/wjlei1990/pypaw) 4 | 5 | Seismic tomography toolkits, includes signal processing, window selection and adjoint sources constructor. The documentation could be found [here](http://wjlei1990.github.io/pytomo3d/). 
6 | 7 | For installation, please refer to [**INSTALL.md**](https://github.com/wjlei1990/pytomo3d/blob/master/INSTALL.md) 8 | 9 | **Note** 10 | 1. The current version of pytomo3d has now fully transferred to **`obspy1.*.*`**. 11 | 12 | Please make sure you update your obspy to at least 1.0.0 before running the test. 13 | -------------------------------------------------------------------------------- /pytomo3d/utils/io.py: -------------------------------------------------------------------------------- 1 | from __future__ import (absolute_import, division, print_function) 2 | import json 3 | 4 | 5 | def load_json(filename): 6 | with open(filename) as fh: 7 | return json.load(fh) 8 | 9 | 10 | def dump_json(content, filename): 11 | with open(filename, 'w') as fh: 12 | json.dump(content, fh, indent=2, sort_keys=True) 13 | 14 | 15 | def check_dict_keys(dict_to_check, keys): 16 | if not isinstance(dict_to_check, dict): 17 | raise TypeError("input dict_to_check should be type of dict: %s" 18 | % (type(dict_to_check))) 19 | 20 | set_input = set(dict_to_check.keys()) 21 | set_stand = set(keys) 22 | 23 | if set_input != set_stand: 24 | print("More: %s" % (set_input - set_stand)) 25 | print("Missing: %s" % (set_stand - set_input)) 26 | raise ValueError("Keys is not consistent: %s --- %s" 27 | % (set_input, set_stand)) 28 | -------------------------------------------------------------------------------- /pytomo3d/source/tests/test_append_cmtsolution.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import pytomo3d.source.append_cmtsolution as app_cmt 4 | 5 | 6 | def _upper_level(path, nlevel=4): 7 | """ 8 | Go the nlevel dir up 9 | """ 10 | for i in range(nlevel): 11 | path = os.path.dirname(path) 12 | return path 13 | 14 | 15 | # Most generic way to get the data folder path.
16 | TESTBASE_DIR = _upper_level(os.path.abspath( 17 | inspect.getfile(inspect.currentframe())), 4) 18 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 19 | 20 | testquakeml = os.path.join(DATA_DIR, "quakeml", "C201009031635A.xml") 21 | testcmt = os.path.join(DATA_DIR, "quakeml", "C201009031635A.inv") 22 | 23 | 24 | def test_append_cmt_to_catalog(): 25 | tag = "GATG_M15" 26 | new_cat = app_cmt.append_cmt_to_catalog( 27 | testquakeml, testcmt, tag, change_preferred_id=True) 28 | 29 | assert new_cat[0].preferred_origin() 30 | assert new_cat[0].preferred_magnitude() 31 | assert new_cat[0].preferred_focal_mechanism() 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | #Ipython Notebook 62 | .ipynb_checkpoints 63 | 64 | # Pycharm file 65 | .idea 66 | 67 | # temp files 68 | *.sw* 69 | -------------------------------------------------------------------------------- /tests/data/window/windows.fake.json: -------------------------------------------------------------------------------- 1 | { 2 | "II.AAK": { 3 | "II.AAK..BHR":[ 4 | {"left_index": 1, "right_index": 2}, 5 | {"left_index": 2, "right_index": 3} 6 | ], 7 | "II.AAK..BHT":[ 8 | {"left_index": 1, "right_index": 2} 9 | ], 10 | "II.AAK..BHZ":[ 11 | {"left_index": 1, "right_index": 2}, 12 | {"left_index": 2, "right_index": 3}, 13 | {"left_index": 3, "right_index": 4} 14 | ] 15 | }, 16 | "II.ABKT": { 17 | "II.ABKT..BHR":[ 18 | {"left_index": 1, "right_index": 2} 19 | ], 20 | "II.ABKT..BHT":[], 21 | "II.ABKT..BHZ":[ 22 | {"left_index": 1, "right_index": 2}, 23 | {"left_index": 2, "right_index": 3} 24 | ] 25 | }, 26 | "IU.BCD": { 27 | "IU.BCD..BHR":[ 28 | {"left_index": 1, "right_index": 2}, 29 | {"left_index": 2, "right_index": 3} 30 | ], 31 | "IU.BCD..BHT":[ 32 | {"left_index": 1, "right_index": 2}, 33 | {"left_index": 2, "right_index": 3} 34 | ], 35 | "IU.BCD..BHZ":[ 36 | {"left_index": 1, "right_index": 2}, 37 | {"left_index": 2, "right_index": 3}, 38 | {"left_index": 3, "right_index": 4}, 39 | {"left_index": 4, "right_index": 5}, 40 | {"left_index": 5, "right_index": 6} 41 | ] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "2.7" 5 | 6 | before_install: 7 | - sudo apt-get update 8 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 9 | wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh; 10 | else 11 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 12 | fi 13 | - bash miniconda.sh -b -p $HOME/miniconda 14 | - export PATH="$HOME/miniconda/bin:$PATH" 15 | - hash -r 16 | - conda config --set always_yes yes --set changeps1 no 17 | - conda update -q conda 18 | - conda info -a 19 | - conda create -n condaenv python=$TRAVIS_PYTHON_VERSION 20 | - conda install -n condaenv pip 21 | - source activate condaenv 22 | - conda install -c obspy obspy 23 | - conda install python=$TRAVIS_PYTHON_VERSION pyyaml 24 | - conda install python=$TRAVIS_PYTHON_VERSION atlas numpy scipy matplotlib nose pytest flake8 sphinx lxml sqlalchemy mock future yaml 25 | - pip install --user geographiclib 26 | - pip install --user -r requirements.txt 27 | - pip install coverage 28 | - pip install coveralls 29 | 30 | install: 31 | - pip install --no-deps -v -e . 
32 | 33 | script: 34 | - coverage run --source=pytomo3d setup.py test 35 | 36 | after_success: 37 | - cd $TRAVIS_BUILD_DIR; coveralls 38 | -------------------------------------------------------------------------------- /tests/data/stations/stations.fake.json: -------------------------------------------------------------------------------- 1 | { 2 | "II.AAK..BHE": { 3 | "depth": 0.0, 4 | "elevation": 2437.8, 5 | "latitude": 0.0, 6 | "longitude": 0.0, 7 | "sensor": "Streckeisen STS-1H/VBB Seismometer" 8 | }, 9 | "II.AAK..BHN": { 10 | "depth": 0.0, 11 | "elevation": 2437.8, 12 | "latitude": 0.0, 13 | "longitude": 0.0, 14 | "sensor": "Streckeisen STS-1H/VBB Seismometer" 15 | }, 16 | "II.AAK..BHZ": { 17 | "depth": 0.0, 18 | "elevation": 2437.8, 19 | "latitude": 0.0, 20 | "longitude": 0.0, 21 | "sensor": "Streckeisen STS-1H/VBB Seismometer" 22 | }, 23 | "II.ABKT..BH1": { 24 | "depth": 0.0, 25 | "elevation": 2437.8, 26 | "latitude": 0.0, 27 | "longitude": 120.0, 28 | "sensor": "Streckeisen STS1H/VBB Seismometer" 29 | }, 30 | "II.ABKT..BH2": { 31 | "depth": 0.0, 32 | "elevation": 2437.8, 33 | "latitude": 0.0, 34 | "longitude": 120.0, 35 | "sensor": "Streckeisen STS1H/VBB Seismometer" 36 | }, 37 | "II.ABKT..BHZ": { 38 | "depth": 0.0, 39 | "elevation": 2437.8, 40 | "latitude": 0.0, 41 | "longitude": 120.0, 42 | "sensor": "Streckeisen STS1H/VBB Seismometer" 43 | }, 44 | "IU.BCD..BHZ": { 45 | "depth": 0.0, 46 | "elevation": 2437.8, 47 | "latitude": 0.0, 48 | "longitude": -120.0, 49 | "sensor": "Guralp CMG3ESP/Reftek 72A-08 Datalogger w/LOG chan" 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /tests/data/window/27_60.BHZ.config.yaml: -------------------------------------------------------------------------------- 1 | # Example file for window config 2 | # The basic structure follows the original version of 3 | # FLEXWIN(and all the parameters). 
If you want furture 4 | # and detailed documentions, please refer to the manual 5 | # of FLEXWIN 6 | 7 | # min and max period of seismograms 8 | "min_period": 27.0 9 | "max_period": 60.0 10 | 11 | # STA/LAT water level 12 | "stalta_waterlevel": 0.10 13 | 14 | # max tsfhit 15 | "tshift_acceptance_level": 8.0 16 | "tshift_reference": 0.0 17 | 18 | # max amplitude difference 19 | "dlna_acceptance_level": 0.50 20 | "dlna_reference": 0.0 21 | 22 | # min cc coef 23 | "cc_acceptance_level": 0.85 24 | 25 | # window signal-to-noise ratio 26 | "s2n_limit": 3.0 27 | "s2n_limit_energy": 1.5 28 | "window_signal_to_noise_type": "amplitude" 29 | 30 | # min/max surface wave velocity, to calculate slowest/fast 31 | # surface wave arrival to define the boundaries of 32 | # surface wave region 33 | "selection_mode": "body_and_surface_waves" 34 | "min_surface_wave_velocity": 3.20 35 | "max_surface_wave_velocity": 4.10 36 | "earth_model": "ak135" 37 | "max_time_before_first_arrival": 50.0 38 | 39 | # check global data quality 40 | "check_global_data_quality": True 41 | "snr_integrate_base": 3.5 42 | "snr_max_base": 3.0 43 | 44 | # see reference in FLEXWIN manual 45 | "c_0": 0.7 46 | "c_1": 2.0 47 | "c_2": 0.0 48 | "c_3a": 1.0 49 | "c_3b": 2.0 50 | "c_4a": 3.0 51 | "c_4b": 10.0 52 | 53 | # window merge strategy 54 | "resolution_strategy": "interval_scheduling" 55 | -------------------------------------------------------------------------------- /pytomo3d/station/tests/test_station.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | from copy import deepcopy 4 | from pytomo3d.station import extract_staxml_info 5 | import obspy 6 | 7 | 8 | def _upper_level(path, nlevel=4): 9 | """ 10 | Go the nlevel dir up 11 | """ 12 | for i in range(nlevel): 13 | path = os.path.dirname(path) 14 | return path 15 | 16 | 17 | # Most generic way to get the data folder path. 
18 | TESTBASE_DIR = _upper_level(os.path.abspath( 19 | inspect.getfile(inspect.currentframe())), 4) 20 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 21 | 22 | staxmlfile = os.path.join(DATA_DIR, "stationxml", "IU.KBL.xml") 23 | teststaxml = obspy.read_inventory(staxmlfile) 24 | 25 | 26 | def test_process_obsd(): 27 | 28 | true_type = { 29 | u'IU.KBL..BHZ': { 30 | 'latitude': 34.5408, 'depth': 7.0, 'elevation': 1913.0, 31 | 'longitude': 69.0432, 32 | 'sensor': 'Streckeisen STS-2/VBB Seismometer'}, 33 | u'IU.KBL..BHN': { 34 | 'latitude': 34.5408, 'depth': 7.0, 'elevation': 1913.0, 35 | 'longitude': 69.0432, 36 | 'sensor': 'Streckeisen STS-2/VBB Seismometer'}, 37 | u'IU.KBL..BHE': { 38 | 'latitude': 34.5408, 'depth': 7.0, 'elevation': 1913.0, 39 | 'longitude': 69.0432, 40 | 'sensor': 'Streckeisen STS-2/VBB Seismometer'} 41 | } 42 | 43 | inv = deepcopy(teststaxml) 44 | sensor_type = extract_staxml_info(inv) 45 | assert sensor_type == true_type 46 | 47 | inv = deepcopy(staxmlfile) 48 | sensor_type = extract_staxml_info(inv) 49 | assert sensor_type == true_type 50 | -------------------------------------------------------------------------------- /scripts/window_merge_tool/generate_json_files/generate_window_merge_parfile.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import glob 4 | import argparse 5 | 6 | superbase = "/lustre/atlas/proj-shared/geo111/Wenjie/DATA_SI/ASDF" 7 | 8 | windowbase = os.path.join(superbase, "window") 9 | outputbase = os.path.join(windowbase, "mt_input") 10 | period_list = ["50_100", "60_100"] 11 | #period_list = ["50_100", ] 12 | 13 | if not os.path.exists(parfile_dir): 14 | os.makedirs(parfile_dir) 15 | 16 | 17 | def read_txt_into_list(txtfile): 18 | with open(txtfile, 'r') as f: 19 | content = f.readlines() 20 | eventlist = [ line.rstrip() for line in content] 21 | return eventlist 22 | 23 | 24 | def generate_json_dirfiles(eventlist): 25 | content = [] 26 | 27 | 
for event in eventlist: 28 | print "="*20 29 | print "Event:", event 30 | for period in period_list: 31 | parlist = {} 32 | parlist['input_file'] = \ 33 | os.path.join(windowbase, "%s.%s" % (event, period), 34 | "windows.json") 35 | parlist['output_file'] = \ 36 | os.path.join(outputbase, "%s.%s.json" % (event, period)) 37 | content.append(parlist) 38 | 39 | par_jsonfile = "window_merge.dir.json" 40 | print("outputfile: %s" % par_jsonfile) 41 | with open(par_jsonfile, 'w') as f: 42 | json.dump(content, f, indent=2, sort_keys=True) 43 | 44 | 45 | if __name__ == "__main__": 46 | parser = argparse.ArgumentParser() 47 | parser.add_argument('-f', action='store', dest='eventlist_file', required=True) 48 | args = parser.parse_args() 49 | 50 | eventlist = read_txt_into_list(args.eventlist_file) 51 | generate_json_dirfiles(eventlist) 52 | -------------------------------------------------------------------------------- /tests/data/window/measurements.fake.json: -------------------------------------------------------------------------------- 1 | { 2 | "II.AAK": { 3 | "II.AAK..BHR":[ 4 | {"dt": 1.0, "misfit_dt": 1.0, "dlna": 0.7, "misfit_dlna": 1.0}, 5 | {"dt": -1.0, "misfit_dt": 1.0, "dlna": -0.7, "misfit_dlna": 1.0} 6 | ], 7 | "II.AAK..BHT":[ 8 | {"dt": 1.5, "misfit_dt": 2.5, "dlna": 0.9, "misfit_dlna": 0.5} 9 | ], 10 | "II.AAK..BHZ":[ 11 | {"dt": 1.0, "misfit_dt": 1.0, "dlna": 0.6, "misfit_dlna": 1.0}, 12 | {"dt": 2.0, "misfit_dt": 4.0, "dlna": 0.4, "misfit_dlna": 0.6}, 13 | {"dt": -1.5, "misfit_dt": 3.00, "dlna": -0.5, "misfit_dlna": 1.0} 14 | ] 15 | }, 16 | "II.ABKT": { 17 | "II.ABKT..BHR":[ 18 | {"dt": 1.0, "misfit_dt": 1.5, "dlna": 0.6, "misfit_dlna": 0.5} 19 | ], 20 | "II.ABKT..BHT":[], 21 | "II.ABKT..BHZ":[ 22 | {"dt": 2.0, "misfit_dt": 4.0, "dlna": 1.2, "misfit_dlna": 1.0}, 23 | {"dt": -5.0, "misfit_dt": 16.0, "dlna": -1.5, "misfit_dlna": 1.0} 24 | ] 25 | }, 26 | "IU.BCD": { 27 | "IU.BCD..BHR":[ 28 | {"dt": 1.0, "misfit_dt": 2.0, "dlna": 1.0, "misfit_dlna": 1.5}, 29 
| {"dt": -2.0, "misfit_dt": 3.0, "dlna": -0.8, "misfit_dlna": 1.2} 30 | ], 31 | "IU.BCD..BHT":[ 32 | {"dt": 1.0, "misfit_dt": 2.0, "dlna": 0.3, "misfit_dlna": 0.2}, 33 | {"dt": -2.5, "misfit_dt": 3.0, "dlna": -0.7, "misfit_dlna": 1.1} 34 | ], 35 | "IU.BCD..BHZ":[ 36 | {"dt": -0.2, "misfit_dt": 2.0, "dlna": -0.2, "misfit_dlna": 2.0}, 37 | {"dt": 0.8, "misfit_dt": 3.0, "dlna": 0.8, "misfit_dlna": 0.6}, 38 | {"dt": -1.6, "misfit_dt": 4.0, "dlna": -0.6, "misfit_dlna": 0.9}, 39 | {"dt": 1.6, "misfit_dt": 3.0, "dlna": 1.1, "misfit_dlna": 1.2}, 40 | {"dt": 0.9, "misfit_dt": 0.4, "dlna": 0.9, "misfit_dlna": 0.4} 41 | ] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /tests/test_code_formatting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Tests all Python files of the project with flake8. This ensure PEP8 conformance 5 | and some other sanity checks as well. 6 | 7 | :copyright: 8 | Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013-2014 9 | :license: 10 | GNU General Public License, Version 3 11 | (http://www.gnu.org/copyleft/gpl.html) 12 | """ 13 | from flake8.api import legacy as flake8 14 | import inspect 15 | import os 16 | 17 | 18 | def test_flake8(): 19 | test_dir = os.path.dirname(os.path.abspath(inspect.getfile( 20 | inspect.currentframe()))) 21 | 22 | basedir = os.path.dirname(test_dir) 23 | 24 | # Possibility to ignore some files and paths. 
25 | ignore_paths = [ 26 | os.path.join(basedir, "doc"), 27 | os.path.join(basedir, ".git"), 28 | os.path.join(basedir, "scripts")] 29 | files = [] 30 | 31 | for dirpath, _, filenames in os.walk(basedir): 32 | ignore = False 33 | for path in ignore_paths: 34 | if dirpath.startswith(path): 35 | ignore = True 36 | break 37 | if ignore: 38 | continue 39 | filenames = [_i for _i in filenames if 40 | os.path.splitext(_i)[-1] == os.path.extsep + "py"] 41 | if not filenames: 42 | continue 43 | for py_file in filenames: 44 | full_path = os.path.join(dirpath, py_file) 45 | files.append(full_path) 46 | 47 | style_guide = flake8.get_style_guide(ignore=['E24', 'W503', 'E226']) 48 | report = style_guide.check_files(files) 49 | assert report.get_statistics('E') == [], 'Flake8 found violations' 50 | assert report.total_errors == 0 51 | 52 | 53 | if __name__ == "__main__": 54 | test_flake8() 55 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | from obspy import read 4 | import pytomo3d.adjoint.utils as adj_utils 5 | # import pyadjoint.adjoint_source 6 | 7 | 8 | def _upper_level(path, nlevel=4): 9 | """ 10 | Go the nlevel dir up 11 | """ 12 | for i in range(nlevel): 13 | path = os.path.dirname(path) 14 | return path 15 | 16 | 17 | # Most generic way to get the data folder path. 
# Package metadata; ``python setup.py test`` runs py.test via the custom
# test command class.
setup(
    name="pytomo3d",
    version="0.2.0",
    license='GNU Lesser General Public License, version 3 (LGPLv3)',
    # Fixed typo in the user-visible description: "tomograpy" -> "tomography".
    description="Python toolkits for seismic tomography",
    author="Wenjie Lei",
    author_email="lei@princeton.edu",
    url="https://github.com/wjlei1990/pytomo3d",
    packages=find_packages(),
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
    zip_safe=False,
    classifiers=[
        # complete classifier list:
        # http://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Physics",
    ],
    keywords=[
        "seismology", "tomography", "adjoint", "signal", "inversion", "window"
    ],
    install_requires=[
        "numpy", "obspy==1.0.3", "flake8>=3.0", "pytest", "nose",
        "future>=0.14.1", "pyflex", "pyadjoint", "geographiclib"
    ],
    extras_require={
        "docs": ["sphinx", "ipython", "runipy"]
    }
)
def safe_load_staxml(staxmlfile):
    """Read a StationXML file into an ``obspy.Inventory``.

    :param staxmlfile: path of the StationXML file
    :type staxmlfile: str
    :return: the parsed inventory
    :raises ValueError: if the file cannot be parsed
    """
    try:
        inv = read_inventory(staxmlfile)
    except Exception as exp:
        # Bug fix: the original ``raise("...")`` raised a plain string,
        # which is itself a TypeError in Python; raise a real exception
        # that carries the underlying parse error instead.
        raise ValueError("Failed to parse staxml file(%s) due to: %s"
                         % (staxmlfile, exp))
    return inv
def get_time_array(obsd, event):
    """Return the time axis of ``obsd`` relative to the event origin time.

    :param obsd: trace-like object providing ``stats.delta``,
        ``stats.npts`` and ``stats.starttime``
    :param event: catalog-like object; the first origin of the first
        event is used as the reference time
    :return: numpy array with exactly ``npts`` sample times (seconds)
    """
    stats = obsd.stats
    dt = stats.delta
    npts = stats.npts
    start = stats.starttime - event.events[0].origins[0].time
    # ``np.arange(start, start + npts*dt, dt)`` may return npts + 1
    # samples due to floating-point rounding of the end point; building
    # the axis from integer indices guarantees the length matches the
    # trace.
    return start + dt * np.arange(npts)
# Most generic way to get the data folder path.
TESTBASE_DIR = _upper_level(
    os.path.abspath(inspect.getfile(inspect.currentframe())), 4)
# Use the print() function form: the old ``print TESTBASE_DIR`` statement
# is a syntax error under Python 3 (single-argument form is valid on 2.7).
print(TESTBASE_DIR)
DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data")

# Test fixture files used by the window-selection tests below.
obsfile = os.path.join(DATA_DIR, "proc", "IU.KBL.obs.proc.mseed")
synfile = os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed")
staxml = os.path.join(DATA_DIR, "stationxml", "IU.KBL.xml")
quakeml = os.path.join(DATA_DIR, "quakeml", "C201009031635A.xml")
    # write out the processed synthetic stream
    new_syn.write("II.AAK.syn.proc.mseed", format="MSEED")
def is_mpi_env():
    """Return True when running inside a real (multi-rank) MPI environment.

    The check is conservative: a missing ``mpi4py`` installation, or a
    world communicator with a single rank, is treated as a non-MPI run.
    """
    try:
        import mpi4py
        import mpi4py.MPI
    except ImportError:
        return False

    comm = mpi4py.MPI.COMM_WORLD
    single_rank = (comm.size == 1 and comm.rank == 0)
    return not single_rank
def print_option():
    """Print command line usage information for this script."""
    # Use the print() function form so this module is also importable
    # under Python 3; the original py2 print statements are syntax errors
    # there. Single-argument print() behaves identically on Python 2.7.
    print("========== Command line option help ==========")
    print("-p(--parfile=) path to the parameter file")
    print("-v(--verbose) verbose mode")
    print("-h(--help) print out help information")
yaml.load(fh) 24 | 25 | adjsrc_type = data["adj_src_type"] 26 | data.pop("adj_src_type") 27 | 28 | if adjsrc_type == "multitaper_misfit": 29 | ConfigClass = pyadjoint.ConfigMultiTaper 30 | elif adjsrc_type == "cc_traveltime_misfit": 31 | ConfigClass = pyadjoint.ConfigCrossCorrelation 32 | elif adjsrc_type == "waveform_misfit": 33 | ConfigClass = pyadjoint.ConfigWaveForm 34 | 35 | if data["min_period"] > data["max_period"]: 36 | raise ValueError("min_period is larger than max_period in config " 37 | "file: %s" % filename) 38 | 39 | return ConfigClass(**data) 40 | 41 | 42 | def _extract_window_id(windows): 43 | """ 44 | Extract obsd id and synt id associated with the windows. 45 | Windows should come from the same channel. 46 | 47 | :param windows: a list of pyflex.Window 48 | :return: a two dimension numpy.array of time window, with window 49 | starttime and endtime 50 | """ 51 | obs_ids = [] 52 | syn_ids = [] 53 | for _win in windows: 54 | if isinstance(_win, dict): 55 | obs_id = _win["channel_id"] 56 | try: 57 | syn_id = _win["channel_id_2"] 58 | except: 59 | syn_id = "UNKNOWN" 60 | else: 61 | obs_id = _win.channel_id 62 | try: 63 | syn_id = _win.channel_id_2 64 | except: 65 | syn_id = "UNKNOWN" 66 | obs_ids.append(obs_id) 67 | syn_ids.append(syn_id) 68 | 69 | # sanity check for windows in the same channel 70 | if len(set(obs_ids)) != 1: 71 | raise ValueError("Windows in for the same channel not consistent for" 72 | "obsd id:%s" % obs_ids) 73 | if len(set(syn_ids)) != 1: 74 | raise ValueError("Windows in for the same channel not consistent for" 75 | "obsd id:%s" % syn_ids) 76 | 77 | obs_id = obs_ids[0] 78 | syn_id = syn_ids[0] 79 | # read windows for this trace 80 | return obs_id, syn_id 81 | 82 | 83 | def _extract_window_time(windows): 84 | """ 85 | Extract window time information from a list of windows. 
86 | """ 87 | win_time = [] 88 | for _win in windows: 89 | if isinstance(_win, dict): 90 | win_time.append([_win["relative_starttime"], 91 | _win["relative_endtime"]]) 92 | else: 93 | win_time.append([_win.relative_starttime, 94 | _win.relative_endtime]) 95 | return np.array(win_time) 96 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/plot_util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Methods that handles plotting adjoint sources 5 | 6 | :copyright: 7 | Wenjie Lei (lei@princeton.edu), 2016 8 | :license: 9 | GNU Lesser General Public License, version 3 (LGPLv3) 10 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 11 | """ 12 | import matplotlib.pyplot as plt 13 | from obspy import Trace 14 | from pyadjoint import AdjointSource 15 | from matplotlib.patches import Rectangle 16 | 17 | 18 | def plot_only_adjoint(adjsrc, wintimes=None): 19 | pass 20 | 21 | 22 | def plot_adjoint_and_data(adjsrc, win_times, obs_tr, syn_tr): 23 | 24 | plt.figure(figsize=(15, 5)) 25 | times = [obs_tr.stats.delta * i for i in range(obs_tr.stats.npts)] 26 | 27 | plt.subplot(211) 28 | plt.plot(obs_tr.times(), obs_tr.data, color="0.2", label="Observed", 29 | lw=2) 30 | plt.plot(syn_tr.times(), syn_tr.data, color="#bb474f", 31 | label="Synthetic", lw=2) 32 | 33 | plt.grid() 34 | plt.legend(fancybox=True, framealpha=0.5) 35 | ylim = max(map(abs, plt.ylim())) 36 | plt.ylim(-ylim, ylim) 37 | for win in win_times: 38 | l = win[0] 39 | r = win[1] 40 | re = Rectangle((l, plt.ylim()[0]), r - l, 41 | plt.ylim()[1] - plt.ylim()[0], color="blue", 42 | alpha=0.4) 43 | plt.gca().add_patch(re) 44 | 45 | plt.subplot(212) 46 | plt.plot(times, adjsrc.adjoint_source[::-1], color="#2f8d5b", lw=2, 47 | label="Adjoint Source") 48 | plt.grid() 49 | plt.legend(fancybox=True, framealpha=0.5) 50 | xlim = max(map(abs, plt.xlim())) 51 | ylim = max(map(abs, 
def plot_adjoint_source(adjsrc, win_times=None,
                        obs_tr=None, syn_tr=None,
                        figname=None):
    """
    Plot adjoint source for multiple windows

    :param adjsrc: adjoint source
    :type adjsrc: pyadjoint.AdjointSource
    :param win_times: list of [start, end] window times; required when
        observed and synthetic traces are supplied
    :param obs_tr: observed data trace (optional)
    :type obs_tr: obspy.Trace
    :param syn_tr: synthetic data trace (optional)
    :type syn_tr: obspy.Trace
    :param figname: output figure file name; if None the figure is shown
        interactively instead of being saved
    :type figname: str
    :return: None
    :raises ValueError: on wrong input types or missing window times
    """
    if not isinstance(adjsrc, AdjointSource):
        raise ValueError("Input adjsrc should be type of "
                         "pyadjoint.AdjointSource")

    if obs_tr is None or syn_tr is None:
        # Without data traces only the adjoint source itself is plotted.
        plot_only_adjoint(adjsrc, win_times)
    else:
        if not isinstance(obs_tr, Trace):
            raise ValueError("Input obs_tr should be type of obspy.Trace")
        if not isinstance(syn_tr, Trace):
            raise ValueError("Input syn_tr should be type of obspy.Trace")
        if win_times is None:
            raise ValueError("Input win_tims should be specified as time "
                             "of windows")
        plot_adjoint_and_data(adjsrc, win_times, obs_tr, syn_tr)

    if figname is None:
        plt.show()
    else:
        plt.savefig(figname)
Wenjie: If you are new to Python, [anaconda](https://www.continuum.io/downloads) is recommended. Please download the newest version (>= Anaconda2 - 2.5.0), since it already contains many useful Python packages, such as pip, numpy and scipy. Older versions are not recommended since they usually ship compilers, like gfortran and gcc; it is always better to use the compilers coming from your system rather than the very old ones embedded in anaconda. If you are an expert in Python, please choose whichever way you like.
6. Install pytomo3d.
def test_least_square_error():
    """Doubling a signal always yields a least-squares error of
    1/sqrt(2), independent of the actual waveform."""
    expected = 1 / np.sqrt(2)
    for data in (np.array([1, 2, 3]), np.random.random(10)):
        scaled = 2 * data
        npt.assert_almost_equal(ct.least_squre_error(data, scaled),
                                expected)
def check_in_range(value, vranges):
    """Raise ValueError unless ``value`` lies within ``vranges``.

    :param value: number to validate
    :param vranges: two-element sequence giving the bounds (either order)
    :raises ValueError: if value is outside the range
    """
    if vranges[0] > vranges[1]:
        vmin = vranges[1]
        vmax = vranges[0]
    else:
        vmin = vranges[0]
        vmax = vranges[1]

    if value < vmin or value > vmax:
        raise ValueError("Value(%f) not in range: %s" % (value, vranges))


def write_stations_file(sta_dict, filename="STATIONS"):
    """
    Write station information out to a txt file(in SPECFEM FORMAT)

    :param sta_dict: the dict contains station locations information.
        The key should be "network.station", like "II.AAK".
        The value are the list of
        [latitude, longitude, elevation_in_m, depth_in_m].
    :type sta_dict: dict
    :param filename: the output filename for STATIONS file.
    :type filename: str
    """
    with open(filename, 'w') as fh:
        od = collections.OrderedDict(sorted(sta_dict.items()))
        # Bug fix: ``iteritems()`` only exists on Python 2 dicts;
        # ``items()`` behaves identically here and works on Python 3 too.
        for _sta_id, _sta in od.items():
            network, station = _sta_id.split(".")
            _lat = _sta[0]
            _lon = _sta[1]
            # Reject obviously invalid coordinates before writing.
            check_in_range(_lat, [-90.1, 90.1])
            check_in_range(_lon, [-180.1, 180.1])
            fh.write("%-9s %5s %15.4f %12.4f %10.1f %6.1f\n"
                     % (station, network, _lat, _lon, _sta[2], _sta[3]))
selected_number_of_channels=3) 84 | 85 | nw = Network(network, stations=[sta, ], total_number_of_stations=1, 86 | selected_number_of_stations=1) 87 | 88 | inv = Inventory([nw, ], source="SPECFEM3D_GLOBE", sender="Princeton", 89 | created=UTCDateTime.now()) 90 | 91 | return inv 92 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/tests/test_io.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import pytomo3d.adjoint.io as adj_io 4 | import pytest 5 | import pyadjoint 6 | 7 | 8 | def _upper_level(path, nlevel=4): 9 | """ 10 | Go the nlevel dir up 11 | """ 12 | for i in range(nlevel): 13 | path = os.path.dirname(path) 14 | return path 15 | 16 | 17 | # Most generic way to get the data folder path. 18 | TESTBASE_DIR = _upper_level(os.path.abspath( 19 | inspect.getfile(inspect.currentframe())), 4) 20 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 21 | 22 | obsfile = os.path.join(DATA_DIR, "proc", "IU.KBL.obs.proc.mseed") 23 | synfile = os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed") 24 | winfile = os.path.join(DATA_DIR, "window", "IU.KBL..BHR.window.json") 25 | 26 | 27 | # @pytest.fixture 28 | # def load_config_waveform(): 29 | # config_file = os.path.join(DATA_DIR, "adjoint", 30 | # "waveform.adjoint.config.yaml") 31 | # return adj_utils.load_adjoint_config_yaml(config_file) 32 | 33 | 34 | # @pytest.fixture 35 | # def load_config_traveltime(): 36 | # config_file = os.path.join(DATA_DIR, "adjoint", 37 | # "cc_traveltime.adjoint.config.yaml") 38 | # return adj_utils.load_adjoint_config_yaml(config_file) 39 | 40 | 41 | @pytest.fixture 42 | def load_config_multitaper(): 43 | config_file = os.path.join(DATA_DIR, "adjoint", 44 | "multitaper.adjoint.config.yaml") 45 | return adj_io.load_adjoint_config_yaml(config_file) 46 | 47 | 48 | # def test_load_adjoint_config_yaml_for_waveform_misfit(): 49 | # config = load_config_waveform() 50 | # 
assert isinstance(config, pyadjoint.Config) 51 | # assert config.max_period == 60.0 52 | # assert config.min_period == 27.0 53 | # assert config.taper_percentage == 0.15 54 | # assert config.taper_type == 'hann' 55 | # assert not config.use_cc_error 56 | 57 | 58 | # def test_load_adjoint_config_yaml_for_traveltime_misfit(): 59 | # config = load_config_traveltime() 60 | # assert isinstance(config, pyadjoint.Config) 61 | # assert config.max_period == 60.0 62 | # assert config.min_period == 27.0 63 | # assert config.ipower_costaper == 10 64 | # assert config.taper_percentage == 0.15 65 | # assert config.taper_type == 'hann' 66 | # assert config.use_cc_error 67 | 68 | 69 | def test_multitaper_config_keys(): 70 | default_args = inspect.getargspec( 71 | pyadjoint.ConfigMultiTaper.__init__).args 72 | default_args.remove("self") 73 | 74 | args = set([ 75 | "max_period", "min_period", "lnpt", "transfunc_waterlevel", 76 | "water_threshold", 77 | "ipower_costaper", "min_cycle_in_window", "taper_type", 78 | "taper_percentage", "mt_nw", "num_taper", "dt_fac", 79 | "phase_step", "err_fac", "dt_max_scale", "measure_type", 80 | "dt_sigma_min", "dlna_sigma_min", "use_cc_error", "use_mt_error"]) 81 | 82 | assert set(default_args) == args 83 | 84 | 85 | def test_load_adjoint_config_yaml_for_multitaper_misfit(): 86 | config = load_config_multitaper() 87 | assert isinstance(config, pyadjoint.ConfigMultiTaper) 88 | assert config.max_period == 60.0 89 | assert config.min_period == 27.0 90 | assert config.lnpt == 15 91 | assert config.transfunc_waterlevel == 1.0e-10 92 | assert config.water_threshold == 0.02 93 | assert config.ipower_costaper == 10 94 | assert config.min_cycle_in_window == 3 95 | assert config.taper_percentage == 0.3 96 | assert config.mt_nw == 4.0 97 | assert config.num_taper == 5 98 | assert config.phase_step == 1.5 99 | assert config.dt_fac == 2.0 100 | assert config.err_fac == 2.5 101 | assert config.dt_max_scale == 3.5 102 | assert config.measure_type == "dt" 103 | 
assert config.taper_type == 'hann' 104 | assert config.dt_sigma_min == 1.0 105 | assert config.dlna_sigma_min == 0.5 106 | assert config.use_cc_error 107 | assert not config.use_mt_error 108 | -------------------------------------------------------------------------------- /pytomo3d/window/io.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Methods that handles writing windows 5 | 6 | :copyright: 7 | Wenjie Lei (lei@princeton.edu), 2016 8 | :license: 9 | GNU Lesser General Public License, version 3 (LGPLv3) 10 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 11 | """ 12 | import json 13 | import obspy 14 | import yaml 15 | import pyflex 16 | import numpy as np 17 | 18 | 19 | def load_window_config_yaml(filename): 20 | """ 21 | Load yaml and setup pyflex.Config object 22 | 23 | :param filename: 24 | :return: 25 | """ 26 | with open(filename) as fh: 27 | data = yaml.load(fh) 28 | 29 | if data["min_period"] > data["max_period"]: 30 | raise ValueError("min_period is larger than max_period in config " 31 | "file: %s" % filename) 32 | 33 | return pyflex.Config(**data) 34 | 35 | 36 | def write_txtfile(windows, filename): 37 | """ 38 | Write windows to text file, for a list of windows. Notice that 39 | this method only works on LISTS of windows, for example, 40 | windows on a pair of traces. 
41 | 42 | :param windows: list of windows(from same observed and synthetic) 43 | :type windows: list 44 | :param filename: output filename 45 | :type filename: str 46 | :return: 47 | """ 48 | with open(filename, 'w') as fh: 49 | fh.write("%s\n" % windows[0].channel_id) 50 | fh.write("%d\n" % len(windows)) 51 | for win in windows: 52 | fh.write("%10.2f %10.2f %10.2f %10.3f %10.3f\n" 53 | % (win.relative_starttime, win.relative_endtime, 54 | win.cc_shift, win.dlnA, win.max_cc_value)) 55 | 56 | 57 | def get_json_content(window, simple_mode=True): 58 | """ 59 | Extract information from json to a dict 60 | 61 | :param window: 62 | :return: 63 | """ 64 | # to be comptabile with olde pyflex, which doesn't has 65 | # channel_id_2. If not, assign it with "UNKNOWN" 66 | info = { 67 | "left_index": window.left, 68 | "right_index": window.right, 69 | "center_index": window.center, 70 | "channel_id": window.channel_id, 71 | "time_of_first_sample": window.time_of_first_sample, 72 | "max_cc_value": window.max_cc_value, 73 | "cc_shift_in_samples": window.cc_shift, 74 | "cc_shift_in_seconds": window.cc_shift_in_seconds, 75 | "dlnA": window.dlnA, 76 | "dt": window.dt, 77 | "min_period": window.min_period, 78 | "absolute_starttime": window.absolute_starttime, 79 | "absolute_endtime": window.absolute_endtime, 80 | "relative_starttime": window.relative_starttime, 81 | "relative_endtime": window.relative_endtime, 82 | "window_weight": window.weight} 83 | 84 | if not simple_mode: 85 | info["phase_arrivals"] = window.phase_arrivals 86 | 87 | if "channel_id_2" in dir(window): 88 | info["channel_id_2"] = window.channel_id_2 89 | 90 | return info 91 | 92 | 93 | class WindowEncoder(json.JSONEncoder): 94 | def default(self, obj): 95 | if isinstance(obj, obspy.UTCDateTime): 96 | return str(obj) 97 | # Numpy objects also require explicit handling. 
98 | elif isinstance(obj, np.int64): 99 | return int(obj) 100 | elif isinstance(obj, np.int32): 101 | return int(obj) 102 | elif isinstance(obj, np.float64): 103 | return float(obj) 104 | elif isinstance(obj, np.float32): 105 | return float(obj) 106 | # Let the base class default method raise the TypeError 107 | return json.JSONEncoder.default(self, obj) 108 | 109 | 110 | def write_jsonfile(windows, filename): 111 | """ 112 | Write windows to a json file. Also, this requires windows to be 113 | type of list. 114 | 115 | :param windows: list of windows 116 | :param filename: output filename 117 | :return: 118 | """ 119 | 120 | win_json = [get_json_content(_i) for _i in windows] 121 | with open(filename, 'w') as fh: 122 | j = json.dumps(win_json, cls=WindowEncoder, sort_keys=True, 123 | indent=2, separators=(',', ':')) 124 | try: 125 | fh.write(j) 126 | except TypeError: 127 | fh.write(j.encode()) 128 | -------------------------------------------------------------------------------- /pytomo3d/source/source_weights.py: -------------------------------------------------------------------------------- 1 | # calculate the weight of source based on its location and window counts 2 | import os 3 | import numpy as np 4 | from pprint import pprint 5 | from spaceweight import SpherePoint, SphereDistRel 6 | from pytomo3d.utils.io import dump_json 7 | 8 | 9 | def assign_source_to_points(sources): 10 | points = [] 11 | for event, cat in sources.iteritems(): 12 | origin = cat[0].preferred_origin() 13 | point = SpherePoint(origin.latitude, origin.longitude, tag=event, 14 | weight=1.0) 15 | points.append(point) 16 | 17 | assert len(points) == len(sources) 18 | return points 19 | 20 | 21 | def normalize_source_weights(points, wcounts): 22 | wsum = 0.0 23 | wcounts_sum = 0 24 | for p in points: 25 | wsum += p.weight * wcounts[p.tag] 26 | wcounts_sum += wcounts[p.tag] 27 | 28 | print("The summation of window counts: %d" % wcounts_sum) 29 | print("The iniital summation(weight * 
window_counts): %f" % wsum) 30 | factor = 1.0 / wsum 31 | 32 | weights = {} 33 | for p in points: 34 | weights[p.tag] = p.weight * factor 35 | 36 | # validate 37 | wsum = 0.0 38 | for event in weights: 39 | wsum += wcounts[event] * weights[event] 40 | if not np.isclose(wsum, 1.0): 41 | raise ValueError("Error normalize source weights: %f" % wsum) 42 | print("The normalized sum is: %f" % wsum) 43 | print("Final weights: %s" % weights) 44 | return weights 45 | 46 | 47 | def calculate_source_weights_on_location( 48 | points, search_ratio, plot_flag, outputdir): 49 | """ 50 | :param outputdir: output directory for figures 51 | """ 52 | # set a fake center point 53 | center = SpherePoint(0, 180.0, tag="Center") 54 | weightobj = SphereDistRel(points, center=center) 55 | 56 | if plot_flag: 57 | scan_figname = os.path.join( 58 | outputdir, "source_weights.smart_scan.png") 59 | else: 60 | scan_figname = None 61 | 62 | ref_distance, cond_number = weightobj.smart_scan( 63 | max_ratio=search_ratio, start=0.1, gap=0.2, 64 | drop_ratio=0.95, plot=plot_flag, 65 | figname=scan_figname) 66 | 67 | print("Reference distance and condition number: %f, %f" 68 | % (ref_distance, cond_number)) 69 | 70 | if plot_flag: 71 | map_figname = os.path.join( 72 | outputdir, "source_weights.global_map.pdf") 73 | weightobj.plot_global_map(figname=map_figname, lon0=180.0) 74 | 75 | return ref_distance, cond_number 76 | 77 | 78 | def dump_weights_to_txt(weights, outputfile): 79 | events = weights.keys() 80 | events.sort() 81 | 82 | with open(outputfile, 'w') as fh: 83 | for e in events: 84 | fh.write("%-16s %.10e\n" % (e, weights[e])) 85 | 86 | 87 | def calculate_source_weights(info, param, output_file, _verbose=False): 88 | """ 89 | program which calculates the source weightings for weighting 90 | strategy I, in which case the source weightings needs to be 91 | calculated separately. 
92 | """ 93 | print("=" * 10 + " Param " + "=" * 10) 94 | pprint(param) 95 | sources = {k: v["source"] for k, v in info.iteritems()} 96 | wcounts = {k: v["window_counts"] for k, v in info.iteritems()} 97 | 98 | outputdir = os.path.dirname(output_file) 99 | if not os.path.exists(outputdir): 100 | os.makedirs(outputdir) 101 | 102 | ref_distance = -1.0 103 | cond_num = -1.0 104 | 105 | points = assign_source_to_points(sources) 106 | if param["flag"]: 107 | print("=" * 10 + " Weight source on location " + "=" * 10) 108 | ref_distance, cond_num = calculate_source_weights_on_location( 109 | points, param["search_ratio"], param["flag"], outputdir) 110 | print("=" * 10 + " Normalize weights " + "=" * 10) 111 | weights = normalize_source_weights(points, wcounts) 112 | 113 | # write weights to txt(for summing kernels) 114 | print("=" * 10 + " Write weights " + "=" * 10) 115 | print("Output weight file: %s" % output_file) 116 | dump_weights_to_txt(weights, output_file) 117 | 118 | # generate log file 119 | log_content = {"weights": weights, "reference_distance": ref_distance, 120 | "cond_num": cond_num, "weight_flag": param["flag"], 121 | "serach_ratio": param["search_ratio"]} 122 | outputfn = os.path.join(outputdir, "source_weights.log.json") 123 | print("Output log file: %s" % outputfn) 124 | dump_json(log_content, outputfn) 125 | -------------------------------------------------------------------------------- /pytomo3d/utils/download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | utils for seismic data download 5 | 6 | :copyright: 7 | Wenjie Lei (lei@princeton.edu), 2016 8 | :license: 9 | GNU Lesser General Public License, version 3 (LGPLv3) 10 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 11 | """ 12 | from obspy.clients.fdsn import Client 13 | import os 14 | 15 | 16 | def read_station_file(station_filename): 17 | stations = [] 18 | with open(station_filename, "rt") 
as fh: 19 | for line in fh: 20 | line = line.split() 21 | stations.append((line[1], line[0])) 22 | return stations 23 | 24 | 25 | def _parse_station_id(station_id): 26 | content = station_id.split("_") 27 | if len(content) == 2: 28 | nw, sta = content 29 | loc = "*" 30 | comp = "*" 31 | elif len(content) == 4: 32 | nw, sta, loc, comp = content 33 | else: 34 | raise ValueError("Can't not parse station_id: %s" % station_id) 35 | return nw, sta, loc, comp 36 | 37 | 38 | def download_waveform(stations, starttime, endtime, outputdir=None, 39 | client=None): 40 | """ 41 | download wavefrom data from IRIS data center 42 | 43 | :param stations: list of stations, should be list of station ids, 44 | for example, "II.AAK.00.BHZ". Parts could be replaced by "*", 45 | for example, "II.AAK.*.BH*" 46 | """ 47 | if client is None: 48 | client = Client("IRIS") 49 | 50 | if starttime > endtime: 51 | raise ValueError("Starttime(%s) is larger than endtime(%s)" 52 | % (starttime, endtime)) 53 | 54 | if not os.path.exists(outputdir): 55 | raise ValueError("Outputdir not exists: %s" % outputdir) 56 | 57 | _status = {} 58 | for station_id in stations: 59 | error_code = "None" 60 | network, station, location, channel = _parse_station_id(station_id) 61 | 62 | if outputdir is not None: 63 | filename = os.path.join(outputdir, "%s.mseed" % station_id) 64 | if os.path.exists(filename): 65 | os.remove(filename) 66 | else: 67 | filename = None 68 | 69 | try: 70 | st = client.get_waveforms( 71 | network=network, station=station, location=location, 72 | channel=channel, starttime=starttime, endtime=endtime) 73 | if len(st) == 0: 74 | error_code = "stream empty" 75 | if filename is not None and len(st) > 0: 76 | st.write(filename, format="MSEED") 77 | except Exception as e: 78 | error_code = "Failed to download waveform '%s' due to: %s" \ 79 | % (station_id, str(e)) 80 | print(error_code) 81 | 82 | _status[station_id] = error_code 83 | 84 | return {"stream": st, "status": _status} 85 | 86 | 87 | def 
download_stationxml(stations, starttime, endtime, outputdir=None, 88 | client=None, level="response"): 89 | 90 | if client is None: 91 | client = Client("IRIS") 92 | 93 | if starttime > endtime: 94 | raise ValueError("Starttime(%s) is larger than endtime(%s)" 95 | % (starttime, endtime)) 96 | 97 | if not os.path.exists(outputdir): 98 | raise ValueError("Outputdir not exists: %s" % outputdir) 99 | 100 | _status = {} 101 | for station_id in stations: 102 | error_code = "None" 103 | network, station, location, channel = _parse_station_id(station_id) 104 | 105 | if outputdir is not None: 106 | filename = os.path.join(outputdir, "%s.xml" % station_id) 107 | if os.path.exists(filename): 108 | os.remove(filename) 109 | else: 110 | filename = None 111 | 112 | try: 113 | inv = client.get_stations( 114 | network=network, station=station, location=location, 115 | channel=channel, starttime=starttime, endtime=endtime, 116 | level=level) 117 | if len(inv) == 0: 118 | error_code = "Inventory Empty" 119 | if filename is not None and len(inv) > 0: 120 | inv.write(filename, format="STATIONXML") 121 | except Exception as e: 122 | error_code = "Failed to download StationXML '%s' due to: %s" \ 123 | % (station_id, str(e)) 124 | print(error_code) 125 | 126 | _status[station_id] = error_code 127 | 128 | return {"inventory": inv, "status": _status} 129 | -------------------------------------------------------------------------------- /pytomo3d/source/append_cmtsolution.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Functions that append CMTSOLUTION information into catalog 5 | :copyright: 6 | Wenjie Lei (lei@princeton.edu), 2016 7 | :license: 8 | GNU Lesser General Public License, version 3 (LGPLv3) 9 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 10 | """ 11 | from __future__ import print_function, division 12 | import obspy 13 | from obspy.core.event.source import 
ResourceIdentifier 14 | from obspy.core.event import Catalog, Event 15 | from obspy.core.event import CreationInfo 16 | 17 | 18 | def _validator(event, cmt_origin, cmt_mag, cmt_focal): 19 | if event.preferred_origin() != cmt_origin: 20 | raise ValueError("preferred_origin_id wrong, not the same as the " 21 | "new added cmt") 22 | if event.preferred_magnitude() != cmt_mag: 23 | raise ValueError("preferred_magnitude_id wrong, not the same as " 24 | "the new added cmt") 25 | if event.preferred_focal_mechanism() != cmt_focal: 26 | raise ValueError("preferred_focal_mechanism_id wrong, not the same " 27 | "as the new added cmt") 28 | 29 | 30 | def prepare_cmt_origin(cmt, tag, creation_info): 31 | cmt_origin = None 32 | # locate cmt origin 33 | for _origin in cmt.origins: 34 | if str(_origin.resource_id).endswith("origin#cmt"): 35 | cmt_origin = _origin 36 | break 37 | 38 | if cmt_origin is None: 39 | raise ValueError("No cmt origin found") 40 | 41 | new_id = str(cmt_origin.resource_id).rstrip() + "#%s" % tag 42 | # new_id = str(cmt_origin.resource_id).replace("origin#cmt", 43 | # "origin#%s" % tag) 44 | cmt_origin.resource_id = ResourceIdentifier(new_id) 45 | cmt_origin.creation_info = creation_info 46 | return cmt_origin 47 | 48 | 49 | def prepare_cmt_mag(cmt, tag, origin_id, creation_info): 50 | cmt_mag = None 51 | for _mag in cmt.magnitudes: 52 | if _mag.magnitude_type == "mw": 53 | cmt_mag = _mag 54 | 55 | if cmt_mag is None: 56 | raise ValueError("No cmt Mw mag found") 57 | 58 | new_id = str(cmt_mag.resource_id).strip() + "#%s" % tag 59 | cmt_mag.resource_id = ResourceIdentifier(new_id) 60 | cmt_mag.origin_id = origin_id 61 | cmt_mag.creation_info = creation_info 62 | return cmt_mag 63 | 64 | 65 | def prepare_cmt_focal(cmt, tag, origin_id, mag_id, creation_info): 66 | 67 | cmt_focal = None 68 | for _focal in cmt.focal_mechanisms: 69 | if "cmtsolution" in str(_focal.resource_id): 70 | cmt_focal = _focal 71 | break 72 | 73 | if cmt_focal is None: 74 | raise 
ValueError("no cmt focal found") 75 | 76 | focal_id = str(cmt_focal.resource_id).strip() + "#%s" % tag 77 | cmt_focal.resource_id = ResourceIdentifier(focal_id) 78 | cmt_focal.creation_info = creation_info 79 | tensor = cmt_focal.moment_tensor 80 | tensor_id = str(tensor.resource_id).strip() + "#%s" % tag 81 | tensor.resource_id = ResourceIdentifier(tensor_id) 82 | tensor.derived_origin_id = origin_id 83 | tensor.moment_magnitude_id = mag_id 84 | tensor.creation_info = creation_info 85 | return cmt_focal 86 | 87 | 88 | def _parse_event(event): 89 | if isinstance(event, str): 90 | event = obspy.read_events(event)[0] 91 | elif isinstance(event, Catalog): 92 | event = event[0] 93 | elif isinstance(event, Event): 94 | event = event 95 | else: 96 | raise TypeError("Input event info must be earthquake source file," 97 | "obspy.Catalog or obspy.Event") 98 | return event 99 | 100 | 101 | def append_cmt_to_catalog(event_origin, cmt_to_add, tag="new_cmt", 102 | author="Princeton GATG", 103 | change_preferred_id=True): 104 | """ 105 | Add cmt to event. The cmt.resource_id will be appened tag to avoid 106 | tag duplication problem in event. 107 | :param event: the event that you want to add cmt in. 108 | :type event: str, obspy.core.event.Event or obspy.core.event.Catalog 109 | :param cmt: the cmt that you want to add to event. 
110 | :type event: str, obspy.core.event.Event or obspy.core.event.Catalog 111 | :param change_preferred_id: change all preferred_id to the new added cmt 112 | :type change_preferred_id: bool 113 | :return: obspy.Catalog 114 | """ 115 | event = _parse_event(event_origin) 116 | cmt_event = _parse_event(cmt_to_add) 117 | 118 | if not isinstance(tag, str): 119 | raise TypeError("tag(%s) should be type of str" % type(tag)) 120 | 121 | if not isinstance(author, str): 122 | raise TypeError("author(%s) should be type of str" % type(author)) 123 | 124 | # User defined creation information 125 | creation_info = CreationInfo(author=author, version=tag) 126 | 127 | # add cmt origin 128 | cmt_origin = prepare_cmt_origin(cmt_event, tag, creation_info) 129 | event.origins.append(cmt_origin) 130 | 131 | # add cmt magnitude 132 | cmt_mag = prepare_cmt_mag(cmt_event, tag, cmt_origin.resource_id, 133 | creation_info) 134 | event.magnitudes.append(cmt_mag) 135 | 136 | # add cmt focal mechanism 137 | cmt_focal = prepare_cmt_focal(cmt_event, tag, cmt_origin.resource_id, 138 | cmt_mag.resource_id, creation_info) 139 | event.focal_mechanisms.append(cmt_focal) 140 | 141 | # change preferred id if needed 142 | if change_preferred_id: 143 | event.preferred_origin_id = str(cmt_origin.resource_id) 144 | event.preferred_magnitude_id = str(cmt_mag.resource_id) 145 | event.preferred_focal_mechanism_id = str(cmt_focal.resource_id) 146 | _validator(event, cmt_origin, cmt_mag, cmt_focal) 147 | 148 | new_cat = Catalog() 149 | new_cat.append(event) 150 | 151 | return new_cat 152 | -------------------------------------------------------------------------------- /pytomo3d/signal/compare_trace.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Functions that compare two traces, return the measurements metrics. 
5 | 6 | :copyright: 7 | Wenjie Lei (lei@princeton.edu), 2016 8 | :license: 9 | GNU Lesser General Public License, version 3 (LGPLv3) 10 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 11 | """ 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | from obspy import Trace 15 | 16 | 17 | def least_squre_error(data1, data2): 18 | """ 19 | waveform difference between data1 and data2 20 | :param data1: 21 | :param data2: 22 | :return: 23 | """ 24 | # least square test 25 | err_max = 0.0 26 | norm = np.linalg.norm 27 | err = norm(data1 - data2) / np.sqrt(norm(data1) * norm(data2)) 28 | err_max = max(err, err_max) 29 | return err_max 30 | 31 | 32 | def cross_correlation(data1, data2): 33 | """ 34 | :param data1: 35 | :param data2: 36 | :return: 37 | """ 38 | # correlation test 39 | corr_min = 1.0 40 | corr_mat = np.corrcoef(data1, data2) 41 | corr = np.min(corr_mat) 42 | corr_min = min(corr, corr_min) 43 | return corr_min 44 | 45 | 46 | def trace_length(tr): 47 | return (tr.stats.npts-1) * tr.stats.delta 48 | 49 | 50 | def calculate_misfit(_tr1, _tr2, taper_flag=True, taper_percentage=0.05, 51 | correlation_flag=True): 52 | """ 53 | Calculate the misfit between two traces 54 | :param tr1: trace 1 55 | :type tr1: Obspy.Trace 56 | :param tr2: trace 2 57 | :type tr2: Obspy.Trace 58 | :param taper_flag: taper the seismogram or not 59 | :type taper_flag: bool 60 | :param taper_percentage: the taper percentage 61 | :type taper_percentage: float 62 | """ 63 | if not isinstance(_tr1, Trace): 64 | raise TypeError("Input tr1(type:%s) must be type of obspy.Trace" 65 | % type(_tr1)) 66 | if not isinstance(_tr2, Trace): 67 | raise TypeError("Input tr2(type:%s) must be type of obspy.Trace" 68 | % type(_tr2)) 69 | 70 | tr1 = _tr1.copy() 71 | tr2 = _tr2.copy() 72 | 73 | starttime = max(tr1.stats.starttime, tr2.stats.starttime) 74 | endtime = min(tr1.stats.endtime, tr2.stats.endtime) 75 | sampling_rate = min(tr1.stats.sampling_rate, tr2.stats.sampling_rate) 76 | npts = 
int((endtime - starttime) * sampling_rate) 77 | 78 | tr1.interpolate(sampling_rate, starttime=starttime, npts=npts) 79 | tr2.interpolate(sampling_rate, starttime=starttime, npts=npts) 80 | 81 | if taper_flag: 82 | tr1.taper(max_percentage=taper_percentage, type='hann') 83 | tr2.taper(max_percentage=taper_percentage, type='hann') 84 | 85 | corr_min = cross_correlation(tr1.data, tr2.data) 86 | err_max = least_squre_error(tr1.data, tr2.data) 87 | 88 | # coverage 89 | tr1_cover = trace_length(tr1) / trace_length(_tr1) 90 | tr2_cover = trace_length(tr2) / trace_length(_tr2) 91 | 92 | # amplitude diff 93 | twdiff = [i / sampling_rate for i in range(npts)] 94 | amp_ref = np.sum(np.abs(tr1.data) + np.abs(tr2.data)) / (2 * npts) 95 | wdiff = (tr1.data - tr2.data) / amp_ref 96 | 97 | return {"tr1_coverage": tr1_cover, "tr2_coverage": tr2_cover, 98 | "correlation": corr_min, "error": err_max, 99 | "time_array": twdiff, "diff_array": wdiff} 100 | 101 | 102 | def plot_two_trace(tr1, tr2, trace1_tag="trace 1", trace2_tag="trace 2", 103 | figname=None): 104 | 105 | if not isinstance(tr1, Trace): 106 | raise TypeError("Input tr1(type:%s) must be type of obspy.Trace" 107 | % type(tr1)) 108 | if not isinstance(tr2, Trace): 109 | raise TypeError("Input tr2(type:%s) must be type of obspy.Trace" 110 | % type(tr2)) 111 | 112 | fig = plt.figure(figsize=(20, 10)) 113 | 114 | # subplot 1 115 | plt.subplot(211) 116 | t1 = tr1.stats.starttime 117 | t2 = tr2.stats.starttime 118 | t_ref = max(t1, t2) 119 | 120 | bt = t1 - t_ref 121 | times1 = [bt + i * tr1.stats.delta for i in range(tr1.stats.npts)] 122 | plt.plot(times1, tr1.data, linestyle='-', color='r', marker="*", 123 | markersize=3, label=trace1_tag, markerfacecolor='r', 124 | markeredgecolor='none') 125 | 126 | bt = t2 - t_ref 127 | times2 = [bt + i * tr2.stats.delta for i in range(tr2.stats.npts)] 128 | plt.plot(times2, tr2.data, '-', color="b", linewidth=0.7, 129 | label=trace2_tag) 130 | 131 | plt.xlim([min(times1[0], times2[0]), 
max(times1[-1], times2[-1])]) 132 | plt.legend(loc="upper right") 133 | 134 | xmax = plt.xlim()[1] 135 | ymin = plt.ylim()[0] 136 | xpos = 0.7 * xmax 137 | ypos = 0.4 * ymin 138 | dypos = abs(0.1 * ymin) 139 | 140 | plt.text(xpos, ypos, "trace id:['%s', '%s']" % (tr1.id, tr2.id)) 141 | ypos -= dypos 142 | plt.text(xpos, ypos, "reference time: %s" % t_ref) 143 | ypos -= dypos 144 | plt.text(xpos, ypos, "detaT: [%6.3f, %6.3f]" % (tr1.stats.delta, 145 | tr2.stats.delta)) 146 | 147 | # calcualte misfit 148 | res = calculate_misfit(tr1, tr2) 149 | 150 | ypos -= dypos 151 | plt.text(xpos, ypos, "coverage:[%6.2f%% %6.2f%%]" 152 | % (res["tr1_coverage"] * 100, res["tr2_coverage"] * 100)) 153 | ypos -= dypos 154 | plt.text(xpos, ypos, "min correlation: % 6.4f" % res["correlation"]) 155 | ypos -= dypos 156 | plt.text(xpos, ypos, "max error: % 6.4f" % res["error"]) 157 | plt.grid() 158 | 159 | # subplot 2 160 | plt.subplot(212) 161 | plt.plot(res["time_array"], res["diff_array"], 'g', 162 | label="amplitude difference") 163 | plt.legend() 164 | plt.xlim([min(times1[0], times2[0]), max(times1[-1], times2[-1])]) 165 | plt.grid() 166 | 167 | plt.tight_layout() 168 | if figname is None: 169 | plt.show() 170 | else: 171 | plt.savefig(figname) 172 | 173 | plt.close(fig) 174 | -------------------------------------------------------------------------------- /pytomo3d/signal/tests/test_rotate_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import numpy as np 4 | import numpy.testing as npt 5 | import pytomo3d.signal.rotate_utils as rotate 6 | from obspy import read, read_inventory 7 | 8 | 9 | def _upper_level(path, nlevel=4): 10 | """ 11 | Go the nlevel dir up 12 | """ 13 | for i in range(nlevel): 14 | path = os.path.dirname(path) 15 | return path 16 | 17 | 18 | # Most generic way to get the data folder path. 
19 | TESTBASE_DIR = _upper_level(os.path.abspath( 20 | inspect.getfile(inspect.currentframe())), 4) 21 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 22 | 23 | staxmlfile = os.path.join(DATA_DIR, "stationxml", "IU.KBL.xml") 24 | teststaxml = read_inventory(staxmlfile) 25 | testquakeml = os.path.join(DATA_DIR, "quakeml", "C201009031635A.xml") 26 | 27 | obsfile = os.path.join(DATA_DIR, "raw", "IU.KBL.obs.mseed") 28 | testobs = read(obsfile) 29 | synfile = os.path.join(DATA_DIR, "raw", "IU.KBL.syn.mseed") 30 | testsyn = read(synfile) 31 | small_mseed = os.path.join(DATA_DIR, "raw", "BW.RJOB.obs.mseed") 32 | 33 | 34 | def test_check_orthogonality(): 35 | 36 | azi1 = 1 37 | azi2 = 91 38 | assert rotate.check_orthogonality(azi1, azi2) == "left-hand" 39 | assert rotate.check_orthogonality(azi2, azi1) == "right-hand" 40 | 41 | azi1 = 315 42 | azi2 = 45 43 | assert rotate.check_orthogonality(azi1, azi2) == "left-hand" 44 | assert rotate.check_orthogonality(azi2, azi1) == "right-hand" 45 | 46 | azi1 = 405 47 | azi2 = 495 48 | assert rotate.check_orthogonality(azi1, azi2) == "left-hand" 49 | assert rotate.check_orthogonality(azi2, azi1) == "right-hand" 50 | 51 | azi1 = 46 52 | azi2 = 137 53 | assert not rotate.check_orthogonality(azi1, azi2) 54 | assert not rotate.check_orthogonality(azi2, azi1) 55 | 56 | azi1 = 46 57 | azi2 = 314 58 | assert not rotate.check_orthogonality(azi1, azi2) 59 | assert not rotate.check_orthogonality(azi2, azi1) 60 | 61 | 62 | def test_check_orthogonality_2(): 63 | azi1 = -180 64 | azi2 = -90 65 | assert rotate.check_orthogonality(azi1, azi2) == "left-hand" 66 | assert rotate.check_orthogonality(azi2, azi1) == "right-hand" 67 | 68 | azi1 = -315 69 | azi2 = 135 70 | assert rotate.check_orthogonality(azi1, azi2) == "left-hand" 71 | assert rotate.check_orthogonality(azi2, azi1) == "right-hand" 72 | 73 | azi1 = -181 74 | azi2 = -90 75 | assert not rotate.check_orthogonality(azi1, azi2) 76 | assert not rotate.check_orthogonality(azi2, azi1) 77 
| 78 | 79 | def test_rotate_certain_angle(): 80 | 81 | d1 = np.array([1.0, 0.0]) 82 | d2 = np.array([0.0, 1.0]) 83 | 84 | dnew1, dnew2 = rotate.rotate_certain_angle(d1, d2, 30.0) 85 | 86 | dnew1_true = np.array([np.sqrt(3)/2.0, 0.5]) 87 | dnew2_true = np.array([-0.5, np.sqrt(3)/2.0]) 88 | npt.assert_allclose(dnew1, dnew1_true) 89 | npt.assert_allclose(dnew2, dnew2_true) 90 | 91 | 92 | def test_rotate_certain_angle_2(): 93 | 94 | d1 = np.array([1.0, 0.0]) 95 | d2 = np.array([0.0, 1.0]) 96 | 97 | dnew1, dnew2 = rotate.rotate_certain_angle(d1, d2, 90.0) 98 | npt.assert_array_almost_equal(dnew1, [0.0, 1.0]) 99 | npt.assert_array_almost_equal(dnew2, [-1.0, 0.0]) 100 | 101 | dnew1, dnew2 = rotate.rotate_certain_angle(d1, d2, 180.0) 102 | npt.assert_array_almost_equal(dnew1, [-1.0, 0.0]) 103 | npt.assert_array_almost_equal(dnew2, [0.0, -1.0]) 104 | 105 | dnew1, dnew2 = rotate.rotate_certain_angle(d1, d2, 270.0) 106 | npt.assert_array_almost_equal(dnew1, [0.0, -1.0]) 107 | npt.assert_array_almost_equal(dnew2, [1.0, 0.0]) 108 | 109 | dnew1, dnew2 = rotate.rotate_certain_angle(d1, d2, 360.0) 110 | npt.assert_array_almost_equal(dnew1, [1.0, 0.0]) 111 | npt.assert_array_almost_equal(dnew2, [0.0, 1.0]) 112 | 113 | 114 | def test_rotate_12_ne(): 115 | 116 | d1 = np.array([1.0, 0.0]) 117 | d2 = np.array([0.0, 1.0]) 118 | 119 | n, e = rotate.rotate_12_ne(d1, d2, 30, 120) 120 | 121 | n_true = np.array([np.sqrt(3)/2.0, -0.5]) 122 | e_true = np.array([0.5, np.sqrt(3)/2.0]) 123 | npt.assert_allclose(n, n_true) 124 | npt.assert_allclose(e, e_true) 125 | 126 | 127 | def test_rotate_ne_12(): 128 | 129 | n = np.array([1.0, 0.0]) 130 | e = np.array([0.0, 1.0]) 131 | 132 | dnew1, dnew2 = rotate.rotate_ne_12(n, e, 30, 120) 133 | 134 | assert rotate.check_orthogonality(30, 120) == "left-hand" 135 | 136 | dnew1_true = np.array([np.sqrt(3)/2.0, 0.5]) 137 | dnew2_true = np.array([-0.5, np.sqrt(3)/2.0]) 138 | npt.assert_allclose(dnew1, dnew1_true) 139 | npt.assert_allclose(dnew2, dnew2_true) 140 
def test_rotate_ne_and_12():
    """Test that rotate_ne_12 and rotate_12_ne are reversible."""
    n = np.array([1.0, 0.0])
    e = np.array([0.0, 1.0])

    d1, d2 = rotate.rotate_ne_12(n, e, 30, 120)

    n_new, e_new = rotate.rotate_12_ne(d1, d2, 30, 120)

    npt.assert_allclose(n, n_new)
    npt.assert_allclose(e, e_new)


def test_rotate_12_rt():
    """rotate_12_rt agrees with going through the NE system explicitly."""
    d1 = np.array([1.0, 0.0])
    d2 = np.array([0.0, 1.0])
    azi1 = 30
    azi2 = 120
    baz = 240

    r, t = rotate.rotate_12_rt(d1, d2, baz, azi1, azi2)

    n, e = rotate.rotate_12_ne(d1, d2, azi1, azi2)
    r_true, t_true = rotate.rotate_ne_12(n, e, baz - 180, baz - 90)

    npt.assert_allclose(r, r_true)
    npt.assert_allclose(t, t_true)


def test_rotate_rt_12():
    """rotate_rt_12 agrees with going through the NE system explicitly."""
    r = np.array([1.0, 0.0])
    t = np.array([0.0, 1.0])
    azi1 = 30
    azi2 = 120
    baz = 240

    d1, d2 = rotate.rotate_rt_12(r, t, baz, azi1, azi2)

    n, e = rotate.rotate_12_ne(r, t, baz - 180, baz - 90)
    d1_true, d2_true = rotate.rotate_ne_12(n, e, azi1, azi2)

    # Bug fix: the reference values above were computed but never
    # compared, so this test could not fail. Assert them explicitly.
    npt.assert_allclose(d1, d1_true)
    npt.assert_allclose(d2, d2_true)


def test_rotate_rt_and_12():
    """rotate_rt_12 followed by rotate_12_rt recovers the input."""
    r = np.array([1.0, 0.0])
    t = np.array([0.0, 1.0])
    azi1 = 30
    azi2 = 120
    baz = 240

    d1, d2 = rotate.rotate_rt_12(r, t, baz, azi1, azi2)
    r_new, t_new = rotate.rotate_12_rt(d1, d2, baz, azi1, azi2)

    npt.assert_allclose(r, r_new)
    npt.assert_allclose(t, t_new)
5 | """ 6 | from .utils import write_stations_file 7 | 8 | 9 | def extract_usable_stations_from_one_period(measures): 10 | """ Extract usable stations and channels from measurements file """ 11 | stations = [] 12 | channels = [] 13 | for sta, sta_info in measures.iteritems(): 14 | n_measure_sta = 0 15 | # append each usable channel 16 | for chan, chan_info in sta_info.iteritems(): 17 | n_measure_chan = len(chan_info) 18 | if n_measure_chan > 0: 19 | channels.append(chan) 20 | n_measure_sta += n_measure_chan 21 | # append each usable station 22 | if n_measure_sta > 0: 23 | stations.append(sta) 24 | 25 | return stations, channels 26 | 27 | 28 | def extract_usable_stations_from_measurements(measurements): 29 | stations = set() 30 | channels = set() 31 | 32 | for period, measures in measurements.iteritems(): 33 | stations_one_period, channels_one_period = \ 34 | extract_usable_stations_from_one_period(measures) 35 | print("[Period:%s]Number of stations and channels: %d, %d" % 36 | (period, len(stations_one_period), len(channels_one_period))) 37 | stations = stations.union(set(stations_one_period)) 38 | channels = channels.union(set(channels_one_period)) 39 | 40 | print("Total number of station and channels: %d, %d" % 41 | (len(stations), len(channels))) 42 | 43 | return stations, channels 44 | 45 | 46 | def extract_one_station(chan, stations): 47 | nw, sta, loc, comp = chan.split(".") 48 | if comp[-1] == "Z": 49 | # directly get the station information 50 | info = stations[chan] 51 | else: 52 | # if horizontal components, first try BHE and then try BH1 53 | new_id_e = "%s.%s.%s.%sE" % (nw, sta, loc, comp[0:2]) 54 | new_id_1 = "%s.%s.%s.%s1" % (nw, sta, loc, comp[0:2]) 55 | if new_id_e in stations: 56 | info = stations[new_id_e] 57 | elif new_id_1 in stations: 58 | info = stations[new_id_1] 59 | else: 60 | raise ValueError("Can not locate station(%s) in staitons file" 61 | % (chan)) 62 | return info 63 | 64 | 65 | def prepare_adjoint_station_information(usable_channels, 
stations): 66 | """ Based on usable channels, extract adjoint station information """ 67 | adjoint_stations_info = {} 68 | 69 | for chan_id in usable_channels: 70 | nw, sta, loc, comp = chan_id.split(".") 71 | info = extract_one_station(chan_id, stations) 72 | sta_id = "%s.%s" % (nw, sta) 73 | if sta_id not in adjoint_stations_info: 74 | # if no previous, just add 75 | adjoint_stations_info[sta_id] = info 76 | else: 77 | # if previous, check if current is Z component 78 | if comp[-1] == "Z": 79 | adjoint_stations_info[sta_id] = info 80 | else: 81 | continue 82 | 83 | adjoint_stations = {} 84 | for sta_id, sta_info in adjoint_stations_info.iteritems(): 85 | adjoint_stations[sta_id] = [ 86 | sta_info["latitude"], sta_info["longitude"], 87 | sta_info["elevation"], sta_info["depth"]] 88 | 89 | return adjoint_stations 90 | 91 | 92 | def check_adjoint_stations_consistency(adjoint_stations, usable_stations): 93 | if len(adjoint_stations) != len(usable_stations): 94 | raise ValueError("Inconsistent between adjoint_stations and " 95 | "usable_stations") 96 | 97 | set1 = set(adjoint_stations.keys()) 98 | set2 = set(usable_stations) 99 | if set1 != set2: 100 | print("Stations more: %s" % (set1 - set2)) 101 | print("Stations less: %s" % (set2 - set1)) 102 | raise ValueError("Inconsistent between adjoint_stations and " 103 | "usable_stations") 104 | 105 | print("Validation check passed") 106 | 107 | 108 | def benchmark_stations(adjoint_stations): 109 | """ 110 | Benchmark a few common stations(latitude and longitude) 111 | just to check if it is correct 112 | """ 113 | threshold = 0.01 114 | 115 | def is_close(values, true_values): 116 | for _v1, _v2 in zip(values, true_values): 117 | if abs(_v1 - _v2) > threshold: 118 | return False 119 | return True 120 | 121 | true_values = { 122 | "II.AAK": [42.6375, 74.4942], "II.ABPO": [-19.0180, 47.2290], 123 | "II.EFI": [-51.6753, -58.0637], "IU.AFI": [-13.9093, -171.7773], 124 | "IU.ANMO": [34.9460, -106.4571], "G.CAN": [-35.3187, 
148.9963] 125 | } 126 | 127 | npass = 0 128 | nfail = 0 129 | for key in true_values: 130 | if key not in adjoint_stations: 131 | continue 132 | if not is_close(adjoint_stations[key], true_values[key]): 133 | print("Fails at benchmark station %s" % key) 134 | nfail += 1 135 | else: 136 | npass += 1 137 | 138 | if nfail != 0: 139 | raise ValueError("Number of benchmark fails: %d" % nfail) 140 | 141 | return npass 142 | 143 | 144 | def generate_adjoint_stations(measurements, stations, outputfn, 145 | benchmark_flag=True): 146 | """ 147 | This program takes in measurements and stations, and output 148 | the STATIONS_ADJOINT for stations has adjoint measurements 149 | 150 | :param measurements: dict contains measurements information 151 | from several period bands, generated by 152 | pypaw-measure_adjoint_asdf 153 | :type measurements: dict 154 | :param stations: dict contains station information, including 155 | station location and instrument information for all 156 | channels 157 | :type stations: dict 158 | :param outputfn: output STATIONS file 159 | :type outputfn: str 160 | :param benchmark_flag: whether benchmark some stations with 161 | standard locations values 162 | :type benchmark_flag: bool 163 | """ 164 | usable_stations, usable_channels = \ 165 | extract_usable_stations_from_measurements(measurements) 166 | 167 | adjoint_stations = prepare_adjoint_station_information( 168 | usable_channels, stations) 169 | 170 | check_adjoint_stations_consistency(adjoint_stations, usable_stations) 171 | 172 | if benchmark_flag: 173 | npass = benchmark_stations(adjoint_stations) 174 | print("Benchmark passed at level: %d" % npass) 175 | 176 | print("Write output station file in: %s" % outputfn) 177 | write_stations_file(adjoint_stations, outputfn) 178 | -------------------------------------------------------------------------------- /tests/data/window/IU.KBL..BHR.window.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | 
"absolute_endtime":"2010-09-03T16:52:21.900000Z", 4 | "absolute_starttime":"2010-09-03T16:50:52.400000Z", 5 | "cc_shift_in_samples":1, 6 | "cc_shift_in_seconds":0.5, 7 | "center_index":1878, 8 | "channel_id":"IU.KBL..BHR", 9 | "channel_id_2":"IU.KBL.S3.MXR", 10 | "dlnA":0.48552751541137695, 11 | "dt":0.5, 12 | "left_index":1789, 13 | "max_cc_value":0.8778460621833801, 14 | "min_period":27.0, 15 | "phase_arrivals":[ 16 | { 17 | "dT/dD":4.4456692671514837, 18 | "phase_name":"Pdiff", 19 | "take-off angle":13.434087677707552, 20 | "time":921.57164021473227 21 | }, 22 | { 23 | "dT/dD":4.4456692671514837, 24 | "phase_name":"pPdiff", 25 | "take-off angle":-13.434087677707552, 26 | "time":925.59656472549852 27 | }, 28 | { 29 | "dT/dD":4.4456692671514837, 30 | "phase_name":"sPdiff", 31 | "take-off angle":-7.9665437648996926, 32 | "time":927.01890281265969 33 | } 34 | ], 35 | "relative_endtime":984.0, 36 | "relative_starttime":894.5, 37 | "right_index":1968, 38 | "time_of_first_sample":"2010-09-03T16:35:57.900000Z", 39 | "window_weight":2.9098971320523157 40 | }, 41 | { 42 | "absolute_endtime":"2010-09-03T16:57:31.900000Z", 43 | "absolute_starttime":"2010-09-03T16:54:02.400000Z", 44 | "cc_shift_in_samples":-1, 45 | "cc_shift_in_seconds":-0.5, 46 | "center_index":2378, 47 | "channel_id":"IU.KBL..BHR", 48 | "channel_id_2":"IU.KBL.S3.MXR", 49 | "dlnA":0.48230868577957153, 50 | "dt":0.5, 51 | "left_index":2169, 52 | "max_cc_value":0.8720169067382812, 53 | "min_period":27.0, 54 | "phase_arrivals":[ 55 | { 56 | "dT/dD":1.9203947915200748, 57 | "phase_name":"PKIKP", 58 | "take-off angle":5.7597903672055866, 59 | "time":1133.7321324662771 60 | }, 61 | { 62 | "dT/dD":1.9667534143239638, 63 | "phase_name":"PKiKP", 64 | "take-off angle":5.8993201347274269, 65 | "time":1133.8825697541524 66 | }, 67 | { 68 | "dT/dD":1.9204256128819468, 69 | "phase_name":"pPKIKP", 70 | "take-off angle":-5.7598831215868769, 71 | "time":1137.8492259219263 72 | }, 73 | { 74 | "dT/dD":1.9666201058462494, 75 | 
"phase_name":"pPKiKP", 76 | "take-off angle":-5.8989188541267081, 77 | "time":1137.9986120246617 78 | }, 79 | { 80 | "dT/dD":1.920419365467769, 81 | "phase_name":"sPKIKP", 82 | "take-off angle":-3.4323249861736018, 83 | "time":1139.2526820380981 84 | }, 85 | { 86 | "dT/dD":1.9666471316624559, 87 | "phase_name":"sPKiKP", 88 | "take-off angle":-3.5150494831133834, 89 | "time":1139.4022812356418 90 | }, 91 | { 92 | "dT/dD":6.8044611381118356, 93 | "phase_name":"PP", 94 | "take-off angle":20.829909599926395, 95 | "time":1226.4648460852875 96 | } 97 | ], 98 | "relative_endtime":1294.0, 99 | "relative_starttime":1084.5, 100 | "right_index":2588, 101 | "time_of_first_sample":"2010-09-03T16:35:57.900000Z", 102 | "window_weight":6.7662052578396272 103 | }, 104 | { 105 | "absolute_endtime":"2010-09-03T17:04:37.900000Z", 106 | "absolute_starttime":"2010-09-03T17:02:37.900000Z", 107 | "cc_shift_in_samples":5, 108 | "cc_shift_in_seconds":2.5, 109 | "center_index":3320, 110 | "channel_id":"IU.KBL..BHR", 111 | "channel_id_2":"IU.KBL.S3.MXR", 112 | "dlnA":0.21606998145580292, 113 | "dt":0.5, 114 | "left_index":3200, 115 | "max_cc_value":0.8694694638252258, 116 | "min_period":27.0, 117 | "phase_arrivals":[ 118 | { 119 | "dT/dD":6.4899426478691424, 120 | "phase_name":"SKKS", 121 | "take-off angle":11.672984879522478, 122 | "time":1645.8188901670642 123 | }, 124 | { 125 | "dT/dD":8.3405984429746454, 126 | "phase_name":"Sdiff", 127 | "take-off angle":15.071261840889404, 128 | "time":1700.6517202757764 129 | }, 130 | { 131 | "dT/dD":8.3405984429746454, 132 | "phase_name":"pSdiff", 133 | "take-off angle":-25.840796286684665, 134 | "time":1705.8633598019533 135 | }, 136 | { 137 | "dT/dD":8.3405984429746454, 138 | "phase_name":"sSdiff", 139 | "take-off angle":-15.071261840889404, 140 | "time":1707.3500031243075 141 | } 142 | ], 143 | "relative_endtime":1720.0, 144 | "relative_starttime":1600.0, 145 | "right_index":3440, 146 | "time_of_first_sample":"2010-09-03T16:35:57.900000Z", 147 | 
"window_weight":3.8643087281121149 148 | }, 149 | { 150 | "absolute_endtime":"2010-09-03T17:15:43.900000Z", 151 | "absolute_starttime":"2010-09-03T17:12:22.900000Z", 152 | "cc_shift_in_samples":1, 153 | "cc_shift_in_seconds":0.5, 154 | "center_index":4571, 155 | "channel_id":"IU.KBL..BHR", 156 | "channel_id_2":"IU.KBL.S3.MXR", 157 | "dlnA":0.34011590480804443, 158 | "dt":0.5, 159 | "left_index":4370, 160 | "max_cc_value":0.9845608472824097, 161 | "min_period":27.0, 162 | "phase_arrivals":[ 163 | { 164 | "dT/dD":2.9610305040872409, 165 | "phase_name":"SKKS", 166 | "take-off angle":5.2965630710429092, 167 | "time":2185.9236772240752 168 | }, 169 | { 170 | "dT/dD":1.873485479005921, 171 | "phase_name":"SKIKKIKS", 172 | "take-off angle":3.3483442154348597, 173 | "time":2199.1581326465957 174 | }, 175 | { 176 | "dT/dD":12.767144008283758, 177 | "phase_name":"SS", 178 | "take-off angle":23.454386812440546, 179 | "time":2222.589822007224 180 | }, 181 | { 182 | "dT/dD":1.9222659759508847, 183 | "phase_name":"PKIKPPKIKP", 184 | "take-off angle":5.7654215719944482, 185 | "time":2259.5987753741283 186 | } 187 | ], 188 | "relative_endtime":2386.0, 189 | "relative_starttime":2185.0, 190 | "right_index":4772, 191 | "time_of_first_sample":"2010-09-03T16:35:57.900000Z", 192 | "window_weight":7.3295085297690497 193 | }, 194 | { 195 | "absolute_endtime":"2010-09-03T17:19:57.900000Z", 196 | "absolute_starttime":"2010-09-03T17:16:58.400000Z", 197 | "cc_shift_in_samples":5, 198 | "cc_shift_in_seconds":2.5, 199 | "center_index":5100, 200 | "channel_id":"IU.KBL..BHR", 201 | "channel_id_2":"IU.KBL.S3.MXR", 202 | "dlnA":0.3200240433216095, 203 | "dt":0.5, 204 | "left_index":4921, 205 | "max_cc_value":0.8598626852035522, 206 | "min_period":27.0, 207 | "phase_arrivals":[], 208 | "relative_endtime":2640.0, 209 | "relative_starttime":2460.5, 210 | "right_index":5280, 211 | "time_of_first_sample":"2010-09-03T16:35:57.900000Z", 212 | "window_weight":5.7164945182976901 213 | } 214 | ] 
"""
# functions mainly for pypaw.
"""
import numpy as np


def sort_windows_on_channel_and_location(sta_win):
    """
    Group the windows of one station by channel(e.g. "BH") and then by
    location(e.g. "00"), counting windows per group. Used by
    merge_instruments_window.

    :param sta_win: windows of one station, keyed by trace id
    :return: {channel: {location: {"traces": [...], "nwins": n}}}
    """
    sort_dict = {}
    for trace_id, trace_win in sta_win.items():
        chan = trace_id.split('.')[-1][0:2]
        loc = trace_id.split('.')[-2]
        if chan not in sort_dict:
            sort_dict[chan] = {}
        if loc not in sort_dict[chan]:
            sort_dict[chan][loc] = {"traces": [], "nwins": 0}
        sort_dict[chan][loc]["traces"].append(trace_id)
        sort_dict[chan][loc]["nwins"] += len(trace_win)

    # keep trace names in deterministic(sorted) order
    for chan, chan_info in sort_dict.items():
        for loc, loc_info in chan_info.items():
            loc_info['traces'] = sorted(loc_info['traces'])

    return sort_dict


def pick_location_with_more_windows(sort_dict):
    """
    For each channel, pick the location carrying the most windows.

    :param sort_dict: output of sort_windows_on_channel_and_location
    :return: {channel: chosen location}
    """
    chosen = {}
    for chan, chan_info in sort_dict.items():
        if len(chan_info) == 0:
            continue

        # if multiple locations available, choose the
        # one with most number of windows
        _locs = []
        _nwins = []
        for loc, loc_info in chan_info.items():
            _locs.append(loc)
            _nwins.append(loc_info["nwins"])
        chosen[chan] = _locs[int(np.argmax(_nwins))]

    return chosen


def merge_instruments_window(sta_win):
    """
    Merge windows from the same channel, for example, if
    there are windows from "00.BH*" and "10.BH*", keep only one
    with the most windows. For example, if "00.BH*" has 10
    windows and "10.BH*" has 20 windows, we will keep the
    "10.BH*" since it has more windows.
    """
    if len(sta_win) == 0:
        return sta_win

    sort_dict = sort_windows_on_channel_and_location(sta_win)
    chosen_locs = pick_location_with_more_windows(sort_dict)

    chosen_wins = {}
    for chan, loc in chosen_locs.items():
        for tr_id in sort_dict[chan][loc]["traces"]:
            chosen_wins[tr_id] = sta_win[tr_id]

    return chosen_wins


def sort_windows_on_channel(sta_win):
    """
    Group windows from one station into channels and count
    the number of windows in each channel.

    :param sta_win: windows of one station, keyed by trace id
    :return: {channel: {"traces": [...], "nwins": n}}
    """
    sort_dict = {}
    for trace_id, trace_win in sta_win.items():
        chan = trace_id.split(".")[-1][0:2]
        if chan not in sort_dict:
            sort_dict[chan] = {"traces": [], "nwins": 0}
        sort_dict[chan]["traces"].append(trace_id)
        sort_dict[chan]["nwins"] += len(trace_win)

    return sort_dict


def pick_channel_with_more_windows(sort_dict):
    """ Return the channel name holding the most windows. """
    max_wins = -1
    max_chan = None
    for chan, chaninfo in sort_dict.items():
        if chaninfo["nwins"] > max_wins:
            max_wins = chaninfo["nwins"]
            max_chan = chan
    return max_chan


def merge_channels_window(sta_win):
    """
    Merge windows from different channels.
    This step should be done after merge instruments windows
    because after that there will only one instrument left
    on one channel.
    For example, if we have "BH" channel with 20 windows and
    "LH" has 10 windows, we will keep only the "BH" channel.
    """
    if len(sta_win) == 0:
        return sta_win

    sort_dict = sort_windows_on_channel(sta_win)
    chosen_chan = pick_channel_with_more_windows(sort_dict)

    chosen_wins = {}
    for _trace_id in sort_dict[chosen_chan]["traces"]:
        chosen_wins[_trace_id] = sta_win[_trace_id]

    return chosen_wins


def merge_station_windows(windows):
    """
    Merge windows for one station.
    For example, you may have "00.BH", "10.BH", "00.LH", "10.LH" from
    different locations and channels. You may only want to keep one
    at the very end. So:
    1) select locations: keep only one location with the most
    number of windows for one channel.
    2) select channel: keep only the channel with the most windows.
    """
    w = merge_instruments_window(windows)
    w = merge_channels_window(w)
    return w


def merge_windows(windows):
    """
    Merge the windows(from one event, multiple stations).
    Stations whose entry is None are skipped.
    """
    new_windows = {}
    for sta, sta_info in windows.items():
        if sta_info is None:
            continue
        # merge the windows for each station
        new_windows[sta] = merge_station_windows(sta_info)
    return new_windows


def generate_log_content(windows):
    """
    Count stations, traces and windows, overall and per component.

    :param windows: {station: {trace id: [windows]}}; station entries
        that are None are skipped
    :return: {"component": per-component counts, "overall": totals}
    """
    overall_log = {"stations": 0, "stations_with_windows": 0,
                   "windows": 0, "traces": 0, "traces_with_windows": 0}
    comp_log = {}
    for sta_name, sta_win in windows.items():
        if sta_win is None:
            continue
        nwin_sta = 0
        ntraces_with_windows = 0
        for trace_id, trace_win in sta_win.items():
            comp = trace_id.split(".")[-1]
            if comp not in comp_log:
                comp_log[comp] = {
                    "windows": 0, "traces": 0, "traces_with_windows": 0}
            comp_log[comp]["windows"] += len(trace_win)
            if len(trace_win) > 0:
                comp_log[comp]["traces_with_windows"] += 1
                ntraces_with_windows += 1
            comp_log[comp]["traces"] += 1
            nwin_sta += len(trace_win)

        overall_log["stations"] += 1
        overall_log["windows"] += nwin_sta
        overall_log["traces"] += len(sta_win)
        overall_log["traces_with_windows"] += ntraces_with_windows
        if nwin_sta > 0:
            overall_log["stations_with_windows"] += 1

    log = {"component": comp_log, "overall": overall_log}
    return log


def stats_all_windows(windows, obsd_tag, synt_tag,
                      instrument_merge_flag,
                      output_file):
    """
    Generate window statistic information and dump it to a json file.
    """
    # imported locally so the pure counting/merging helpers above stay
    # importable without the pytomo3d io utilities
    from pytomo3d.utils.io import dump_json

    log = {"obsd_tag": obsd_tag, "synt_tag": synt_tag,
           "instrument_merge_flag": instrument_merge_flag}

    window_log = generate_log_content(windows)
    log.update(window_log)

    print("Windows statistic log file: %s" % output_file)
    dump_json(log, output_file)
def test_flex_cut_stream():
    """flex_cut_stream applies the cut with dynamic padding to every trace."""
    st = obspy.read(small_mseed)
    dt = st[0].stats.delta
    cut_start = st[0].stats.starttime + 100 * dt
    cut_end = st[0].stats.endtime - 100 * dt
    pad = 5
    st = proc.flex_cut_stream(st, cut_start, cut_end, dynamic_npts=pad)
    for tr in st:
        assert tr.stats.starttime == cut_start - pad * dt
        assert tr.stats.endtime == cut_end + pad * dt


def test_filter_trace():
    """Band-pass filtering keeps the number of samples unchanged."""
    st = testsyn.copy()
    pre_filt = [1/90., 1/60., 1/27.0, 1/22.5]

    # check length doesn't change after filtering
    tr = st[0].copy()
    proc.filter_trace(tr, pre_filt)
    assert len(tr.data) == len(st[0].data)


def compare_stream_kernel(st1, st2):
    """Return True if the two streams match trace by trace (matched on id)."""
    if len(st1) != len(st2):
        return False
    for tr1 in st1:
        if not compare_trace_kernel(tr1, st2.select(id=tr1.id)[0]):
            return False
    return True


def compare_trace_kernel(tr1, tr2):
    """Return True if two traces agree in timing, sampling and data values."""
    timing_ok = (tr1.stats.starttime == tr2.stats.starttime and
                 tr1.stats.endtime == tr2.stats.endtime)
    sampling_ok = (tr1.stats.sampling_rate == tr2.stats.sampling_rate and
                   tr1.stats.npts == tr2.stats.npts)
    if not (timing_ok and sampling_ok):
        return False
    return bool(np.allclose(tr1.data, tr2.data))


def test_process_obsd():
    """Run the full observed-data processing chain against the benchmark."""
    st = testobs.copy()
    inv = deepcopy(teststaxml)
    event = obspy.readEvents(testquakeml)[0]
    origin = event.preferred_origin() or event.origins[0]

    pre_filt = [1/90., 1/60., 1/27.0, 1/22.5]
    t1 = origin.time
    t2 = origin.time + 6000.0
    st_new = proc.process_stream(
        st, remove_response_flag=True, water_level=60, inventory=inv,
        filter_flag=True, pre_filt=pre_filt,
        starttime=t1, endtime=t2, resample_flag=True,
        sampling_rate=2.0, taper_type="hann",
        taper_percentage=0.05, rotate_flag=True,
        event_latitude=origin.latitude,
        event_longitude=origin.longitude)
    st_compare = obspy.read(
        os.path.join(DATA_DIR, "proc", "IU.KBL.obs.proc.mseed"))
    assert compare_stream_kernel(st_new, st_compare)
compare_trace_kernel(st_new.select(channel="BHZ")[0], 179 | st_compare.select(channel="BHZ")[0]) 180 | 181 | 182 | def test_process_synt(): 183 | staxmlfile = os.path.join(DATA_DIR, "stationxml", "IU.KBL.syn.xml") 184 | inv = obspy.read_inventory(staxmlfile) 185 | 186 | st = testsyn.copy() 187 | event = obspy.readEvents(testquakeml)[0] 188 | origin = event.preferred_origin() or event.origins[0] 189 | event_lat = origin.latitude 190 | event_lon = origin.longitude 191 | event_time = origin.time 192 | 193 | pre_filt = [1/90., 1/60., 1/27.0, 1/22.5] 194 | t1 = event_time 195 | t2 = event_time + 6000.0 196 | st_new = proc.process_stream( 197 | st, remove_response_flag=False, inventory=inv, 198 | filter_flag=True, pre_filt=pre_filt, 199 | starttime=t1, endtime=t2, resample_flag=True, 200 | sampling_rate=2.0, taper_type="hann", 201 | taper_percentage=0.05, rotate_flag=True, 202 | event_latitude=event_lat, 203 | event_longitude=event_lon) 204 | bmfile = os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed") 205 | st_compare = obspy.read(bmfile) 206 | assert compare_stream_kernel(st_new, st_compare) 207 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Methods that handles adjoint sources 5 | 6 | :copyright: 7 | Wenjie Lei (lei@princeton.edu), 2016 8 | :license: 9 | GNU Lesser General Public License, version 3 (LGPLv3) 10 | (http://www.gnu.org/licenses/lgpl-3.0.en.html) 11 | """ 12 | from __future__ import (print_function, division) 13 | import numpy as np 14 | from pyadjoint import AdjointSource 15 | 16 | 17 | def ensemble_fake_adj(stream): 18 | """ 19 | Ensemble fake adjoint sources from stream, for test purpose. 
20 | """ 21 | adjsrcs = list() 22 | comps = ["Z", "R", "T"] 23 | for comp in comps: 24 | tr = stream.select(channel="*%s" % comp)[0] 25 | adj = AdjointSource( 26 | "waveform_misfit", misfit=0.0, dt=tr.stats.delta, 27 | min_period=50.0, max_period=100.0, 28 | component=tr.stats.channel, 29 | adjoint_source=tr.data, network=tr.stats.network, 30 | station=tr.stats.station, location=tr.stats.location, 31 | starttime=tr.stats.starttime) 32 | adjsrcs.append(adj) 33 | 34 | return adjsrcs 35 | 36 | 37 | def change_adjsrc_channel_name(adjsrcs, channel): 38 | """ 39 | Change adjoint source channel name to given string. For example, 40 | as specfem input, the channel name is "MX". So before writing 41 | adjoint source out, we need to change the channel name to "MX" 42 | """ 43 | if len(channel) != 2: 44 | raise ValueError("channel(%s) must be length of 2" % channel) 45 | for adj in adjsrcs: 46 | adj.component = channel + adj.component[-1] 47 | 48 | 49 | def _stats_channel_window(adjsrcs, windows): 50 | """ 51 | Determine number of windows on each channel of each component. 
52 | """ 53 | adj_dict = {} 54 | for idx, adj in enumerate(adjsrcs): 55 | adj_id = "%s.%s.%s.%s" % (adj.network, adj.station, adj.location, 56 | adj.component) 57 | adj_dict[adj_id] = idx 58 | 59 | adj_win_dict = {} 60 | for chan_win in windows.itervalues(): 61 | if len(chan_win) == 0: 62 | continue 63 | chan_id = chan_win[0]["channel_id"] 64 | adj_win_dict[chan_id] = len(chan_win) 65 | 66 | new_win_dict = {} 67 | for key in adj_win_dict: 68 | if key in adj_dict: 69 | new_win_dict[key] = adj_win_dict[key] 70 | 71 | return adj_dict, new_win_dict 72 | 73 | 74 | def calculate_chan_weight(adjsrcs, windows_sta): 75 | """ 76 | Calcualte window weights based on adjoint sources and windows 77 | 78 | :param adjsrcs: 79 | :param windows_sta: 80 | :return: 81 | """ 82 | 83 | _, adj_win_dict = _stats_channel_window(adjsrcs, windows_sta) 84 | 85 | comp_dict = {} 86 | for tr_id, nwins in adj_win_dict.iteritems(): 87 | comp = "MX%s" % tr_id.split(".")[-1][-1] 88 | if comp not in comp_dict: 89 | comp_dict[comp] = {} 90 | comp_dict[comp][tr_id] = nwins 91 | 92 | for comp, comp_wins in comp_dict.iteritems(): 93 | ntotal = 0 94 | for chan_id, chan_win in comp_wins.iteritems(): 95 | ntotal += chan_win 96 | for chan_id, chan_win in comp_wins.iteritems(): 97 | comp_dict[comp][chan_id] = chan_win / ntotal 98 | 99 | return comp_dict 100 | 101 | 102 | def check_multiple_instruments(adjsrcs): 103 | """ 104 | Check if there are mutiple instruments for one component 105 | This is very important because if there is only one instrument 106 | for one component, we can define the path shorter and change 107 | channel name to "MX" to follow the specfem style. For example, 108 | in adjsrcs, if there are only "II.AAK.00.BHZ" in component Z, 109 | then we can define the path as "II_AAK_MXZ". 
If there are 110 | multiple instruments, then we define the whole path, as 111 | "II_AAK_00_BHZ" 112 | """ 113 | name_list = [] 114 | adj_dict = {} 115 | for adj in adjsrcs: 116 | cat = adj.component[-1] 117 | if cat not in adj_dict: 118 | adj_dict[cat] = [] 119 | adj_id = "%s.%s.%s.%s" % (adj.network, adj.station, 120 | adj.location, adj.component) 121 | adj_dict[cat].append(adj_id) 122 | name_list.append(adj_id) 123 | 124 | if len(set(name_list)) != len(name_list): 125 | raise ValueError("Error on adjoint source(%s.%s) since it has" 126 | "duplicate name on adjoint source: %s" 127 | % (adj.network, adj.station, name_list)) 128 | 129 | _flag = False 130 | for cat_info in adj_dict.itervalues(): 131 | if len(cat_info) > 1: 132 | _flag = True 133 | break 134 | return _flag 135 | 136 | 137 | def reshape_adj(adjsrcs, staxml, dtype=np.float32, 138 | default_specfem_channel="MX"): 139 | """ 140 | Reshape adjsrcs to a certain structure required by pyasdf writer 141 | """ 142 | if not isinstance(adjsrcs, list): 143 | raise ValueError("Input ajdsrcs must be a list of adjoint sources") 144 | 145 | vtype = "AuxiliaryData" 146 | reshape_list = [] 147 | tag_list = [] 148 | 149 | # extract station information 150 | sta_lat = staxml[0][0].latitude 151 | sta_lon = staxml[0][0].longitude 152 | sta_ele = staxml[0][0].elevation 153 | sta_dep = staxml[0][0][0].depth 154 | 155 | # sanity check to see if there are multiple instruments 156 | _multiple_flag = check_multiple_instruments(adjsrcs) 157 | 158 | if not _multiple_flag: 159 | change_adjsrc_channel_name(adjsrcs, default_specfem_channel) 160 | 161 | for adj in adjsrcs: 162 | adj_array = np.asarray(adj.adjoint_source, dtype=dtype) 163 | 164 | station_id = "%s.%s" % (adj.network, adj.station) 165 | 166 | starttime = "T".join(str(adj.starttime).split()) 167 | parameters = {"dt": adj.dt, "starttime": starttime, 168 | "misfit": adj.misfit, 169 | "adjoint_source_type": adj.adj_src_type, 170 | "min_period": adj.min_period, 171 | 
"max_period": adj.max_period, 172 | "latitude": sta_lat, "longitude": sta_lon, 173 | "elevation_in_m": sta_ele, "depth_in_m": sta_dep, 174 | "station_id": station_id, "component": adj.component, 175 | "location": adj.location, 176 | "units": "m"} 177 | 178 | if _multiple_flag: 179 | tag = "%s_%s_%s_%s" % (adj.network, adj.station, 180 | adj.location, adj.component) 181 | else: 182 | tag = "%s_%s_%s" % (adj.network, adj.station, adj.component) 183 | 184 | tag_list.append(tag) 185 | 186 | dataset_path = "AdjointSources/%s" % tag 187 | 188 | _reshape = {"object": adj_array, "type": vtype, 189 | "path": dataset_path, "parameters": parameters} 190 | 191 | reshape_list.append(_reshape) 192 | 193 | # check if there are different adjoint sources with the same tag. If so, 194 | # the writer won't be able to write out because of the same dataset path 195 | if len(set(tag_list)) != len(tag_list): 196 | raise ValueError("Duplicate tag in adjoint sources list: %s" % 197 | tag_list) 198 | 199 | return reshape_list 200 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = ../../github_doc/pytomo3d_doc 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# -d caches the parsed doctrees under BUILDDIR so rebuilds are incremental;
# NOTE(review): BUILDDIR points outside the repository (../../github_doc) --
# confirm this is intentional before building on a fresh checkout.
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytomo3d.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytomo3d.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/pytomo3d"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytomo3d"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /pytomo3d/station/tests/test_generate_adjoint_stations.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | from copy import deepcopy 4 | import pytest 5 | import numpy.testing as npt 6 | import pytomo3d.station.generate_adjoint_stations as gas 7 | from pytomo3d.utils.io import load_json 8 | 9 | 10 | def _upper_level(path, nlevel=4): 11 | """ 12 | Go the nlevel dir up 13 | """ 14 | for i in range(nlevel): 15 | path = os.path.dirname(path) 16 | return path 17 | 18 | 19 | # Most generic way to get the data folder path. 
def test_extract_usable_stations_from_one_period():
    """
    Stations/channels with measurements are listed; entries without any
    measurements are silently skipped; empty input yields empty output.
    """
    ms = deepcopy(_measurements)
    stations, channels = gas.extract_usable_stations_from_one_period(ms)
    assert set(stations) == set(["II.AAK", "II.ABKT", "IU.BCD"])
    assert set(channels) == set(["II.AAK..BHR", "II.AAK..BHT", "II.AAK..BHZ",
                                 "II.ABKT..BHR", "II.ABKT..BHZ",
                                 "IU.BCD..BHR", "IU.BCD..BHT", "IU.BCD..BHZ"])

    # add a fake station with no measurements: results must be unchanged
    ms["FK.FAKE"] = {}
    stations, channels = gas.extract_usable_stations_from_one_period(ms)
    assert set(stations) == set(["II.AAK", "II.ABKT", "IU.BCD"])
    assert set(channels) == set(["II.AAK..BHR", "II.AAK..BHT", "II.AAK..BHZ",
                                 "II.ABKT..BHR", "II.ABKT..BHZ",
                                 "IU.BCD..BHR", "IU.BCD..BHT", "IU.BCD..BHZ"])

    # empty measurement dict yields no stations and no channels
    stations, channels = gas.extract_usable_stations_from_one_period({})
    assert len(stations) == 0
    assert len(channels) == 0
def test_extract_one_station():
    """
    extract_one_station returns the same coordinate/sensor info for any
    channel of a station and raises KeyError for an unknown channel id.
    """
    info = gas.extract_one_station("II.ABKT..BHZ", _stations)

    true_info = {"depth": 0.0, "elevation": 2437.8, "latitude": 0.0,
                 "longitude": 120.0,
                 "sensor": "Streckeisen STS1H/VBB Seismometer"}

    assert info == true_info

    # a different channel of the same station yields identical info
    info = gas.extract_one_station("II.ABKT..BHR", _stations)
    assert info == true_info

    # channel id absent from the station file
    with pytest.raises(KeyError):
        gas.extract_one_station("II.ABKT..LHZ", _stations)
def read_station_txt(fn):
    """
    Parse a SPECFEM-style STATIONS text file.

    Each line reads: station network value1 value2 value3 value4.
    Returns ``{"network.station": [value1, value2, value3, value4]}``.
    """
    stations = {}
    with open(fn) as fh:
        for line in fh:
            fields = line.split()
            key = "%s.%s" % (fields[1], fields[0])
            stations[key] = [float(fields[idx]) for idx in range(2, 6)]
    return stations
def check_adj_consistency(adj_base, adj):
    """
    Check the consistency of adj_base and adj.

    If passed, return (adj could then be added into adj_base); if not,
    raise ValueError. Consistency requires the same network, station and
    component, the same sampling interval, matching start times (within
    half a sample) and equal array lengths.
    """
    if adj_base.network != adj.network or \
            adj_base.station != adj.station or \
            adj_base.component != adj.component:
        raise ValueError("Adjoint source network or station is different:"
                         "%s, %s" % (adj_base.id, adj.id))

    if not np.isclose(adj_base.dt, adj.dt):
        raise ValueError("DeltaT of current adjoint source(%f) "
                         "and new added adj(%f) not the same"
                         % (adj_base.dt, adj.dt))

    # allow sub-sample jitter between the two start times
    if np.abs(adj_base.starttime - adj.starttime) > 0.5 * adj.dt:
        # fixed: this message used to report the dt values instead of
        # the mismatching start times
        raise ValueError("Start time of current adjoint source(%s) "
                         "and new added adj(%s) not the same"
                         % (adj_base.starttime, adj.starttime))

    if len(adj_base.adjoint_source) != len(adj.adjoint_source):
        raise ValueError("Dimension of current adjoint_source(%d) "
                         "and new added adj(%d) not the same" %
                         (len(adj_base.adjoint_source),
                          len(adj.adjoint_source)))
def check_events_consistent(events):
    """
    Check all events are consistent (same with each other).

    :param events: dict mapping asdf filename -> event
    :raises ValueError: listing the files whose event differs from the
        reference one (an arbitrary first entry of the dict)
    """
    # list(...) instead of .keys()[0]: dict views are not indexable on
    # Python 3
    fn_base = list(events.keys())[0]
    event_base = events[fn_base]

    diffs = []
    # .items() instead of .iteritems() for Python 3 compatibility
    for asdf_fn, event in events.items():
        if event_base != event:
            diffs.append(asdf_fn)

    if len(diffs) != 0:
        raise ValueError("Event information in %s not the same as others: %s"
                         % (diffs, fn_base))
def check_station_consistent(sta1, sta2):
    """
    Compare two station-info dicts, ignoring the "location" entry.

    Float values are compared with a numerical tolerance; everything else
    must match exactly. Returns True when every checked key of ``sta1``
    exists in ``sta2`` with a matching value.
    """
    for key, value in sta1.items():
        # location codes may legitimately differ between instruments
        if key == "location":
            continue
        if key not in sta2:
            return False
        other = sta2[key]
        if isinstance(value, float):
            if not np.isclose(value, other):
                return False
        elif value != other:
            return False
    return True
def rotate_adjoint_sources(old_adjs, stations, event_latitude,
                           event_longitude):
    """
    Rotate all adjoint sources from RT components to NE components.

    :param old_adjs: dict of adjoint sources keyed by "NET_STA_COMP"
    :param stations: dict of station info keyed by "NET_STA"
    :param event_latitude: event latitude used for the rotation
    :param event_longitude: event longitude used for the rotation
    :return: dict of rotated adjoint sources keyed the same way
    """
    print("="*15 + "\nRotate adjoint sources from RT to EN")
    done_sta_list = []
    new_adjs = {}

    # .items() instead of .iteritems() for Python 3 compatibility
    for adj_id, adj in old_adjs.items():
        network = adj.network
        station = adj.station
        sta_tag = "%s_%s" % (network, station)

        if sta_tag not in done_sta_list:
            slat = stations[sta_tag]["latitude"]
            slon = stations[sta_tag]["longitude"]

            sta_adjs = get_station_adjsrcs(old_adjs, sta_tag)
            adj_dict = rotate_one_station_adjsrcs(
                sta_adjs, slat, slon, event_latitude,
                event_longitude)
            new_adjs.update(adj_dict)
            # fixed: the done list was never appended to, so every
            # component of a station triggered a redundant re-rotation
            done_sta_list.append(sta_tag)

    return new_adjs
def calculate_adjsrc_on_trace(obs, syn, windows, config, adj_src_type,
                              figure_mode=False, figure_dir=None,
                              adjoint_src_flag=True):
    """
    Calculate adjoint source on a pair of traces with selected windows.

    :param obs: observed trace
    :type obs: obspy.Trace
    :param syn: synthetic trace
    :type syn: obspy.Trace
    :param windows: window information; converted by _extract_window_time
        into a 2-d array of time pairs, like
        [[win_1_left, win_1_right], [win_2_left, win_2_right], ...]
    :type windows: list or numpy.array
    :param config: config of pyadjoint
    :type config: pyadjoint.Config
    :param adj_src_type: adjoint source type, options include:
        1) "cc_traveltime_misfit"
        2) "multitaper_misfit"
        3) "waveform_misfit"
    :type adj_src_type: str
    :param figure_mode: whether to plot an adjoint source figure right
        after calculation
    :type figure_mode: bool
    :param figure_dir: directory to save the figure into; if None, the
        plot is not written to file
    :type figure_dir: str
    :param adjoint_src_flag: whether to calculate the adjoint source.
        If False, only make measurements
    :type adjoint_src_flag: bool
    :return: adjoint source (pyadjoint.AdjointSource)
    :raises ValueError: if obs/syn are not Traces, or the extracted
        window array does not have shape (*, 2)
    """
    if not isinstance(obs, Trace):
        raise ValueError("Input obs should be obspy.Trace")
    if not isinstance(syn, Trace):
        raise ValueError("Input syn should be obspy.Trace")
    # if not isinstance(config, pyadjoint.Config):
    #     raise ValueError("Input config should be pyadjoint.Config")

    # convert windows to a plain (nwin, 2) array of start/end times
    window_time = _extract_window_time(windows)
    if len(window_time.shape) != 2 or window_time.shape[1] != 2:
        raise ValueError("Input windows dimension incorrect, dimension"
                         "(*, 2) expected")

    adjsrc = pyadjoint.calculate_adjoint_source(
        adj_src_type=adj_src_type, observed=obs, synthetic=syn,
        config=config, window=window_time, adjoint_src=adjoint_src_flag,
        plot=figure_mode)

    if figure_mode:
        if figure_dir is None:
            figname = None
        else:
            figname = os.path.join(figure_dir, "%s.pdf" % obs.id)
        plot_adjoint_source(adjsrc, win_times=window_time, obs_tr=obs,
                            syn_tr=syn, figname=figname)

    return adjsrc
def calculate_adjsrc_on_stream(observed, synthetic, windows, config,
                               adj_src_type, figure_mode=False,
                               figure_dir=None, adjoint_src_flag=True):
    """
    Calculate adjoint sources on a pair of streams with selected windows.

    :param observed: observed stream
    :type observed: obspy.Stream
    :param synthetic: synthetic stream
    :type synthetic: obspy.Stream
    :param windows: dict of window lists keyed by channel id; each value
        contains the windows of one channel
    :type windows: dict
    :param config: config for calculating adjoint source
    :type config: pyadjoint.Config
    :param adj_src_type: adjoint source type
    :type adj_src_type: str
    :param figure_mode: set to True to see adjoint plots for every trace
    :type figure_mode: bool
    :param adjoint_src_flag: set to True to calculate adjoint sources;
        if False only measurements are made
    :type adjoint_src_flag: bool
    :return: list of pyadjoint.AdjointSource, or None when no windows
    """
    if not isinstance(observed, Stream):
        raise ValueError("Input observed should be obspy.Stream")
    if not isinstance(synthetic, Stream):
        raise ValueError("Input synthetic should be obspy.Stream")
    if windows is None or len(windows) == 0:
        return None

    adjsrcs_list = []

    # .values() instead of .itervalues() for Python 3 compatibility
    for chan_win in windows.values():
        if len(chan_win) == 0:
            continue

        obsd_id, synt_id = _extract_window_id(chan_win)

        try:
            obs = observed.select(id=obsd_id)[0]
        except IndexError:
            # narrowed from a bare except: select() returns an empty
            # Stream when the trace is missing, so [0] raises IndexError
            raise ValueError("Missing observed trace for window: %s" % obsd_id)

        if synt_id == "UNKNOWN":
            # fall back to matching only the component letter
            syn = synthetic.select(channel="*%s" % obs.stats.channel[-1])[0]
        else:
            syn = synthetic.select(id=synt_id)[0]

        adjsrc = calculate_adjsrc_on_trace(
            obs, syn, windows[obsd_id], config, adj_src_type,
            adjoint_src_flag=adjoint_src_flag,
            figure_mode=figure_mode, figure_dir=figure_dir)

        if adjsrc is None:
            continue
        adjsrcs_list.append(adjsrc)

    return adjsrcs_list
def calculate_and_process_adjsrc_on_stream(
        observed, synthetic, windows, inventory, config, event,
        adj_src_type, postproc_param, figure_mode=False,
        figure_dir=None):
    """
    (API for pypaw)
    Calculate adjoint sources based on config, then process them
    based on postproc_param.

    :return: list of processed adjoint sources, or None if there are no
        windows at all
    """
    # check the total number of windows; bail out early when empty
    # (.values() instead of .itervalues() for Python 3 compatibility)
    nwin_total = sum(len(value) for value in windows.values())
    if nwin_total == 0:
        return

    adjsrcs = calculate_adjsrc_on_stream(
        observed, synthetic, windows, config, adj_src_type,
        figure_mode=figure_mode, figure_dir=figure_dir,
        adjoint_src_flag=True)

    if postproc_param["weight_flag"]:
        chan_weight_dict = calculate_chan_weight(adjsrcs, windows)
    else:
        chan_weight_dict = None

    origin = event.preferred_origin() or event.origins[0]
    focal = event.preferred_focal_mechanism()
    # half duration of the source time function
    hdr = focal.moment_tensor.source_time_function.duration / 2.0
    # according to SPECFEM starttime convention
    time_offset = -1.5 * hdr
    starttime = origin.time + time_offset

    new_adjsrcs = process_adjoint(
        adjsrcs, interp_starttime=starttime,
        inventory=inventory, event=event,
        weight_dict=chan_weight_dict,
        **postproc_param)

    # return new_adjsrcs, time_offset
    return new_adjsrcs
194 | """ 195 | 196 | nwin_total = 0 197 | for value in windows.itervalues(): 198 | nwin_total += len(value) 199 | if nwin_total == 0: 200 | return 201 | 202 | adjsrcs = calculate_adjsrc_on_stream( 203 | observed, synthetic, windows, config, adj_src_type, 204 | figure_mode=False, figure_dir=None, 205 | adjoint_src_flag=True) 206 | 207 | results = {} 208 | for adj in adjsrcs: 209 | results[adj.id] = adj.measurement 210 | return results 211 | -------------------------------------------------------------------------------- /scripts/window_merge_tool/merge_winfile.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | from util import read_json_file, JSONObject, isclose 4 | import argparse 5 | import json 6 | 7 | 8 | class WindowMerge(object): 9 | """ 10 | Rewrite output from pyflex into a simpler form(not all 11 | information are necessary in next stage) and 12 | also do some processing to the window(add weighting) 13 | """ 14 | 15 | def __init__(self, dirfile, verbose): 16 | self.dirfile = dirfile 17 | self._verbose = verbose 18 | 19 | def smart_run(self, strategy="combined"): 20 | """ 21 | running window merging 22 | 23 | :param strategy: strategy to merge windows. values could be 24 | "combined" and "selective". 25 | If "combined", each channel will be kept and the weighting will be 26 | calculated according to the number of windows. 27 | If "selective", only one channel(the one with max number of window) 28 | will be kept and the weighting will be 1. If there are multiple max 29 | values, the channel_id with smaller location value will be kept. 
30 | :type strategy: string 31 | """ 32 | strategy = strategy.lower() 33 | if strategy not in ['combined', 'selective']: 34 | raise ValueError("strategy can only be: 1)'combined'; " 35 | "2)'selective'") 36 | 37 | dirlist = read_json_file(self.dirfile) 38 | if isinstance(dirlist, list): 39 | for _dir in dirlist: 40 | self.merge_window(_dir, strategy=strategy) 41 | elif isinstance(dirlist, JSONObject): 42 | self.merge_window(dirlist, strategy) 43 | 44 | def merge_window(self, dir_obj, strategy): 45 | input_winfile = dir_obj.input_file 46 | output_winfile = dir_obj.output_file 47 | 48 | if self._verbose: 49 | print("+"*20) 50 | print("input window file: %s" % input_winfile) 51 | print("output window file: %s" % output_winfile) 52 | print("merging strategy: %s" % strategy) 53 | 54 | windows = self.load_input_winfile(input_winfile) 55 | new_windows = {} 56 | for sta_name, sta_win in windows.iteritems(): 57 | if self._verbose == 2: 58 | print("="*15+"\nStation: %s" % sta_name) 59 | new_sta_win = self.merge_one_station(sta_win, strategy) 60 | new_windows[sta_name] = new_sta_win 61 | 62 | if self._verbose == 1: 63 | num_sta, num_win = self._stats_window(windows) 64 | print("Before merging, number of station and window: [%d, %d]" % 65 | (num_sta, num_win)) 66 | num_sta, num_win = self._stats_window(new_windows) 67 | print("Before merging, number of station and window: [%d, %d]" % 68 | (num_sta, num_win)) 69 | 70 | self.write_output(new_windows, output_winfile) 71 | 72 | @staticmethod 73 | def _stats_window(windows): 74 | num_sta = 0 75 | num_win = 0 76 | for sta_name, sta_win in windows.iteritems(): 77 | num_sta += 1 78 | for channel_id, channel_win in sta_win.iteritems(): 79 | num_win += len(channel_win) 80 | return num_sta, num_win 81 | 82 | @staticmethod 83 | def find_duplicate_channel(window, channel_id): 84 | 85 | def _split_channel_id(channel_id): 86 | """ 87 | Channel id: network.station.location.channel 88 | """ 89 | return channel_id.split(".") 90 | 91 | numwin = 
{} 92 | info = _split_channel_id(channel_id) 93 | 94 | for _id, _win in window.iteritems(): 95 | newinfo = _split_channel_id(_id) 96 | if newinfo[-1][-1:] == info[-1][-1:]: 97 | numwin[_id] = len(_win) 98 | return numwin 99 | 100 | @staticmethod 101 | def calculate_weighting(channel_id, winnum, strategy): 102 | 103 | def combined_weighting(channel_id, winnum): 104 | return float(winnum[channel_id]) / sum(winnum.values()) 105 | 106 | def selective_weighting(channel_id, winnum): 107 | values = winnum.values() 108 | values.sort() 109 | if winnum[channel_id] == max(values): 110 | if values[-1] == values[-2]: 111 | # multiple max, select one 112 | _channel_list = [] 113 | for _id, _num in winnum.iteritems(): 114 | if _num == max(values): 115 | _channel_list.append(_id) 116 | if channel_id == min(_channel_list): 117 | weighting = 1.0 118 | else: 119 | weighting = 0.0 120 | else: 121 | weighting = 1.0 122 | else: 123 | weighting = 0.0 124 | 125 | return weighting 126 | 127 | if strategy == "combined": 128 | return combined_weighting(channel_id, winnum) 129 | elif strategy == "selective": 130 | return selective_weighting(channel_id, winnum) 131 | else: 132 | raise NotImplementedError("strategy not implemented:%s" 133 | % strategy) 134 | 135 | def merge_one_station(self, window, strategy): 136 | new_window = {} 137 | for channel_id, channel_win in window.iteritems(): 138 | winnum = self.find_duplicate_channel(window, channel_id) 139 | if len(winnum) == 1: 140 | weighting = 1.0 141 | if len(winnum) > 1: 142 | weighting = self.calculate_weighting( 143 | channel_id, winnum, strategy) 144 | 145 | if self._verbose == 2: 146 | print("%s" % str(channel_id)), 147 | print(winnum), 148 | print(" --> weighting: %.2f" % (weighting)) 149 | 150 | if isclose(weighting, 0.0): 151 | continue 152 | 153 | new_window[channel_id] = [] 154 | for win in channel_win: 155 | newwin = {} 156 | newwin["initial_weighting"] = weighting 157 | channel_id = win["channel_id"] 158 | content = 
channel_id.split(".") 159 | newwin["obsd_id"] = channel_id 160 | newwin["synt_id"] = "%s.%s.S3.MX%s" % (content[0], content[1], 161 | content[3][-1]) 162 | newwin["relative_starttime"] = win["relative_starttime"] 163 | newwin["relative_endtime"] = win["relative_endtime"] 164 | new_window[channel_id].append(newwin) 165 | return new_window 166 | 167 | @staticmethod 168 | def float_array_to_string(array): 169 | return ['{:.3f}'.format(i) for i in array] 170 | 171 | @staticmethod 172 | def load_input_winfile(input_winfile): 173 | with open(input_winfile) as fh: 174 | return json.load(fh) 175 | 176 | @staticmethod 177 | def write_output(windows, output_winfile): 178 | dirname = os.path.dirname(output_winfile) 179 | if not os.path.exists(dirname): 180 | os.makedirs(dirname) 181 | if os.path.exists(output_winfile): 182 | print("Output_winfile exists and removed: %s" % output_winfile) 183 | os.remove(output_winfile) 184 | 185 | with open(output_winfile, 'w') as fh: 186 | json.dump(windows, fh, indent=4) 187 | 188 | 189 | if __name__ == "__main__": 190 | 191 | parser = argparse.ArgumentParser() 192 | parser.add_argument('-f', action='store', dest='files', required=True) 193 | parser.add_argument('-v', "--verbosity", action="count", 194 | dest='verbose', help="increase output verbosity") 195 | args = parser.parse_args() 196 | 197 | if args.verbose is None: 198 | args.verbose = 0 199 | winmer = WindowMerge(args.files, args.verbose) 200 | #winmer.smart_run(strategy="selective") 201 | winmer.smart_run(strategy="combined") 202 | 203 | -------------------------------------------------------------------------------- /pytomo3d/adjoint/tests/test_adjoint_source.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import json 4 | from obspy import read, Stream 5 | from pyflex.window import Window 6 | import pytomo3d.adjoint.adjoint_source as adj 7 | import pytomo3d.adjoint.io as adj_io 8 | import pytest 9 | import 
matplotlib.pyplot as plt 10 | # import pyadjoint.adjoint_source 11 | 12 | 13 | def _upper_level(path, nlevel=4): 14 | """ 15 | Go the nlevel dir up 16 | """ 17 | for i in range(nlevel): 18 | path = os.path.dirname(path) 19 | return path 20 | 21 | 22 | # Most generic way to get the data folder path. 23 | TESTBASE_DIR = _upper_level(os.path.abspath( 24 | inspect.getfile(inspect.currentframe())), 4) 25 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 26 | 27 | obsfile = os.path.join(DATA_DIR, "proc", "IU.KBL.obs.proc.mseed") 28 | synfile = os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed") 29 | winfile = os.path.join(DATA_DIR, "window", "IU.KBL..BHR.window.json") 30 | 31 | 32 | @pytest.fixture 33 | def load_config_multitaper(): 34 | config_file = os.path.join(DATA_DIR, "adjoint", 35 | "multitaper.adjoint.config.yaml") 36 | return adj_io.load_adjoint_config_yaml(config_file) 37 | 38 | 39 | @pytest.fixture 40 | def setup_calculate_adjsrc_on_trace_args(): 41 | obs = read(obsfile).select(channel="*R")[0] 42 | syn = read(synfile).select(channel="*R")[0] 43 | 44 | with open(winfile) as fh: 45 | wins_json = json.load(fh) 46 | windows = [] 47 | for _win in wins_json: 48 | windows.append(Window._load_from_json_content(_win)) 49 | 50 | return obs, syn, windows 51 | 52 | 53 | def test_calculate_adjsrc_on_trace_raises_if_obs_is_not_trace(): 54 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 55 | config = load_config_multitaper() 56 | obs = [] 57 | with pytest.raises(ValueError): 58 | adj.calculate_adjsrc_on_trace(obs, syn, win_time, config, 59 | adj_src_type="multitaper_misfit") 60 | 61 | 62 | def test_calculate_adrjrc_on_trace_raises_if_syn_is_not_trace(): 63 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 64 | config = load_config_multitaper() 65 | syn = [] 66 | with pytest.raises(ValueError): 67 | adj.calculate_adjsrc_on_trace(obs, syn, win_time, config, 68 | adj_src_type="multitaper_misfit") 69 | 70 | 71 | def 
test_calculate_adjsrc_on_trace_raises_if_config_is_not_config(): 72 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 73 | config = [] 74 | with pytest.raises(ValueError): 75 | adj.calculate_adjsrc_on_trace(obs, syn, win_time, config, 76 | adj_src_type="multitaper_misfit") 77 | 78 | 79 | def test_calculate_adjsrc_on_trace_raises_bad_windows_shape(): 80 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 81 | config = load_config_multitaper() 82 | win_time = [] 83 | with pytest.raises(ValueError): 84 | adj.calculate_adjsrc_on_trace(obs, syn, win_time, config, 85 | adj_src_type="multitaper_misfit") 86 | 87 | 88 | def test_calculate_adjsrc_on_trace_figure_mode_none_figure_dir(): 89 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 90 | config = load_config_multitaper() 91 | plt.switch_backend('agg') 92 | adjsrc = adj.calculate_adjsrc_on_trace( 93 | obs, syn, win_time, config, adj_src_type="multitaper_misfit", 94 | figure_mode=True) 95 | assert adjsrc 96 | 97 | 98 | # def test_calculate_adjsrc_on_trace_waveform_misfit_produces_adjsrc(): 99 | # obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 100 | # config = load_config_waveform() 101 | 102 | # adjsrc = adj.calculate_adjsrc_on_trace( 103 | # obs, syn, win_time, config, adj_src_type="waveform_misfit", 104 | # adjoint_src_flag=True, figure_mode=False) 105 | # assert adjsrc 106 | 107 | 108 | def test_calculate_adjsrc_on_trace_multitaper_misfit_produces_adjsrc(): 109 | obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 110 | config = load_config_multitaper() 111 | 112 | adjsrc = adj.calculate_adjsrc_on_trace( 113 | obs, syn, win_time, config, adj_src_type="multitaper_misfit", 114 | adjoint_src_flag=True, figure_mode=False) 115 | assert adjsrc 116 | 117 | 118 | # def test_calculate_adjsrc_on_trace_traveltime_misfit_produces_adjsrc(): 119 | # obs, syn, win_time = setup_calculate_adjsrc_on_trace_args() 120 | # config = load_config_traveltime() 121 | # 122 | # adjsrc = 
adj.calculate_adjsrc_on_trace( 123 | # obs, syn, win_time, config, adj_src_type="cc_traveltime_misfit", 124 | # adjoint_src_flag=True, figure_mode=False) 125 | # assert adjsrc 126 | 127 | 128 | @pytest.fixture 129 | def setup_calculate_adjsrc_on_stream_args(): 130 | obs = Stream(traces=[read(obsfile).select(channel="*R")[0]]) 131 | syn = Stream(traces=[read(synfile).select(channel="*R")[0]]) 132 | 133 | with open(winfile) as fh: 134 | wins_json = json.load(fh) 135 | 136 | return obs, syn, {obs[0].id: wins_json} 137 | 138 | 139 | def test_calculate_adjsrc_on_stream_raises_if_obs_is_not_stream(): 140 | _, syn, windows = setup_calculate_adjsrc_on_stream_args() 141 | config = load_config_multitaper() 142 | obs = [] 143 | with pytest.raises(ValueError): 144 | adj.calculate_adjsrc_on_stream(obs, syn, windows, config, 145 | adj_src_type="multitaper_misfit") 146 | 147 | 148 | def test_calculate_adjsrc_on_stream_raises_if_syn_is_not_stream(): 149 | obs, _, windows = setup_calculate_adjsrc_on_stream_args() 150 | config = load_config_multitaper() 151 | syn = [] 152 | with pytest.raises(ValueError): 153 | adj.calculate_adjsrc_on_stream(obs, syn, windows, config, 154 | adj_src_type="multitaper_misfit") 155 | 156 | 157 | def test_calculate_adjsrc_on_stream_raises_if_config_is_not_config(): 158 | obs, syn, windows = setup_calculate_adjsrc_on_stream_args() 159 | config = [] 160 | with pytest.raises(ValueError): 161 | adj.calculate_adjsrc_on_stream(obs, syn, windows, config, 162 | adj_src_type="multitaper_misfit") 163 | 164 | 165 | def test_calculate_adjsrc_on_stream_raises_if_windows_is_empty(): 166 | obs, syn, _ = setup_calculate_adjsrc_on_stream_args() 167 | config = load_config_multitaper() 168 | windows = None 169 | ret = adj.calculate_adjsrc_on_stream(obs, syn, windows, config, 170 | adj_src_type="multitaper_misfit") 171 | assert ret is None 172 | windows = {} 173 | ret = adj.calculate_adjsrc_on_stream(obs, syn, windows, config, 174 | adj_src_type="multitaper_misfit") 175 | 
    assert ret is None


# def test_calculate_adjsrc_on_stream_multitaper_misfit_produces_adjsrc():
#     obs, syn, windows = setup_calculate_adjsrc_on_stream_args()
#     config = load_config_traveltime()
#
#     adjsrc = adj.calculate_adjsrc_on_stream(
#         obs, syn, windows, config, adj_src_type="multitaper_misfit",
#         adjoint_src_flag=True, figure_mode=False)
#     assert adjsrc


# def test_calculate_adjsrc_on_stream_waveform_misfit_produces_adjsrc():
#     obs, syn, windows = setup_calculate_adjsrc_on_stream_args()
#     config = load_config_traveltime()
#
#     adjsrc = adj.calculate_adjsrc_on_stream(
#         obs, syn, windows, config, adj_src_type="waveform_misfit",
#         adjoint_src_flag=True, figure_mode=False)
#     assert adjsrc


# def test_calculate_adjsrc_on_stream_traveltime_misfit_produces_adjsrc():
#     obs, syn, windows = setup_calculate_adjsrc_on_stream_args()
#     config = load_config_traveltime()
#
#     adjsrc = adj.calculate_adjsrc_on_stream(
#         obs, syn, windows, config, adj_src_type="cc_traveltime_misfit",
#         adjoint_src_flag=True, figure_mode=False)
#     assert adjsrc


def test_measure_adjoint_on_stream():
    # TODO(review): placeholder -- measure_adjoint_on_stream has no
    # coverage yet.
    pass

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.


This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

0. Additional Definitions.
14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 
48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 
90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 
129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 

If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

--------------------------------------------------------------------------------
/pytomo3d/signal/rotate_utils.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Basic Methods that handle rotation of seismograms as
extension to Obspy. It can rotate `12`, `EN` and `RT`,
forward and backward.

:copyright:
    Wenjie Lei (lei@princeton.edu), 2016
:license:
    GNU Lesser General Public License, version 3 (LGPLv3)
    (http://www.gnu.org/licenses/lgpl-3.0.en.html)
"""
from __future__ import print_function
from math import cos, sin
from numpy import deg2rad


# Tolerance (in degrees) used when testing whether two azimuths are
# exactly 90 degrees apart.
SMALL_DEGREE = 0.01


def check_orthogonality(azim1, azim2):
    """
    Check if two azimuths are orthogonal, and whether
    (azim1, azim2, vertical) forms a left-hand or right-hand
    coordinate system.
    Remember the definition of azimuth is the angle measured from the
    north direction.
    Unit is degree, not radian.
30 | """ 31 | azim1 = (azim1 + 360) % 360 32 | azim2 = (azim2 + 360) % 360 33 | 34 | if abs(abs(azim1 - azim2) - 90.0) < SMALL_DEGREE: 35 | if abs(azim1 - azim2 - 90.0) < SMALL_DEGREE: 36 | return "right-hand" 37 | elif abs(azim1 - azim2 + 90.0) < SMALL_DEGREE: 38 | # should be orthogonal; otherwise return 39 | return "left-hand" 40 | else: 41 | return False 42 | else: 43 | # cross 360 degree 44 | if abs(azim1 - azim2 + 270.0) < SMALL_DEGREE: 45 | return "right-hand" 46 | elif abs(azim1 - azim2 - 270.0) < SMALL_DEGREE: 47 | return "left-hand" 48 | else: 49 | return False 50 | 51 | 52 | def rotate_certain_angle(d1, d2, angle, unit="degree"): 53 | """ 54 | Basic rotating function which rotate d1 and d2 by angle. 55 | d1 and d2 should be orthogonal to each other and form a 56 | 'left-handed' coordinate system together with vertical 57 | component. 58 | 59 | (d1, d2, Vertical) should form a left-handed coordinate system, i.e., 60 | Azimuth_{d2} = Azimuth_{d1} + 90.0 61 | For example, (North, East, Vertical) & (Radial, Transverse, Vertical) 62 | are both left-handed coordinate systems. The return value (dnew1, 63 | dnew2, vertic) should also form a left-handed coordinate system. 64 | The angle is azimuth differnce between d1 and dnew1, i.e., 65 | angle = Azimuth_{dnew1} - Azimuth_{d1} 66 | 67 | :type d1: :class:`~numpy.ndarray` 68 | :param d1: Data of one of the two horizontal components 69 | :type d2: :class:`~numpy.ndarray` 70 | :param d2: Data of one of the two horizontal components 71 | :type angle: float 72 | :param angle: component azimuth of data2 73 | :return: two new components after rotation 74 | """ 75 | if unit == "degree": 76 | angle = deg2rad(angle) 77 | elif unit == "radian": 78 | angle = angle 79 | else: 80 | raise ValueError("Unregonized unit(%s): 1) degree; 2) radian" 81 | % unit) 82 | 83 | if len(d1) != len(d2): 84 | raise ValueError("Length of d1(%d) and d2(%d) are not the same!" 
85 | % (len(d1), len(d2))) 86 | 87 | dnew1 = d1 * cos(angle) + d2 * sin(angle) 88 | dnew2 = -d1 * sin(angle) + d2 * cos(angle) 89 | return dnew1, dnew2 90 | 91 | 92 | def rotate_12_rt(d1, d2, baz, azim1, azim2): 93 | """ 94 | Rotate from any two orthogonal horizontal components to RT components 95 | 96 | :type d1: :class:`~numpy.ndarray` 97 | :param d1: Data of one of the two horizontal components 98 | :type d2: :class:`~numpy.ndarray` 99 | :param d2: Data of the other horizontal components 100 | :type baz: float 101 | :param baz: the back azimuth from station to source in degrees 102 | :type azim1: float 103 | :param azim1: component azimuth of data1 104 | :type azim2: float 105 | :param azim2: component azimuth of data2 106 | :return: Radial and Transeversal component of seismogram. (None, None) 107 | returned if input two components are not orthogonal 108 | """ 109 | status = check_orthogonality(azim1, azim2) 110 | if not status: 111 | # raise ValueError("azim1 and azim2 not orthogonal") 112 | return None, None 113 | if "right" in status: 114 | # flip to left-hand 115 | d1, d2 = d2, d1 116 | azim1, azim2 = azim2, azim1 117 | 118 | if baz < 0 or baz > 360: 119 | raise ValueError("Back Azimuth should be between 0 and 360 degree") 120 | 121 | # caculate the angle of rotation 122 | angle = baz + 180.0 - azim1 123 | r, t = rotate_certain_angle(d1, d2, angle) 124 | 125 | return r, t 126 | 127 | 128 | def rotate_rt_12(r, t, baz, azim1, azim2): 129 | """ 130 | Rotate from any two orthogonal horizontal components to RT components 131 | 132 | :type data1: :class:`~numpy.ndarray` 133 | :param data1: Data of one of the two horizontal components 134 | :type data2: :class:`~numpy.ndarray` 135 | :param data2: Data of the other horizontal components 136 | :type baz: float 137 | :param baz: the back azimuth from station to source in degrees 138 | :type azim1: float 139 | :param azim1: component azimuth of data1 140 | :type azim2: float 141 | :param azim2: component azimuth of 
data2 142 | :return: Radial and Transeversal component of seismogram. 143 | """ 144 | status = check_orthogonality(azim1, azim2) 145 | if not status: 146 | raise ValueError("azim1 and azim2 not orthogonal") 147 | if "left" in status: 148 | azim = azim1 149 | elif "right" in status: 150 | azim = azim2 151 | 152 | if baz < 0 or baz > 360: 153 | raise ValueError("Back Azimuth should be between 0 and 360 degree") 154 | 155 | # caculate the angle of rotation 156 | angle = - (baz + 180.0 - azim) 157 | d1, d2 = rotate_certain_angle(r, t, angle) 158 | 159 | if "right" in status: 160 | return d2, d1 161 | elif "left" in status: 162 | return d1, d2 163 | 164 | 165 | def rotate_12_ne(d1, d2, azim1, azim2): 166 | """ 167 | Rotate from any two orthogonal horizontal components to EN components. 168 | The azimuth of the two horizontal components are specified by azim1 169 | and azim2. 170 | 171 | :type d1: :class:`~numpy.ndarray` 172 | :param d1: Data of one of the two horizontal components 173 | :type d2: :class:`~numpy.ndarray` 174 | :param d2: Data of the other horizontal components 175 | :type azim1: float 176 | :param azim1: component azimuth of data1 177 | :type azim2: float 178 | :param azim2: component azimuth of data2 179 | :return: East and North component of seismogram. 180 | """ 181 | status = check_orthogonality(azim1, azim2) 182 | if not status: 183 | raise ValueError("azim1 and azim2 not orthogonal") 184 | if "right" in status: 185 | # flip to left-hand 186 | d1, d2 = d2, d1 187 | azim1, azim2 = azim2, azim1 188 | 189 | # caculate the angle of rotation 190 | n, e = rotate_certain_angle(d1, d2, -azim1) 191 | 192 | return n, e 193 | 194 | 195 | def rotate_ne_12(n, e, azim1, azim2): 196 | """ 197 | Rotate from East and North components to give two orghogonal horizontal 198 | components. Returned values are (d1, d2) and (d1, d2, Vertical) will 199 | form a left-handed coordinate system. 
200 | 201 | :type data1: :class:`~numpy.ndarray` 202 | :param data1: Data of one of the two horizontal components 203 | :type data2: :class:`~numpy.ndarray` 204 | :param data2: Data of the other horizontal components 205 | :type azim1: float 206 | :param azim1: component azimuth of data1 207 | :type azim2: float 208 | :param azim2: component azimuth of data2 209 | :return: two horizontal orthogonal seismogram after rotation. 210 | """ 211 | status = check_orthogonality(azim1, azim2) 212 | if not status: 213 | raise ValueError("azim1 and azim2 not orthogonal") 214 | if "left" in status: 215 | azim = azim1 216 | elif "right" in status: 217 | azim = azim2 218 | 219 | # caculate the angle of rotation 220 | d1, d2 = rotate_certain_angle(n, e, azim) 221 | 222 | if "right" in status: 223 | return d2, d1 224 | elif "left" in status: 225 | return d1, d2 226 | -------------------------------------------------------------------------------- /tests/data/quakeml/C201009031635A.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | smi:local/ndk/C201009031635A/origin#cmtorigin 6 | smi:local/ndk/C201009031635A/magnitude#moment_mag 7 | smi:local/ndk/C201009031635A/focal_mechanism 8 | earthquake 9 | known 10 | 11 | SOUTH ISLAND, NEW ZEALAND 12 | Flinn-Engdahl region 13 | 14 | 15 | C201009031635A 16 | earthquake name 17 | 18 | 19 | 22 | 23 | -43.53 24 | 25 | 26 | 171.81 27 | 28 | 29 | 12000.0 30 | 31 | hypocenter 32 | 33 | Hypocenter catalog: PDEW 34 | 35 | 36 | 37 | 41 | 42 | -43.56 43 | 0.0 44 | 45 | 46 | 172.12 47 | 0.0 48 | 49 | 50 | 12000.0 51 | 0.0 52 | 53 | from location 54 | false 55 | false 56 | centroid 57 | 58 | GCMT 59 | V10 60 | 61 | 62 | 63 | 64 | 6.97 65 | 66 | Mwc 67 | smi:local/ndk/C201009031635A/origin#cmtorigin 68 | 69 | GCMT 70 | V10 71 | 72 | 73 | 74 | 75 | 6.4 76 | 77 | mb 78 | 79 | Reported magnitude in NDK file. Most likely 'mb'. 
80 | 81 | 82 | 83 | 84 | 7.3 85 | 86 | MS 87 | 88 | Reported magnitude in NDK file. Most likely 'MS'. 89 | 90 | 91 | 92 | 93 | 94 | 95 | 178.0 96 | 97 | 98 | 82.0 99 | 100 | 101 | 1.0 102 | 103 | 104 | 105 | 106 | 88.0 107 | 108 | 109 | 89.0 110 | 111 | 112 | 172.0 113 | 114 | 115 | 116 | 117 | 118 | 119 | 43.0 120 | 121 | 122 | 7.0 123 | 124 | 125 | 3.453e+19 126 | 127 | 128 | 129 | 130 | 134.0 131 | 132 | 133 | 5.0 134 | 135 | 136 | -3.826e+19 137 | 138 | 139 | 140 | 141 | 259.0 142 | 143 | 144 | 82.0 145 | 146 | 147 | 3.73e+18 148 | 149 | 150 | 151 | 152 | smi:local/ndk/C201009031635A/origin#cmtorigin 153 | 154 | body waves 155 | 135 156 | 352 157 | 50.0 158 | 159 | 160 | surface waves 161 | 131 162 | 326 163 | 50.0 164 | 165 | 166 | mantle waves 167 | 132 168 | 313 169 | 125.0 170 | 171 | 172 | 3.64e+19 173 | 174 | 175 | 176 | 3.85e+18 177 | 1.2e+17 178 | 179 | 180 | 8e+16 181 | 1.3e+17 182 | 183 | 184 | -3.92e+18 185 | 1.2e+17 186 | 187 | 188 | 4.91e+18 189 | 4.3e+17 190 | 191 | 192 | 1e+17 193 | 4.2e+17 194 | 195 | 196 | -3.6e+19 197 | 1e+17 198 | 199 | 200 | 201 | triangle 202 | 15.0 203 | 204 | zero trace 205 | 206 | GCMT 207 | V10 208 | 209 | 210 | 211 | CMT Analysis Type: Standard 212 | 213 | 214 | CMT Timestamp: S-20101202124830 215 | 216 | 217 | GCMT 218 | V10 219 | 220 | 221 | 222 | 223 | 224 | -------------------------------------------------------------------------------- /pytomo3d/window/tests/test_window_weights.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import json 4 | import numpy.testing as npt 5 | import pytest 6 | 7 | from spaceweight import SpherePoint 8 | import pytomo3d.window.window_weights as ww 9 | 10 | 11 | def _upper_level(path, nlevel=4): 12 | """ 13 | Go the nlevel dir up 14 | """ 15 | for i in range(nlevel): 16 | path = os.path.dirname(path) 17 | return path 18 | 19 | 20 | # Most generic way to get the data folder path. 
21 | TESTBASE_DIR = _upper_level( 22 | os.path.abspath(inspect.getfile(inspect.currentframe())), 4) 23 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 24 | 25 | 26 | def load_json(filename): 27 | with open(filename) as fh: 28 | return json.load(fh) 29 | 30 | 31 | window_file = os.path.join(DATA_DIR, "window", "windows.fake.json") 32 | windows = load_json(window_file) 33 | station_file = os.path.join(DATA_DIR, "stations", "stations.fake.json") 34 | stations = load_json(station_file) 35 | 36 | 37 | def test_calculate_receiver_window_counts(): 38 | rec_counts, cat_wcounts = ww.calculate_receiver_window_counts(windows) 39 | 40 | _true = {"BHZ": {"II.AAK..BHZ": 3, "II.ABKT..BHZ": 2, "IU.BCD..BHZ": 5}, 41 | "BHR": {"II.AAK..BHR": 2, "II.ABKT..BHR": 1, "IU.BCD..BHR": 2}, 42 | "BHT": {"II.AAK..BHT": 1, "IU.BCD..BHT": 2}} 43 | assert rec_counts == _true 44 | 45 | _true = {"BHZ": 10, "BHR": 5, "BHT": 3} 46 | assert cat_wcounts == _true 47 | 48 | 49 | def test_assign_receiver_points(): 50 | rec_counts, _ = ww.calculate_receiver_window_counts(windows) 51 | 52 | def _assert(_points): 53 | for p in _points: 54 | if p.tag == "II.AAK..BHZ": 55 | npt.assert_almost_equal(p.latitude, 0.0) 56 | npt.assert_almost_equal(p.longitude, 0.0) 57 | elif p.tag == "II.ABKT..BHZ": 58 | npt.assert_almost_equal(p.latitude, 0.0) 59 | npt.assert_almost_equal(p.longitude, 120.0) 60 | elif p.tag == "IU.BCD..BHZ": 61 | npt.assert_almost_equal(p.latitude, 0.0) 62 | npt.assert_almost_equal(p.longitude, -120.0) 63 | 64 | points = ww.assign_receiver_to_points(rec_counts["BHZ"], stations) 65 | assert len(points) == 3 66 | _assert(points) 67 | 68 | points = ww.assign_receiver_to_points(rec_counts["BHR"], stations) 69 | assert len(points) == 3 70 | _assert(points) 71 | 72 | points = ww.assign_receiver_to_points(rec_counts["BHT"], stations) 73 | assert len(points) == 2 74 | _assert(points) 75 | 76 | 77 | def test_get_receiver_weights(): 78 | center = SpherePoint(0, 0, tag="source") 79 | 80 | 
rec_counts, _ = ww.calculate_receiver_window_counts(windows) 81 | 82 | points = ww.assign_receiver_to_points(rec_counts["BHZ"], stations) 83 | ref_distance, cond_number = ww.get_receiver_weights( 84 | "BHZ", center, points, 0.35, plot=False) 85 | for p in points: 86 | npt.assert_almost_equal(p.weight, 1.0) 87 | npt.assert_almost_equal(cond_number, 1.0) 88 | 89 | points = ww.assign_receiver_to_points(rec_counts["BHT"], stations) 90 | ref_distance, cond_number = ww.get_receiver_weights( 91 | "BHZ", center, points, 0.35, plot=False) 92 | for p in points: 93 | npt.assert_almost_equal(p.weight, 1.0) 94 | npt.assert_almost_equal(cond_number, 1.0) 95 | 96 | 97 | def test_normalize_receiver_weights(): 98 | rec_counts, cat_wcounts = ww.calculate_receiver_window_counts(windows) 99 | 100 | comp = "BHZ" 101 | channels = rec_counts[comp].keys() 102 | channels.sort() 103 | points = ww.assign_receiver_to_points(channels, stations) 104 | weights = ww.normalize_receiver_weights(points, rec_counts[comp]) 105 | assert len(weights) == 3 106 | for v in weights.itervalues(): 107 | npt.assert_almost_equal(v, 1.0) 108 | 109 | points[0].weight = 0.5 110 | points[1].weight = 0.75 111 | points[2].weight = 1.0 112 | weights = ww.normalize_receiver_weights(points, rec_counts[comp]) 113 | assert len(weights) == 3 114 | npt.assert_almost_equal(weights["II.AAK..BHZ"], 0.625) 115 | npt.assert_almost_equal(weights["II.ABKT..BHZ"], 0.9375) 116 | npt.assert_almost_equal(weights["IU.BCD..BHZ"], 1.25) 117 | 118 | 119 | def test_determin_receiver_weighting(): 120 | src = {"latitude": 0.0, "longitude": 0.0} 121 | results = ww.determine_receiver_weighting( 122 | src, stations, windows, search_ratio=0.35, weight_flag=True, 123 | plot_flag=False) 124 | 125 | assert len(results) == 5 126 | 127 | 128 | def test_receiver_validator(): 129 | src = {"latitude": 0.0, "longitude": 0.0} 130 | results = ww.determine_receiver_weighting( 131 | src, stations, windows, search_ratio=0.35, weight_flag=True, 132 | 
plot_flag=False) 133 | 134 | weights = results["rec_weights"] 135 | 136 | weights["BHZ"]["II.AAK..BHZ"] *= 2 137 | rec_counts, cat_wcounts = ww.calculate_receiver_window_counts(windows) 138 | with pytest.raises(ValueError): 139 | ww._receiver_validator(weights["BHZ"], rec_counts["BHZ"], 140 | cat_wcounts["BHZ"]) 141 | 142 | 143 | def test_normalize_category_weights(): 144 | cat_wcounts = {"17_40": {"BHR": 8, "BHT": 4, "BHZ": 16}, 145 | "40_100": {"BHR": 4, "BHT": 2, "BHZ": 4}, 146 | "90_250": {"BHR": 1, "BHT": 1, "BHZ": 2}} 147 | 148 | category_ratio = {"17_40": {"BHR": 0.125, "BHT": 0.25, "BHZ": 0.125}, 149 | "40_100": {"BHR": 0.25, "BHT": 0.5, "BHZ": 0.25}, 150 | "90_250": {"BHR": 1, "BHT": 1, "BHZ": 0.5}} 151 | 152 | weights = ww.normalize_category_weights(category_ratio, cat_wcounts) 153 | 154 | _true = {'17_40': {'BHR': 0.525, 'BHT': 1.05, 'BHZ': 0.525}, 155 | '40_100': {'BHR': 1.05, 'BHT': 2.1, 'BHZ': 1.05}, 156 | '90_250': {'BHR': 4.2, 'BHT': 4.2, 'BHZ': 2.1}} 157 | 158 | assert weights == _true 159 | 160 | 161 | def test_calculate_receiver_weights_interface(): 162 | pass 163 | 164 | 165 | def test_check_category_ratio_consistency(): 166 | ratio = { 167 | "17_40": {"BHR": 1.0, "BHT": 1.0, "BHZ": 1.0}, 168 | "40_100": {"BHR": 1.0, "BHT": 1.0, "BHZ": 1.0}, 169 | "90_250": {"BHR": 1.0, "BHT": 1.0, "BHZ": 1.0} 170 | } 171 | wcounts = { 172 | "17_40": {"BHR": 100, "BHT": 100, "BHZ": 100}, 173 | "40_100": {"BHR": 100, "BHT": 100, "BHZ": 100}, 174 | "90_250": {"BHR": 100, "BHT": 100, "BHZ": 100} 175 | } 176 | ww.check_category_ratio_consistency(ratio, wcounts) 177 | 178 | wcounts["90_250"].pop("BHZ", None) 179 | with pytest.raises(ValueError): 180 | ww.check_category_ratio_consistency(ratio, wcounts) 181 | 182 | 183 | def test_determin_category_weighting(): 184 | cat_wcounts = {"17_40": {"BHR": 8, "BHT": 4, "BHZ": 16}, 185 | "40_100": {"BHR": 4, "BHT": 2, "BHZ": 4}, 186 | "90_250": {"BHR": 1, "BHT": 1, "BHZ": 2}} 187 | 188 | category_ratio = {"17_40": {"BHR": 1, 
"BHT": 2, "BHZ": 1}, 189 | "40_100": {"BHR": 2, "BHT": 4, "BHZ": 2}, 190 | "90_250": {"BHR": 8, "BHT": 8, "BHZ": 4}} 191 | category_param = {"flag": True, "ratio": category_ratio} 192 | 193 | weights = ww.calculate_category_weights_interface( 194 | category_param, cat_wcounts) 195 | 196 | _true = {'17_40': {'BHR': 0.525, 'BHT': 1.05, 'BHZ': 0.525}, 197 | '40_100': {'BHR': 1.05, 'BHT': 2.1, 'BHZ': 1.05}, 198 | '90_250': {'BHR': 4.2, 'BHT': 4.2, 'BHZ': 2.1}} 199 | 200 | assert weights == _true 201 | 202 | 203 | def test_category_validator(): 204 | 205 | cat_wcounts = {"17_40": {"BHR": 8, "BHT": 4, "BHZ": 16}, 206 | "40_100": {"BHR": 4, "BHT": 2, "BHZ": 4}, 207 | "90_250": {"BHR": 1, "BHT": 1, "BHZ": 2}} 208 | 209 | category_ratio = {"17_40": {"BHR": 1, "BHT": 2, "BHZ": 1}, 210 | "40_100": {"BHR": 2, "BHT": 4, "BHZ": 2}, 211 | "90_250": {"BHR": 8, "BHT": 8, "BHZ": 4}} 212 | category_param = {"flag": True, "ratio": category_ratio} 213 | 214 | weights = ww.calculate_category_weights_interface( 215 | category_param, cat_wcounts) 216 | 217 | weights["17_40"]["BHR"] *= 2 218 | with pytest.raises(ValueError): 219 | ww._category_validator(weights, cat_wcounts) 220 | 221 | 222 | def test_combine_receiver_and_category_weights(): 223 | pass 224 | -------------------------------------------------------------------------------- /pytomo3d/window/tests/test_window.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import pytest 4 | import json 5 | 6 | import numpy as np 7 | import matplotlib as mpl 8 | import matplotlib.pyplot as plt 9 | 10 | from obspy import read, read_inventory, readEvents 11 | from pyflex import WindowSelector 12 | from pyflex.window import Window 13 | import pytomo3d.window.window as win 14 | import pytomo3d.window.io as wio 15 | 16 | 17 | def _upper_level(path, nlevel=4): 18 | """ 19 | Go the nlevel dir up 20 | """ 21 | for i in range(nlevel): 22 | path = os.path.dirname(path) 23 | return 
path 24 | 25 | 26 | def reset_matplotlib(): 27 | """ 28 | Reset matplotlib to a common default. 29 | """ 30 | # Set all default values. 31 | mpl.rcdefaults() 32 | # Force agg backend. 33 | plt.switch_backend('agg') 34 | # These settings must be hardcoded for running the comparision tests and 35 | # are not necessarily the default values. 36 | mpl.rcParams['font.family'] = 'Bitstream Vera Sans' 37 | mpl.rcParams['text.hinting'] = False 38 | # Not available for all matplotlib versions. 39 | try: 40 | mpl.rcParams['text.hinting_factor'] = 8 41 | except KeyError: 42 | pass 43 | import locale 44 | locale.setlocale(locale.LC_ALL, str('en_US.UTF-8')) 45 | 46 | 47 | # Most generic way to get the data folder path. 48 | TESTBASE_DIR = _upper_level( 49 | os.path.abspath(inspect.getfile(inspect.currentframe())), 4) 50 | DATA_DIR = os.path.join(TESTBASE_DIR, "tests", "data") 51 | 52 | obsfile = os.path.join(DATA_DIR, "proc", "IU.KBL.obs.proc.mseed") 53 | synfile = os.path.join(DATA_DIR, "proc", "IU.KBL.syn.proc.mseed") 54 | staxml = os.path.join(DATA_DIR, "stationxml", "IU.KBL.xml") 55 | quakeml = os.path.join(DATA_DIR, "quakeml", "C201009031635A.xml") 56 | 57 | 58 | def test_update_user_levels(): 59 | obs_tr = read(obsfile)[0] 60 | syn_tr = read(synfile)[0] 61 | 62 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 63 | config = wio.load_window_config_yaml(config_file) 64 | 65 | cat = readEvents(quakeml) 66 | inv = read_inventory(staxml) 67 | 68 | user_module = "pytomo3d.window.tests.user_module_example" 69 | config = win.update_user_levels(user_module, config, inv, cat, 70 | obs_tr, syn_tr) 71 | 72 | npts = obs_tr.stats.npts 73 | assert isinstance(config.stalta_waterlevel, np.ndarray) 74 | assert len(config.stalta_waterlevel) == npts 75 | assert isinstance(config.tshift_acceptance_level, np.ndarray) 76 | assert len(config.tshift_acceptance_level) == npts 77 | assert isinstance(config.dlna_acceptance_level, np.ndarray) 78 | assert 
len(config.dlna_acceptance_level) == npts 79 | assert isinstance(config.cc_acceptance_level, np.ndarray) 80 | assert len(config.cc_acceptance_level) == npts 81 | assert isinstance(config.s2n_limit, np.ndarray) 82 | assert len(config.s2n_limit) == npts 83 | 84 | 85 | def test_update_user_levels_raise(): 86 | user_module = "pytomo3d.window.tests.which_does_not_make_sense" 87 | with pytest.raises(Exception) as errmsg: 88 | win.update_user_levels(user_module, None, None, None, 89 | None, None) 90 | 91 | assert "Could not import the user_function module" in str(errmsg) 92 | 93 | user_module = "pytomo3d.window.io" 94 | with pytest.raises(Exception) as errmsg: 95 | win.update_user_levels(user_module, None, None, None, 96 | None, None) 97 | assert "Given user module does not have a generate_user_levels method" \ 98 | in str(errmsg) 99 | 100 | 101 | def test_window_on_trace(): 102 | obs_tr = read(obsfile).select(channel="*R")[0] 103 | syn_tr = read(synfile).select(channel="*R")[0] 104 | 105 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 106 | config = wio.load_window_config_yaml(config_file) 107 | 108 | cat = readEvents(quakeml) 109 | inv = read_inventory(staxml) 110 | 111 | windows = win.window_on_trace(obs_tr, syn_tr, config, station=inv, 112 | event=cat, _verbose=False, 113 | figure_mode=False) 114 | 115 | assert len(windows) == 5 116 | 117 | winfile_bm = os.path.join(DATA_DIR, "window", 118 | "IU.KBL..BHR.window.json") 119 | with open(winfile_bm) as fh: 120 | windows_json = json.load(fh) 121 | for _win, _win_json_bm in zip(windows, windows_json): 122 | _win_bm = Window._load_from_json_content(_win_json_bm) 123 | assert _win == _win_bm 124 | 125 | 126 | def test_window_on_trace_user_levels(): 127 | obs_tr = read(obsfile)[0] 128 | syn_tr = read(synfile)[0] 129 | 130 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 131 | config = wio.load_window_config_yaml(config_file) 132 | 133 | cat = readEvents(quakeml) 134 | inv = 
read_inventory(staxml) 135 | user_module = "pytomo3d.window.tests.user_module_example" 136 | 137 | windows = win.window_on_trace(obs_tr, syn_tr, config, station=inv, 138 | event=cat, user_module=user_module, 139 | _verbose=False, 140 | figure_mode=False) 141 | assert len(windows) == 4 142 | 143 | 144 | def test_window_on_trace_with_none_user_levels(): 145 | obs_tr = read(obsfile).select(channel="*R")[0] 146 | syn_tr = read(synfile).select(channel="*R")[0] 147 | 148 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 149 | config = wio.load_window_config_yaml(config_file) 150 | 151 | cat = readEvents(quakeml) 152 | inv = read_inventory(staxml) 153 | 154 | windows = win.window_on_trace(obs_tr, syn_tr, config, station=inv, 155 | event=cat, user_module="None", 156 | _verbose=False, figure_mode=False) 157 | 158 | winfile_bm = os.path.join(DATA_DIR, "window", 159 | "IU.KBL..BHR.window.json") 160 | with open(winfile_bm) as fh: 161 | windows_json = json.load(fh) 162 | for _win, _win_json_bm in zip(windows, windows_json): 163 | _win_bm = Window._load_from_json_content(_win_json_bm) 164 | assert _win == _win_bm 165 | 166 | 167 | def test_window_on_stream(): 168 | obs_tr = read(obsfile) 169 | syn_tr = read(synfile) 170 | 171 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 172 | config = wio.load_window_config_yaml(config_file) 173 | config_dict = {"Z": config, "R": config, "T": config} 174 | 175 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 176 | config = wio.load_window_config_yaml(config_file) 177 | 178 | cat = readEvents(quakeml) 179 | inv = read_inventory(staxml) 180 | 181 | windows = win.window_on_stream(obs_tr, syn_tr, config_dict, station=inv, 182 | event=cat, _verbose=False, 183 | figure_mode=False) 184 | 185 | assert len(windows) == 3 186 | nwins = dict((_w, len(windows[_w])) for _w in windows) 187 | assert nwins == {"IU.KBL..BHR": 5, "IU.KBL..BHZ": 2, "IU.KBL..BHT": 4} 188 | 189 | 190 | def 
test_window_on_stream_user_levels(): 191 | obs_tr = read(obsfile) 192 | syn_tr = read(synfile) 193 | 194 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 195 | config = wio.load_window_config_yaml(config_file) 196 | config_dict = {"Z": config, "R": config, "T": config} 197 | 198 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 199 | config = wio.load_window_config_yaml(config_file) 200 | 201 | cat = readEvents(quakeml) 202 | inv = read_inventory(staxml) 203 | 204 | _mod = "pytomo3d.window.tests.user_module_example" 205 | user_modules = {"BHZ": _mod, "BHR": _mod, "BHT": _mod} 206 | 207 | windows = win.window_on_stream(obs_tr, syn_tr, config_dict, station=inv, 208 | event=cat, user_modules=user_modules, 209 | _verbose=False, 210 | figure_mode=False) 211 | 212 | assert len(windows) == 3 213 | nwins = dict((_w, len(windows[_w])) for _w in windows) 214 | assert nwins == {"IU.KBL..BHR": 5, "IU.KBL..BHZ": 2, "IU.KBL..BHT": 4} 215 | 216 | 217 | def test_plot_window_figure(tmpdir): 218 | reset_matplotlib() 219 | 220 | obs_tr = read(obsfile).select(channel="*R")[0] 221 | syn_tr = read(synfile).select(channel="*R")[0] 222 | 223 | config_file = os.path.join(DATA_DIR, "window", "27_60.BHZ.config.yaml") 224 | config = wio.load_window_config_yaml(config_file) 225 | 226 | cat = readEvents(quakeml) 227 | inv = read_inventory(staxml) 228 | 229 | ws = WindowSelector(obs_tr, syn_tr, config, event=cat, station=inv) 230 | windows = ws.select_windows() 231 | 232 | assert len(windows) > 0 233 | 234 | win.plot_window_figure(str(tmpdir), obs_tr.id, ws, True, 235 | figure_format="png") 236 | --------------------------------------------------------------------------------