├── doc ├── README.md ├── requirements.txt ├── index.rst ├── Makefile └── conf.py ├── array_processing ├── __init__.py ├── algorithms │ ├── __init__.py │ ├── srcLoc.py │ ├── helpers.py │ └── fk_freq.py └── tools │ ├── __init__.py │ ├── detection.py │ ├── plotting.py │ ├── array_characterization.py │ └── generic.py ├── readthedocs.yml ├── .gitignore ├── environment.yml ├── setup.py ├── LICENSE.txt ├── example.py └── README.md /doc/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /array_processing/__init__.py: -------------------------------------------------------------------------------- 1 | from . import algorithms 2 | from . import tools 3 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinxcontrib-apidoc 2 | recommonmark 3 | sphinx_rtd_theme 4 | -------------------------------------------------------------------------------- /array_processing/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | from .fk_freq import fk_freq 2 | from .srcLoc import srcLoc 3 | from .helpers import getrij, compass2rij 4 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-22.04" 5 | tools: 6 | python: "3.12" 7 | 8 | sphinx: 9 | configuration: doc/conf.py 10 | 11 | python: 12 | install: 13 | - requirements: doc/requirements.txt 14 | - method: pip 15 | path: . 16 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | array_processing 2 | ================ 3 | 4 | Various array processing tools for infrasound and seismic data 5 | 6 | .. toctree:: 7 | :caption: README 8 | 9 | README.md 10 | 11 | .. toctree:: 12 | :caption: API Reference 13 | 14 | api/array_processing.rst 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # PyCharm project settings folder 2 | .idea/ 3 | 4 | # macOS Finder things 5 | **/.DS_Store 6 | **/._.DS_Store 7 | 8 | # Python setup and runtime things 9 | **/__pycache__ 10 | build/ 11 | dist/ 12 | *egg-info 13 | 14 | # Temporary/swap files from text editors 15 | **/*~ 16 | **/*.swp 17 | **/._* 18 | 19 | # Documentation things 20 | doc/_build/ 21 | doc/api/ 22 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: uafinfra 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - python<3.11 # Temporary fix (see https://github.com/numba/numba/issues/8304) 7 | - fastkml<1 8 | - ipython 9 | - obspy 10 | - numba 11 | - pip 12 | - pip: 13 | - git+https://github.com/uafgeotools/waveform_collection.git 14 | - git+https://github.com/uafgeotools/lts_array.git 15 | - -e . 
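# A minimal sketch of how this file is typically used (commands as given in
# the README; run from the repository root):
#
#   conda env create -f environment.yml
#   conda activate uafinfra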
16 | -------------------------------------------------------------------------------- /array_processing/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .array_characterization import (arraySig, impulseResp, rthEllipse, 2 | co_array, chi2, cubicEqn, quadraticEqn, 3 | quarticEqn, read_kml) 4 | from .detection import fstatbland 5 | from .generic import (array_thresh, beamForm, phaseAlignData, phaseAlignIdx, 6 | tauCalcPW, tauCalcSW, tauCalcSWxy, randc, psf) 7 | from .plotting import array_plot, arraySigPlt, arraySigContourPlt 8 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | import os 3 | 4 | # https://github.com/readthedocs/readthedocs.org/issues/5512#issuecomment-475073310 5 | on_rtd = os.environ.get('READTHEDOCS') == 'True' 6 | if on_rtd: 7 | INSTALL_REQUIRES = [] 8 | else: 9 | INSTALL_REQUIRES = ['waveform_collection', 'lts_array'] 10 | 11 | config = {'name': 'array_processing', 12 | 'url': 'https://github.com/uafgeotools/array_processing', 13 | 'packages': find_packages(), 14 | 'install_requires': ['waveform_collection', 'lts_array'] 15 | } 16 | 17 | setup( 18 | name='array_processing', 19 | packages=find_packages(), 20 | install_requires=INSTALL_REQUIRES 21 | ) 22 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017-2020 The University of Alaska Fairbanks 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.insert(0, os.path.abspath('../array_processing')) 5 | 6 | project = 'array_processing' 7 | 8 | html_show_copyright = False 9 | 10 | extensions = [ 11 | 'sphinx.ext.autodoc', 12 | 'sphinx.ext.napoleon', 13 | 'sphinx.ext.intersphinx', 14 | 'recommonmark', 15 | 'sphinx.ext.viewcode', 16 | 'sphinxcontrib.apidoc', 17 | 'sphinx.ext.mathjax', 18 | ] 19 | 20 | html_theme = 'sphinx_rtd_theme' 21 | 22 | napoleon_numpy_docstring = False 23 | 24 | master_doc = 'index' 25 | 26 | autodoc_mock_imports = ['numpy', 27 | 'obspy', 28 | 'scipy', 29 | 'matplotlib', 30 | 'fastkml' 31 | ] 32 | 33 | apidoc_module_dir = '../array_processing' 34 | 35 | apidoc_output_dir = 'api' 36 | 37 | apidoc_separate_modules = True 38 | 39 | apidoc_toc_file = False 40 | 41 | intersphinx_mapping = { 42 | 'python': ('https://docs.python.org/3/', None), 43 | 'numpy': ('https://docs.scipy.org/doc/numpy', None), 44 | 'obspy': ('https://docs.obspy.org/', None), 45 | 'matplotlib': ('https://matplotlib.org/', None), 46 | 'lts_array': ('https://uaf-lts-array.readthedocs.io/en/latest/', None) 47 | } 48 | -------------------------------------------------------------------------------- /array_processing/algorithms/srcLoc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import optimize 3 | from ..tools.generic import tauCalcSWxy 4 | 5 | 6 | def srcLoc(rij, tau, nord=2, seedXY_size=0.05, seedV_size=0.3): 7 | r""" 8 | Estimate a geographical source location and propagation velocity for an 9 | event recorded on an array of sensors. 10 | 11 | Args: 12 | rij: ``(d, n)`` array; ``n`` array coordinates as [easting, northing, 13 | {elevation}] column vectors in ``d`` dimensions 14 | tau: ``(n*(n-1)/2, )`` array; unique inter-sensor TDOA information 15 | (delays) 16 | nord: Order of the norm to calculate the cost function (default is 2 17 | for the usual Euclidean :math:`L^2` norm) 18 | seedXY_size (float): Geographic seed value 19 | seedV_size (float): Propagation velocity seed value 20 | 21 | Returns: 22 | tuple: Tuple containing: 23 | 24 | - **Sxyc** – ``(d+1, )`` array; optimized source location as geographic 25 | coordinates (same as the columns of ``rij``) and propagation speed 26 | - **Srtc** – ``(d+1, )`` array; optimized source location as [range, 27 | azimuth, {elevation}, propagation speed] 28 | 29 | Notes: 30 | This is a Pythonic method for ``srcLoc`` that might've been dubbed 31 | ``srcLocLite``. It takes a naïve approach to the seed, ignoring Dr. 32 | Arnoult's spacetime approach, but takes into account the quirks of 33 | the Nelder-Mead optimization and produces a fairly good (if not great) 34 | facsimile of the MATLAB version. 
35 | """ 
36 | 
37 | # The below line can be removed once we add rij2rTh 
38 | raise NotImplementedError('rij2rTh not available!') 
39 | 
40 | # cost function 
41 | def minTau(xyv_trial, tau_o, rij): 
42 | tau_trial = tauCalcSWxy(xyv_trial[-1], xyv_trial[:-1], rij) 
43 | return np.linalg.norm(tau_o - tau_trial, nord) 
44 | # construct naive seed 
45 | xyv_seed = [seedXY_size] * len(rij) + [seedV_size] 
46 | for k in range(0, len(xyv_seed)-1, 2): 
47 | xyv_seed[k] = -xyv_seed[k] 
48 | # perform optimization 
49 | costFn = lambda xyv_trial: minTau(xyv_trial, tau, rij) 
50 | xyv_opt = optimize.minimize(costFn, xyv_seed, method='Nelder-Mead') 
51 | 
52 | return xyv_opt.x, rij2rTh(xyv_opt.x[:len(rij)]) 
53 | 
-------------------------------------------------------------------------------- 
/example.py: 
-------------------------------------------------------------------------------- 
1 | #%% User-defined parameters 
2 | 
3 | from waveform_collection import gather_waveforms 
4 | from obspy.core import UTCDateTime 
5 | import numpy as np 
6 | 
7 | # Data collection 
8 | SOURCE = 'IRIS' 
9 | NETWORK = 'IM' 
10 | STATION = 'I53H?' 
11 | LOCATION = '*' 
12 | CHANNEL = 'BDF' 
13 | START = UTCDateTime('2018-12-19T01:45:00') 
14 | END = START + 20*60 
15 | 
16 | # Filtering 
17 | FMIN = 0.1 # [Hz] 
18 | FMAX = 1.0 # [Hz] 
19 | 
20 | # Array processing 
21 | WINLEN = 50 # [s] 
22 | WINOVER = 0.5 
23 | 
24 | #%% Grab and filter waveforms 
25 | 
26 | st = gather_waveforms(SOURCE, NETWORK, STATION, LOCATION, CHANNEL, START, END, 
27 | remove_response=True) 
28 | 
29 | st.filter('bandpass', freqmin=FMIN, freqmax=FMAX, corners=2, zerophase=True) 
30 | st.taper(max_percentage=0.01) 
31 | 
32 | #%% Array processing and plotting using least squares 
33 | 
34 | from array_processing.tools.plotting import array_plot 
35 | from lts_array import ltsva 
36 | 
37 | latlist = [tr.stats.latitude for tr in st] 
38 | lonlist = [tr.stats.longitude for tr in st] 
39 | 
40 | #%% Array processing. ALPHA = 1.0: least squares processing. 
41 | ALPHA = 1.0 
42 | vel, baz, t, mdccm, stdict, sigma_tau, conf_int_vel, conf_int_baz = ltsva(st, latlist, lonlist, WINLEN, WINOVER, ALPHA) 
43 | 
44 | fig1, axs1 = array_plot(st, t, mdccm, vel, baz, ccmplot=True, mcthresh=0.6, 
45 | sigma_tau=sigma_tau) 
46 | 
47 | #%% Array processing. 0.5 <= ALPHA < 1.0: least trimmed squares processing.
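# ALPHA sets the fraction of data retained by the fit (see the README note):
# ALPHA = 1.0 performs ordinary least squares and returns sigma_tau, while
# 0.5 <= ALPHA < 1.0 trims apparently non-planar element pairs, reports them
# in stdict, and returns sigma_tau as np.nan.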
48 | ALPHA_LTS = 0.50 49 | vel_lts, baz_lts, t_lts, mdccm_lts, stdict_lts, sigma_tau, conf_int_vel, conf_int_baz = ltsva(st, latlist, lonlist, WINLEN, WINOVER, ALPHA_LTS) 50 | 51 | fig1_lts, axs1_lts = array_plot(st, t_lts, mdccm_lts, vel_lts, baz_lts, ccmplot=True, mcthresh=0.6, sigma_tau=None, stdict=stdict_lts) 52 | 53 | #%% Array uncertainty 54 | from array_processing.algorithms.helpers import getrij 55 | from array_processing.tools import arraySig 56 | from array_processing.tools.plotting import arraySigPlt, arraySigContourPlt 57 | 58 | SIGLEVEL = 1/st[0].stats.sampling_rate 59 | KMAX = 400 60 | TRACE_VELOCITY = 0.33 61 | rij = getrij(latlist, lonlist) 62 | 63 | 64 | sigV, sigTh, impResp, vel, th, kvec = arraySig(rij, kmax=KMAX, 65 | sigLevel=SIGLEVEL) 66 | 67 | fig2 = arraySigPlt(rij, SIGLEVEL, sigV, sigTh, impResp, vel, th, kvec) 68 | 69 | fig3 = arraySigContourPlt(sigV, sigTh, vel, th, trace_v=TRACE_VELOCITY) 70 | 71 | #%% Delay and sum beam 72 | 73 | from array_processing.tools import beamForm 74 | 75 | data = np.array([tr.data for tr in st]).transpose() 76 | beam = beamForm(data, rij, st[0].stats.sampling_rate, 50) 77 | 78 | #%% Pure state filter 79 | 80 | from array_processing.tools import psf 81 | 82 | x_psf, P = psf(data, p=2, w=3, n=3, window=None) 83 | -------------------------------------------------------------------------------- /array_processing/tools/detection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from obspy.core import Stream 3 | from .generic import phaseAlignIdx, phaseAlignData 4 | 5 | 6 | def fstatbland(dtmp, fs, tau): 7 | r""" 8 | Calculates the F-statistic and SNR based on Blandford's method (see Notes). 9 | 10 | Args: 11 | dtmp: ``(m, n)`` array; time series with ``m`` samples from ``n`` 12 | traces as columns 13 | fs (int or float): Sample rate [Hz] 14 | tau: ``(n(n-1)//2)`` array; time delays of relative signal arrivals 15 | (TDOA) for all unique sensor pairings 16 | 17 | Returns: 18 | tuple: Tuple containing: 19 | 20 | - **fstat** – F-statistic 21 | - **snr** – SNR 22 | 23 | References: 24 | Blandford, R. R., 1974. An automatic event detector at the Tonto 25 | Forest Seismic Observatory. Geophysics, vol. 39, no. 5, 26 | p. 633–643. `https://library.seg.org/doi/abs/10.1190/1.1440453 27 | `__ 28 | """ 29 | 30 | m, n = dtmp.shape 31 | wgt = np.ones(n) 32 | 33 | #individual trace offsets from arrival model shifts. Zeros here 34 | m_offset = [0 for i in range(n)] 35 | 36 | # calculate beam delays 37 | beam_delays = phaseAlignIdx(tau, fs, wgt, 0) 38 | 39 | # apply shifts, resulting in a zero-padded array 40 | beam = phaseAlignData(dtmp, beam_delays, wgt, 0, m, m_offset) 41 | 42 | fnum = np.sum(np.sum(beam, axis=1)**2) 43 | term1 = np.sum(beam, axis=1)/n 44 | term1_0 = term1 45 | for i in range(1, n): 46 | term1 = np.vstack((term1, term1_0)) 47 | fden = np.sum(np.sum((beam.T - term1)**2)) 48 | fstat = (n-1) * fnum/(n * fden) 49 | 50 | #calculate snr based on fstat 51 | snr = np.sqrt((fstat-1)/n) 52 | 53 | return fstat, snr 54 | 55 | 56 | def calculate_semblance(data_in): 57 | r""" 58 | Calculates the semblance, a measure of multi-channel coherence, following 59 | the definition of Neidell & Taner (1971). Assumes data are already 60 | time-shifted to construct the beam. 
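A minimal sanity check, using synthetic data (values here are illustrative;
identical, already-aligned traces should give a semblance of exactly 1):

.. code-block:: python

    import numpy as np

    x = np.random.randn(1000)
    data = np.tile(x, (4, 1))  # four identical traces, no shifts needed
    calculate_semblance(data)  # -> 1.0 (to floating-point precision)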
61 | 62 | Args: 63 | data_in: Time-shifted ObsPy Stream or time-shifted NumPy array 64 | 65 | Returns: 66 | Multi-channel coherence defined on :math:`[0, 1]` 67 | """ 68 | 69 | if isinstance(data_in, Stream): 70 | # check that all traces have the same length 71 | if len(set([len(tr) for tr in data_in])) != 1: 72 | raise ValueError('Traces in stream must have same length!') 73 | 74 | n = len(data_in) 75 | 76 | beam = np.sum([tr.data for tr in data_in], axis=0) / n 77 | beampower = n * np.sum(beam**2) 78 | 79 | avg_power = np.sum(np.sum([tr.data**2 for tr in data_in], axis=0)) 80 | 81 | elif isinstance(data_in, np.ndarray): 82 | n = data_in.shape[0] 83 | 84 | beam = np.sum(data_in, axis=0) / n 85 | beampower = n * np.sum(beam**2) 86 | 87 | avg_power = np.sum(np.sum([data_in**2], axis=0)) 88 | 89 | semblance = beampower / avg_power 90 | 91 | return semblance 92 | -------------------------------------------------------------------------------- /array_processing/algorithms/helpers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from obspy.geodetics import gps2dist_azimuth 3 | 4 | _M_PER_KM = 1000 # [m/km] 5 | 6 | def getrij(latlist, lonlist): 7 | r""" 8 | Calculate ``rij`` from lat-lon. Returns the projected geographic positions 9 | in :math:`x`–:math:`y` with zero-mean. Typically used for array locations. 10 | 11 | Args: 12 | latlist (list): List of latitude points 13 | lonlist (list): List of longitude points 14 | 15 | Returns: 16 | :class:`numpy.ndarray` with the first row corresponding to Cartesian 17 | :math:`x`-coordinates and the second row corresponding to Cartesian 18 | :math:`y`-coordinates, in units of km 19 | """ 20 | 21 | latsize = len(latlist) 22 | lonsize = len(lonlist) 23 | 24 | # Basic error checking 25 | if latsize != lonsize: 26 | raise ValueError('latsize != lonsize') 27 | 28 | xnew = np.zeros((latsize, )) 29 | ynew = np.zeros((lonsize, )) 30 | for i in range(1, lonsize): 31 | # WGS84 ellipsoid 32 | dist, az, _ = gps2dist_azimuth(latlist[0], lonlist[0], 33 | latlist[i], lonlist[i]) 34 | # Convert azimuth in degrees to angle in radians 35 | ang = np.deg2rad((450 - az) % 360) 36 | # Convert from m to km, do trig 37 | xnew[i] = (dist / _M_PER_KM) * np.cos(ang) 38 | ynew[i] = (dist / _M_PER_KM) * np.sin(ang) 39 | 40 | # Remove the mean 41 | xnew = xnew - xnew.mean() 42 | ynew = ynew - ynew.mean() 43 | 44 | # Form rij array 45 | rij = np.vstack((xnew, ynew)) 46 | 47 | return rij 48 | 49 | 50 | def compass2rij(distances, azimuths): 51 | """Convert tape-and-compass survey data to Cartesian :math:`x`–:math:`y` coordinates. 52 | 53 | The output type is the same as the :func:`getrij` function. Note that typically, 54 | distances and azimuths will be surveyed from one of the array elements. In this 55 | case, that array element will have distance 0 and azimuth 0. However, this function 56 | can handle an arbitrary reference point for the distances and azimuths. This 57 | function assumes that all array elements lie on the same plane. 
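A minimal sketch with a hypothetical three-element survey, where the first
element is the reference point (so it has distance 0 and azimuth 0):

.. code-block:: python

    # distances in m, azimuths in degrees from true north
    rij = compass2rij([0, 25, 25], [0, 90, 180])  # -> (2, 3) array in km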
58 | 
59 | Args: 
60 | distances (array): Distances to each array element, in meters 
61 | azimuths (array): Azimuths to each array element, in degrees from **true** north 
62 | 
63 | Returns: 
64 | :class:`numpy.ndarray` with the first row corresponding to Cartesian 
65 | :math:`x`-coordinates and the second row corresponding to Cartesian 
66 | :math:`y`-coordinates, in units of km 
67 | """ 
68 | 
69 | # Type conversion and error checking 
70 | distances = np.array(distances) 
71 | azimuths = np.array(azimuths) 
72 | if distances.size != azimuths.size: 
73 | raise ValueError('There must be the same number of distances and azimuths') 
74 | assert (distances >= 0).all(), 'Distances cannot be negative' 
75 | assert ((azimuths >= 0) & (azimuths < 360)).all(), 'Azimuths must be 0–359°' 
76 | 
77 | # Convert distances and azimuths to Cartesian coordinates in units of km 
78 | x = distances * np.sin(np.deg2rad(azimuths)) / _M_PER_KM 
79 | y = distances * np.cos(np.deg2rad(azimuths)) / _M_PER_KM 
80 | 
81 | # Remove the mean 
82 | x -= x.mean() 
83 | y -= y.mean() 
84 | 
85 | # Form rij array 
86 | rij = np.vstack((x, y)) 
87 | 
88 | return rij 
89 | 
-------------------------------------------------------------------------------- 
/array_processing/algorithms/fk_freq.py: 
-------------------------------------------------------------------------------- 
1 | import numpy as np 
2 | from math import pow, ceil 
3 | from obspy.signal.invsim import cosine_taper 
4 | 
5 | 
6 | def fk_freq(data, fs, rij, vmin, vmax, fmin, fmax, nvel, ntheta): 
7 | r""" 
8 | :math:`f`–:math:`k` beamforming with loop over frequency bands. 
9 | 
10 | Args: 
11 | data: ``(m, n)`` array; time series with ``m`` samples from ``n`` 
12 | traces as columns 
13 | fs (int or float): Sample rate [Hz] 
14 | rij: ``(d, n)`` array; ``n`` sensor coordinates as [northing, easting, 
15 | {elevation}] column vectors in ``d`` dimensions 
16 | vmin (int or float): Min velocity in km/s, suggest 0.25 
17 | vmax (int or float): Max velocity in km/s, suggest 0.45 
18 | fmin (int or float): Minimum frequency in Hz 
19 | fmax (int or float): Maximum frequency in Hz 
20 | nvel (int or float): Number of velocity iterations, suggest 100–200 
21 | ntheta (int or float): Number of azimuth iterations, suggest 100–200 
22 | 
23 | Returns: 
24 | ``(nvel, ntheta)`` array; beamformed slowness map, not normalized. Can 
25 | find max using 
26 | 
27 | ..
code-block:: python 28 | 29 | ix, iy = np.unravel_index(bmpwr.argmax(), bmpwr.shape) 30 | """ 31 | 32 | #reshape rij from standard setup 33 | rij = np.transpose(rij) 34 | rij[:, 0] = rij[:, 0] - np.mean(rij[:, 0]) 35 | rij[:, 1] = rij[:, 1] - np.mean(rij[:, 1]) 36 | 37 | # Getting the size of the data 38 | [m, nsta] = np.shape(data) 39 | 40 | # set up velocity/slowness and theta vectors 41 | sits = np.linspace(1/vmax, 1/vmin, int(nvel)) 42 | theta = np.linspace(0, 2*np.pi, ntheta) 43 | 44 | # Getting initial time shifts 45 | # x time delay 46 | cost = np.cos(theta) 47 | Tx1 = np.outer(sits, np.transpose(cost)) 48 | Txm = Tx1[:, :, None] * np.transpose(rij[:, 1]) 49 | 50 | # y time delay 51 | sint = np.sin(theta) 52 | Ty1 = np.outer(sits, np.transpose(sint)) 53 | Tym = Ty1[:, :, None] * np.transpose(rij[:, 0]) 54 | 55 | # All possible time delays 56 | TT = Txm + Tym 57 | 58 | # Computing the next power of 2 for fft input 59 | n2 = ceil(np.log2(m)) 60 | nfft = int(pow(2, n2)) 61 | 62 | # Frequency increment 63 | deltaf = fs / nfft 64 | 65 | # Getting frequency bands 66 | nlow = int(fmin / float(deltaf) + 0.5) 67 | nhigh = int(fmax / float(deltaf) + 0.5) 68 | nlow = max(1, nlow) # avoid using the offset 69 | nhigh = min(nfft // 2 - 1, nhigh) # avoid using Nyquist 70 | nf = nhigh - nlow + 1 # include upper and lower frequency 71 | 72 | # Apply a 22% Cosine Taper 73 | taper = cosine_taper(m, p=0.22) 74 | 75 | # Calculated the FFT of each trace 76 | # ft are complex Fourier coefficients 77 | # is this faster in scipy? 78 | ft = np.empty((nsta, nf), dtype=np.complex128) 79 | for jj in range(nsta): 80 | data[:, jj] = data[:, jj] - np.mean(data[:, jj]) 81 | dat = data[:, jj] * taper 82 | ft[jj, :] = np.fft.rfft(dat, nfft)[nlow:nlow + nf] 83 | 84 | # Change data structure for performance boost --> Check this. 85 | ft = np.ascontiguousarray(ft, np.complex128) 86 | 87 | # Pre-allocating 88 | freqrange = np.linspace(fmin, fmax, nf) 89 | pow_mapt = np.zeros((int(nvel), int(ntheta)), dtype=np.float64, order='C') 90 | pow_mapb = np.zeros((int(nvel), int(ntheta)), dtype=np.float64, order='C') 91 | flen = len(freqrange) 92 | 93 | # loop entire slowness map over frequencies 94 | # compute covariance 95 | for ii in range(flen): 96 | # Generating the exponentials - steering vectors 97 | freq = freqrange[ii] 98 | expo = -1j * 2 * np.pi * freq * TT 99 | Master = np.exp(expo) 100 | # Broadcasting the Fourier coefficients at each station 101 | fcoeff = ft[:, ii] 102 | Master = Master * fcoeff[None, None, :] 103 | Top = np.sum(Master, axis=2) 104 | Top2 = np.real(np.multiply(Top.conj(), Top)) 105 | Bot = np.real(np.multiply(Master.conj(), Master)) 106 | Bot = np.sum(Bot, axis=2) 107 | pow_mapt += Top2 108 | pow_mapb += Bot 109 | pow_map = pow_mapt/pow_mapb 110 | 111 | return pow_map 112 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | array_processing 2 | ================ 3 | 4 | [![](https://readthedocs.org/projects/uaf-array-processing/badge/?version=master)](https://uaf-array-processing.readthedocs.io/) 5 | 6 | Various array processing tools for infrasound and seismic data. By default uses 7 | least-squares to determine the trace velocity and back-azimuth of a plane wave 8 | crossing an array in sliding time windows. More advanced processing (such as 9 | least-trimmed squares) is easily integrated. 
Also provides tools to characterize 10 | the array response, uncertainty, source-location of a spherical wave crossing 11 | the array, etc. See 12 | [documentation](https://uaf-array-processing.readthedocs.io/) and 13 | [`example.py`](https://github.com/uafgeotools/array_processing/blob/master/example.py) 14 | for more info. 15 | 16 | **General References and Suggested Citations** 17 | 18 | _Least squares and array uncertainty:_ 19 | 20 | Szuberla, C. A. L., & Olson, J. V. (2004). Uncertainties associated with 21 | parameter estimation in atmospheric infrasound arrays, J. Acoust. Soc. Am., 22 | 115(1), 253–258. 23 | [https://doi.org/doi:10.1121/1.1635407](https://doi.org/doi:10.1121/1.1635407) 24 | 25 | _Least-trimmed squares:_ 26 | 27 | Bishop, J. W., Fee, D., & Szuberla, C. A. L. (2020). Improved infrasound array 28 | processing with robust estimators, Geophys. J. Int., 221 p. 2058-2074. 29 | [https://doi.org/10.1093/gji/ggaa110](https://doi.org/10.1093/gji/ggaa110) 30 | 31 | Installation 32 | ------------ 33 | 34 | We recommend you install this package into a new 35 | [conda](https://docs.conda.io/projects/conda/en/latest/index.html) environment. 36 | (Please install [Anaconda](https://www.anaconda.com/products/individual) or 37 | [Miniconda](https://docs.conda.io/en/latest/miniconda.html) before proceeding.) 38 | The environment must contain all of the packages listed in the 39 | [Dependencies](#dependencies) section. For ease of installation, we've provided 40 | an 41 | [`environment.yml`](https://github.com/uafgeotools/array_processing/blob/master/environment.yml) 42 | file which specifies all of these dependencies as well as instructions for 43 | installing _array_processing_ itself. To install _array_processing_ in this 44 | manner, execute the following commands: 45 | ``` 46 | git clone https://github.com/uafgeotools/array_processing.git 47 | cd array_processing 48 | conda env create -f environment.yml 49 | ``` 50 | This creates a new conda environment named `uafinfra` and installs 51 | _array_processing_ and all of its dependencies there. 52 | 53 | The final line in the `environment.yml` file installs _array_processing_ in "editable" mode, which 54 | means that you can update it with a simple `git pull` in your local repository. 55 | We recommend you do this often, since this code is still under rapid 56 | development. 57 | 58 |
59 | 
60 | For installation into a pre-existing conda environment, follow the steps below. 
61 | 
62 | 
63 | First ensure you have ObsPy and FastKML installed (`conda install -c conda-forge 
64 | obspy fastkml`) and then download and install the _uafgeotools_ 
65 | dependencies and this package with: 
67 | 
68 | 
69 | ``` 
70 | pip install git+https://github.com/uafgeotools/waveform_collection.git 
71 | pip install git+https://github.com/uafgeotools/lts_array.git 
72 | pip install git+https://github.com/uafgeotools/array_processing.git 
73 | ``` 
74 | (Note that this option does not produce a local clone of the repository.) 
75 | 
76 | 77 | Dependencies 78 | ------------ 79 | 80 | _uafgeotools_ packages: 81 | 82 | * [_waveform_collection_](https://github.com/uafgeotools/waveform_collection) 83 | * [_lts_array_](https://github.com/uafgeotools/lts_array) 84 | 85 | Python packages: 86 | 87 | * [ObsPy](https://docs.obspy.org/) 88 | * [FastKML](https://fastkml.readthedocs.io/) 89 | 90 | Usage 91 | ----- 92 | 93 | Import the package like any other Python package, ensuring the correct 94 | environment is active. For example, 95 | ``` 96 | $ conda activate uafinfra 97 | $ python 98 | >>> import array_processing 99 | ``` 100 | Documentation is available online 101 | [here](https://uaf-array-processing.readthedocs.io/). For a usage example, see 102 | [`example.py`](https://github.com/uafgeotools/array_processing/blob/master/example.py). 103 | 104 | Note: The `array_plot()` function does not allow both `sigma_tau` and dropped elements from least trimmed squares to be plotted. 105 | 106 | The `sigma_tau` variable is an indicator of nonplanar propagation across an array (using all elements), and least trimmed squares drops element pairs that appear "too far" from planar. In this way, having a large `sigma_tau` value and having consistently dropped element pairs (while not the same) suggest a departure from the plane wave model. 107 | 108 | `sigma_tau` is only calculated when ordinary least squares (`ALPHA=1.0`) is used. The ability to plot one or the other was intended as a safeguard against potentially conflicting processing assumptions. To maintain a consistent output data structure, the `sigma_tau` key returns a `np.nan` in the case that subset pairs are trimmed (0.5 <= `ALPHA` < 1.0). 109 | 110 | If `ALPHA=1.0`, the dropped stations are not plotted since least trimmed squares is not used, and `sigma_tau` may be plotted if specified. If `ALPHA` < 1.0, then `sigma_tau` is not plotted or calculated. 111 | 112 | 113 | Authors 114 | ------- 115 | 116 | (_Alphabetical order by last name._) 117 | 118 | Jordan Bishop
119 | David Fee
120 | Curt Szuberla
121 | Liam Toney
122 | Andrew Winkelman 123 | -------------------------------------------------------------------------------- /array_processing/tools/plotting.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from copy import deepcopy 4 | from collections import Counter 5 | 6 | 7 | def array_plot(st, t, mdccm, vel, baz, ccmplot=False, 8 | mcthresh=None, sigma_tau=None, stdict=None): 9 | r""" 10 | Creates plots for velocity--back-azimuth array processing. 11 | 12 | Args: 13 | st (:class:`~obspy.core.stream.Stream`): Filtered data. Assumes 14 | response has been removed. 15 | t: Array processing time vector. 16 | mdccm: Array of median cross-correlation maxima. 17 | vel: Array of trace velocity estimates. 18 | baz: Array of back-azimuth estimates. 19 | ccmplot (bool): Toggle plotting the mean/median cross-correlation 20 | maxima values on a separate subplot in addition to the color scale. 21 | mcthresh (float): Add a dashed line at this level in the ccmplot 22 | subplot. 23 | sigma_tau: Array of :math:`\sigma_\tau` values. If provided, will plot 24 | the values on a separate subplot. 25 | stdict (dict): Dropped station pairs from 26 | :func:`~lts_array.ltsva.ltsva`. If provided, will plot the dropped 27 | station pairs on a separate subplot. 28 | 29 | Returns: 30 | tuple: Tuple containing: 31 | 32 | - **fig** (:class:`~matplotlib.figure.Figure`) – Figure handle. 33 | - **axs** (Array of :class:`~matplotlib.axes.Axes`) – Axis handles. 34 | """ 35 | 36 | # Specify the colormap. 37 | cm = 'RdYlBu_r' 38 | # Colorbar/y-axis limits for MdCCM. 39 | cax = (0.2, 1) 40 | # Specify the time vector for plotting the trace. 41 | tvec = st[0].times('matplotlib') 42 | 43 | # Determine the number and order of subplots. 44 | num_subplots = 3 45 | vplot = 1 46 | bplot = 2 47 | splot = bplot 48 | if ccmplot: 49 | num_subplots += 1 50 | vplot += 1 51 | bplot += 1 52 | splot = bplot 53 | if sigma_tau is not None or stdict is not None: 54 | num_subplots += 1 55 | splot = bplot + 1 56 | 57 | # Start Plotting. 58 | # Initiate and plot the trace. 59 | fig, axs = plt.subplots(num_subplots, 1, sharex='col') 60 | fig.set_size_inches(10, 9) 61 | axs[0].plot(tvec, st[0].data, 'k') 62 | axs[0].axis('tight') 63 | axs[0].set_ylabel('Pressure [Pa]') 64 | 65 | # Plot MdCCM on its own plot. 66 | if ccmplot: 67 | sc = axs[1].scatter(t, mdccm, c=mdccm, 68 | edgecolors='k', lw=0.3, cmap=cm) 69 | if mcthresh: 70 | axs[1].plot([t[0], t[-1]], [mcthresh, mcthresh], 'k--') 71 | axs[1].axis('tight') 72 | axs[1].set_xlim(t[0], t[-1]) 73 | axs[1].set_ylim(cax) 74 | sc.set_clim(cax) 75 | axs[1].set_ylabel('MdCCM') 76 | 77 | # Plot the trace/apparent velocity. 78 | sc = axs[vplot].scatter(t, vel, c=mdccm, edgecolors='k', lw=0.3, cmap=cm) 79 | axs[vplot].set_ylim(0.25, 0.45) 80 | axs[vplot].set_xlim(t[0], t[-1]) 81 | sc.set_clim(cax) 82 | axs[vplot].set_ylabel('Trace Velocity\n [km/s]') 83 | 84 | # Plot the back-azimuth. 85 | sc = axs[bplot].scatter(t, baz, c=mdccm, edgecolors='k', lw=0.3, cmap=cm) 86 | axs[bplot].set_ylim(0, 360) 87 | axs[bplot].set_xlim(t[0], t[-1]) 88 | sc.set_clim(cax) 89 | axs[bplot].set_ylabel('Back-azimuth\n [deg]') 90 | 91 | # Plot sigma_tau if given. 92 | if sigma_tau is not None: 93 | sc = axs[splot].scatter(t, sigma_tau, c=mdccm, 94 | edgecolors='k', lw=0.3, cmap=cm) 95 | axs[splot].set_xlim(t[0], t[-1]) 96 | sc.set_clim(cax) 97 | axs[splot].set_ylabel(r'$\sigma_\tau$') 98 | 99 | # Plot dropped station pairs from LTS if given. 
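    # stdict (from lts_array) is assumed to map timestamp keys to the element
    # pairs flagged in that window, plus a 'size' entry giving the number of
    # array elements, e.g. {'size': 4, '737093.2': array([1, 3]), ...}; only
    # 'size' and the per-timestamp flags are used below.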
100 | if stdict is not None: 
101 | ndict = deepcopy(stdict) 
102 | n = ndict['size'] 
103 | ndict.pop('size', None) 
104 | tstamps = list(ndict.keys()) 
105 | tstampsfloat = [float(ii) for ii in tstamps] 
106 | 
107 | # Set the second colormap for station pairs. 
108 | cm2 = plt.get_cmap('binary', (n-1)) 
109 | initplot = np.empty(len(t)) 
110 | initplot.fill(1) 
111 | 
112 | axs[splot].scatter(np.array([t[0], t[-1]]), 
113 | np.array([0.01, 0.01]), c='w') 
114 | axs[splot].axis('tight') 
115 | axs[splot].set_ylabel('Element [#]') 
116 | axs[splot].set_xlabel('UTC Time') 
117 | axs[splot].set_xlim(t[0], t[-1]) 
118 | axs[splot].set_ylim(0.5, n+0.5) 
119 | axs[splot].xaxis_date() 
120 | axs[splot].tick_params(axis='x', labelbottom=True) 
121 | 
122 | # Loop through the stdict for each flag and plot. 
123 | for jj in range(len(tstamps)): 
124 | z = Counter(list(ndict[tstamps[jj]])) 
125 | keys, vals = z.keys(), z.values() 
126 | keys, vals = np.array(list(keys)), np.array(list(vals)) 
127 | pts = np.tile(tstampsfloat[jj], len(keys)) 
128 | sc2 = axs[splot].scatter(pts, keys, c=vals, edgecolors='k', 
129 | lw=0.1, cmap=cm2, vmin=0.5, vmax=n-0.5) 
130 | 
131 | # Add the horizontal colorbar for station pairs. 
133 | p3 = axs[splot].get_position() 
134 | cbaxes2 = fig.add_axes([p3.x0, p3.y0-.08, p3.width, 0.02]) 
135 | hc2 = plt.colorbar(sc2, orientation="horizontal", 
136 | cax=cbaxes2, ax=axs[splot]) 
137 | hc2.set_label('Number of Flagged Element Pairs') 
138 | 
139 | axs[splot].xaxis_date() 
140 | axs[splot].set_xlabel('UTC Time') 
141 | 
142 | # Add the MdCCM colorbar. 
143 | cbot = axs[splot].get_position().y0 
144 | ctop = axs[1].get_position().y1 
145 | cbaxes = fig.add_axes([0.92, cbot, 0.02, ctop-cbot]) 
146 | hc = plt.colorbar(sc, cax=cbaxes) 
147 | hc.set_label('MdCCM') 
148 | 
149 | return fig, axs 
150 | 
151 | 
152 | def arraySigPlt(rij, sig, sigV, sigTh, impResp, vel, th, kvec, figName=None): 
153 | r""" 
154 | Plots output of 
155 | :func:`~array_processing.tools.array_characterization.arraySig`. 
156 | 
157 | Args: 
158 | rij: Coordinates (km) of sensors as eastings & northings in a 
159 | ``(2, N)`` array 
160 | sig (float): Variance in time delays (s), typically 
161 | :math:`\sigma_\tau` 
162 | sigV: Uncertainties in trace velocity (km/s) as a function of trace 
163 | velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
164 | sigTh: Uncertainties in back-azimuth (°) as a function of trace 
165 | velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
166 | impResp: Impulse response over grid as ``(NgridK, NgridK)`` array 
167 | vel: Vector of trace velocities (km/s) for axis in ``(NgridV, )`` 
168 | array 
169 | th: Vector of back-azimuths (°) for axis in ``(NgridTh, )`` array 
170 | kvec: Vector wavenumbers for axes in :math:`k`-space in ``(NgridK, )`` array 
171 | figName (str): Name of output file, will be written as ``figName.png`` 
172 | """ 
173 | 
174 | # Specify output figure file type and plotting resolution. 
175 | figFormat = 'png' 
176 | figDpi = 600 
177 | 
178 | # Plot array geometry in lower RHS. 
179 | fig = plt.figure() 
180 | axRij = plt.subplot(2, 2, 4) 
181 | for h in range(rij.shape[1]): 
182 | axRij.plot(rij[0, h], rij[1, h], 'bp') 
183 | plt.xlabel('km') 
184 | plt.ylabel('km') 
185 | axRij.axis('square') 
186 | axRij.grid() 
187 | 
188 | # Plot impulse response on upper RHS.
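    # impResp is the array response from impulseResp() evaluated on the
    # (kvec x kvec) wavenumber grid; strong sidelobes away from the central
    # peak can flag wavenumbers prone to spatial aliasing.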
189 | axImp = plt.subplot(2, 2, 2) 
190 | plt.pcolormesh(kvec, kvec, impResp) 
191 | plt.ylabel('k$_y$ (km$^{-1}$)') 
192 | plt.xlabel('k$_x$ (km$^{-1}$)') 
193 | axImp.axis('square') 
194 | 
195 | # Plot theta uncertainty on upper LHS. 
196 | plt.subplot(2, 2, 1) 
197 | meshTh = plt.pcolormesh(th, vel, sigTh) 
198 | plt.ylabel('vel. (km/s)') 
199 | plt.xlabel(r'$\theta (^\circ)$') 
200 | cbrTh = plt.colorbar(meshTh, ) 
201 | cbrTh.set_label(r'$\delta\theta\;\;\sigma_\tau = $' + str(sig) + ' s') 
202 | 
203 | # Plot velocity uncertainty on lower LHS. 
204 | plt.subplot(2, 2, 3) 
205 | meshV = plt.pcolormesh(th, vel, sigV) 
206 | plt.ylabel('vel. (km/s)') 
207 | plt.xlabel(r'$\theta (^\circ)$') 
208 | cbrV = plt.colorbar(meshV, ) 
209 | cbrV.set_label(r'$\delta v$') 
210 | 
211 | # Prepare output & display in iPython workspace. 
212 | plt.tight_layout() # IGNORE renderer warning from script! It is fine. 
213 | if figName: 
214 | plt.savefig(figName + '.' + figFormat, format=figFormat, dpi=figDpi) 
215 | 
216 | return fig 
217 | 
218 | 
219 | def arraySigContourPlt(sigV, sigTh, vel, th, trace_v): 
220 | r""" 
221 | Plots output of 
222 | :func:`~array_processing.tools.array_characterization.arraySig` onto a 
223 | polar plot for a specified trace velocity. 
224 | 
225 | Args: 
226 | sigV: Uncertainties in trace velocity (km/s) as a function of trace 
227 | velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
228 | sigTh: Uncertainties in back-azimuth (°) as a function of trace 
229 | velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
230 | vel: Vector of trace velocities (km/s) for axis in ``(NgridV, )`` array 
231 | th: Vector of back-azimuths (°) for axis in ``(NgridTh, )`` array 
232 | trace_v (float): Specified trace velocity (km/s) for uncertainty plot 
233 | 
234 | Returns: 
235 | :class:`~matplotlib.figure.Figure`: Figure handle 
236 | """ 
237 | 
238 | tvel_ptr = np.abs(vel - trace_v).argmin() 
239 | sigV_cont = sigV[tvel_ptr, :] 
240 | sigTh_cont = sigTh[tvel_ptr, :] 
241 | theta = np.linspace(0, 2 * np.pi, len(sigV_cont)) 
242 | 
243 | fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, 
244 | subplot_kw={'projection': 'polar'}) 
245 | 
246 | # Plot trace velocity uncertainty. 
247 | ax1.set_theta_direction(-1) 
248 | ax1.set_theta_offset(np.pi/2.0) 
249 | ax1.plot(theta, sigV_cont, color='k', lw=1) 
250 | ax1.set_rmax(sigV_cont.max()*1.1) 
251 | ax1.yaxis.get_major_locator().base.set_params(nbins=6) 
252 | ax1.set_rlabel_position(22.5) 
253 | ax1.grid(True) 
254 | ax1.set_title('Trace Velocity\n Uncertainty [km/s]\n v=%.2f km/s' % trace_v, 
255 | va='bottom', pad=20) 
256 | 
257 | # Plot back-azimuth uncertainty. 
258 | ax2.set_theta_direction(-1) 
259 | ax2.set_theta_offset(np.pi/2.0) 
260 | ax2.plot(theta, sigTh_cont, color='b', lw=1) 
261 | ax2.set_rmax(sigTh_cont.max()*1.1) 
262 | ax2.yaxis.get_major_locator().base.set_params(nbins=6) 
263 | ax2.set_rlabel_position(22.5) 
264 | ax2.grid(True) 
265 | ax2.set_title('Back-Azimuth\n Uncertainty [$^\circ$]\n v=%.2f km/s' % trace_v, 
266 | va='bottom', pad=20) 
267 | 
268 | # Adjust subplot spacing to prevent overlap.
269 | fig.subplots_adjust(wspace=0.4) 
270 | 
271 | return fig 
272 | 
-------------------------------------------------------------------------------- 
/array_processing/tools/array_characterization.py: 
-------------------------------------------------------------------------------- 
1 | import numpy as np 
2 | from scipy import optimize 
3 | from scipy.special import gammainc 
4 | from fastkml import kml 
5 | 
6 | 
7 | def arraySig(rij, kmax, sigLevel, p=0.9, velLims=(0.27, 0.36), NgridV=100, 
8 | NgridTh=100, NgridK=100): 
9 | r""" 
10 | Estimate 2-D array uncertainties in trace velocity and back-azimuth, and 
11 | calculate impulse response. 
12 | 
13 | Args: 
14 | rij: Coordinates (km) of sensors as eastings & northings in a 
15 | ``(2, N)`` array 
16 | kmax (float): Impulse response will be calculated over the range 
17 | [-`kmax`, `kmax`] in :math:`k`-space (1/km) 
18 | sigLevel (float): Variance in time delays (s), typically 
19 | :math:`\sigma_\tau` 
20 | p (float): Confidence limit in uncertainty estimates 
21 | velLims (tuple): Range of trace velocities (km/s) to estimate 
22 | uncertainty over. A single value can be used, but by default a 
23 | range is used 
24 | NgridV (int): Number of velocities to estimate uncertainties in range 
25 | `velLims` 
26 | NgridTh (int): Number of angles to estimate uncertainties in range 
27 | :math:`[0^\circ, 360^\circ]` 
28 | NgridK (int): Number of :math:`k`-space coordinates to calculate in 
29 | each dimension 
30 | 
31 | Returns: 
32 | tuple: Tuple containing: 
33 | 
34 | - **sigV** – Uncertainties in trace velocity (km/s) as a function of trace 
35 | velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
36 | - **sigTh** – Uncertainties in back-azimuth (°) as a function of 
37 | trace velocity and back-azimuth as ``(NgridTh, NgridV)`` array 
38 | - **impResp** – Impulse response over grid as ``(NgridK, NgridK)`` 
39 | array 
40 | - **vel** – Vector of trace velocities (km/s) for axis in 
41 | ``(NgridV, )`` array 
42 | - **th** – Vector of back azimuths (°) for axis in ``(NgridTh, )`` 
43 | array 
44 | - **kvec** – Vector wavenumbers for axes in :math:`k`-space in 
45 | ``(NgridK, )`` array 
46 | """ 
47 | 
48 | # calculate uncertainties 
49 | # preliminaries 
50 | dij = co_array(rij) 
51 | th = np.linspace(0, 360 * (1 - 1 / NgridTh), NgridTh) / 180 * np.pi 
52 | if len(velLims) == 1: 
53 | vel = velLims 
54 | else: 
55 | vel = np.linspace(velLims[0], velLims[1], NgridV) 
56 | Th, Vel = np.meshgrid(th, vel) 
57 | S1 = np.sin(Th) / Vel 
58 | S2 = np.cos(Th) / Vel 
59 | sigTh = np.zeros(Th.shape) 
60 | sigV = sigTh.copy() 
61 | # single-pass calcs 
62 | # calculate eigenvalues/vectors of design matrix (one-time shot) 
63 | C = dij@dij.T 
64 | cii, Ve = np.linalg.eig(C) 
65 | thEigR = np.arctan2(Ve[1, 0], Ve[0, 0]) 
66 | R = np.array([[np.cos(thEigR), np.sin(thEigR)], 
67 | [-np.sin(thEigR), np.cos(thEigR)]]) 
68 | # calculate chi2 for desired confidence level 
69 | x2 = chi2(2, 1-p) 
70 | sigS = sigLevel / np.sqrt(cii) 
71 | # prep for loop 
72 | a = np.sqrt(x2) * sigS[0] 
73 | b = np.sqrt(x2) * sigS[1] 
74 | N, M = Th.shape 
75 | 
76 | # loop over the trace velocity/back-azimuth grid 
77 | for n in range(N): 
78 | for m in range(M): 
79 | # calculate elliptical extrema 
80 | So = R @ [[S1[n, m]], [S2[n, m]]] 
81 | eExtrm, eVec = rthEllipse(a, b, So[0][0], So[1][0]) 
82 | # rotate & recalculate 
83 | eVec = eVec @ R 
84 | # fix up angle calcs 
85 | sigTh[n, m] = np.abs(np.diff( 
86 | (np.arctan2(eVec[2:, 1], eVec[2:, 0]) * 180 / np.pi - 360) 
87 | % 360)) 
88 | if sigTh[n, m] > 180: 
89 | sigTh[n, m] = np.abs(sigTh[n, m] - 360) 
90 | sigV[n, m] =
np.abs(np.diff(1 / eExtrm[:2])) 91 | 92 | # prepare impulse response 93 | impResp, kvec = impulseResp(dij, kmax, NgridK) 94 | 95 | return sigV, sigTh, impResp, vel, th / np.pi * 180, kvec 96 | 97 | 98 | def impulseResp(dij, kmax, NgridK): 99 | r""" 100 | Calculate impulse response of a 2-D array. 101 | 102 | Args: 103 | dij: Coordinates of co-array of ``N`` sensors in a ``(2, (N*N-1)/2)`` 104 | array 105 | kmax (float): Impulse response will be calculated over the range 106 | [-`kmax`, `kmax`] in :math:`k`-space 107 | NgridK (int): Number of :math:`k`-space coordinates to calculate in 108 | each dimension 109 | 110 | Returns: 111 | tuple: Tuple containing: 112 | 113 | - **d** – Impulse response over grid as ``(NgridK, NgridK)`` array 114 | - **kvec** - Vector wavenumbers for axes in :math:`k`-space in 115 | ``(NgridK, )`` array 116 | """ 117 | 118 | # pre-allocate grid for :math:`k`-space 119 | kvec = np.linspace(-kmax, kmax, NgridK) 120 | Kx, Ky = np.meshgrid(kvec, kvec) 121 | N = dij.shape[1] 122 | K = np.vstack((Ky.flatten(), Kx.flatten())).T 123 | d = 2 * np.cos(K @ dij) 124 | # last term adds in fact that cos(0)==1 for ignored self-delay terms 125 | d = np.reshape(np.sum(d, axis=1), (NgridK, NgridK)) 126 | + (1 + np.sqrt(1 + 8 * N)) / 2 127 | 128 | return d, kvec 129 | 130 | 131 | def rthEllipse(a, b, x0, y0): 132 | r""" 133 | Calculate angles subtending, and extremal distances to, a 134 | coordinate-aligned ellipse from the origin. 135 | 136 | Args: 137 | a (float): Semi-major axis of ellipse 138 | b (float): Semi-minor axis of ellipse 139 | x0 (float): Horizontal center of ellipse 140 | y0 (float): Vertical center of ellipse 141 | 142 | Returns: 143 | tuple: Tuple containing: 144 | 145 | - **eExtrm** – Extremal parameters in ``(4, )`` array as 146 | 147 | .. code-block:: none 148 | 149 | [min distance, max distance, min angle (degrees), max angle (degrees)] 150 | 151 | - **eVec** – Coordinates of extremal points on ellipse in ``(4, 2)`` 152 | array as 153 | 154 | .. code-block:: none 155 | 156 | [[x min dist., y min dist.], 157 | [x max dist., y max dist.], 158 | [x max angle tangency, y max angle tangency], 159 | [x min angle tangency, y min angle tangency]] 160 | """ 161 | 162 | # set constants 163 | A = 2/a**2 164 | B = 2*x0/a**2 165 | C = 2/b**2 166 | D = 2*y0/b**2 167 | E = (B*x0+D*y0)/2-1 168 | F = C-A 169 | G = A/2 170 | H = C/2 171 | eExtrm = np.zeros((4,)) 172 | eVec = np.zeros((4, 2)) 173 | eps = np.finfo(np.float64).eps 174 | 175 | # some tolerances for numerical errors 176 | circTol = 1e8 # is it circular to better than circTol*eps? 177 | zeroTol = 1e4 # is center along a coord. axis to better than zeroTol*eps? 
178 | magTol = 1e-5 # is a sol'n within ellipse*(1+magTol) (maginification) 179 | 180 | # pursue circular or elliptical solutions 181 | if np.abs(F) <= circTol * eps: 182 | # circle 183 | cent = np.sqrt(x0 ** 2 + y0 ** 2) 184 | eExtrm[0:2] = cent + np.array([-a, a]) 185 | eVec[0:2, :] = np.array([ 186 | [x0-a*x0/cent, y0-a*y0/cent], 187 | [x0+a*x0/cent, y0+a*y0/cent]]) 188 | else: 189 | # ellipse 190 | # check for trivial distance sol'n 191 | if np.abs(y0) < zeroTol * eps: 192 | eExtrm[0:2] = x0 + np.array([-a, a]) 193 | eVec[0:2, :] = np.vstack((eExtrm[0:2], [0, 0])).T 194 | elif np.abs(x0) < zeroTol * eps: 195 | eExtrm[0:2] = y0 + np.array([-b, b]) 196 | eVec[0:2, :] = np.vstack(([0, 0], eExtrm[0:2])).T 197 | else: 198 | # use dual solutions of quartics to find best, real-valued results 199 | # solve quartic for y 200 | fy = F**2*H 201 | y = quarticEqn(-D*F*(2*H+F)/fy, 202 | (B**2*(G+F)+E*F**2+D**2*(H+2*F))/fy, 203 | -D*(B**2+2*E*F+D**2)/fy, (D**2*E)/fy) 204 | y = np.array([y[i] for i in list(np.where(y == np.real(y))[0])]) 205 | xy = B*y / (D-F*y) 206 | # solve quartic for x 207 | fx = F**2*G 208 | x = quarticEqn(B*F*(2*G-F)/fx, (B**2*(G-2*F)+E*F**2+D**2*(H-F))/fx, 209 | B*(2*E*F-B**2-D**2)/fx, (B**2*E)/fx) 210 | x = np.array([x[i] for i in list(np.where(x == np.real(x))[0])]) 211 | yx = D*x / (F*x+B) 212 | # combine both approaches 213 | distE = np.hstack( 214 | (np.sqrt(x ** 2 + yx ** 2), np.sqrt(xy ** 2 + y ** 2))) 215 | # trap real, but bogus sol's (esp. near Th = 180) 216 | distEidx = np.where( 217 | (distE <= np.sqrt(x0 ** 2 + y0 ** 2) 218 | + np.max([a, b]) * (1 + magTol)) 219 | & (distE >= np.sqrt(x0 ** 2 + y0 ** 2) 220 | - np.max([a, b]) * (1 + magTol))) 221 | coords = np.hstack(((x, yx), (xy, y))).T 222 | coords = coords[distEidx, :][0] 223 | distE = distE[distEidx] 224 | eExtrm[0:2] = [distE.min(), distE.max()] 225 | eVec[0:2, :] = np.vstack( 226 | (coords[np.where(distE == distE.min()), :][0][0], 227 | coords[np.where(distE == distE.max()), :][0][0])) 228 | # angles subtended 229 | if x0 < 0: 230 | x0 = -x0 231 | y = -np.array(quadraticEqn(D ** 2 + B ** 2 * H / G, 4 * D * E, 232 | 4 * E ** 2 - B ** 2 * E / G)) 233 | x = -np.sqrt(E / G - H / G * y ** 2) 234 | else: 235 | y = -np.array(quadraticEqn(D ** 2 + B ** 2 * H / G, 4 * D * E, 236 | 4 * E ** 2 - B ** 2 * E / G)) 237 | x = np.sqrt(E / G - H / G * y ** 2) 238 | eVec[2:, :] = np.vstack((x, y)).T 239 | # various quadrant fixes 240 | if x0 == 0 or np.abs(x0) - a < 0: 241 | eVec[2, 0] = -eVec[2, 0] 242 | eExtrm[2:] = np.sort(np.arctan2(eVec[2:, 1], eVec[2:, 0]) / np.pi * 180) 243 | 244 | return eExtrm, eVec 245 | 246 | 247 | def co_array(rij): 248 | r""" 249 | Form co-array coordinates for given array coordinates. 250 | 251 | Args: 252 | rij: ``(d, n)`` array; ``n`` sensor coordinates as [northing, easting, 253 | {elevation}] column vectors in ``d`` dimensions 254 | 255 | Returns: 256 | ``(d, n(n-1)//2)`` co-array, coordinates of the sensor pairing 257 | separations 258 | """ 259 | 260 | idx = [(i, j) for i in range(rij.shape[1]-1) 261 | for j in range(i+1, rij.shape[1])] 262 | 263 | return rij[:, [i[0] for i in idx]] - rij[:, [j[1] for j in idx]] 264 | 265 | 266 | def chi2(nu, alpha, funcTol=1e-10): 267 | r""" 268 | Calculate value of a :math:`\chi^2` such that a :math:`\nu`-dimensional 269 | confidence ellipsoid encloses a fraction :math:`1 - \alpha` of normally 270 | distributed variable. 
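A quick numerical check (for :math:`\nu = 2` the shorthand below reduces to
:math:`-2 \ln \alpha`):

.. code-block:: python

    chi2(2, 0.1)  # -> ~4.61, the 90% confidence value for 2 degrees of freedom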
271 | 272 | Args: 273 | nu (int): Degrees of freedom (typically embedding dimension of 274 | variable) 275 | alpha (float): Confidence interval such that :math:`\alpha \in [0, 1]` 276 | funcTol (float): Optimization function evaluation tolerance for 277 | :math:`\nu \ne 2` 278 | 279 | Returns: 280 | float: Value of a :math:`\chi^2` enclosing :math:`1 - \alpha` 281 | confidence region 282 | """ 283 | 284 | if nu == 2: 285 | # this shorthand owing to Ken Arnoult 286 | return -2 * np.log(alpha) 287 | else: 288 | # but just in case we end up with a nu != 2 situation 289 | gammaTest = lambda X2test: np.abs(gammainc(nu / 2, 290 | X2test / 2) - (1-alpha)) 291 | return optimize.fmin(func=gammaTest, x0=1, ftol=funcTol, disp=False) 292 | 293 | 294 | def cubicEqn(a, b, c): 295 | r""" 296 | Roots of cubic equation in the form :math:`x^3 + ax^2 + bx + c = 0`. 297 | 298 | Args: 299 | a (int or float): Scalar coefficient of cubic equation, can be 300 | complex 301 | b (int or float): Same as above 302 | c (int or float): Same as above 303 | 304 | Returns: 305 | list: Roots of cubic equation in standard form 306 | 307 | See Also: 308 | :func:`numpy.roots` — Generic polynomial root finder 309 | 310 | Notes: 311 | Relatively stable solutions, with some tweaks by Dr. Z, 312 | per algorithm of Numerical Recipes 2nd ed., :math:`\S` 5.6. Even 313 | :func:`numpy.roots` can have some (minor) issues; e.g., 314 | :math:`x^3 - 5x^2 + 8x - 4 = 0`. 315 | """ 316 | 317 | Q = a*a/9 - b/3 318 | R = (3*c - a*b)/6 + a*a*a/27 319 | Q3 = Q*Q*Q 320 | R2 = R*R 321 | ao3 = a/3 322 | 323 | # Q & R are real 324 | if np.isreal([a, b, c]).all(): 325 | # 3 real roots 326 | if R2 < Q3: 327 | sqQ = -2 * np.sqrt(Q) 328 | theta = np.arccos(R / np.sqrt(Q3)) 329 | # This solution first published in 1615 by Viète! 330 | x = [sqQ * np.cos(theta / 3) - ao3, 331 | sqQ * np.cos((theta + 2 * np.pi) / 3) - ao3, 332 | sqQ * np.cos((theta - 2 * np.pi) / 3) - ao3] 333 | # Q & R real, but 1 real, 2 complex roots 334 | else: 335 | # this is req'd since np.sign(0) = 0 336 | if R != 0: 337 | A = -np.sign(R) * (np.abs(R) + np.sqrt(R2 - Q3)) ** (1 / 3) 338 | else: 339 | A = -np.sqrt(-Q3) ** (1 / 3) 340 | if A == 0: 341 | B = 0 342 | else: 343 | B = Q/A 344 | # one real root & two conjugate complex ones 345 | x = [ 346 | (A+B) - ao3, 347 | -.5 * (A+B) + 1j * np.sqrt(3) / 2 * (A - B) - ao3, 348 | -.5 * (A+B) - 1j * np.sqrt(3) / 2 * (A - B) - ao3] 349 | # Q & R complex, so also 1 real, 2 complex roots 350 | else: 351 | sqR2mQ3 = np.sqrt(R2 - Q3) 352 | if np.real(np.conj(R) * sqR2mQ3) >= 0: 353 | A = -(R+sqR2mQ3)**(1/3) 354 | else: 355 | A = -(R-sqR2mQ3)**(1/3) 356 | if A == 0: 357 | B = 0 358 | else: 359 | B = Q/A 360 | # one real root & two conjugate complex ones 361 | x = [ 362 | (A+B) - ao3, 363 | -.5 * (A+B) + 1j * np.sqrt(3) / 2 * (A - B) - ao3, 364 | -.5 * (A+B) - 1j * np.sqrt(3) / 2 * (A - B) - ao3 365 | ] 366 | # parse real and/or int roots for tidy output 367 | for k in range(0, 3): 368 | if np.real(x[k]) == x[k]: 369 | x[k] = float(np.real(x[k])) 370 | if int(x[k]) == x[k]: 371 | x[k] = int(x[k]) 372 | return x 373 | 374 | 375 | def quadraticEqn(a, b, c): 376 | r""" 377 | Roots of quadratic equation in the form :math:`ax^2 + bx + c = 0`. 
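A quick sanity check:

.. code-block:: python

    quadraticEqn(1, -3, 2)  # -> [2, 1], the roots of x^2 - 3x + 2 = 0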
378 | 379 | Args: 380 | a (int or float): Scalar coefficient of quadratic equation, can be 381 | complex 382 | b (int or float): Same as above 383 | c (int or float): Same as above 384 | 385 | Returns: 386 | list: Roots of quadratic equation in standard form 387 | 388 | See Also: 389 | :func:`numpy.roots` — Generic polynomial root finder 390 | 391 | Notes: 392 | Stable solutions, even for :math:`b^2 >> ac` or complex coefficients, 393 | per algorithm of Numerical Recipes 2nd ed., :math:`\S` 5.6. 394 | """ 395 | 396 | # real coefficient branch 397 | if np.isreal([a, b, c]).all(): 398 | # note np.sqrt(-1) = nan, so force complex argument 399 | if b: 400 | # std. sub-branch 401 | q = -0.5*(b + np.sign(b) * np.sqrt(complex(b * b - 4 * a * c))) 402 | else: 403 | # b = 0 sub-branch 404 | q = -np.sqrt(complex(-a * c)) 405 | # complex coefficient branch 406 | else: 407 | if np.real(np.conj(b) * np.sqrt(b * b - 4 * a * c)) >= 0: 408 | q = -0.5*(b + np.sqrt(b * b - 4 * a * c)) 409 | else: 410 | q = -0.5*(b - np.sqrt(b * b - 4 * a * c)) 411 | # stable root solution 412 | x = [q/a, c/q] 413 | # parse real and/or int roots for tidy output 414 | for k in 0, 1: 415 | if np.real(x[k]) == x[k]: 416 | x[k] = float(np.real(x[k])) 417 | if int(x[k]) == x[k]: 418 | x[k] = int(x[k]) 419 | return x 420 | 421 | 422 | def quarticEqn(a, b, c, d): 423 | r""" 424 | Roots of quartic equation in the form :math:`x^4 + ax^3 + bx^2 + 425 | cx + d = 0`. 426 | 427 | Args: 428 | a (int or float): Scalar coefficient of quartic equation, can be 429 | complex 430 | b (int or float): Same as above 431 | c (int or float): Same as above 432 | d (int or float): Same as above 433 | 434 | Returns: 435 | list: Roots of quartic equation in standard form 436 | 437 | See Also: 438 | :func:`numpy.roots` — Generic polynomial root finder 439 | 440 | Notes: 441 | Stable solutions per algorithm of CRC Std. Mathematical Tables, 29th 442 | ed. 443 | """ 444 | 445 | # find *any* root of resolvent cubic 446 | a2 = a*a 447 | y = cubicEqn(-b, a*c - 4*d, (4*b - a2)*d - c*c) 448 | y = y[0] 449 | # find R 450 | R = np.sqrt(a2 / 4 - (1 + 0j) * b + y) # force complex in sqrt 451 | foo = 3*a2/4 - R*R - 2*b 452 | if R != 0: 453 | # R is already complex. 454 | D = np.sqrt(foo + (a * b - 2 * c - a2 * a / 4) / R) 455 | E = np.sqrt(foo - (a * b - 2 * c - a2 * a / 4) / R) # ... 456 | else: 457 | sqrtTerm = 2 * np.sqrt(y * y - (4 + 0j) * d) # force complex in sqrt 458 | D = np.sqrt(foo + sqrtTerm) 459 | E = np.sqrt(foo - sqrtTerm) 460 | x = [-a/4 + R/2 + D/2, 461 | -a/4 + R/2 - D/2, 462 | -a/4 - R/2 + E/2, 463 | -a/4 - R/2 - E/2] 464 | # parse real and/or int roots for tidy output 465 | for k in range(0, 4): 466 | if np.real(x[k]) == x[k]: 467 | x[k] = float(np.real(x[k])) 468 | if int(x[k]) == x[k]: 469 | x[k] = int(x[k]) 470 | 471 | return x 472 | 473 | 474 | def read_kml(kml_file): 475 | r"""Parse an array KML file into a list of element latitudes and longitudes. 476 | 477 | KML file must contain a single folder containing the array element points. 
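A minimal sketch (``array.kml`` is a hypothetical file following the layout
described above):

.. code-block:: python

    latlist, lonlist = read_kml('array.kml')
    rij = getrij(latlist, lonlist)  # from array_processing.algorithms.helpers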
478 | 
479 | Args: 
480 | kml_file (str): Full path to input KML file (extension ``.kml``) 
481 | 
482 | Returns: 
483 | tuple: ``(latlist, lonlist)`` for input to :func:`~array_processing.algorithms.helpers.getrij` 
484 | """ 
485 | 
486 | # Read in KML file 
487 | k = kml.KML() 
488 | with open(kml_file, mode='rb') as f: 
489 | k.from_string(f.read()) 
490 | 
491 | # Extract coordinates 
492 | elements = list(list(list(k.features())[0].features())[0].features()) 
493 | lonlist = [element.geometry.x for element in elements] 
494 | latlist = [element.geometry.y for element in elements] 
495 | 
496 | return latlist, lonlist 
497 | 
-------------------------------------------------------------------------------- 
/array_processing/tools/generic.py: 
-------------------------------------------------------------------------------- 
1 | import numpy as np 
2 | from scipy.signal import convolve2d 
3 | from functools import reduce 
4 | from itertools import groupby 
5 | from operator import itemgetter 
6 | 
7 | 
8 | def array_thresh(mcthresh, az_volc, az_diff, mdccm, az, vel): 
9 | r""" 
10 | Find array processing values above multiple set thresholds for MCCM, 
11 | back-azimuth, and trace velocity. Uses default 0.25–0.45 km/s for trace 
12 | velocity thresholds. Also finds consecutive segments that meet thresholds, 
13 | but these values are not currently returned. 
14 | 
15 | Args: 
16 | mcthresh (float): MCCM or MdCCM threshold (0–1) 
17 | az_volc (float): Back-azimuth to target volcano or source (0–359) 
18 | az_diff (float): Tolerance for back-azimuth from `az_volc` 
19 | mdccm: MdCCM or MCCM values from array processing (0–1) 
20 | az: Back-azimuth values (0–359) 
21 | vel: Trace-velocity values (km/s) 
22 | 
23 | Returns: 
24 | Indices to time segments that meet set thresholds 
25 | """ 
26 | 
27 | # Use numpy to find where thresholds are exceeded 
28 | mc_good = np.where(mdccm > mcthresh)[0] 
29 | az_good = np.where((az >= az_volc - az_diff) & (az <= az_volc + az_diff))[0] 
30 | vel_good = np.where((vel >= 0.25) & (vel <= 0.45))[0] 
31 | igood = reduce(np.intersect1d, (mc_good, az_good, vel_good)) 
32 | 
33 | # Find the number of consecutive values exceeding thresholds. 
34 | ranges = [] 
35 | nconsec = [] 
36 | for k, g in groupby(enumerate(igood), lambda x: x[0] - x[1]): 
37 | group = list(map(itemgetter(1), g)) 
38 | ranges.append((group[0], group[-1])) 
39 | nconsec.append(group[-1] - group[0] + 1) 
40 | 
41 | if len(nconsec) > 0: 
42 | consecmax = max(nconsec) 
43 | else: 
44 | consecmax = 0 
45 | print("%d above threshold, %d consecutive\n" % (len(igood), consecmax)) 
46 | 
47 | return igood 
48 | 
49 | 
50 | def beamForm( 
51 | data, 
52 | rij, 
53 | Hz, 
54 | azPhi, 
55 | vel=0.340, 
56 | r=None, 
57 | wgt=None, 
58 | refTrace=None, 
59 | M=None, 
60 | Moffset=None, 
61 | minimizeRMS=False, 
62 | ): 
63 | r""" 
64 | Form a "best beam" from the traces of an array. 
65 | 
66 | Args: 
67 | data: ``(m, n)`` array; time series with ``m`` samples from ``n`` 
68 | traces as columns 
69 | rij: ``(d, n)`` array; ``n`` sensor coordinates as [easting, northing, 
70 | {elevation}] column vectors in ``d`` dimensions 
71 | Hz (int or float): Sample rate 
72 | azPhi: Back azimuth (float) from co-array coordinate origin (° CW from 
73 | N); back azimuth and elevation angle (list) from co-array 
74 | coordinate origin (° CW from N, ° from N-E plane) 
75 | vel (float): Estimated signal velocity across array 
76 | r (float): Range to source from co-array origin. Default is `None` (use
50 | def beamForm(
51 |     data,
52 |     rij,
53 |     Hz,
54 |     azPhi,
55 |     vel=0.340,
56 |     r=None,
57 |     wgt=None,
58 |     refTrace=None,
59 |     M=None,
60 |     Moffset=None,
61 |     minimizeRMS=False,
62 | ):
63 |     r"""
64 |     Form a "best beam" from the traces of an array.
65 | 
66 |     Args:
67 |         data: ``(m, n)`` array; time series with ``m`` samples from ``n``
68 |             traces as columns
69 |         rij: ``(d, n)`` array; ``n`` sensor coordinates as [easting, northing,
70 |             {elevation}] column vectors in ``d`` dimensions
71 |         Hz (int or float): Sample rate
72 |         azPhi: Back azimuth (float) from co-array coordinate origin (° CW from
73 |             N); back azimuth and elevation angle (list) from co-array
74 |             coordinate origin (° CW from N, ° from N-E plane)
75 |         vel (float): Estimated signal velocity across array
76 |         r (float): Range to source from co-array origin. Default is `None`
77 |             (use plane wave arrival model); if not `None`, use spherical wave
78 |             arrival model
79 |         wgt: Vector of relative weights of length ``n`` (0 == exclude trace).
80 |             Default is `None` (use all traces with equal relative weights ``[1
81 |             for i in range(nTraces)]``)
82 |         refTrace (int): Reference sensor for TDOA information. Default is
83 |             `None` (use first non-zero-weighted trace)
84 |         M (int): Length of best beam result in samples. Default is `None` (use
85 |             ``m`` samples, same as input `data`)
86 |         Moffset: Individual trace offsets from arrival model shifts. Default is
87 |             `None` (use ``[0 for i in range(nTraces)]``)
88 |         minimizeRMS (bool): If `True`, additionally shift traces to minimize RMS misfit with the beam via :func:`alignTracesMinRMS`. Default is `False`
89 |     Returns:
90 |         ``(M, )`` array of summed and weighted shifted traces to form a best
91 |         beam
92 | 
93 |     Raises:
94 |         IndexError: If the input argument dimensions are not consistent
95 | 
96 |     Notes:
97 |         This beamformer handles planar- or spherical-model arrivals from
98 |         arbitrarily elevated sources incident on 2- or 3-D arrays. Weights are
99 |         relative and normalized upon beam output. The default value for `vel`
100 |         assumes that `rij` is in units of km (e.g., the speed is in km/s).
101 |     """
102 | 
103 |     # (c) 2017 Curt A. L. Szuberla
104 |     # University of Alaska Fairbanks, all rights reserved
105 |     #
106 |     # size things up
107 |     m, nTraces = data.shape
108 |     # -- default parsing & error checking -----------------------------------
109 |     # default weight is unity weight for all traces
110 |     if wgt is None:
111 |         wgt = [1 for i in range(nTraces)]
112 |     else:
113 |         if len(wgt) != nTraces:
114 |             # catch dimension mismatch between tau & wgt
115 |             raise IndexError("len(wgt) != " + str(nTraces))
116 |     wgt = np.array(wgt)  # require array form here for later operations
117 |     # default refTrace is first non-zero wgt
118 |     if refTrace is None:
119 |         refTrace = np.min(np.where(wgt != 0))  # requires array wgt
120 |     # default Moffset is zero for all traces
121 |     if Moffset is None:
122 |         Moffset = [0 for i in range(nTraces)]
123 |     else:
124 |         if len(Moffset) != nTraces:
125 |             # catch dimension mismatch between tau & Moffset
126 |             raise IndexError("len(Moffset) != " + str(nTraces))
127 |     # -- end default parsing & error checking -------------------------------
128 |     # planar (far-field) or spherical (near-field) arrival?
129 |     if r is None:
130 |         tau = tauCalcPW(vel, azPhi, rij)
131 |     else:
132 |         # need to unpack & repack azPhi with care
133 |         if np.isscalar(azPhi):
134 |             tau = tauCalcSW(vel, [r, azPhi], rij)
135 |         else:
136 |             tau = tauCalcSW(vel, [r, azPhi[0], azPhi[1]], rij)
137 | 
138 |     # calculate shifts as samples
139 |     beam_delays = phaseAlignIdx(tau, Hz, wgt, refTrace)
140 |     # apply shifts, resulting in a zero-padded array
141 |     beamMatrix = phaseAlignData(data, beam_delays, wgt, refTrace, M, Moffset)
142 |     # if minimizing RMS between beam and constituent traces
143 |     if minimizeRMS:
144 |         # return beam with traces shifted to minimize RMS error
145 |         return alignTracesMinRMS(beamMatrix, wgt)
146 |     # linear algebra to perform sum & then normalize by weights
147 |     return beamMatrix @ wgt / wgt.sum()
148 | 
149 | 
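# Usage sketch (hypothetical inputs): given an (m, n) data matrix `data`,
# element coordinates `rij` in km, and a 50 Hz sample rate, a plane-wave
# beam steered to a 230-degree back azimuth is
#
#     beam = beamForm(data, rij, 50, 230.0)
#
# which uses the default vel=0.340 km/s; supplying a range, e.g. r=5.0 (km),
# switches to the spherical-wave arrival model.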
150 | def phaseAlignData(data, delays, wgt, refTrace, M, Moffset, plotFlag=False):
151 |     r"""
152 |     Embed ``n`` phase aligned traces in a data matrix.
153 | 
154 |     Args:
155 |         data: ``(m, n)`` array; time series with ``m`` samples from ``n``
156 |             traces as columns
157 |         delays: ``(n, )`` array; vector of shifts as indices for embedding
158 |             traces in an array, such that trace ``i`` will begin at index
159 |             ``out[i]``
160 |         wgt: Vector of relative weights of length ``n`` (0 == exclude trace by
161 |             setting to padding value, see `plotFlag`)
162 |         refTrace (int): Reference sensor for TDOA information
163 |         M (int): Length of best beam result in samples (use ``m`` to let beam
164 |             be same length as input traces)
165 |         Moffset: Individual trace offsets from arrival model shifts (use ``[0
166 |             for i in range(nTraces)]`` to skip this effect)
167 |         plotFlag (bool): Flag to indicate output array will be used for
168 |             plotting purposes. Default is `False` (pads shifts with zeros; pads
169 |             with :data:`numpy.nan` if `True`)
170 | 
171 |     Returns:
172 |         ``(M, n)`` array of shifted traces as columns
173 | 
174 |     Notes:
175 |         The output of :func:`phaseAlignIdx` is used to calculate the input
176 |         `delays`.
177 |     """
178 | 
179 |     # (c) 2017 Curt A. L. Szuberla
180 |     # University of Alaska Fairbanks, all rights reserved
181 |     #
182 |     # -- this is low level code w/out error checks or defaults, designed
183 |     # -- to be called by wrappers that make use of the indices provided
184 |     # size up data
185 |     m, nTraces = data.shape
186 |     # if plotting, embed in array of np.nan
187 |     if plotFlag:
188 |         nanOrOne = np.nan
189 |     else:
190 |         nanOrOne = 1
191 |     # correct for negative Moffset elements:
192 |     # subtract this to ensure corrected delays are positive,
193 |     # semi-definite and have (at least) one zero element
194 |     maxNegMoffset = min(np.array(Moffset)[np.array(Moffset) <= 0])
195 |     # apply Moffset & correction for negative elements of Moffset
196 |     delays = delays + Moffset - maxNegMoffset
197 |     # start with everything in the window as a default (trim or pad later)
198 |     data_align = np.zeros((max(delays) + m, nTraces)) * nanOrOne
199 |     # embed shifted traces in array
200 |     for k in range(nTraces):
201 |         if wgt[k]:
202 |             data_align[delays[k] : delays[k] + m, k] = data[:, k] * wgt[k]
203 |     # truncate or pad data_align if M != m, centered on refTrace
204 |     mp = data_align.shape[0]  # new value for m
205 |     if M is not None and M != mp:
206 |         alignBounds = [
207 |             delays[refTrace] + m // 2 - M // 2,
208 |             delays[refTrace] + m // 2 + M // 2,
209 |         ]
210 |         # trap round-off errors and force (M, nTraces) data_align
211 |         if alignBounds[1] - alignBounds[0] != M:
212 |             alignBounds[1] += 1
213 |             if not (alignBounds[1] - alignBounds[0]) % 2:
214 |                 alignBounds[0] -= 1
215 |         # -- LHS (graphically, but actually topside in array-land!)
216 |         if alignBounds[0] < 0:
217 |             # pad LHS of traces w zeros or np.nans
218 |             data_align = np.vstack(
219 |                 (np.zeros((-alignBounds[0], nTraces)) * nanOrOne, data_align)
220 |             )
221 |         elif alignBounds[0] > 0:
222 |             data_align = data_align[alignBounds[0] :]
223 |         # -- RHS (graphically, but actually bottom in array-land!)
224 |         if alignBounds[1] > mp:
225 |             # pad RHS of traces w zeros or np.nans
226 | 
227 |             data_align = np.vstack(
228 |                 (data_align, np.zeros((alignBounds[1] - mp, nTraces)) * nanOrOne)
229 |             )
230 |         elif alignBounds[1] < mp:
231 |             data_align = data_align[:M]
232 |     return data_align
233 | 
234 | 
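# The two phase-align helpers are designed to be used as a pair; a sketch,
# with `tau` from one of the tauCalc* functions further below:
#
#     delays = phaseAlignIdx(tau, Hz, wgt, refTrace)
#     aligned = phaseAlignData(data, delays, wgt, refTrace, M, Moffset)
#
# which is exactly how beamForm chains them above.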
235 | def phaseAlignIdx(tau, Hz, wgt, refTrace):
236 |     r"""
237 |     Calculate shifts required to phase align ``n`` traces in a data matrix.
238 | 
239 |     Args:
240 |         tau: ``(n(n-1)//2, )`` array; time delays of relative signal arrivals
241 |             (TDOA) for all unique sensor pairings
242 |         Hz (int or float): Sample rate
243 |         wgt: Vector of relative weights of length ``n`` (0 = exclude trace)
244 |         refTrace (int): Reference sensor for TDOA information
245 | 
246 |     Returns:
247 |         ``(n, )`` array; vector of shifts as indices for embedding traces in an
248 |         array, such that trace ``i`` will begin at index ``out[i]``
249 | 
250 |     Notes:
251 |         The output of this function is compatible with the inputs of
252 |         :func:`phaseAlignData`.
253 |     """
254 | 
255 |     # -- this is low level code w/out error checks or defaults, designed
256 |     # -- to be called by wrappers that make use of the indices provided
257 |     # solve for number of traces from pairings in tau
258 |     nTraces = int(1 + np.sqrt(1 + 8 * len(tau))) // 2
259 |     # calculate delays (samples) relative to refTrace for each trace
260 |     # -- first pass grabs delays starting with refTrace as i in ij
261 |     delayIdx = (
262 |         nTraces * refTrace - refTrace * (refTrace + 1) // 2,
263 |         nTraces * (refTrace + 1) - (refTrace + 1) * (refTrace + 2) // 2,
264 |     )
265 |     delays = np.hstack((0, (tau[range(delayIdx[0], delayIdx[1])] * Hz))).astype(int)
266 |     # the std. rij list comprehension for unique inter-trace pairs
267 |     tau_ij = [(i, j) for i in range(nTraces) for j in range(i + 1, nTraces)]
268 |     # -- second pass grabs delays with refTrace as j in ij
269 |     preRefTau_idx = [k for k in range(len(tau)) if tau_ij[k][1] == refTrace]
270 |     delays = np.hstack((-tau[preRefTau_idx] * Hz, delays)).astype(int)
271 |     # re-shift delays such that the first sample of the trace requiring the
272 |     # largest shift left relative to the refTrace (hence, largest positive,
273 |     # semi-definite element of delays) has an index of zero; i.e., all traces
274 |     # have a valid starting index for embedding into an array (no padding)
275 |     return -delays + max(delays)
276 | 
277 | 
278 | def tauCalcPW(vel, azPhi, rij):
279 |     r"""
280 |     Calculate the theoretical tau vector for a plane wave moving across an
281 |     array of ``n`` elements.
282 | 
283 |     Args:
284 |         vel (float): Signal velocity across array
285 |         azPhi: Back azimuth (float) from co-array coordinate origin (° CW from
286 |             N); back azimuth and elevation angle (array) from co-array
287 |             coordinate origin (° CW from N, ° from N-E plane)
288 |         rij: ``(d, n)`` array; ``n`` element coordinates as [easting, northing,
289 |             {elevation}] column vectors in ``d`` dimensions
290 | 
291 |     Returns:
292 |         ``(n(n-1)//2, )`` array; time delays of relative signal arrivals (TDOA)
293 |         for all unique sensor pairings
294 |     """
295 | 
296 |     dim, nTraces = rij.shape
297 |     if dim == 2:
298 |         rij = np.vstack((rij, np.zeros((1, nTraces))))
299 |     idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])]
300 |     X = rij[:, [i[0] for i in idx]] - rij[:, [j[1] for j in idx]]
301 |     if np.isscalar(azPhi):
302 |         phi = 0
303 |         az = azPhi
304 |     else:
305 |         phi = azPhi[1] / 180 * np.pi
306 |         az = azPhi[0]
307 |     az = np.pi * (-az / 180 + 0.5)
308 |     s = np.array([np.cos(az), np.sin(az), np.sin(phi)])
309 |     s[:-1] *= np.cos(phi)
310 | 
311 |     return X.T @ (s / vel)
312 | 
313 | 
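# Usage sketch for tauCalcPW (hypothetical 3-element array, coordinates in
# km): the plane-wave TDOAs for a 0.34 km/s arrival from a 230-degree back
# azimuth are
#
#     rij = np.array([[0.0, 0.1, -0.1],
#                     [0.0, 0.1, 0.0]])
#     tau = tauCalcPW(0.34, 230.0, rij)  # shape (3,): pairs (0,1), (0,2), (1,2)
#
# and feeding `tau` to phaseAlignIdx gives the corresponding sample shifts.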
314 | def tauCalcSW(vel, rAzPhi, rij):
315 |     r"""
316 |     Calculate the theoretical tau vector for a spherical wave moving across
317 |     an array of ``n`` elements.
318 | 
319 |     Args:
320 |         vel (float): Signal velocity across array
321 |         rAzPhi: Range to source and back azimuth from co-array coordinate
322 |             origin (° CW from N); range to source, back azimuth and elevation
323 |             angle from co-array coordinate origin (° CW from N, ° from N-E
324 |             plane)
325 |         rij: ``(d, n)`` array; ``n`` element coordinates as [easting, northing,
326 |             {elevation}] column vectors in ``d`` dimensions
327 | 
328 |     Returns:
329 |         ``(n(n-1)//2, )`` array; time delays of relative signal arrivals (TDOA)
330 |         for all unique sensor pairings
331 |     """
332 | 
333 |     dim, nTraces = rij.shape
334 |     if dim == 2:
335 |         rij = np.vstack((rij, np.zeros((1, nTraces))))
336 |     if len(rAzPhi) == 3:
337 |         phi = rAzPhi[2] / 180 * np.pi
338 |     else:
339 |         phi = 0
340 |     idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])]
341 |     # aw, this is so convolutedly elegant that it must be saved in a
342 |     # comment for posterity! But the line below it is "simpler":
343 |     # az = -( (rAzPhi[1]/180*pi - 2*pi)%(2*pi) - pi/2 )%(2*pi)
344 |     az = np.pi * (-rAzPhi[1] / 180 + 0.5)
345 |     source = rAzPhi[0] * np.array([np.cos(az), np.sin(az), np.sin(phi)])
346 |     source[:-1] *= np.cos(phi)
347 |     tau2sensor = (
348 |         np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, 2, axis=0)
349 |         / vel
350 |     )
351 | 
352 |     return tau2sensor[[j[1] for j in idx]] - tau2sensor[[i[0] for i in idx]]
353 | 
354 | 
355 | def tauCalcSWxy(vel, xy, rij):
356 |     r"""
357 |     Calculate the theoretical tau vector for a spherical wave moving across
358 |     an array of ``n`` elements.
359 | 
360 |     Args:
361 |         vel (float): Signal velocity across array
362 |         xy: ``(d, )`` array; source location as 2-D [easting, northing] or 3-D
363 |             [easting, northing, elevation] coordinates
364 |         rij: ``(d, n)`` array; ``n`` element coordinates as [easting, northing,
365 |             {elevation}] column vectors in ``d`` dimensions
366 | 
367 |     Returns:
368 |         ``(n(n-1)//2, )`` array; time delays of relative signal arrivals (TDOA)
369 |         for all unique sensor pairings
370 |     """
371 | 
372 |     dim, nTraces = len(rij), len(rij[0])
373 |     if dim == 2:
374 |         rij = np.vstack((rij, [0] * nTraces))
375 |     else:
376 |         rij = np.vstack((rij,))
377 |     if len(xy) == 2:
378 |         xy0 = 0
379 |     else:
380 |         xy0 = []
381 |     source = np.hstack((xy, xy0))
382 |     idx = [(i, j) for i in range(rij.shape[1] - 1) for j in range(i + 1, rij.shape[1])]
383 |     tau2sensor = (
384 |         np.linalg.norm(rij - np.tile(source, nTraces).reshape(nTraces, 3).T, 2, axis=0)
385 |         / vel
386 |     )
387 | 
388 |     return tau2sensor[[j[1] for j in idx]] - tau2sensor[[i[0] for i in idx]]
389 | 
390 | 
391 | def randc(N, beta=0.0):
392 |     r"""
393 |     Colored noise generator. This function generates pseudo-random colored
394 |     noise (power spectrum proportional to a power of frequency) via fast
395 |     Fourier inversion of the appropriate amplitudes and complex phases.
396 | 
397 |     Args:
398 |         N (int or tuple): Shape of output array
399 |         beta (float): Spectrum of output will be proportional to ``f**(-beta)``
400 | 
401 |     Returns:
402 |         Colored noise sequences as columns with shape `N`, each normalized to
403 |         zero-mean and unit-variance. Result is always real valued
404 | 
405 |     Notes:
406 |         Spectrum of output will be :math:`\sim 1/f^\beta`. White noise is the
407 |         default (:math:`\beta = 0`); others are pink (:math:`\beta = 1`) or
408 |         brown/surf (:math:`\beta = 2`). Since the output is zero-mean, the DC
409 |         spectral component(s) will be identically zero.
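    Example:
        A minimal sketch (shapes per `N` above): four columns of pink noise,
        1024 samples each::

            x = randc((1024, 4), beta=1.0)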
410 |     """
411 | 
412 |     # catch scalar input & form tuple
413 |     if type(N) is int:
414 |         N = (N,)
415 |     # ensure DC component of output will be zero (unless beta == 0, see below)
416 |     if beta < 0:
417 |         c0 = 0
418 |     else:
419 |         c0 = np.inf
420 |     # catch the case of a 1D array in python, so dimensions act like a matrix
421 |     if len(N) == 1:
422 |         M = (N[0], 1)  # use M[1] any time # of columns is called for
423 |     else:
424 |         M = N
425 |     # phase array with size (# of unique complex Fourier components,
426 |     # columns of original data)
427 |     n = int(np.floor((N[0] - 1) / 2))  # works for odd/even cases
428 |     cPhase = np.random.random_sample((n, M[1])) * 2 * np.pi
429 |     # Nyquist placeholders
430 |     if N[0] % 2:
431 |         # odd case: Nyquist is 1/2 freq step between highest components
432 |         # so it is empty
433 |         cFiller = np.empty((0,))
434 |         pFiller = np.empty((0, M[1]))
435 |     else:
436 |         # even case: we have a Nyquist component
437 |         cFiller = N[0] / 2
438 |         pFiller = np.zeros((1, M[1]))
439 |     # noise amplitudes are just indices (unit-offset!!) to be normalized
440 |     # later, phases are arranged as Fourier conjugates
441 |     r = np.hstack((c0, np.arange(1, n + 1), cFiller, np.arange(n, 0, -1)))
442 |     phasor = np.exp(
443 |         np.vstack((np.zeros((1, M[1])), 1j * cPhase, pFiller, -1j * np.flipud(cPhase)))
444 |     )
445 |     # this is like my cols.m function in MATLAB
446 |     r = np.tile(r, M[1]).reshape(M[1], N[0]).T ** (-beta / 2)
447 |     # catch beta = 0 case here to ensure zero DC component
448 |     if not beta:
449 |         r[0] = 0
450 |     # inverse transform to get time series as columns, ensuring real output
451 |     X = r * phasor
452 |     r = np.real(np.fft.ifft(X, axis=0) * X.shape[0])
453 | 
454 |     # renormalize r such that mean = 0 & std = 1 (MATLAB dof default used)
455 |     # and return it in its original shape (i.e., a 1D vector, if req'd)
456 |     return r.reshape(N) / np.std(r, ddof=1)
457 | 
458 | 
459 | def psf(x, p=2.0, w=3, n=3, window=None):
460 |     r"""
461 |     Pure-state filter a data matrix. This function uses a generalized coherence
462 |     estimator to enhance coherent channels in an ensemble of time series.
463 | 
464 |     Args:
465 |         x: ``(m, d)`` array of real-valued time series data as columns
466 |         p (float): Level of contrast in filter
467 |         w (int): Width of smoothing window in frequency domain
468 |         n (int): Number of times to smooth in frequency domain
469 |         window: Type of smoothing window in frequency domain. Default is
470 |             `None`, which results in a triangular window
471 | 
472 |     Returns:
473 |         tuple: Tuple containing:
474 | 
475 |         - **x_psf** – ``(m, d)`` array; real-valued, pure state-filtered
476 |           version of `x`
477 |         - **P** – ``(m//2+1, )`` array; degree of polarization (generalized
478 |           coherence estimate) in frequency components of `x` from DC to the
479 |           Nyquist
480 | 
481 |     Notes:
482 |         See any of Samson & Olson's early 1980s papers, or Szuberla's 1997 PhD
483 |         thesis, for a full description of the underlying theory. The code
484 |         implementation's defaults reflect historical values for the smoothing
485 |         window; a more realistic `w` would be of order :math:`\sqrt{m}`,
486 |         combined with a smoother window, such as :func:`numpy.hanning`. Letting
487 |         `n=3` is a reasonable choice for all window types to ensure confidence
488 |         in the spectral estimates used to construct the filter.
489 | 
490 |         For :math:`m` samples of :math:`d` channels of data, a ``(d, d)``
491 |         spectral matrix :math:`\mathbf{S}[f]` can be formed at each of the
492 |         ``m//2+1`` real frequency components from DC to the Nyquist.
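        (Concretely, in the implementation below, :math:`\mathbf{S}[f]` is the
        outer product of the channels' raw Fourier coefficients at each
        frequency, stored in reduced vector form and then smoothed across
        neighboring frequencies.)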
493 |         The generalized coherence among all of the :math:`d` channels at each
494 |         frequency is estimated by
495 | 
496 |         .. math::
497 | 
498 |             P[f] = \frac{d \left(\text{Tr}\,\mathbf{S}^2[f]\right) -
499 |             \left(\text{Tr}\,\mathbf{S}[f]\right)^2}
500 |             {\left(d-1\right)\left(\text{Tr}\,\mathbf{S}[f]\right)^2},
501 | 
502 |         where :math:`\text{Tr}\,\mathbf{S}[f]` is the trace of the spectral
503 |         matrix at frequency :math:`f`. The filter is constructed by applying
504 |         the following multiplication in the frequency domain
505 | 
506 |         .. math::
507 | 
508 |             \hat{\mathbf{X}}[f] = P[f]^p\mathbf{X}[f],
509 | 
510 |         where :math:`\mathbf{X}[f]` is the Fourier transform component of
511 |         all channels at frequency :math:`f` and :math:`p` is the level of
512 |         contrast. The inverse Fourier transform of the matrix
513 |         :math:`\hat{\mathbf{X}}` gives the filtered time series.
514 | 
515 |         The estimator is identically :math:`P[f] = 1` without smoothing
516 |         in the spectral domain (a consequence of the variance in the raw
517 |         Fourier components), but it is bound by
518 |         :math:`P[f]\in[0,1]` even with smoothing, hence its
519 |         utility as a multiplicative filter in the frequency domain. Similarly,
520 |         this bound allows the contrast between channels to be enhanced based on
521 |         their generalized coherence if :math:`p>1`.
522 | 
523 |         Data channels should be pre-processed to have unit-variance, since
524 |         unlike the traditional two-channel magnitude squared coherence
525 |         estimators, the generalized coherence estimate can be biased by
526 |         relative amplitude variations among the channels. To mitigate the
527 |         effects of smoothing complex values into the DC and Nyquist components,
528 |         they are set to zero before computing the inverse transform of
529 |         :math:`\hat{\mathbf{X}}`.
530 |     """
531 | 
532 |     # private functions up front
533 |     def Ssmooth(S, w, n, window):
534 |         # smooth special format of spectral matrices as vectors
535 |         for k in range(int(n)):  # int() guards against a float `n`
536 |             # MATLAB treats odd/even w differently with mode='full',
537 |             # but the behavior below now matches conv2 exactly
538 |             S = convolve2d(S, window(w).reshape(-1, 1), mode="full")[
539 |                 w // 2 : -w // 2 + 1, :
540 |             ]
541 |         return S
542 | 
543 |     def triang(N):
544 |         # for historical reasons, the default window shape
545 |         return np.bartlett(N + 2)[1:-1]
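    # A sketch of the smoother alternative suggested in the Notes above
    # (hypothetical call, not a module default):
    #
    #     x_psf, P = psf(x, w=int(np.sqrt(x.shape[0])), window=np.hanning)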
546 | 
547 |     # size up input data
548 |     N, d = x.shape
549 |     # Fourier transform of data matrix by time series columns, retain only
550 |     # the diagonal & above (unique spectral components)
551 |     Nx = x.shape
552 |     X = np.fft.fft(x, axis=0) / Nx[0]
553 |     X = X[: N // 2 + 1, :]
554 |     # form spectral matrix stack in reduced vector form (**significant**
555 |     # speed improvement due to memory problems with 3D tensor format -- what
556 |     # was too slow in 1995 is still too slow in 2017!)
557 |     # -- pre-allocate stack & fill it in
558 |     Sidx = [(i, j) for i in range(d) for j in range(i, d)]
559 |     S = np.empty((X.shape[0], d * (d + 1) // 2), dtype=complex)
560 |     for f in range(X.shape[0]):
561 |         # at each freq f, S_w is the outer product of raw Fourier transforms
562 |         # of each column at that freq; select unique components to store
563 |         S_w = np.outer(X[f, :], X[f, :].conj())
564 |         S[f, :] = S_w[[i[0] for i in Sidx], [j[1] for j in Sidx]]
565 |     # smooth each column of S (i.e., in the freq. domain)
566 |     if not window:
567 |         # use default window
568 |         window = triang
569 |     S = Ssmooth(S, w, n, window)
570 |     # trace calculations (notation consistent w traceCalc.m in MATLAB), but
571 |     # since results are positive, semi-definite, real -- return as such
572 |     # -- diagonal elements
573 |     didx = [i for i in range(len(Sidx)) if Sidx[i][0] == Sidx[i][1]]
574 |     # -- (trace S)**2 of each flapjack (really a vector here)
575 |     trS = sum(S[:, didx].real.T) ** 2
576 |     # -- trace of S**2 of each flapjack (ditto, vector), here we recognize
577 |     #    that trace(S@S.T) is just the sum of squared magnitudes of all the
578 |     #    non-redundant components of S, doubling squares of the non-diagonal
579 |     #    elements
580 |     S = (S * (S.conj()) * 2).real
581 |     S[:, didx] /= 2
582 |     trS2 = sum(S.T)
583 |     # Samson-esque polarization estimate (if d == 2, same as Fowler)
584 |     P = (d * trS2 - trS) / ((d - 1) * trS)
585 |     # a little trick here to handle odd/even number of samples and zero-out
586 |     # both the DC & Nyquist (they're both complex-contaminated due to Ssmooth)
587 |     P[0] = 0
588 |     if N % 2:
589 |         # odd case: Nyquist is 1/2 freq step between highest components
590 |         fudgeIdx = 0
591 |     else:
592 |         # even case: we have a Nyquist component
593 |         fudgeIdx = 1
594 |         P[-1] = 0
595 |     # apply P as contrast agent to frequency series
596 |     X *= np.tile(P**p, d).reshape(X.shape[::-1]).T
597 |     # inverse transform X and ensure real output
598 |     XX = np.vstack(
599 |         (X[list(range(N // 2 + 1))], X[list(range(N // 2 - fudgeIdx, 0, -1))].conj())
600 |     )
601 |     x_psf = np.real(np.fft.ifft(XX, axis=0) * XX.shape[0])
602 | 
603 |     return x_psf, P
604 | 
605 | 
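# Usage sketch for psf (hypothetical (m, d) array `x` of unit-variance
# channels, per the pre-processing note in the docstring):
#
#     x_psf, P = psf(x)
#
# where `x_psf` is the filtered data and `P` is the per-frequency degree
# of polarization.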
606 | def alignTracesMinRMS(beamMatrix, wgt, lagMag=10):
607 |     r"""
608 |     Align traces to minimize RMS error with the beam. Each trace is shifted by up to ``lagMag`` samples to minimize the root mean square (RMS) error between it and the estimated beam, and the beam re-formed from the adjusted traces is returned.
609 | 
610 |     Args:
611 |         beamMatrix: ``(m, n)`` array; time series with ``m`` samples from ``n`` traces as columns
612 |         wgt: Vector of relative weights of length ``n`` (0 == exclude trace)
613 |         lagMag (int): Maximum lag value for trace adjustment
614 | 
615 |     Returns:
616 |         ``(m, )`` array; best beam formed from the lag-adjusted traces
617 |     """
618 | 
619 |     # calculate beam
620 |     beam = beamMatrix @ wgt / wgt.sum()
621 |     # compute RMS error between the beam and each trace
622 |     rmsErrors = np.array(
623 |         [
624 |             (
625 |                 np.sqrt(np.nanmean((beam - beamMatrix[:, i]) ** 2))
626 |                 if not np.isnan(beamMatrix[:, i]).any()
627 |                 else np.nan
628 |             )
629 |             for i in range(beamMatrix.shape[1])
630 |         ]
631 |     )
632 |     # sort the indices of traces based on RMS errors in ascending order (smallest error first)
633 |     sortedIndices = np.argsort(rmsErrors)
634 |     sortedBeamMatrix = beamMatrix[:, sortedIndices]
635 |     # set lag values for adjusted RMS calculation
636 |     lags = np.arange(-lagMag, lagMag + 1)
637 |     bestLags = []
638 | 
639 |     def shiftTrace(trace, lag):
640 |         """Shift array without wrap-around, filling shifted-in positions with NaN."""
641 |         results = np.empty_like(trace)
642 |         if lag > 0:
643 |             results[:lag] = np.nan
644 |             results[lag:] = trace[:-lag]
645 |         elif lag < 0:
646 |             results[lag:] = np.nan
647 |             results[:lag] = trace[-lag:]
648 |         else:
649 |             results = trace.copy()
650 |         return results
651 | 
652 |     # adjust traces based on minimizing RMS error with the beam
653 |     for i in range(sortedBeamMatrix.shape[1]):
654 |         trace = sortedBeamMatrix[:, i]
655 |         # compute RMS errors at different lags by shifting the traces
656 |         rmsErrorsLags = []
657 |         for lag in lags:
658 |             shiftedTrace = shiftTrace(trace, lag)
659 |             if np.isnan(shiftedTrace).all():
660 |                 rmsError = np.nan
661 |             else:
662 |                 rmsError = np.sqrt(np.nanmean((beam - shiftedTrace) ** 2))
663 |             rmsErrorsLags.append(rmsError)
664 |         rmsErrorsLags = np.array(rmsErrorsLags)
665 |         # pick the lag whose shifted trace best matches the beam
666 |         bestLagValue = lags[np.nanargmin(rmsErrorsLags)]
667 |         bestLags.append(bestLagValue)
668 | 
669 |     # shift traces based on best lag values
670 |     numSamples = beamMatrix.shape[0]
671 |     rmsShiftedBeamMatrix = np.empty((numSamples, sortedBeamMatrix.shape[1]))
672 |     for i in range(sortedBeamMatrix.shape[1]):
673 |         trace = sortedBeamMatrix[:, i]
674 |         lag = bestLags[i]
675 |         shiftedTrace = shiftTrace(trace, lag)
676 |         rmsShiftedBeamMatrix[:, i] = shiftedTrace
677 | 
678 |     # calculate beam from adjusted traces
679 |     beam = np.nanmean(rmsShiftedBeamMatrix, axis=1)
680 | 
681 |     return beam
682 | 
--------------------------------------------------------------------------------