├── pylops_gpu
│   ├── avo
│   │   ├── __init__.py
│   │   └── poststack.py
│   ├── optimization
│   │   ├── __init__.py
│   │   ├── cg.py
│   │   └── leastsquares.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── backend.py
│   │   ├── torch2numpy.py
│   │   ├── dottest.py
│   │   └── complex.py
│   ├── signalprocessing
│   │   ├── __init__.py
│   │   └── Convolve1D.py
│   ├── basicoperators
│   │   ├── __init__.py
│   │   ├── Laplacian.py
│   │   ├── SecondDerivative.py
│   │   ├── VStack.py
│   │   ├── FirstDerivative.py
│   │   ├── Restriction.py
│   │   ├── Identity.py
│   │   ├── Diagonal.py
│   │   └── MatrixMult.py
│   ├── __init__.py
│   └── TorchOperator.py
├── examples
│   ├── README.txt
│   ├── plot_matrixmult.py
│   ├── plot_identity.py
│   ├── plot_convolve.py
│   ├── plot_fista.py
│   ├── plot_diagonal.py
│   ├── plot_tvreg.py
│   └── plot_derivative.py
├── tutorials
│   ├── README.txt
│   ├── ad.py
│   └── poststack.py
├── setup.cfg
├── docs
│   ├── source
│   │   ├── _static
│   │   │   ├── g-pylops.png
│   │   │   ├── g-pylops_b.png
│   │   │   └── style.css
│   │   ├── roadmap.rst
│   │   ├── credits.rst
│   │   ├── _templates
│   │   │   ├── autosummary
│   │   │   │   ├── base.rst
│   │   │   │   ├── exception.rst
│   │   │   │   ├── function.rst
│   │   │   │   ├── class.rst
│   │   │   │   └── module.rst
│   │   │   ├── layout.html
│   │   │   └── breadcrumbs.html
│   │   ├── contributing.rst
│   │   ├── changelog.rst
│   │   ├── api
│   │   │   ├── others.rst
│   │   │   └── index.rst
│   │   ├── installation.rst
│   │   ├── index.rst
│   │   └── conf.py
│   └── Makefile
├── testdata
│   └── avo
│       └── poststack_model.npz
├── requirements.txt
├── readthedocs.yml
├── environment.yml
├── requirements-doc.txt
├── requirements-dev.txt
├── MANIFEST.in
├── CHANGELOG.md
├── environment-dev.yml
├── azure-pipelines.yml
├── .gitignore
├── .travis.yml
├── Makefile
├── pytests
│   ├── test_combine.py
│   ├── test_utils.py
│   ├── test_torchoperator.py
│   ├── test_diagonal.py
│   ├── test_matrixmult.py
│   ├── test_sparsity.py
│   ├── test_linearoperator.py
│   ├── test_identity.py
│   ├── test_convolve.py
│   ├── test_poststack.py
│   └── test_derivative.py
├── setup.py
├── README.md
└── LICENSE

/pylops_gpu/avo/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/pylops_gpu/optimization/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/examples/README.txt: -------------------------------------------------------------------------------- 1 | .. _general_examples: 2 | 3 | --------------------------------------------------------------------------------
/pylops_gpu/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dottest import dottest --------------------------------------------------------------------------------
/tutorials/README.txt: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | Tutorials 4 | --------- --------------------------------------------------------------------------------
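(Illustrative aside, not a repository file: the ``dottest`` helper exported by pylops_gpu/utils/__init__.py above checks that an operator's forward and adjoint are mutually consistent. A minimal sketch of its typical use, with an arbitrary 21 x 11 operator, following the call pattern used in the test suite:)

import torch
from pylops_gpu import MatrixMult
from pylops_gpu.utils import dottest

A = torch.randn(21, 11, dtype=torch.float32)
Aop = MatrixMult(A, dtype=torch.float32)
# verifies u^T (A v) ~= (A^H u)^T v for random u and v
assert dottest(Aop, 21, 11, tol=1e-4)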
/docs/source/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. auto{{ objtype }}:: {{ objname }} 6 | 7 | .. raw:: html 8 | 9 | 10 | --------------------------------------------------------------------------------
/docs/source/_templates/autosummary/exception.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoexception:: {{ objname }} 6 | 7 | 8 | .. raw:: html 9 | 10 |
11 | 12 | -------------------------------------------------------------------------------- /docs/source/contributing.rst: -------------------------------------------------------------------------------- 1 | .. _contributing: 2 | 3 | Contributing 4 | ============ 5 | 6 | Contributions are welcome and greatly appreciated! 7 | 8 | Follow the instructions in our `main repository `_ -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pylops-gpu 2 | channels: 3 | - defaults 4 | - conda-forge 5 | - pytorch 6 | dependencies: 7 | - python>=3.6.4 8 | - numpy>=1.15.0 9 | - scipy>=1.4.0 10 | - pytorch>=1.2.0 11 | - pylops<=1.13.0 12 | - pip: 13 | - pytorch-complex-tensor -------------------------------------------------------------------------------- /requirements-doc.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.15.0 2 | scipy>=1.4.0 3 | https://download.pytorch.org/whl/cpu/torch-1.4.0%2Bcpu-cp36-cp36m-linux_x86_64.whl 4 | pytorch_complex_tensor 5 | pylops<=1.13.0 6 | matplotlib 7 | Sphinx 8 | sphinx-rtd-theme 9 | sphinx-gallery 10 | numpydoc 11 | nbsphinx 12 | image -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.15.0 2 | scipy>=1.4.0 3 | torch>=1.2.0 4 | pytorch_complex_tensor 5 | pylops[advanced]<=1.13.0 6 | matplotlib 7 | ipython 8 | pytest 9 | pytest-runner 10 | setuptools_scm 11 | Sphinx 12 | sphinx-rtd-theme 13 | sphinx-gallery 14 | numpydoc 15 | nbsphinx 16 | image -------------------------------------------------------------------------------- /docs/source/_templates/autosummary/function.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autofunction:: {{ objname }} 6 | 7 | .. include:: backreferences/{{ fullname }}.examples 8 | 9 | .. raw:: html 10 | 11 |
12 | 13 | --------------------------------------------------------------------------------
/pylops_gpu/basicoperators/__init__.py: -------------------------------------------------------------------------------- 1 | from .MatrixMult import MatrixMult, aslinearoperator 2 | from .Diagonal import Diagonal 3 | from .Identity import Identity 4 | from .Restriction import Restriction 5 | from .VStack import VStack 6 | from .FirstDerivative import FirstDerivative 7 | from .SecondDerivative import SecondDerivative 8 | from .Laplacian import Laplacian 9 | --------------------------------------------------------------------------------
/MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude .* 2 | exclude environment.yml requirements.txt Makefile 3 | exclude environment-dev.yml requirements-dev.txt azure-pipelines.yml 4 | exclude environment-dev.yml requirements-doc.txt readthedocs.yml 5 | recursive-exclude docs * 6 | recursive-exclude examples * 7 | recursive-exclude pytests * 8 | recursive-exclude testdata * 9 | recursive-exclude tutorials * 10 | --------------------------------------------------------------------------------
/pylops_gpu/utils/backend.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def device(): 5 | r"""Automatically identify device to be used with PyTorch 6 | 7 | Returns 8 | ------- 9 | device : :obj:`str` 10 | Identified device, ``cpu`` or ``cuda`` 11 | 12 | """ 13 | if torch.cuda.device_count() > 0 and torch.cuda.is_available(): 14 | device = 'cuda' 15 | else: 16 | device = 'cpu' 17 | return device 18 | --------------------------------------------------------------------------------
/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.0.1 2 | * Added ``pylops_gpu.optimization.sparsity.FISTA`` and 3 | ``pylops_gpu.optimization.sparsity.SplitBregman`` solvers 4 | * Modified ``pylops_gpu.TorchOperator`` to work with cupy arrays 5 | * Modified ``pylops_gpu.avo.poststack._PoststackLinearModelling`` to use 6 | the code written in the pylops library whilst still dealing with torch arrays 7 | * Allowed passing numpy dtypes to operators (automatic conversion 8 | to torch types) 9 | 10 | # 0.0.0 11 | * First official release. 12 | --------------------------------------------------------------------------------
/environment-dev.yml: -------------------------------------------------------------------------------- 1 | name: pylops-gpu 2 | channels: 3 | - defaults 4 | - conda-forge 5 | - pytorch 6 | dependencies: 7 | - python>=3.6.4 8 | - numpy>=1.15.0 9 | - scipy>=1.4.0 10 | - pytorch>=1.2.0 11 | - torchvision 12 | - pylops<=1.13.0 13 | - pytest 14 | - Sphinx 15 | - numpydoc 16 | - pip: 17 | - spgl1 18 | - pytorch-complex-tensor 19 | - pytest-runner 20 | - setuptools_scm 21 | - sphinx-rtd-theme 22 | - sphinx-gallery 23 | - nbsphinx 24 | - image 25 | --------------------------------------------------------------------------------
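(Illustrative aside, not a repository file: the ``device`` helper defined in pylops_gpu/utils/backend.py above is the entry point for device selection; a minimal sketch of its use, mirroring the pattern in the examples:)

import torch
from pylops_gpu.utils.backend import device

dev = device()  # 'cuda' when a GPU is visible to PyTorch, 'cpu' otherwise
x = torch.ones(10, dtype=torch.float32).to(dev)  # tensors then follow the device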
/azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | # Azure pipeline for PyLops-gpu 2 | 3 | pool: 4 | vmImage: 'ubuntu-latest' 5 | 6 | steps: 7 | - task: UsePythonVersion@0 8 | inputs: 9 | versionSpec: '3.6' 10 | architecture: 'x64' 11 | 12 | - script: | 13 | python -m pip install --upgrade pip setuptools wheel django 14 | pip install -r requirements-dev.txt 15 | pip install pytest-azurepipelines pytest-cov 16 | pip install . 17 | displayName: 'Install dependencies and library' 18 | 19 | - script: | 20 | python -m pytest pytests/ 21 | pytest pytests/ --cov pylops_gpu --cov-report html 22 | condition: succeededOrFailed() 23 | displayName: 'Run tests' 24 | --------------------------------------------------------------------------------
/docs/source/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changelog: 2 | 3 | Changelog 4 | ========= 5 | 6 | 7 | Version 0.0.1 8 | ------------- 9 | 10 | *Released on: 03/05/2021* 11 | 12 | * Added :py:func:`pylops_gpu.optimization.sparsity.FISTA` and 13 | :py:func:`pylops_gpu.optimization.sparsity.SplitBregman` solvers 14 | * Modified :py:class:`pylops_gpu.TorchOperator` to work with cupy arrays 15 | * Modified :py:func:`pylops_gpu.avo.poststack._PoststackLinearModelling` to use 16 | the code written in the pylops library whilst still dealing with torch arrays 17 | * Allowed passing numpy dtypes to operators (automatic conversion 18 | to torch types) 19 | 20 | Version 0.0.0 21 | ------------- 22 | 23 | *Released on: 12/01/2020* 24 | 25 | * First official release. 26 | --------------------------------------------------------------------------------
/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # Disable numba 5 | # export NUMBA_DISABLE_JIT=1 6 | 7 | # You can set these variables from the command line. 8 | SPHINXOPTS = 9 | SPHINXBUILD = sphinx-build 10 | SPHINXPROJ = Pylops-gpu 11 | SOURCEDIR = source 12 | BUILDDIR = build 13 | 14 | # Put it first so that "make" without argument is like "make help". 15 | help: 16 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 17 | 18 | .PHONY: help Makefile 19 | 20 | # Catch-all target: route all unknown targets to Sphinx using the new 21 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 22 | %: Makefile 23 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) --------------------------------------------------------------------------------
/.gitignore: -------------------------------------------------------------------------------- 1 | # OS generated files # 2 | .*.swp 3 | *.py[cod] 4 | .DS_Store 5 | .DS_Store?
6 | ._* 7 | .Spotlight-V100 8 | .Trashes 9 | ehthumbs.db 10 | Thumbs.db 11 | .idea/* 12 | static/* 13 | db.sqlite3 14 | 15 | # Build # 16 | build 17 | dist 18 | pylops_gpu.egg-info/ 19 | 20 | # setuptools_scm generated # 21 | pylops_gpu/version.py 22 | 23 | # Development # 24 | .ipynb_checkpoints/ 25 | notebooks 26 | TODO 27 | 28 | # Documentation # 29 | docs/build 30 | docs/source/api/generated 31 | docs/source/gallery 32 | docs/source/tutorials 33 | 34 | # Pylint # 35 | pylint_plot.py 36 | pylintrc 37 | 38 | # Coverage reports 39 | COVERAGE 40 | .coverage 41 | coverage.xml 42 | htmlcov/ 43 | 44 | # Airspeed velocity benchmarks 45 | ASV 46 | .asv/ 47 | asv.conf.json 48 | benchmarks/ -------------------------------------------------------------------------------- /pylops_gpu/__init__.py: -------------------------------------------------------------------------------- 1 | from .LinearOperator import LinearOperator 2 | from .TorchOperator import TorchOperator 3 | from .basicoperators import MatrixMult, aslinearoperator 4 | from .basicoperators import Diagonal 5 | from .basicoperators import Identity 6 | from .basicoperators import Restriction 7 | from .basicoperators import VStack 8 | from .basicoperators import FirstDerivative 9 | from .basicoperators import SecondDerivative 10 | from .basicoperators import Laplacian 11 | 12 | from .optimization.cg import cg, cgls 13 | from .optimization.leastsquares import NormalEquationsInversion 14 | from .optimization.sparsity import FISTA, SplitBregman 15 | 16 | from .avo.poststack import PoststackLinearModelling 17 | 18 | from . import avo 19 | from . import basicoperators 20 | from . import signalprocessing 21 | from . import optimization 22 | from . import utils 23 | 24 | 25 | try: 26 | from .version import version as __version__ 27 | except ImportError: 28 | __version__ = '0.0.0' -------------------------------------------------------------------------------- /docs/source/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | 7 | .. rubric:: Methods 8 | 9 | .. autosummary:: 10 | {% for item in methods %} 11 | ~{{ objname }}.{{ item }} 12 | {%- endfor %} 13 | 14 | {# .. rubric:: Attributes#} 15 | {##} 16 | {# .. autosummary::#} 17 | {# {% for item in attributes %}#} 18 | {# ~{{ objname }}.{{ item }}#} 19 | {# {%- endfor %}#} 20 | 21 | .. raw:: html 22 | 23 |
24 | 25 | 26 | .. include:: backreferences/{{ fullname }}.examples 27 | 28 | .. raw:: html 29 | 30 | 31 | 32 | 33 | {#{% for item in methods %} #} 34 | {#{% if item != '__init__' %} #} 35 | {#.. automethod:: {{ objname }}.{{ item }} #} 36 | {#{% endif %} #} 37 | {#{% endfor %} #} 38 | 39 | {#.. raw:: html #} 40 | {# #} 41 | {# #} 42 | 43 | --------------------------------------------------------------------------------
/docs/source/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 4 | 5 | ``{{ fullname }}`` 6 | {% for i in range(fullname|length + 15) %}-{% endfor %} 7 | 8 | .. raw:: html 9 | 10 | 11 | 12 | .. automodule:: {{ fullname }} 13 | 14 | {% block classes %} 15 | {% if classes %} 16 | .. rubric:: Classes 17 | 18 | .. autosummary:: 19 | :toctree: ./ 20 | {% for item in classes %} 21 | {{ fullname }}.{{ item }} 22 | {% endfor %} 23 | {% endif %} 24 | {% endblock %} 25 | 26 | 27 | {% block functions %} 28 | {% if functions %} 29 | .. rubric:: Functions 30 | 31 | .. autosummary:: 32 | :toctree: ./ 33 | {% for item in functions %} 34 | {{ fullname }}.{{ item }} 35 | {% endfor %} 36 | {% endif %} 37 | {% endblock %} 38 | 39 | 40 | {% block exceptions %} 41 | {% if exceptions %} 42 | .. rubric:: Exceptions 43 | 44 | .. autosummary:: 45 | :toctree: ./ 46 | {% for item in exceptions %} 47 | {{ fullname }}.{{ item }} 48 | {% endfor %} 49 | {% endif %} 50 | {% endblock %} 51 | 52 | .. raw:: html 53 | 54 |
55 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: python 3 | python: 4 | - '3.7' 5 | os: 6 | - linux 7 | script: 8 | - pip3 install -r requirements-dev.txt 9 | - pip3 install . 10 | - python3 setup.py test 11 | deploy: 12 | provider: pypi 13 | user: __token__ 14 | distributions: sdist bdist_wheel 15 | skip_existing: true 16 | skip_upload_docs: true 17 | skip_cleanup: true 18 | on: 19 | tags: true 20 | password: 21 | secure: Er4noQo2sMKhtvtheKvvKKOfj8uNEhpjhBRf54y0S5e3HNK1U38nMZuKT7GXRwCDjPXzWRF6oZdlAyi1SRB0hPGtT4Ucm31k6qAEzi9Ph43T9BNIMIuIcEIwY3X2D9g5ySHyfEYLgGNAYzxttiKIhMPOk7vafUMsFUSh5ldZbY/ykSpcB8DiLKw+Z6+AV9pM5YTFn1Djn4pfC88G7tzFySw+b8BL9d8/hLAWmw70Kczh20l86zIOFV/CaW6ph9irssx9nrxz7W3Kb6YFl/QaOh34mEC0ZKoiz6LMdNAGX0RI0iCwtPHdUlPi2qSXXHLvLgqPXKjoqc2bTI19n39EBoSnIveyTYP1wUj3jRLG5pqeFr/Bo2Ti8By8Hye8Iqqdx2PT5wR8bWLiy+M1FMkeJjMT/ZvUudy00gg+7J/xRutBWhRmk2bZt6aCBG0NwpAVoN9UqssoXcYwFmRcOpleGNHo6Wi/0Rg59oAlN0PI+SCWMOcW1veKbsOgSi6nXisffgnZEsFWfEVZB0sLGMqFtBLAmY56PPKbEKqJXDZZ2MRcHDiZB0YfczewCeKdlPiRIQlpLDPu/xaUCiEAnvFZS1EKkpZ9F8/gpJey3e4UPUL+PPmAyMNA/4yBM5xrSjKVl09wuyzqvyTktBGDjaMHvZw0kwnysrmsdP5p30Rf3y8= 22 | -------------------------------------------------------------------------------- /docs/source/api/others.rst: -------------------------------------------------------------------------------- 1 | .. _others: 2 | 3 | 4 | PyLops-GPU Utilities 5 | ==================== 6 | Alongside with its *Linear Operators* and *Solvers*, PyLops-GPU contains 7 | also a number of auxiliary routines. 8 | 9 | 10 | Shared 11 | ------ 12 | 13 | Backends 14 | ~~~~~~~~ 15 | 16 | .. currentmodule:: pylops_gpu.utils 17 | 18 | .. autosummary:: 19 | :toctree: generated/ 20 | 21 | backend.device 22 | 23 | 24 | Dot-test 25 | ~~~~~~~~ 26 | 27 | .. currentmodule:: pylops_gpu.utils 28 | 29 | .. autosummary:: 30 | :toctree: generated/ 31 | 32 | dottest 33 | 34 | 35 | Torch2Numpy 36 | ~~~~~~~~~~~ 37 | 38 | .. currentmodule:: pylops_gpu.utils 39 | 40 | .. autosummary:: 41 | :toctree: generated/ 42 | 43 | torch2numpy.numpytype_from_torchtype 44 | torch2numpy.torchtype_from_numpytype 45 | 46 | 47 | Complex Tensors 48 | ~~~~~~~~~~~~~~~ 49 | 50 | .. currentmodule:: pylops_gpu.utils 51 | 52 | .. autosummary:: 53 | :toctree: generated/ 54 | 55 | complex.complextorch_fromnumpy 56 | complex.complexnumpy_fromtorch 57 | complex.conj 58 | complex.divide 59 | complex.reshape 60 | complex.flatten 61 | 62 | -------------------------------------------------------------------------------- /docs/source/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {# Import the theme's layout. #} 2 | {% extends "!layout.html" %} 3 | 4 | {% block extrahead %} 5 | {# Include require.js so that we can use WorldWind #} 6 | 7 | {% endblock %} 8 | 9 | 10 | {% block htmltitle %} 11 | {% if title == '' or title == 'Home' %} 12 | {{ docstitle|e }} 13 | {% else %} 14 | {{ title|striptags|e }}{{ titlesuffix }} 15 | {% endif %} 16 | {% endblock %} 17 | 18 | 19 | {% block menu %} 20 | {{ super() }} 21 | 22 | {% if menu_links %} 23 |

24 | 25 | {% if menu_links_name %} 26 | {{ menu_links_name }} 27 | {% else %} 28 | External links 29 | {% endif %} 30 | 31 |

32 |
    33 | {% for text, link in menu_links %} 34 |
  • {{ text }}
  • 35 | {% endfor %} 36 |
37 | {% endif %} 38 | {% endblock %} 39 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PIP := $(shell command -v pip3 2> /dev/null || command which pip 2> /dev/null) 2 | PYTHON := $(shell command -v python3 2> /dev/null || command which python 2> /dev/null) 3 | 4 | .PHONY: install dev-install install_conda dev-install_conda tests doc docupdate 5 | 6 | pipcheck: 7 | ifndef PIP 8 | $(error "Ensure pip or pip3 are in your PATH") 9 | endif 10 | @echo Using pip: $(PIP) 11 | 12 | pythoncheck: 13 | ifndef PYTHON 14 | $(error "Ensure python or python3 are in your PATH") 15 | endif 16 | @echo Using python: $(PYTHON) 17 | 18 | install: 19 | make pipcheck 20 | $(PIP) install -r requirements.txt && $(PIP) install . 21 | 22 | dev-install: 23 | make pipcheck 24 | $(PIP) install -r requirements-dev.txt && $(PIP) install -e . 25 | 26 | install_conda: 27 | conda env create -f environment.yml && source activate pylops-gpu && pip install . 28 | 29 | dev-install_conda: 30 | conda env create -f environment-dev.yml && source activate pylops-gpu && pip install -e . 31 | 32 | tests: 33 | make pythoncheck 34 | $(PYTHON) setup.py test 35 | 36 | doc: 37 | cd docs && rm -rf source/api/generated && rm -rf source/gallery &&\ 38 | rm -rf source/tutorials && rm -rf build && make html && cd .. 39 | 40 | docupdate: 41 | cd docs && make html && cd .. 42 | -------------------------------------------------------------------------------- /pytests/test_combine.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | import numpy as np 4 | 5 | from numpy.testing import assert_array_almost_equal 6 | 7 | from pylops_gpu.utils import dottest 8 | from pylops_gpu import MatrixMult, VStack 9 | from pylops_gpu.optimization.cg import cg 10 | 11 | par1 = {'ny': 101, 'nx': 101, 'imag': 0} # square real 12 | par2 = {'ny': 301, 'nx': 101, 'imag': 0} # overdetermined real 13 | 14 | 15 | @pytest.mark.parametrize("par", [(par1), (par2)]) 16 | def test_VStack(par): 17 | """Dot-test and inversion for VStack operator 18 | """ 19 | np.random.seed(10) 20 | G1 = torch.from_numpy(np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32')) 21 | G2 = torch.from_numpy(np.random.normal(0, 10, (par['ny'], par['nx'])).astype('float32')) 22 | x = torch.ones(par['nx'], dtype=torch.float32) + \ 23 | par['imag']*torch.ones(par['nx'], dtype=torch.float32) 24 | 25 | Vop = VStack([MatrixMult(G1, dtype=torch.float32), 26 | MatrixMult(G2, dtype=torch.float32)], 27 | dtype=torch.float32) 28 | assert dottest(Vop, 2*par['ny'], par['nx'], 29 | complexflag=0 if par['imag'] == 0 else 3) 30 | 31 | xcg = cg(Vop.H * Vop, Vop.H * (Vop * x), niter=300)[0] 32 | assert_array_almost_equal(x.numpy(), xcg.numpy(), decimal=4) 33 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from distutils.core import setup 3 | from setuptools import find_packages 4 | 5 | def src(pth): 6 | return os.path.join(os.path.dirname(__file__), pth) 7 | 8 | # Project description 9 | descr = """ 10 | An extension to PyLops for linear operators on GPUs. 
11 | """ 12 | 13 | # Setup 14 | setup( 15 | name='pylops_gpu', 16 | description=descr, 17 | long_description=open(src('README.md')).read(), 18 | long_description_content_type='text/markdown', 19 | keywords=['algebra', 20 | 'inverse problems', 21 | 'large-scale optimization'], 22 | classifiers=[ 23 | 'Development Status :: 1 - Planning', 24 | 'Intended Audience :: Developers', 25 | 'Intended Audience :: Science/Research', 26 | 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)', 27 | 'Natural Language :: English', 28 | 'Programming Language :: Python :: 3.6', 29 | 'Programming Language :: Python :: 3.7', 30 | 'Topic :: Scientific/Engineering :: Mathematics' 31 | ], 32 | author='mrava', 33 | author_email='matteoravasi@gmail.com', 34 | install_requires=['numpy >= 1.15.0', 35 | 'torch >= 1.2.0', 36 | 'pytorch_complex_tensor', 37 | 'pylops <= 1.13.0'], 38 | packages=find_packages(exclude=['pytests']), 39 | use_scm_version=dict(root = '.', 40 | relative_to = __file__, 41 | write_to = src('pylops_gpu/version.py')), 42 | setup_requires=['pytest-runner', 'setuptools_scm'], 43 | test_suite='pytests', 44 | tests_require=['pytest'], 45 | zip_safe=True) 46 | --------------------------------------------------------------------------------
/examples/plot_matrixmult.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Matrix Multiplication 3 | ===================== 4 | 5 | This example shows how to use the :py:class:`pylops_gpu.MatrixMult` operator 6 | to perform *Matrix inversion* of the following linear system. 7 | 8 | .. math:: 9 | \mathbf{y}= \mathbf{A} \mathbf{x} 10 | 11 | For square :math:`\mathbf{A}`, we will use the 12 | :py:func:`pylops_gpu.optimization.cg.cg` solver. 13 | 14 | """ 15 | import torch 16 | import numpy as np 17 | import matplotlib.pyplot as plt 18 | import matplotlib.gridspec as pltgs 19 | import pylops_gpu 20 | 21 | from pylops_gpu.utils.backend import device 22 | from pylops_gpu.optimization.cg import cg 23 | 24 | torch.manual_seed(0) 25 | dev = device() 26 | print('PyLops-gpu working on %s...' % dev) 27 | plt.close('all') 28 | 29 | 30 | ############################################################################### 31 | # Let's define the size ``N`` of the square matrix :math:`\mathbf{A}` and 32 | # fill the matrix with random numbers 33 | N = 20 34 | A = torch.randn((N, N), dtype=torch.float32).to(dev) 35 | A = torch.matmul(A.t(), A) # need a positive semi-definite matrix for cg 36 | Aop = pylops_gpu.MatrixMult(A, dtype=torch.float32) 37 | 38 | x = torch.ones(N, dtype=torch.float32).to(dev) 39 | 40 | ############################################################################### 41 | # We can now apply the forward operator to create the data vector :math:`\mathbf{y}` 42 | # and use ``/`` to solve the system by means of an explicit solver. 43 | # If you prefer to customize the solver (e.g., choosing the number of 44 | # iterations) use the method ``div`` instead. 45 | y = Aop * x 46 | xest = Aop / y 47 | xest = Aop.div(y, niter=2*N) 48 | 49 | print('x', x) 50 | print('xest', xest) --------------------------------------------------------------------------------
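(Illustrative aside, not a repository file: the conjugate-gradient solver imported in the example above can also be called directly; a minimal sketch, reusing ``Aop``, ``y`` and ``N`` from the script and the call pattern used in the tests, where ``cg`` returns a tuple whose first element is the solution:)

xest_cg = cg(Aop, y, niter=2*N)[0]  # explicit call instead of Aop.div(y)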
/docs/source/api/index.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | PyLops-GPU API 4 | ============== 5 | 6 | 7 | Linear operators 8 | ---------------- 9 | 10 | Templates 11 | ~~~~~~~~~ 12 | .. automodule:: pylops_gpu 13 | 14 | .. currentmodule:: pylops_gpu 15 | 16 | .. autosummary:: 17 | :toctree: generated/ 18 | 19 | LinearOperator 20 | TorchOperator 21 | 22 | 23 | Basic operators 24 | ~~~~~~~~~~~~~~~ 25 | 26 | .. currentmodule:: pylops_gpu 27 | 28 | .. autosummary:: 29 | :toctree: generated/ 30 | 31 | MatrixMult 32 | Identity 33 | Diagonal 34 | VStack 35 | 36 | 37 | Smoothing and derivatives 38 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 39 | 40 | .. autosummary:: 41 | :toctree: generated/ 42 | 43 | FirstDerivative 44 | SecondDerivative 45 | Laplacian 46 | 47 | 48 | Signal processing 49 | ~~~~~~~~~~~~~~~~~ 50 | 51 | .. currentmodule:: pylops_gpu.signalprocessing 52 | 53 | .. autosummary:: 54 | :toctree: generated/ 55 | 56 | Convolve1D 57 | 58 | 59 | Solvers 60 | ------- 61 | 62 | Low-level solvers 63 | ~~~~~~~~~~~~~~~~~ 64 | 65 | .. currentmodule:: pylops_gpu.optimization.cg 66 | 67 | .. autosummary:: 68 | :toctree: generated/ 69 | 70 | cg 71 | cgls 72 | 73 | Least-squares 74 | ~~~~~~~~~~~~~ 75 | 76 | .. currentmodule:: pylops_gpu.optimization 77 | 78 | .. autosummary:: 79 | :toctree: generated/ 80 | 81 | leastsquares.NormalEquationsInversion 82 | 83 | Sparsity 84 | ~~~~~~~~ 85 | 86 | .. autosummary:: 87 | :toctree: generated/ 88 | 89 | sparsity.FISTA 90 | sparsity.SplitBregman 91 | 92 | 93 | Applications 94 | ------------ 95 | 96 | Geophysical subsurface characterization 97 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 98 | 99 | .. currentmodule:: pylops.avo 100 | 101 | .. autosummary:: 102 | :toctree: generated/ 103 | 104 | poststack.PoststackInversion --------------------------------------------------------------------------------
/pylops_gpu/utils/torch2numpy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | 5 | 6 | def numpytype_from_strtype(strtype): 7 | """Convert str into equivalent numpy type 8 | 9 | Parameters 10 | ---------- 11 | strtype : :obj:`str` 12 | String type 13 | 14 | Returns 15 | ------- 16 | numpytype : :obj:`numpy.dtype` 17 | Numpy equivalent type 18 | 19 | """ 20 | numpytype = np.dtype(strtype) 21 | return numpytype 22 | 23 | 24 | def numpytype_from_torchtype(torchtype): 25 | """Convert torch type into equivalent numpy type 26 | 27 | Parameters 28 | ---------- 29 | torchtype : :obj:`torch.dtype` 30 | Torch type 31 | 32 | Returns 33 | ------- 34 | numpytype : :obj:`numpy.dtype` 35 | Numpy equivalent type 36 | 37 | """ 38 | if isinstance(torchtype, torch.dtype): 39 | numpytype = torch.scalar_tensor(1, dtype=torchtype).numpy().dtype 40 | else: 41 | # in case it is already a numpy dtype 42 | numpytype = torchtype 43 | return numpytype 44 | 45 | 46 | def torchtype_from_numpytype(numpytype): 47 | """Convert numpy type into equivalent torch type 48 | 49 | Parameters 50 | ---------- 51 | numpytype : :obj:`numpy.dtype` 52 | Numpy type 53 | 54 | Returns 55 | ------- 56 | torchtype : :obj:`torch.dtype` 57 | Torch equivalent type 58 | 59 | Notes 60 | ----- 61 | Given limitations of torch to handle complex numbers, complex numpy types 62 | are cast into equivalent real types and the equivalent torch type is 63 | returned. 64 | 65 | """ 66 | if isinstance(numpytype, torch.dtype): 67 | # in case it is already a torch dtype 68 | torchtype = numpytype 69 | else: 70 | torchtype = \ 71 | torch.from_numpy(np.real(np.ones(1, dtype=numpytype_from_strtype(numpytype)))).dtype 72 | return torchtype --------------------------------------------------------------------------------
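(Illustrative aside, not a repository file: a short round trip through the conversion helpers above; the asserted values follow the mapping exercised in pytests/test_utils.py, including the complex-to-real fallback described in the Notes:)

import numpy as np
import torch
from pylops_gpu.utils.torch2numpy import (numpytype_from_torchtype,
                                          torchtype_from_numpytype)

assert torchtype_from_numpytype(np.float32) == torch.float32
assert numpytype_from_torchtype(torch.float64) == np.float64
# complex numpy types fall back to the matching real torch type
assert torchtype_from_numpytype(np.complex64) == torch.float32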
/pytests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | import numpy as np 4 | 5 | from numpy.testing import assert_array_equal 6 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype, \ 7 | numpytype_from_torchtype 8 | from pylops_gpu.utils.complex import * 9 | 10 | par1 = {'dims': (5,)} # 1d 11 | par2 = {'dims': (5, 3)} # 2d 12 | 13 | 14 | def test_typeconversion(): 15 | """Verify numpy to torch (and vice versa) type conversions 16 | """ 17 | numpytypes = [np.float32, np.float64, np.int16, np.int32] 18 | torchtypes = [torch.float32, torch.float64, torch.int16, torch.int32] 19 | for numpytype, torchtype in zip(numpytypes, torchtypes): 20 | torchtype_check = torchtype_from_numpytype(numpytype) 21 | numpytype_check = numpytype_from_torchtype(torchtype) 22 | assert numpytype_check == numpytype 23 | assert torchtype_check == torchtype 24 | 25 | 26 | """ 27 | @pytest.mark.parametrize("par", [(par1), (par2)]) 28 | def test_complex_attrs(par): 29 | #Compare attributes of numpy complex and torch ComplexTensor 30 | x = np.ones(par['dims'], dtype=np.float32) + \ 31 | 3j * np.ones(par['dims'], dtype=np.float32) 32 | y = 2*np.ones(par['dims'], dtype=np.float32) - \ 33 | 1j * np.ones(par['dims'], dtype=np.float32) 34 | sum = x + y 35 | sub = x - y 36 | mul = x * y 37 | xc = x.conjugate() 38 | 39 | xt = complextorch_fromnumpy(x) 40 | yt = complextorch_fromnumpy(y) 41 | sumt = xt + yt 42 | subt = xt - yt 43 | mult = xt * yt 44 | xct = conj(xt) 45 | xflattened = flatten(xt) 46 | 47 | 48 | assert_array_equal(np.abs(x), xt.abs().numpy().squeeze()) # abs 49 | assert_array_equal(sum, complexnumpy_fromtorch(sumt)) # sum 50 | assert_array_equal(sub, complexnumpy_fromtorch(subt)) # sub 51 | assert_array_equal(mul, complexnumpy_fromtorch(mult)) # mul 52 | assert_array_equal(xc, complexnumpy_fromtorch(xct)) # conj 53 | assert xflattened.shape[1] == np.prod(np.array(par['dims'])) # flatten 54 | """ --------------------------------------------------------------------------------
/pytests/test_torchoperator.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from numpy.testing import assert_array_equal, assert_array_almost_equal 4 | from pylops_gpu.utils.backend import device 5 | from pylops_gpu.utils.torch2numpy import * 6 | from pylops_gpu import TorchOperator, MatrixMult 7 | 8 | par1 = {'ny': 11, 'nx': 11, 9 | 'dtype': torch.float32} # square 10 | par2 = {'ny': 21, 'nx': 11, 11 | 'dtype': torch.float32} # overdetermined 12 | 13 | dev = device() 14 | np.random.seed(0) 15 | torch.manual_seed(0) 16 | 17 | 18 | @pytest.mark.parametrize("par", [(par1)]) 19 | def test_TorchOperator(par): 20 | """Apply forward and gradient. As for linear operators the gradient 21 | must equal the adjoint of the operator applied to the same vector, the two 22 | results are also checked to be the same.
23 | """ 24 | Dop = MatrixMult(torch.randn(par['ny'], par['nx']), device=dev) 25 | Top = TorchOperator(Dop, batch=False, pylops=False) 26 | 27 | x = torch.randn(par['nx']).to(dev) 28 | xt = x.view(-1) 29 | xt.requires_grad = True 30 | v = torch.randn(par['ny']).to(dev) 31 | 32 | # pylops-gpu operator 33 | y = Dop * x 34 | xadj = Dop.H * v 35 | 36 | # torch operator 37 | yt = Top.apply(xt) 38 | yt.backward(v, retain_graph=True) 39 | 40 | assert_array_equal(y.detach().cpu().numpy(), yt.detach().cpu().numpy()) 41 | assert_array_equal(xadj.detach().cpu().numpy(), xt.grad.cpu().numpy()) 42 | 43 | 44 | @pytest.mark.parametrize("par", [(par1)]) 45 | def test_TorchOperator_batch(par): 46 | """Apply forward for input with multiple samples (= batch) 47 | """ 48 | Dop = MatrixMult(torch.randn(par['ny'], par['nx']), device=dev) 49 | Top = TorchOperator(Dop, batch=True, pylops=False) 50 | 51 | x = torch.randn((4, par['nx'])).to(dev) 52 | x1 = x.T 53 | 54 | y = Dop.matmat(x, kfirst=True) 55 | y1 = Dop.matmat(x1, kfirst=False) 56 | yt = Top.apply(x) 57 | 58 | assert_array_equal(y.cpu().numpy(), y1.T.cpu().numpy()) 59 | assert_array_equal(y.cpu().numpy(), yt.cpu().numpy()) 60 | -------------------------------------------------------------------------------- /docs/source/_templates/breadcrumbs.html: -------------------------------------------------------------------------------- 1 | {# Extend the RTD template to include "Edit on Github" and option to download 2 | notebook generated pages from nbsphinx #} 3 | {% extends "!breadcrumbs.html" %} 4 | 5 | 6 | {% block breadcrumbs_aside %} 7 | 34 | {% endblock %} 35 | -------------------------------------------------------------------------------- /docs/source/_static/style.css: -------------------------------------------------------------------------------- 1 | /* To stick the footer to the bottom of the page */ 2 | html { 3 | } 4 | 5 | body { 6 | font-family: 'Open Sans', sans-serif; 7 | } 8 | 9 | h1, h2, h3, h4 { 10 | font-weight: 300; 11 | font-family: "Open Sans",sans-serif; 12 | } 13 | 14 | h1 { 15 | font-size: 200%; 16 | } 17 | 18 | .sidebar-title { 19 | margin-top: 10px; 20 | margin-bottom: 0px; 21 | } 22 | 23 | .banner { 24 | padding-bottom: 60px; 25 | text-align: center; 26 | } 27 | 28 | .banner img { 29 | margin-bottom: 40px; 30 | } 31 | 32 | .api-module { 33 | margin-bottom: 80px; 34 | } 35 | 36 | .youtube-embed { 37 | max-width: 600px; 38 | margin-bottom: 24px; 39 | } 40 | 41 | .video-container { 42 | position:relative; 43 | padding-bottom:56.25%; 44 | padding-top:30px; 45 | height:0; 46 | overflow:hidden; 47 | } 48 | 49 | .video-container iframe, .video-container object, .video-container embed { 50 | position:absolute; 51 | top:0; 52 | left:0; 53 | width:100%; 54 | height:100%; 55 | } 56 | 57 | .wy-nav-content { 58 | max-width: 1000px; 59 | } 60 | 61 | .wy-nav-top { 62 | background-color: #555555; 63 | } 64 | 65 | .wy-side-nav-search { 66 | background-color: #555555; 67 | } 68 | 69 | .wy-side-nav-search > a img.logo { 70 | width: 50%; 71 | } 72 | 73 | .wy-side-nav-search input[type="text"] { 74 | border-color: #555555; 75 | } 76 | 77 | /* Remove the padding from the Parameters table */ 78 | .rst-content table.field-list .field-name { 79 | padding-left: 0px; 80 | } 81 | 82 | /* Lign up the Parameters section with the descriptions */ 83 | .rst-content table.field-list td { 84 | padding-top: 8px; 85 | } 86 | 87 | .rst-content .highlight > pre { 88 | font-size: 14px; 89 | } 90 | 91 | .rst-content img { 92 | max-width: 100%; 93 | } 94 | 95 | .source-link 
{ 96 | float: right; 97 | } 98 | 99 | .strike { 100 | text-decoration: line-through; 101 | } 102 | 103 | /* Don't let the edit and notebook download links disappear on mobile. */ 104 | @media screen and (max-width: 480px) { 105 | .wy-breadcrumbs li.source-link { 106 | float:none; 107 | display: block; 108 | margin-top: 20px; 109 | } 110 | } 111 | 112 | /* Sphinx-Gallery */ 113 | /****************************************************************************/ 114 | /* Don't let captions be italic */ 115 | .rst-content div.figure p.caption { 116 | font-style: normal; 117 | } --------------------------------------------------------------------------------
/pylops_gpu/basicoperators/Laplacian.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pylops_gpu.basicoperators import SecondDerivative 5 | 6 | 7 | def Laplacian(dims, dirs=(0, 1), weights=(1, 1), sampling=(1, 1), 8 | device='cpu', togpu=(False, False), tocpu=(False, False), 9 | dtype=torch.float32): 10 | r"""Laplacian. 11 | 12 | Apply second-order centered laplacian operator to a multi-dimensional 13 | array (at least 2 dimensions are required) 14 | 15 | Parameters 16 | ---------- 17 | dims : :obj:`tuple` 18 | Number of samples for each dimension. 19 | dirs : :obj:`tuple`, optional 20 | Directions along which laplacian is applied. 21 | weights : :obj:`tuple`, optional 22 | Weight to apply to each direction (real laplacian operator if 23 | ``weights=[1,1]``) 24 | sampling : :obj:`tuple`, optional 25 | Sampling steps ``dx`` and ``dy`` for each direction 26 | device : :obj:`str`, optional 27 | Device to be used 28 | togpu : :obj:`tuple`, optional 29 | Move model and data from cpu to gpu prior to applying ``matvec`` and 30 | ``rmatvec``, respectively (only when ``device='gpu'``) 31 | tocpu : :obj:`tuple`, optional 32 | Move data and model from gpu to cpu after applying ``matvec`` and 33 | ``rmatvec``, respectively (only when ``device='gpu'``) 34 | dtype : :obj:`str`, optional 35 | Type of elements in input array. 36 | 37 | Returns 38 | ------- 39 | l2op : :obj:`pylops.LinearOperator` 40 | Laplacian linear operator 41 | 42 | Notes 43 | ----- 44 | Refer to :class:`pylops.basicoperators.Laplacian` for implementation 45 | details. 46 | 47 | Note that since the Torch implementation is based on a convolution 48 | with a compact filter :math:`[1., -2., 1.]`, edges are treated 49 | differently compared to the PyLops equivalent operator. 50 | 51 | """ 52 | l2op = weights[0]*SecondDerivative(np.prod(dims), dims=dims, dir=dirs[0], 53 | sampling=sampling[0], device=device, 54 | togpu=togpu, tocpu=tocpu, dtype=dtype) 55 | l2op += weights[1]*SecondDerivative(np.prod(dims), dims=dims, dir=dirs[1], 56 | sampling=sampling[1], device=device, 57 | togpu=togpu, tocpu=tocpu, dtype=dtype) 58 | return l2op 59 | --------------------------------------------------------------------------------
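(Illustrative aside, not a repository file: a minimal sketch of applying the ``Laplacian`` factory above to a flattened 2d array; shapes are arbitrary:)

import torch
from pylops_gpu import Laplacian

ny, nx = 30, 20
Lop = Laplacian(dims=(ny, nx), dirs=(0, 1), weights=(1, 1), sampling=(1, 1),
                dtype=torch.float32)
x = torch.randn(ny * nx, dtype=torch.float32)
y = Lop * x       # forward: weighted sum of second derivatives
xadj = Lop.H * y  # adjoint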
/examples/plot_identity.py: -------------------------------------------------------------------------------- 1 | """ 2 | Identity 3 | ======== 4 | This example shows how to use the :py:class:`pylops_gpu.Identity` operator to transfer model 5 | into data and vice versa. 6 | """ 7 | import torch 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | import matplotlib.gridspec as pltgs 11 | 12 | import pylops_gpu 13 | 14 | plt.close('all') 15 | 16 | ############################################################################### 17 | # Let's define an identity operator :math:`\mathbf{I}` with same number of elements for data 18 | # :math:`N` and model :math:`M`. 19 | N, M = 5, 5 20 | x = torch.arange(M, dtype=torch.int) 21 | Iop = pylops_gpu.Identity(M, dtype=torch.int) 22 | 23 | y = Iop * x 24 | xadj = Iop.H * y 25 | 26 | gs = pltgs.GridSpec(1, 6) 27 | fig = plt.figure(figsize=(7, 3)) 28 | ax = plt.subplot(gs[0, 0:3]) 29 | im = ax.imshow(np.eye(N), cmap='rainbow') 30 | ax.set_title('A', size=20, fontweight='bold') 31 | ax.set_xticks(np.arange(N-1)+0.5) 32 | ax.set_yticks(np.arange(M-1)+0.5) 33 | ax.grid(linewidth=3, color='white') 34 | ax.xaxis.set_ticklabels([]) 35 | ax.yaxis.set_ticklabels([]) 36 | ax = plt.subplot(gs[0, 3]) 37 | ax.imshow(x[:, np.newaxis], cmap='rainbow') 38 | ax.set_title('x', size=20, fontweight='bold') 39 | ax.set_xticks([]) 40 | ax.set_yticks(np.arange(M-1)+0.5) 41 | ax.grid(linewidth=3, color='white') 42 | ax.xaxis.set_ticklabels([]) 43 | ax.yaxis.set_ticklabels([]) 44 | ax = plt.subplot(gs[0, 4]) 45 | ax.text(0.35, 0.5, '=', horizontalalignment='center', 46 | verticalalignment='center', size=40, fontweight='bold') 47 | ax.axis('off') 48 | ax = plt.subplot(gs[0, 5]) 49 | ax.imshow(y[:, np.newaxis], cmap='rainbow') 50 | ax.set_title('y', size=20, fontweight='bold') 51 | ax.set_xticks([]) 52 | ax.set_yticks(np.arange(N - 1) + 0.5) 53 | ax.grid(linewidth=3, color='white') 54 | ax.xaxis.set_ticklabels([]) 55 | ax.yaxis.set_ticklabels([]) 56 | fig.colorbar(im, ax=ax, ticks=[0, 1], pad=0.3, shrink=0.7) 57 | 58 | ############################################################################### 59 | # Similarly we can consider the case with data bigger than model 60 | N, M = 10, 5 61 | x = torch.arange(M, dtype=torch.int) 62 | Iop = pylops_gpu.Identity(N, M, dtype=torch.int) 63 | 64 | y = Iop*x 65 | xadj = Iop.H*y 66 | 67 | print('x = %s ' % x) 68 | print('I*x = %s ' % y) 69 | print('I\'*y = %s ' % xadj) 70 | 71 | ############################################################################### 72 | # and model bigger than data 73 | N, M = 5, 10 74 | x = torch.arange(M, dtype=torch.int) 75 | Iop = pylops_gpu.Identity(N, M, dtype=torch.int) 76 | 77 | y = Iop * x 78 | xadj = Iop.H * y 79 | 80 | print('x = %s ' % x) 81 | print('I*x = %s ' % y) 82 | print('I\'*y = %s ' % xadj) 83 | --------------------------------------------------------------------------------
/pylops_gpu/basicoperators/SecondDerivative.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from pylops_gpu import LinearOperator 4 | from pylops_gpu.signalprocessing import Convolve1D 5 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype 6 | 7 | 8 | class SecondDerivative(LinearOperator): 9 | r"""Second derivative. 10 | 11 | Apply second-order centered second derivative. 12 | 13 | Parameters 14 | ---------- 15 | N : :obj:`int` 16 | Number of samples in model. 17 | dims : :obj:`tuple`, optional 18 | Number of samples for each dimension 19 | (``None`` if only one dimension is available) 20 | dir : :obj:`int`, optional 21 | Direction along which the derivative is applied. 22 | sampling : :obj:`float`, optional 23 | Sampling step ``dx``.
24 | device : :obj:`str`, optional 25 | Device to be used 26 | togpu : :obj:`tuple`, optional 27 | Move model and data from cpu to gpu prior to applying ``matvec`` and 28 | ``rmatvec``, respectively (only when ``device='gpu'``) 29 | tocpu : :obj:`tuple`, optional 30 | Move data and model from gpu to cpu after applying ``matvec`` and 31 | ``rmatvec``, respectively (only when ``device='gpu'``) 32 | dtype : :obj:`torch.dtype` or :obj:`np.dtype`, optional 33 | Type of elements in input array. 34 | 35 | Attributes 36 | ---------- 37 | shape : :obj:`tuple` 38 | Operator shape 39 | explicit : :obj:`bool` 40 | Operator contains a matrix that can be solved explicitly (``True``) or 41 | not (``False``) 42 | 43 | Notes 44 | ----- 45 | Refer to :class:`pylops.basicoperators.SecondDerivative` for implementation 46 | details. 47 | 48 | Note that since the Torch implementation is based on a convolution 49 | with a compact filter :math:`[1., -2., 1.]`, edges are treated 50 | differently compared to the PyLops equivalent operator. 51 | 52 | """ 53 | def __init__(self, N, dims=None, dir=0, sampling=1., device='cpu', 54 | togpu=(False, False), tocpu=(False, False), 55 | dtype=torch.float32): 56 | # convert dtype to torch.dtype 57 | dtype = torchtype_from_numpytype(dtype) 58 | 59 | h = torch.tensor([1., -2, 1.], 60 | dtype=dtype).to(device) / sampling**2 61 | self.device = device 62 | self.togpu = togpu 63 | self.tocpu = tocpu 64 | self.shape = (N, N) 65 | self.dtype = dtype 66 | self.explicit = False 67 | self.Op = Convolve1D(N, h, offset=1, dims=dims, dir=dir, 68 | zero_edges=True, device=device, 69 | togpu=togpu, tocpu=tocpu, dtype=dtype) 70 | --------------------------------------------------------------------------------
/examples/plot_convolve.py: -------------------------------------------------------------------------------- 1 | """ 2 | Convolution 3 | =========== 4 | This example shows how to use the :py:class:`pylops_gpu.signalprocessing.Convolve1D` 5 | operator to perform convolution between two signals. 6 | 7 | This example closely follows the equivalent 8 | `PyLops example `_. 9 | 10 | """ 11 | import torch 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import pylops_gpu 15 | 16 | from pylops.utils.wavelets import ricker 17 | from pylops_gpu.utils.backend import device 18 | from pylops_gpu.optimization.cg import cg 19 | 20 | dev = device() 21 | print('PyLops-gpu working on %s...' % dev) 22 | plt.close('all') 23 | 24 | ############################################################################### 25 | # We will start by creating a zero signal of length :math:`nt` and we will 26 | # place a unitary spike at its center. We also create our filter to be 27 | # applied by means of :py:class:`pylops_gpu.signalprocessing.Convolve1D` 28 | # operator.
29 | nt = 1001 30 | dt = 0.004 31 | t = np.arange(nt)*dt 32 | 33 | x = torch.zeros(nt, dtype=torch.float32) 34 | x[int(nt/2)] = 1 35 | 36 | h, th, hcenter = ricker(t[:101], f0=30) 37 | h = torch.from_numpy(h.astype(np.float32)) 38 | Cop = pylops_gpu.signalprocessing.Convolve1D(nt, h=h, offset=hcenter, 39 | dtype=torch.float32) 40 | y = Cop*x 41 | 42 | xinv = Cop / y 43 | 44 | fig, ax = plt.subplots(1, 1, figsize=(10, 3)) 45 | ax.plot(t, x.cpu().numpy(), 'k', lw=2, label=r'$x$') 46 | ax.plot(t, y.cpu().numpy(), 'r', lw=2, label=r'$y=Ax$') 47 | ax.plot(t, xinv.cpu().numpy(), '--g', lw=2, label=r'$x_{ext}$') 48 | ax.set_title('Convolve in 1st direction', fontsize=14, fontweight='bold') 49 | ax.legend() 50 | ax.set_xlim(1.9, 2.1) 51 | 52 | ############################################################################### 53 | # We show now that also a filter with mixed phase (i.e., not centered around zero) 54 | # can be applied and inverted for using the :py:class:`pylops.signalprocessing.Convolve1D` 55 | # operator. 56 | Cop = pylops_gpu.signalprocessing.Convolve1D(nt, h=h, offset=hcenter - 3, 57 | dtype=torch.float32) 58 | y = Cop * x 59 | y1 = Cop.H * x 60 | xinv = cg(Cop.H*Cop, Cop.H*y, niter=100)[0] 61 | 62 | fig, ax = plt.subplots(1, 1, figsize=(10, 3)) 63 | ax.plot(t, x.cpu().numpy(), 'k', lw=2, label=r'$x$') 64 | ax.plot(t, y.cpu().numpy(), 'r', lw=2, label=r'$y=Ax$') 65 | ax.plot(t, y1.cpu().numpy(), 'b', lw=2, label=r'$y=A^Hx$') 66 | ax.plot(t, xinv.cpu().numpy(), '--g', lw=2, label=r'$x_{ext}$') 67 | ax.set_title('Convolve in 1st direction', fontsize=14, fontweight='bold') 68 | ax.set_xlim(1.9, 2.1) 69 | ax.legend() -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | Installation 4 | ============ 5 | 6 | You will need **Python 3.6 or greater** to get started. 7 | 8 | 9 | Dependencies 10 | ------------ 11 | 12 | Our mandatory dependencies are limited to: 13 | 14 | * `numpy `_ 15 | * `scipy `_ 16 | * `pytorch `_ 17 | * `pytorch_complex_tensor `_ 18 | * `pylops `_ 19 | 20 | We advise using the `Anaconda Python distribution `_ 21 | to ensure that these dependencies are installed via the ``Conda`` package manager. 22 | 23 | 24 | Step-by-step installation for users 25 | ----------------------------------- 26 | 27 | Activate your Python environment, and simply type the following command in your terminal 28 | to install the PyPi distribution: 29 | 30 | .. code-block:: bash 31 | 32 | >> pip install pylops-gpu 33 | 34 | Alternatively, to access the latest source from github: 35 | 36 | .. code-block:: bash 37 | 38 | >> pip install https://git@github.com/PyLops/pylops-gpu.git@master 39 | 40 | or just clone the repository 41 | 42 | .. code-block:: bash 43 | 44 | >> git clone https://github.com/equinor/pylops-gpu.git 45 | 46 | or download the zip file from the repository (green button in the top right corner of the 47 | main github repo page) and install PyLops from terminal using the command: 48 | 49 | .. code-block:: bash 50 | 51 | >> make install 52 | 53 | **Note**: you may see an error if `pytorch-complex-tensor` has not been 54 | previously installed. 
In that case first run 55 | `pip install pytorch-complex-tensor` and then install pylops-gpu 56 | 57 | 58 | Step-by-step installation for developers 59 | ---------------------------------------- 60 | Fork and clone the repository by executing the following in your terminal: 61 | 62 | .. code-block:: bash 63 | 64 | >> git clone https://github.com/your_name_here/pylops-gpu.git 65 | 66 | The first time you clone the repository run the following command: 67 | 68 | .. code-block:: bash 69 | 70 | >> make dev-install 71 | 72 | If you prefer to build a new Conda environment just for PyLops, run the following command: 73 | 74 | .. code-block:: bash 75 | 76 | >> make dev-install_conda 77 | 78 | To ensure that everything has been set up correctly, run tests: 79 | 80 | .. code-block:: bash 81 | 82 | >> make tests 83 | 84 | Make sure no tests fail; this guarantees that the installation has been successful. 85 | 86 | If using a Conda environment, always remember to activate the conda environment every time you open 87 | a new *bash* shell by typing: 88 | 89 | .. code-block:: bash 90 | 91 | >> source activate pylops-gpu --------------------------------------------------------------------------------
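(Illustrative aside, not a repository file: a minimal sketch of the ``VStack`` operator whose source follows, mirroring pytests/test_combine.py; sizes are arbitrary:)

import torch
from pylops_gpu import MatrixMult, VStack

G1 = torch.randn(10, 5, dtype=torch.float32)
G2 = torch.randn(10, 5, dtype=torch.float32)
# stack two 10x5 operators into a single 20x5 operator
Vop = VStack([MatrixMult(G1, dtype=torch.float32),
              MatrixMult(G2, dtype=torch.float32)], dtype=torch.float32)
x = torch.ones(5, dtype=torch.float32)
y = Vop * x       # first 10 entries apply G1, last 10 apply G2
xadj = Vop.H * y  # adjoint sums the contributions of both blocks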
/pylops_gpu/basicoperators/VStack.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import scipy as sp 4 | 5 | # need to check scipy version since the interface submodule changed into 6 | # _interface from scipy>=1.8.0 7 | sp_version = sp.__version__.split(".") 8 | if int(sp_version[0]) <= 1 and int(sp_version[1]) < 8: 9 | from scipy.sparse.linalg.interface import _get_dtype 10 | else: 11 | from scipy.sparse.linalg._interface import _get_dtype 12 | from pylops_gpu.LinearOperator import LinearOperator 13 | 14 | 15 | class VStack(LinearOperator): 16 | r"""Vertical stacking. 17 | 18 | Stack a set of N linear operators vertically. 19 | 20 | Parameters 21 | ---------- 22 | ops : :obj:`list` 23 | Linear operators to be stacked 24 | device : :obj:`str`, optional 25 | Device to be used 26 | togpu : :obj:`tuple`, optional 27 | Move model and data from cpu to gpu prior to applying ``matvec`` and 28 | ``rmatvec``, respectively (only when ``device='gpu'``) 29 | tocpu : :obj:`tuple`, optional 30 | Move data and model from gpu to cpu after applying ``matvec`` and 31 | ``rmatvec``, respectively (only when ``device='gpu'``) 32 | dtype : :obj:`str`, optional 33 | Type of elements in input array 34 | 35 | Attributes 36 | ---------- 37 | shape : :obj:`tuple` 38 | Operator shape 39 | explicit : :obj:`bool` 40 | Operator contains a matrix that can be solved explicitly (``True``) or 41 | not (``False``) 42 | 43 | Notes 44 | ----- 45 | Refer to :class:`pylops.basicoperators.VStack` for 46 | implementation details. 47 | 48 | """ 49 | def __init__(self, ops, device='cpu', togpu=(False, False), 50 | tocpu=(False, False), dtype=torch.float32): 51 | self.ops = ops 52 | nops = np.zeros(len(ops), dtype=int) 53 | for iop, oper in enumerate(ops): 54 | nops[iop] = oper.shape[0] 55 | self.nops = nops.sum() 56 | self.mops = ops[0].shape[1] 57 | self.nnops = np.insert(np.cumsum(nops), 0, 0) 58 | self.shape = (self.nops, self.mops) 59 | self.device = device 60 | self.togpu = togpu 61 | self.tocpu = tocpu 62 | self.dtype = dtype 63 | self.explicit = False 64 | self.Op = None 65 | 66 | def _matvec(self, x): 67 | y = torch.zeros(self.nops, dtype=self.dtype) 68 | for iop, oper in enumerate(self.ops): 69 | y[self.nnops[iop]:self.nnops[iop + 1]] = oper.matvec(x).squeeze() 70 | return y 71 | 72 | def _rmatvec(self, x): 73 | y = torch.zeros(self.mops, dtype=self.dtype) 74 | for iop, oper in enumerate(self.ops): 75 | y += oper.rmatvec(x[self.nnops[iop]:self.nnops[iop + 1]]).squeeze() 76 | return y 77 | --------------------------------------------------------------------------------
/pytests/test_diagonal.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | from numpy.testing import assert_array_almost_equal 5 | from pylops_gpu.utils import dottest 6 | from pylops_gpu.utils.backend import device 7 | from pylops_gpu.utils.complex import * 8 | from pylops_gpu.basicoperators import Diagonal 9 | from pylops_gpu.optimization.cg import cg 10 | 11 | 12 | par1 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 0, 13 | 'dtype': 'float32'} # real 14 | par2 = {'ny': 21, 'nx': 11, 'nt': 20, 'imag': 1j, 15 | 'dtype': 'float32'} # complex 16 | 17 | dev = device() 18 | np.random.seed(0) 19 | torch.manual_seed(0) 20 | 21 | 22 | @pytest.mark.parametrize("par", [(par1)])#, (par2)]) 23 | def test_Diagonal_1dsignal(par): 24 | """Dot-test and inversion for Diagonal operator for 1d signal 25 | """ 26 | for ddim in (par['nx'], par['nt']): 27 | d = (np.arange(0, ddim, dtype=par['dtype']) + 1.) + \ 28 | par['imag']*(np.arange(0, ddim, dtype=par['dtype']) + 1.) 29 | if par['imag'] == 0: 30 | d = torch.from_numpy(d).to(dev) 31 | else: 32 | d = complextorch_fromnumpy(d).to(dev) 33 | 34 | Dop = Diagonal(d, dtype=d.dtype) 35 | assert dottest(Dop, ddim, ddim, tol=1e-4, 36 | complexflag=0 if par['imag'] == 0 else 3) 37 | 38 | x = np.ones(ddim, dtype=par['dtype']) + \ 39 | par['imag'] * np.ones(ddim, dtype=par['dtype']) 40 | if par['imag'] == 0: 41 | x = torch.from_numpy(x).to(dev) 42 | else: 43 | x = complextorch_fromnumpy(x).to(dev) 44 | xcg = cg(Dop, Dop * x, niter=ddim)[0] 45 | assert_array_almost_equal(x.numpy(), xcg.cpu().numpy(), decimal=4) 46 | 47 | 48 | @pytest.mark.parametrize("par", [(par1)])#, (par2)]) 49 | def test_Diagonal_2dsignal(par): 50 | """Dot-test and inversion for Diagonal operator for 2d signal 51 | """ 52 | for idim, ddim in enumerate((par['nx'], par['nt'])): 53 | d = (np.arange(0, ddim, dtype=par['dtype']) + 1.) + \ 54 | par['imag'] * (np.arange(0, ddim, dtype=par['dtype']) + 1.)
55 | if par['imag'] == 0: 56 | d = torch.from_numpy(d).to(dev) 57 | else: 58 | d = complextorch_fromnumpy(d).to(dev) 59 | 60 | Dop = Diagonal(d, dims=(par['nx'], par['nt']), 61 | dir=idim, dtype=par['dtype']) 62 | assert dottest(Dop, par['nx']*par['nt'], par['nx']*par['nt'], tol=1e-4, 63 | complexflag=0 if par['imag'] == 0 else 3) 64 | 65 | x = np.ones((par['nx'], par['nt']), dtype=par['dtype']) + \ 66 | par['imag'] * np.ones((par['nx'], par['nt']), dtype=par['dtype']) 67 | if par['imag'] == 0: 68 | x = torch.from_numpy(x).to(dev) 69 | else: 70 | x = complextorch_fromnumpy(x).to(dev) 71 | xcg = cg(Dop, Dop * x.flatten(), niter=Dop.shape[0])[0] 72 | assert_array_almost_equal(x.flatten().numpy(), 73 | xcg.flatten().cpu().numpy(), decimal=4) 74 | -------------------------------------------------------------------------------- /pylops_gpu/basicoperators/FirstDerivative.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from pylops_gpu import LinearOperator 4 | from pylops_gpu.signalprocessing import Convolve1D 5 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype 6 | 7 | 8 | """ 9 | def FirstDerivative(N, dims=None, dir=0, sampling=1., device='cpu', 10 | togpu=(False, False), tocpu=(False, False), 11 | dtype=torch.float32): 12 | 13 | h = torch.tensor([0.5, 0, -0.5], dtype=dtype).to(device) / sampling 14 | dop = Convolve1D(N, h, offset=1, dims=dims, dir=dir, device=device, 15 | togpu=togpu, tocpu=tocpu, dtype=dtype) 16 | return dop 17 | """ 18 | 19 | class FirstDerivative(LinearOperator): 20 | r"""First derivative. 21 | 22 | Apply a second-order centered first derivative. 23 | 24 | Parameters 25 | ---------- 26 | N : :obj:`int` 27 | Number of samples in model. 28 | dims : :obj:`tuple`, optional 29 | Number of samples for each dimension 30 | (``None`` if only one dimension is available) 31 | dir : :obj:`int`, optional 32 | Direction along which the derivative is applied. 33 | sampling : :obj:`float`, optional 34 | Sampling step ``dx``. 35 | device : :obj:`str`, optional 36 | Device to be used 37 | togpu : :obj:`tuple`, optional 38 | Move model and data from cpu to gpu prior to applying ``matvec`` and 39 | ``rmatvec``, respectively (only when ``device='gpu'``) 40 | tocpu : :obj:`tuple`, optional 41 | Move data and model from gpu to cpu after applying ``matvec`` and 42 | ``rmatvec``, respectively (only when ``device='gpu'``) 43 | dtype : :obj:`torch.dtype` or :obj:`np.dtype`, optional 44 | Type of elements in input array. 45 | 46 | Attributes 47 | ---------- 48 | shape : :obj:`tuple` 49 | Operator shape 50 | explicit : :obj:`bool` 51 | Operator contains a matrix that can be solved explicitly (``True``) or 52 | not (``False``) 53 | 54 | Notes 55 | ----- 56 | Refer to :class:`pylops.basicoperators.FirstDerivative` for implementation 57 | details. 58 | 59 | Note that since the Torch implementation is based on a convolution 60 | with a compact filter :math:`[0.5, 0., -0.5]`, edges are treated 61 | differently compared to the PyLops equivalent operator.
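    Away from the edges (which are zeroed out here via ``zero_edges=True``),
    the forward pass applies the centered stencil
    :math:`y_i = (x_{i+1} - x_{i-1}) / (2 \Delta x)`, with :math:`\Delta x`
    given by ``sampling``.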
62 | 63 | """ 64 | def __init__(self, N, dims=None, dir=0, sampling=1., device='cpu', 65 | togpu=(False, False), tocpu=(False, False), 66 | dtype=torch.float32): 67 | # convert dtype to torch.dtype 68 | dtype = torchtype_from_numpytype(dtype) 69 | 70 | h = torch.torch.tensor([0.5, 0, -0.5], 71 | dtype=dtype).to(device) / sampling 72 | self.device = device 73 | self.togpu = togpu 74 | self.tocpu = tocpu 75 | self.shape = (N, N) 76 | self.dtype = dtype 77 | self.explicit = False 78 | self.Op = Convolve1D(N, h, offset=1, dims=dims, dir=dir, 79 | zero_edges=True, device=device, 80 | togpu=togpu, tocpu=tocpu, dtype=self.dtype) 81 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | PyLops-GPU 2 | ========== 3 | 4 | .. note:: This library is under early development. 5 | 6 | Expect things to constantly change until version v1.0.0. 7 | 8 | This library is an extension of `PyLops `_ 9 | to run operators on GPUs. 10 | 11 | As much as `numpy `_ and 12 | `scipy `_ lie at the core of the parent project 13 | PyLops, PyLops-GPU heavily builds on top of `PyTorch `_ 14 | and takes advantage of the same optimized tensor computations used in PyTorch 15 | for deep learning using GPUs and CPUs. Doing so, linear operators can be computed on GPUs. 16 | 17 | Here is a simple example showing how a diagonal operator can be created, 18 | applied and inverted using PyLops: 19 | 20 | .. code-block:: python 21 | 22 | import numpy as np 23 | from pylops import Diagonal 24 | 25 | n = int(1e6) 26 | x = np.ones(n) 27 | d = np.arange(n) + 1. 28 | 29 | Dop = Diagonal(d) 30 | 31 | # y = Dx 32 | y = Dop*x 33 | 34 | and similarly using PyLops-GPU: 35 | 36 | .. code-block:: python 37 | 38 | import numpy as np 39 | import torch 40 | from pylops_gpu.utils.backend import device 41 | from pylops_gpu import Diagonal 42 | 43 | dev = device() # will return 'gpu' if GPU is available 44 | 45 | n = int(1e6) 46 | x = torch.ones(n, dtype=torch.float64).to(dev) 47 | d = (torch.arange(0, n, dtype=torch.float64) + 1.).to(dev) 48 | 49 | Dop = Diagonal(d, device=dev) 50 | 51 | # y = Dx 52 | y = Dop*x 53 | 54 | Running these two snippets of code in Google Colab with GPU enabled gives a 50+ 55 | speed up for the forward pass. 56 | 57 | As a by-product of implementing PyLops linear operators in PyTorch, we can easily 58 | chain our operators with any nonlinear mathematical operation (e.g., log, sin, tan, pow, ...) 59 | as well as with operators from the ``torch.nn`` submodule and obtain *Automatic 60 | Differentiation* (AD) for the entire chain. Since the gradient of a linear 61 | operator is simply its *adjoint*, we have implemented a single class, 62 | :py:class:`pylops_gpu.TorchOperator`, which can wrap any linear operator 63 | from PyLops and PyLops-gpu libraries and return a :py:class:`torch.autograd.Function` object. 64 | 65 | 66 | History 67 | ------- 68 | PyLops-GPU was initially written and it is currently maintained by `Equinor `_ 69 | It is an extension of `PyLops `_ for large-scale optimization with 70 | *GPU*-powered linear operators that can be tailored to our needs, and as contribution to the free software community. 71 | 72 | 73 | .. toctree:: 74 | :maxdepth: 1 75 | :hidden: 76 | :caption: Getting started: 77 | 78 | installation.rst 79 | tutorials/index.rst 80 | 81 | .. 
toctree:: 82 | :maxdepth: 2 83 | :hidden: 84 | :caption: Reference documentation: 85 | 86 | api/index.rst 87 | api/others.rst 88 | 89 | .. toctree:: 90 | :maxdepth: 1 91 | :hidden: 92 | :caption: Getting involved: 93 | 94 | Contributing 95 | Changelog 96 | Roadmap 97 | Credits 98 | 99 | -------------------------------------------------------------------------------- /pytests/test_matrixmult.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | from numpy.testing import assert_array_almost_equal 5 | from pylops_gpu.utils.backend import device 6 | from pylops_gpu.utils import dottest 7 | from pylops_gpu.utils.complex import * 8 | from pylops_gpu.utils.torch2numpy import * 9 | from pylops_gpu import MatrixMult 10 | from pylops_gpu.optimization.cg import cg 11 | 12 | 13 | par1 = {'ny': 7, 'nx': 7, 'imag': 0, 14 | 'dtype': 'float32'} # square real 15 | par2 = {'ny': 9, 'nx': 7, 'imag': 0, 16 | 'dtype': 'float32'} # overdetermined real 17 | par1j = {'ny': 9, 'nx': 7, 'imag': 1j, 18 | 'dtype': 'float32'} # square complex 19 | par2j = {'ny': 9, 'nx': 7, 'imag': 1j, 20 | 'dtype': 'float32'} # overdetermined complex 21 | 22 | dev = device() 23 | np.random.seed(0) 24 | torch.manual_seed(0) 25 | 26 | 27 | @pytest.mark.parametrize("par", [(par1), (par2)])#, (par1j), (par2j)]) 28 | def test_MatrixMult(par): 29 | """Dot-test and inversion for MatrixMult operator 30 | """ 31 | np.random.seed(10) 32 | G = np.random.normal(0, 10, (par['ny'], 33 | par['nx'])).astype(par['dtype']) + \ 34 | par['imag']*np.random.normal(0, 10, (par['ny'], 35 | par['nx'])).astype(par['dtype']) 36 | if par['imag'] == 0: 37 | G = torch.from_numpy(G).to(dev) 38 | else: 39 | G = complextorch_fromnumpy(G).to(dev) 40 | Gop = MatrixMult(G, dtype=G.dtype) 41 | assert dottest(Gop, par['ny'], par['nx'], tol=1e-4, 42 | complexflag=0 if par['imag'] == 0 else 3) 43 | 44 | x = np.ones(par['nx'], dtype=par['dtype']) + \ 45 | par['imag']*np.ones(par['nx'], dtype=par['dtype']) 46 | if par['imag'] == 0: 47 | x = torch.from_numpy(x).to(dev) 48 | else: 49 | x = complextorch_fromnumpy(x).to(dev) 50 | y = Gop * x 51 | xcg = cg(Gop.H * Gop, Gop.H * y, niter=2*par['nx'])[0] 52 | if par['imag'] == 0: # need to also get test to work with complex numbers! 
53 | assert_array_almost_equal(x.numpy(), xcg.numpy(), decimal=3) 54 | 55 | 56 | @pytest.mark.parametrize("par", [(par1), (par2)])#, (par1j), (par2j)]) 57 | def test_MatrixMult_repeated(par): 58 | """Dot-test and inversion for test_MatrixMult operator repeated 59 | along another dimension 60 | """ 61 | np.random.seed(10) 62 | G = np.random.normal(0, 10, (par['ny'], par['nx'])).astype(par['dtype']) + \ 63 | par['imag'] * np.random.normal(0, 10, (par['ny'], 64 | par['nx'])).astype(par['dtype']) 65 | if par['imag'] == 0: 66 | G = torch.from_numpy(G).to(dev) 67 | else: 68 | G = complextorch_fromnumpy(G).to(dev) 69 | Gop = MatrixMult(G, dims=5, dtype=G.dtype) 70 | assert dottest(Gop, par['ny']*5, par['nx']*5, tol=1e-4, 71 | complexflag=0 if par['imag'] == 0 else 3) 72 | 73 | x = (np.ones((par['nx'], 5), dtype=par['dtype'])).flatten() +\ 74 | (par['imag'] * np.ones((par['nx'], 5), dtype=par['dtype'])).flatten() 75 | if par['imag'] == 0: 76 | x = torch.from_numpy(x).to(dev) 77 | else: 78 | x = complextorch_fromnumpy(x).to(dev) 79 | y = Gop * x 80 | xcg = cg(Gop.H * Gop, Gop.H * y, niter=2 * par['nx'])[0] 81 | if par['imag'] == 0: 82 | assert_array_almost_equal(x.numpy(), xcg.numpy(), decimal=3) 83 | -------------------------------------------------------------------------------- /pytests/test_sparsity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | import numpy as np 5 | from numpy.testing import assert_array_almost_equal 6 | 7 | from pylops_gpu.utils.backend import device 8 | from pylops_gpu.basicoperators import MatrixMult, Identity, FirstDerivative 9 | from pylops_gpu.optimization.sparsity import FISTA, SplitBregman 10 | 11 | par1 = {'ny': 11, 'nx': 11, 'imag': 0, 'x0': False, 12 | 'dtype': 'float64'} # square real, zero initial guess 13 | par2 = {'ny': 11, 'nx': 11, 'imag': 0, 'x0': True, 14 | 'dtype': 'float64'} # square real, non-zero initial guess 15 | par3 = {'ny': 31, 'nx': 11, 'imag': 0, 'x0':False, 16 | 'dtype':'float64'} # overdetermined real, zero initial guess 17 | par4 = {'ny': 31, 'nx': 11, 'imag': 0, 'x0': True, 18 | 'dtype': 'float64'} # overdetermined real, non-zero initial guess 19 | par5 = {'ny': 21, 'nx': 41, 'imag': 0, 'x0': True, 20 | 'dtype': 'float64'} # underdetermined real, non-zero initial guess 21 | par1j = {'ny': 11, 'nx': 11, 'imag': 1j, 'x0': False, 22 | 'dtype': 'complex64'} # square complex, zero initial guess 23 | par2j = {'ny': 11, 'nx': 11, 'imag': 1j, 'x0': True, 24 | 'dtype': 'complex64'} # square complex, non-zero initial guess 25 | par3j = {'ny': 31, 'nx': 11, 'imag': 1j, 'x0':False, 26 | 'dtype':'complex64'} # overdetermined complex, zero initial guess 27 | par4j = {'ny': 31, 'nx': 11, 'imag': 1j, 'x0': True, 28 | 'dtype': 'complex64'} # overdetermined complex, non-zero initial guess 29 | par5j = {'ny': 21, 'nx': 41, 'imag': 1j, 'x0': True, 30 | 'dtype': 'complex64'} # underdetermined complex, non-zero initial guess 31 | 32 | dev = device() 33 | 34 | 35 | @pytest.mark.parametrize("par", [(par1), (par3), (par5), 36 | (par1j), (par3j), (par5j)]) 37 | def test_FISTA(par): 38 | """Invert problem with FISTA 39 | """ 40 | np.random.seed(42) 41 | A = np.random.randn(par['ny'], par['nx']).astype(np.float32) 42 | Aop = MatrixMult(torch.from_numpy(A).to(dev), device=dev) 43 | 44 | x = torch.zeros(par['nx']) 45 | x[par['nx'] // 2] = 1 46 | x[3] = 1 47 | x[par['nx'] - 4] = -1 48 | y = Aop * x 49 | 50 | eps = 0.5 51 | maxit = 2000 52 | 53 | # FISTA 54 | xinv, _, _, _, _ = FISTA(Aop, y, 
maxit, eps=eps, eigsiter=100, 55 | tol=0, returninfo=True) 56 | assert_array_almost_equal(x.cpu().numpy(), xinv.cpu().numpy(), decimal=1) 57 | 58 | 59 | @pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)]) 60 | def test_SplitBregman(par): 61 | """Invert denoise problem with SplitBregman 62 | """ 63 | np.random.seed(42) 64 | nx = 3 * par['nx'] # need enough samples for TV regularization to be effective 65 | Iop = Identity(nx) 66 | Dop = FirstDerivative(nx) 67 | 68 | x = torch.zeros(nx) 69 | x[:nx // 2] = 10 70 | x[nx // 2:3 * nx // 4] = -5 71 | n = torch.from_numpy(np.random.normal(0, 1, nx).astype(np.float32)) 72 | y = x + n 73 | 74 | mu = 0.01 75 | lamda = 0.2 76 | niter_end = 100 77 | niter_in = 3 78 | x0 = torch.ones(nx) 79 | xinv, _ = SplitBregman(Iop, [Dop], y, niter_end, niter_in, 80 | mu=mu, epsRL1s=[lamda], 81 | tol=1e-4, tau=1, 82 | x0=x0 if par['x0'] else None, 83 | restart=False, **dict(niter=5)) 84 | assert (np.linalg.norm(x.cpu().numpy() - xinv.cpu().numpy()) / 85 | np.linalg.norm(x.cpu().numpy())) < 1e-1 86 | -------------------------------------------------------------------------------- /examples/plot_fista.py: -------------------------------------------------------------------------------- 1 | r""" 2 | FISTA 3 | ===== 4 | 5 | This example shows how to use the 6 | :py:class:`pylops_gpu.optimization.sparsity.FISTA` solver. 7 | 8 | This solver can be used when the model to retrieve is supposed to have 9 | a sparse representation in a certain domain. FISTA solves an 10 | unconstrained problem with an L1 regularization term: 11 | 12 | .. math:: 13 | J = ||\mathbf{d} - \mathbf{Op} \mathbf{x}||_2^2 + \epsilon ||\mathbf{x}||_1 14 | 15 | """ 16 | import torch 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | import pylops 20 | import pylops_gpu 21 | 22 | from pylops_gpu.utils.backend import device 23 | 24 | dev = device() 25 | print('PyLops-gpu working on %s...' % dev) 26 | plt.close('all') 27 | 28 | torch.manual_seed(0) 29 | np.random.seed(1) 30 | dtype = torch.float32 31 | 32 | ############################################################################### 33 | # Let's start with a simple example, where we create a dense mixing matrix 34 | # and a sparse signal and we use FISTA to recover such a signal. 35 | # Note that the mixing matrix leads to an underdetermined system of equations 36 | # (:math:`N < M`) so being able to add some extra prior information regarding 37 | # the sparsity of our desired model is essential to be able to invert 38 | # such a system. 39 | 40 | N, M = 15, 20 41 | A = np.random.randn(N, M).astype(np.float32) 42 | Aop = pylops_gpu.MatrixMult(torch.from_numpy(A), device=dev) 43 | 44 | x = torch.from_numpy(np.random.rand(M).astype(np.float32)) 45 | x[x < 0.9] = 0 46 | y = Aop * x 47 | 48 | # FISTA 49 | eps = 0.5 50 | maxit = 1000 51 | x_fista = pylops_gpu.optimization.sparsity.FISTA(Aop, y, maxit, eps=eps, 52 | tol=1e-10)[0] 53 | 54 | fig, ax = plt.subplots(1, 1, figsize=(8, 3)) 55 | ax.stem(x, linefmt='k', basefmt='k', 56 | markerfmt='ko', label='True') 57 | ax.stem(x_fista, linefmt='--r', 58 | markerfmt='ro', label='FISTA') 59 | ax.set_title('Model', size=15, fontweight='bold') 60 | ax.legend() 61 | plt.tight_layout() 62 | 63 | 64 | ############################################################################### 65 | # We now consider a more interesting problem, *wavelet deconvolution*, 66 | # from a signal that we assume to be composed of a train of spikes convolved 67 | # with a certain wavelet.
We will see how solving such a problem with a 68 | # least-squares solver such as 69 | # :py:class:`pylops_gpu.optimization.leastsquares.RegularizedInversion` does 70 | # not produce the expected results (especially in the presence of noisy data); 71 | # conversely, using the :py:class:`pylops_gpu.optimization.sparsity.FISTA` 72 | # solver allows us to successfully retrieve the input signal even 73 | # in the presence of noise. 74 | 75 | nt = 61 76 | dt = 0.004 77 | t = np.arange(nt)*dt 78 | x = np.zeros(nt, dtype=np.float32) 79 | x[10] = -.4 80 | x[int(nt/2)] = 1 81 | x[nt-20] = 0.5 82 | x = torch.from_numpy(x) 83 | 84 | h, th, hcenter = pylops.utils.wavelets.ricker(t[:101], f0=20) 85 | h = torch.from_numpy(h.astype(np.float32)) 86 | Cop = pylops_gpu.signalprocessing.Convolve1D(nt, h=h, offset=int(hcenter), 87 | dtype=dtype) 88 | y = Cop * x 89 | 90 | xls = pylops_gpu.optimization.cg.cg(Cop, y, niter=10, tol=1e-10)[0] 91 | 92 | xfista = \ 93 | pylops_gpu.optimization.sparsity.FISTA(Cop, y, niter=400, eps=5e-1, 94 | tol=1e-8)[0] 95 | 96 | fig, ax = plt.subplots(1, 1, figsize=(8, 3)) 97 | ax.plot(t, x, 'k', lw=8, label=r'$x$') 98 | ax.plot(t, y, 'r', lw=4, label=r'$y=Ax$') 99 | ax.plot(t, xls, '--g', lw=4, label=r'$x_{LS}$') 100 | ax.plot(t, xfista, '--m', lw=4, label=r'$x_{FISTA}$') 101 | ax.set_title('Deconvolution', fontsize=14, fontweight='bold') 102 | ax.legend() 103 | plt.tight_layout() 104 | -------------------------------------------------------------------------------- /pytests/test_linearoperator.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from numpy.testing import assert_array_equal, assert_array_almost_equal 4 | from pylops_gpu.utils.backend import device 5 | from pylops_gpu.utils.torch2numpy import * 6 | from pylops_gpu import Diagonal, MatrixMult 7 | 8 | par1 = {'ny': 11, 'nx': 11, 9 | 'dtype': torch.float32} # square 10 | par2 = {'ny': 21, 'nx': 11, 11 | 'dtype': torch.float32} # overdetermined 12 | 13 | dev = device() 14 | np.random.seed(0) 15 | torch.manual_seed(0) 16 | 17 | 18 | @pytest.mark.parametrize("par", [(par1)]) 19 | def test_LinearOperator_fromnumpy(par): 20 | """Pass numpy to linear operator in forward and adjoint mode and check that 21 | matvec and rmatvec convert it to numpy 22 | """ 23 | d = torch.arange(0, par['ny'], dtype=par['dtype']).to(dev) 24 | Dop = Diagonal(d) 25 | 26 | x = np.ones(par['ny'], dtype=numpytype_from_torchtype(par['dtype'])) 27 | y = Dop.matvec(x) 28 | xadj = Dop.rmatvec(y) 29 | assert isinstance(y, np.ndarray) 30 | assert isinstance(xadj, np.ndarray) 31 | 32 | y = Dop * x 33 | xadj = Dop.H * x 34 | assert isinstance(y, np.ndarray) 35 | assert isinstance(xadj, np.ndarray) 36 | 37 | 38 | @pytest.mark.parametrize("par", [(par1), (par2)]) 39 | def test_LinearOperator_adjoint(par): 40 | """Adjoint operator 41 | """ 42 | A = np.random.randn(par['ny'], par['nx']).astype(np.float32) 43 | Aop = MatrixMult(torch.from_numpy(A)) 44 | 45 | x = torch.ones(par['ny'], dtype=par['dtype']).to(dev) 46 | y = torch.from_numpy(A).t().matmul(x) 47 | y1 = Aop.rmatvec(x) 48 | y2 = Aop.H * x 49 | assert_array_equal(y.cpu().numpy(), y1.cpu().numpy()) 50 | assert_array_equal(y.cpu().numpy(), y2.cpu().numpy()) 51 | 52 | 53 | @pytest.mark.parametrize("par", [(par1)]) 54 | def test_LinearOperator_sum(par): 55 | """Sum of operators 56 | """ 57 | d = torch.arange(0, par['ny'], dtype=par['dtype']).to(dev) 58 | d1 = torch.arange(10, par['ny'] + 10, dtype=par['dtype']).to(dev) 59 | Dop = Diagonal(d) 60 | D1op
= Diagonal(d1) 61 | 62 | x = torch.ones(par['ny'], dtype=par['dtype']).to(dev) 63 | y = (Dop + D1op) * x 64 | y1 = d * x + d1 * x 65 | assert_array_equal(y.cpu().numpy(), y1.cpu().numpy()) 66 | 67 | 68 | @pytest.mark.parametrize("par", [(par1)]) 69 | def test_LinearOperator_prod(par): 70 | """Product of operators 71 | """ 72 | d = torch.arange(0, par['ny'], dtype=par['dtype']).to(dev) 73 | d1 = torch.arange(10, par['ny'] + 10, dtype=par['dtype']).to(dev) 74 | Dop = Diagonal(d) 75 | D1op = Diagonal(d1) 76 | Dprodop = D1op * Dop 77 | 78 | x = torch.ones(par['ny'], dtype=par['dtype']).to(dev) 79 | y = d * d1 * x 80 | y1 = D1op.matvec(Dop.matvec(x)) 81 | y2 = Dprodop * x 82 | assert_array_equal(y.cpu().numpy(), y1.cpu().numpy()) 83 | assert_array_equal(y.cpu().numpy(), y2.cpu().numpy()) 84 | 85 | 86 | @pytest.mark.parametrize("par", [(par1)]) 87 | def test_LinearOperator_power(par): 88 | """Power operators - (Op ** 2) * x == Op * Op * x 89 | """ 90 | d = torch.arange(0, par['ny'], dtype=par['dtype']).to(dev) 91 | Dop = Diagonal(d) 92 | 93 | x = torch.ones(par['ny'], dtype=par['dtype']).to(dev) 94 | y = Dop * (Dop * x) 95 | y1 = (Dop ** 2) * x 96 | assert_array_equal(y.cpu().numpy(), y1.cpu().numpy()) 97 | 98 | 99 | @pytest.mark.parametrize("par", [(par1)]) 100 | def test_LinearOperator_div(par): 101 | """Division / to solve 102 | """ 103 | d = torch.arange(1, par['ny'] + 1, dtype=par['dtype']).to(dev) 104 | Dop = Diagonal(d) 105 | 106 | x = torch.ones(par['ny'], dtype=par['dtype']).to(dev) 107 | y = Dop * x 108 | xinv = Dop / y 109 | assert_array_almost_equal(x.cpu().numpy(), xinv.cpu().numpy(), 110 | decimal=3) 111 | -------------------------------------------------------------------------------- /examples/plot_diagonal.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Diagonal 3 | ======== 4 | This example shows how to use the :py:class:`pylops_gpu.Diagonal` operator 5 | to perform *Element-wise multiplication* between the input vector and a vector :math:`\mathbf{d}`. 6 | 7 | In other words, the operator acts as a diagonal operator :math:`\mathbf{D}` whose elements along 8 | the diagonal are the elements of the vector :math:`\mathbf{d}`. 9 | 10 | """ 11 | import torch 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import matplotlib.gridspec as pltgs 15 | 16 | import pylops_gpu 17 | 18 | plt.close('all') 19 | 20 | ############################################################################### 21 | # Let's define a diagonal operator :math:`\mathbf{d}` with increasing numbers from 22 | # ``0`` to ``N`` and a unitary model :math:`\mathbf{x}`. 
23 | N = 10 24 | d = torch.arange(N, dtype=torch.float32) 25 | x = torch.ones(N, dtype=torch.float32) 26 | 27 | Dop = pylops_gpu.Diagonal(d) 28 | 29 | y = Dop * x 30 | y1 = Dop.H * x 31 | 32 | gs = pltgs.GridSpec(1, 6) 33 | fig = plt.figure(figsize=(7, 3)) 34 | ax = plt.subplot(gs[0, 0:3]) 35 | im = ax.imshow(Dop.matrix(), cmap='rainbow', vmin=0, vmax=N) 36 | ax.set_title('A', size=20, fontweight='bold') 37 | ax.set_xticks(np.arange(N-1)+0.5) 38 | ax.set_yticks(np.arange(N-1)+0.5) 39 | ax.grid(linewidth=3, color='white') 40 | ax.xaxis.set_ticklabels([]) 41 | ax.yaxis.set_ticklabels([]) 42 | ax.axis('tight') 43 | ax = plt.subplot(gs[0, 3]) 44 | ax.imshow(x[:, np.newaxis], cmap='rainbow', vmin=0, vmax=N) 45 | ax.set_title('x', size=20, fontweight='bold') 46 | ax.set_xticks([]) 47 | ax.set_yticks(np.arange(N-1)+0.5) 48 | ax.grid(linewidth=3, color='white') 49 | ax.xaxis.set_ticklabels([]) 50 | ax.yaxis.set_ticklabels([]) 51 | ax = plt.subplot(gs[0, 4]) 52 | ax.text(0.35, 0.5, '=', horizontalalignment='center', 53 | verticalalignment='center', size=40, fontweight='bold') 54 | ax.axis('off') 55 | ax = plt.subplot(gs[0, 5]) 56 | ax.imshow(y[:, np.newaxis], cmap='rainbow', vmin=0, vmax=N) 57 | ax.set_title('y', size=20, fontweight='bold') 58 | ax.set_xticks([]) 59 | ax.set_yticks(np.arange(N - 1) + 0.5) 60 | ax.grid(linewidth=3, color='white') 61 | ax.xaxis.set_ticklabels([]) 62 | ax.yaxis.set_ticklabels([]) 63 | fig.colorbar(im, ax=ax, ticks=[0, N], pad=0.3, shrink=0.7) 64 | 65 | 66 | ############################################################################### 67 | # Similarly we can consider the input model as composed of two or more 68 | # dimensions. In this case the diagonal operator can still be applied to 69 | # each element or broadcast along a specific direction. Let's start with the 70 | # simplest case where each element is multiplied by a different value 71 | nx, ny = 3, 5 72 | x = torch.ones((nx, ny), dtype=torch.float32) 73 | print('x =\n%s' % x) 74 | 75 | d = torch.arange(nx*ny, dtype=torch.float32).reshape(nx, ny) 76 | Dop = pylops_gpu.Diagonal(d) 77 | 78 | y = Dop * x.flatten() 79 | y1 = Dop.H * x.flatten() 80 | 81 | print('y = D*x =\n%s' % y.reshape(nx, ny)) 82 | print('xadj = D\'*x =\n%s ' % y1.reshape(nx, ny)) 83 | 84 | ############################################################################### 85 | # And we now broadcast 86 | nx, ny = 3, 5 87 | x = torch.ones((nx, ny), dtype=torch.float32) 88 | print('x =\n%s' % x) 89 | 90 | # 1st dim 91 | d = torch.arange(nx, dtype=torch.float32) 92 | Dop = pylops_gpu.Diagonal(d, dims=(nx, ny), dir=0) 93 | 94 | y = Dop * x.flatten() 95 | y1 = Dop.H * x.flatten() 96 | 97 | print('1st dim: y = D*x =\n%s' % y.reshape(nx, ny)) 98 | print('1st dim: xadj = D\'*x =\n%s ' % y1.reshape(nx, ny)) 99 | 100 | # 2nd dim 101 | d = torch.arange(ny, dtype=torch.float32) 102 | Dop = pylops_gpu.Diagonal(d, dims=(nx, ny), dir=1) 103 | 104 | y = Dop * x.flatten() 105 | y1 = Dop.H * x.flatten() 106 | 107 | print('2nd dim: y = D*x =\n%s' % y.reshape(nx, ny)) 108 | print('2nd dim: xadj = D\'*x =\n%s ' % y1.reshape(nx, ny)) 109 | -------------------------------------------------------------------------------- /tutorials/ad.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 01.
Automatic Differentiation 3 | ============================= 4 | This tutorial focuses on one of the two main benefits of re-implementing 5 | some of PyLops linear operators within the PyTorch framework, namely the 6 | possibility to perform Automatic Differentiation (AD) on chains of operators 7 | which can be: 8 | 9 | - native PyTorch mathematical operations (e.g., :func:`torch.log`, 10 | :func:`torch.sin`, :func:`torch.tan`, :func:`torch.pow`, ...) 11 | - neural network operators in :mod:`torch.nn` 12 | - PyLops and/or PyLops-gpu linear operators 13 | 14 | This opens up many opportunities, such as easily including linear regularization 15 | terms to nonlinear cost functions or using linear preconditioners with nonlinear 16 | modelling operators. 17 | 18 | """ 19 | import numpy as np 20 | import torch 21 | import matplotlib.pyplot as plt 22 | from torch.autograd import gradcheck 23 | 24 | import pylops_gpu 25 | from pylops_gpu.utils.backend import device 26 | 27 | dev = device() 28 | plt.close('all') 29 | np.random.seed(10) 30 | torch.manual_seed(10) 31 | 32 | ############################################################################### 33 | # In this example we consider a simple multidimensional functional: 34 | # 35 | # .. math:: 36 | # \mathbf{y} = \mathbf{A} sin(\mathbf{x}) 37 | # 38 | # and we use AD to compute the gradient with respect to the input vector 39 | # evaluated at :math:`\mathbf{x}=\mathbf{x}_0` : 40 | # :math:`\mathbf{g} = d\mathbf{y} / d\mathbf{x} |_{\mathbf{x}=\mathbf{x}_0}`. 41 | # 42 | # Let's start by defining the Jacobian: 43 | # 44 | # .. math:: 45 | # \textbf{J} = \begin{bmatrix} 46 | # dy_1 / dx_1 & ... & dy_1 / dx_M \\ 47 | # ... & ... & ... \\ 48 | # dy_N / dx_1 & ... & dy_N / dx_M 49 | # \end{bmatrix} = \begin{bmatrix} 50 | # a_{11} cos(x_1) & ... & a_{1M} cos(x_M) \\ 51 | # ... & ... & ... \\ 52 | # a_{N1} cos(x_1) & ... & a_{NM} cos(x_M) 53 | # \end{bmatrix} = \textbf{A} cos(\mathbf{x}) 54 | # 55 | # Since both input and output are multidimensional, 56 | # PyTorch ``backward`` actually computes the product between the transposed 57 | # Jacobian and a vector :math:`\mathbf{v}`: 58 | # :math:`\mathbf{g}=\mathbf{J^T} \mathbf{v}`. 59 | # 60 | # To validate the correctness of the AD result, we can in this simple case 61 | # also compute the Jacobian analytically and apply it to the same vector 62 | # :math:`\mathbf{v}` that we have provided to PyTorch ``backward``. 63 | 64 | nx, ny = 10, 6 65 | x0 = torch.arange(nx, dtype=torch.double, requires_grad=True) 66 | 67 | # Forward 68 | A = torch.normal(0., 1., (ny, nx), dtype=torch.double) 69 | Aop = pylops_gpu.TorchOperator(pylops_gpu.MatrixMult(A)) 70 | y = Aop.apply(torch.sin(x0)) 71 | 72 | # AD 73 | v = torch.ones(ny, dtype=torch.double) 74 | y.backward(v, retain_graph=True) 75 | adgrad = x0.grad 76 | 77 | # Analytical 78 | J = (A * torch.cos(x0)) 79 | anagrad = torch.matmul(J.T, v) 80 | 81 | print('Input: ', x0) 82 | print('AD gradient: ', adgrad) 83 | print('Analytical gradient: ', anagrad) 84 | 85 | 86 | ############################################################################### 87 | # Similarly we can use the :func:`torch.autograd.gradcheck` directly from 88 | # PyTorch. 
Note that doubles must be used for this to succeed with very small 89 | # `eps` and `atol` 90 | input = (torch.arange(nx, dtype=torch.double, requires_grad=True), 91 | Aop.matvec, Aop.rmatvec, Aop.pylops, Aop.device) 92 | test = gradcheck(Aop.Top, input, eps=1e-6, atol=1e-4) 93 | print(test) 94 | 95 | 96 | ############################################################################### 97 | # Note that while matrix-vector multiplication could have been performed using 98 | # the native PyTorch operator :func:`torch.matmul`, in this case we have shown 99 | # that we are also able to use a PyLops-gpu operator wrapped in 100 | # :class:`pylops_gpu.TorchOperator`. As already mentioned, this gives us the 101 | # ability to use much more complex linear operators provided by PyLops within 102 | # a chain of mixed linear and nonlinear AD-enabled operators. 103 | -------------------------------------------------------------------------------- /pytests/test_identity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import torch 3 | 4 | from numpy.testing import assert_array_almost_equal 5 | from pylops_gpu.utils import dottest 6 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype 7 | from pylops_gpu.utils.backend import device 8 | from pylops_gpu.utils.complex import * 9 | from pylops_gpu.basicoperators import Identity 10 | from pylops_gpu.optimization.cg import cg 11 | 12 | 13 | par1 = {'ny': 11, 'nx': 11, 'imag': 0, 14 | 'dtype': 'float32'} # square real 15 | par2 = {'ny': 21, 'nx': 11, 'imag': 0, 16 | 'dtype': 'float32'} # overdetermined real 17 | par1j = {'ny': 11, 'nx': 11, 'imag': 1j, 18 | 'dtype': 'float32'} # square complex 19 | par2j = {'ny': 21, 'nx': 11, 'imag': 1j, 20 | 'dtype': 'float32'} # overdetermined complex 21 | par3 = {'ny': 11, 'nx': 21, 'imag': 0, 22 | 'dtype': 'float32'} # underdetermined real 23 | 24 | dev = device() 25 | np.random.seed(0) 26 | torch.manual_seed(0) 27 | 28 | 29 | @pytest.mark.parametrize("par", [(par1), (par2), (par3)])#, (par1j), (par2j)]) 30 | def test_Identity_inplace(par): 31 | """Dot-test, forward and adjoint for Identity operator 32 | """ 33 | Iop = Identity(par['ny'], par['nx'], 34 | complex=True if par['imag'] == 1j else False, 35 | dtype=torchtype_from_numpytype(par['dtype']), 36 | inplace=True) 37 | assert dottest(Iop, par['ny'], par['nx'], 38 | complexflag=0 if par['imag'] == 0 else 3) 39 | 40 | x = np.ones(par['nx'], dtype='float32') + \ 41 | par['imag'] * np.ones(par['nx'], dtype='float32') 42 | if par['imag'] == 0: 43 | x = torch.from_numpy(x).to(dev) 44 | else: 45 | x = complextorch_fromnumpy(x).to(dev) 46 | 47 | y = Iop*x 48 | x1 = Iop.H*y 49 | 50 | if par['imag'] == 0: 51 | x = x.cpu().numpy() 52 | y = y.cpu().numpy() 53 | x1 = x1.cpu().numpy() 54 | else: 55 | x = complexnumpy_fromtorch(x) 56 | y = complexnumpy_fromtorch(y) 57 | x1 = complexnumpy_fromtorch(x1) 58 | 59 | assert_array_almost_equal(x[:min(par['ny'], par['nx'])], 60 | y[:min(par['ny'], par['nx'])], 61 | decimal=4) 62 | assert_array_almost_equal(x[:min(par['ny'], par['nx'])], 63 | x1[:min(par['ny'], par['nx'])], 64 | decimal=4) 65 | 66 | 67 | @pytest.mark.parametrize("par", [(par1), (par2), (par3)]) # (par1j), (par2j), 68 | def test_Identity_noinplace(par): 69 | """Dot-test, forward and adjoint for Identity operator (not in place) 70 | """ 71 | print('complex', True if par['imag'] == 1j else False) 72 | Iop = Identity(par['ny'], par['nx'], 73 | complex=True if par['imag'] == 1j else False, 74 | 
dtype=torchtype_from_numpytype(par['dtype']), 75 | inplace=False) 76 | assert dottest(Iop, par['ny'], par['nx'], 77 | complexflag=0 if par['imag'] == 0 else 3) 78 | 79 | x = np.ones(par['nx'], dtype='float32') + \ 80 | par['imag'] * np.ones(par['nx'], dtype='float32') 81 | if par['imag'] == 0: 82 | x = torch.from_numpy(x).to(dev) 83 | else: 84 | x = complextorch_fromnumpy(x).to(dev) 85 | y = Iop*x 86 | x1 = Iop.H*y 87 | 88 | if par['imag'] == 0: 89 | x = x.cpu().numpy() 90 | y = y.cpu().numpy() 91 | x1 = x1.cpu().numpy() 92 | else: 93 | x = complexnumpy_fromtorch(x) 94 | y = complexnumpy_fromtorch(y) 95 | x1 = complexnumpy_fromtorch(x1) 96 | 97 | assert_array_almost_equal(x[:min(par['ny'], par['nx'])], 98 | y[:min(par['ny'], par['nx'])], 99 | decimal=4) 100 | assert_array_almost_equal(x[:min(par['ny'], par['nx'])], 101 | x1[:min(par['ny'], par['nx'])], 102 | decimal=4) 103 | 104 | # change value in x and check it doesn't change in y 105 | x[0] = 10 106 | assert x[0] != y[0] 107 | -------------------------------------------------------------------------------- /examples/plot_tvreg.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Total Variation (TV) Regularization 3 | =================================== 4 | This set of examples shows how to add Total Variation (TV) regularization to an 5 | inverse problem in order to enforce blockiness in the reconstructed model. 6 | 7 | To do so we will use the generalized Split Bregman iterations by means of the 8 | :func:`pylops_gpu.optimization.sparsity.SplitBregman` solver. 9 | 10 | The first example is concerned with denoising of a piece-wise step function 11 | which has been contaminated by noise. The forward model is: 12 | 13 | .. math:: 14 | \mathbf{y} = \mathbf{x} + \mathbf{n} 15 | 16 | meaning that we have an identity operator (:math:`\mathbf{I}`) and inverting 17 | for :math:`\mathbf{x}` from :math:`\mathbf{y}` is impossible without adding 18 | prior information. We will enforce blockiness in the solution by adding a 19 | regularization term that enforces sparsity in the first derivative of 20 | the solution: 21 | 22 | .. math:: 23 | J = \mu/2 ||\mathbf{y} - \mathbf{I} \mathbf{x}||_2^2 + 24 | || \nabla \mathbf{x}||_1 25 | 26 | """ 27 | # sphinx_gallery_thumbnail_number = 3 28 | import torch 29 | import numpy as np 30 | import matplotlib.pyplot as plt 31 | import pylops_gpu 32 | 33 | from pylops_gpu.utils.backend import device 34 | 35 | dev = device() 36 | print('PyLops-gpu working on %s...' % dev) 37 | plt.close('all') 38 | 39 | torch.manual_seed(0) 40 | np.random.seed(1) 41 | dtype = torch.float32 42 | 43 | ############################################################################### 44 | # Let's start by creating the model and data 45 | nx = int(101) 46 | x = torch.zeros(nx, dtype=dtype).to(dev) 47 | x[:nx//2] = 10 48 | x[nx//2:3*nx//4] = -5 49 | Iop = pylops_gpu.Identity(nx, device=dev, dtype=dtype) 50 | noise = torch.from_numpy(np.random.normal(0, 1, nx).astype(np.float32)).to(dev) 51 | 52 | y = Iop * (x + noise) 53 | 54 | plt.figure(figsize=(10, 5)) 55 | plt.plot(x.cpu(), 'k', lw=3, label='x') 56 | plt.plot(y.cpu(), '.k', label='y=x+n') 57 | plt.legend() 58 | plt.title('Model and data') 59 | plt.tight_layout() 60 | 61 | ############################################################################### 62 | # To start we will try to use a simple L2 regularization that enforces 63 | # smoothness in the solution.
We can see how denoising is successfully achieved 64 | # but the solution is much smoother than we wish for. 65 | D2op = pylops_gpu.SecondDerivative(nx, device=dev, dtype=dtype) 66 | lamda = 1e2 67 | 68 | xinv = \ 69 | pylops_gpu.optimization.leastsquares.NormalEquationsInversion(Op=Iop, 70 | Regs=[D2op], 71 | epsRs=[np.sqrt(lamda/2)], 72 | data=y, 73 | device=dev, 74 | **dict(niter=30)) 75 | 76 | plt.figure(figsize=(10, 5)) 77 | plt.plot(x.cpu(), 'k', lw=3, label='x') 78 | plt.plot(y.cpu(), '.k', label='y=x+n') 79 | plt.plot(xinv.cpu(), 'r', lw=5, label='xinv') 80 | plt.legend() 81 | plt.title('L2 inversion') 82 | plt.tight_layout() 83 | 84 | ############################################################################### 85 | # Now we impose blockiness in the solution using the Split Bregman solver 86 | Dop = pylops_gpu.FirstDerivative(nx, device=dev, dtype=dtype) 87 | mu = 0.01 88 | lamda = 0.3 89 | niter_out = 50 90 | niter_in = 3 91 | 92 | xinv, niter = \ 93 | pylops_gpu.optimization.sparsity.SplitBregman(Iop, [Dop], y, niter_out, 94 | niter_in, mu=mu, epsRL1s=[lamda], 95 | tol=1e-4, tau=1., 96 | **dict(niter=30, epsI=1e-10)) 97 | 98 | plt.figure(figsize=(10, 5)) 99 | plt.plot(x.cpu(), 'k', lw=3, label='x') 100 | plt.plot(y.cpu(), '.k', label='y=x+n') 101 | plt.plot(xinv.cpu(), 'r', lw=5, label='xinv') 102 | plt.legend() 103 | plt.title('TV inversion') 104 | plt.tight_layout() 105 | -------------------------------------------------------------------------------- /pylops_gpu/utils/dottest.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pylops_gpu.utils.complex import complextorch_fromnumpy 5 | 6 | 7 | def dottest(Op, nr, nc, tol=1e-6, dtype=torch.float32, 8 | complexflag=0, device='cpu', raiseerror=True, verb=False): 9 | r"""Dot test. 10 | 11 | Generate random vectors :math:`\mathbf{u}` and :math:`\mathbf{v}` 12 | and perform a dot-test to verify the validity of forward and adjoint operators. 13 | This test can help to detect errors in the operator implementation. 14 | 15 | Parameters 16 | ---------- 17 | Op : :obj:`pylops_gpu.LinearOperator` 18 | Linear operator to test. 19 | nr : :obj:`int` 20 | Number of rows of operator (i.e., elements in data) 21 | nc : :obj:`int` 22 | Number of columns of operator (i.e., elements in model) 23 | tol : :obj:`float`, optional 24 | Dottest tolerance 25 | dtype : :obj:`torch.dtype`, optional 26 | Type of elements in random vectors 27 | complexflag : :obj:`bool`, optional 28 | Generate random vectors with real (0) or complex numbers 29 | (1: only model, 2: only data, 3: both) 30 | device : :obj:`str`, optional 31 | Device to be used 32 | raiseerror : :obj:`bool`, optional 33 | Raise error or simply return ``False`` when dottest fails 34 | verb : :obj:`bool`, optional 35 | Verbosity 36 | 37 | Raises 38 | ------ 39 | ValueError 40 | If dot-test is not verified within chosen tolerance. 41 | 42 | Notes 43 | ----- 44 | A dot-test is a mathematical tool used in the development of numerical 45 | linear operators. 46 | 47 | More specifically, a correct implementation of forward and adjoint for 48 | a linear operator should verify the following *equality* 49 | within a numerical tolerance: 50 | 51 | ..
math:: 52 | (\mathbf{Op}*\mathbf{u})^H*\mathbf{v} = 53 | \mathbf{u}^H*(\mathbf{Op}^H*\mathbf{v}) 54 | 55 | """ 56 | np_dtype = torch.ones(1, dtype=dtype).numpy().dtype 57 | if complexflag in (0, 2): 58 | u = torch.randn(nc, dtype=dtype) 59 | else: 60 | u = complextorch_fromnumpy(np.random.randn(nc).astype(np_dtype) + 61 | 1j*np.random.randn(nc).astype(np_dtype)) 62 | 63 | if complexflag in (0, 1): 64 | v = torch.randn(nr, dtype=dtype) 65 | else: 66 | v = complextorch_fromnumpy(np.random.randn(nr).astype(np_dtype) + \ 67 | 1j*np.random.randn(nr).astype(np_dtype)) 68 | u, v = u.to(device), v.to(device) 69 | 70 | y = Op.matvec(u) # Op * u 71 | x = Op.rmatvec(v) # Op'* v 72 | 73 | if complexflag == 0: 74 | yy = torch.dot(y, v) # (Op * u)' * v 75 | xx = torch.dot(u, x) # u' * (Op' * v) 76 | else: 77 | yy = np.vdot(y, v) # (Op * u)' * v 78 | xx = np.vdot(u, x) # u' * (Op' * v) 79 | 80 | if complexflag == 0: 81 | if torch.abs((yy-xx)/((yy+xx+1e-15)/2)) < tol: 82 | if verb: print('Dot test passed, v^T(Opu)=%f - u^T(Op^Tv)=%f' 83 | % (yy, xx)) 84 | return True 85 | else: 86 | if raiseerror: 87 | raise ValueError('Dot test failed, v^T(Opu)=%f - u^T(Op^Tv)=%f' 88 | % (yy, xx)) 89 | if verb: print('Dot test failed, v^T(Opu)=%f - u^T(Op^Tv)=%f' 90 | % (yy, xx)) 91 | return False 92 | else: 93 | checkreal = np.abs((np.real(yy) - np.real(xx)) / 94 | ((np.real(yy) + np.real(xx)+1e-15) / 2)) < tol 95 | checkimag = np.abs((np.imag(yy) - np.imag(xx)) / 96 | ((np.imag(yy) + np.imag(xx)+1e-15) / 2)) < tol 97 | 98 | if checkreal and checkimag: 99 | if verb: print('Dot test passed, v^H(Opu)=%s - u^H(Op^Hv)=%s' 100 | % (yy, xx)) 101 | return True 102 | else: 103 | if raiseerror: 104 | raise ValueError('Dot test failed, v^H(Opu)=%s - u^H(Op^Hv)=%s' 105 | % (yy, xx)) 106 | if verb: print('Dot test failed, v^H(Opu)=%s - u^H(Op^Hv)=%s' 107 | % (yy, xx)) 108 | return False 109 | -------------------------------------------------------------------------------- /pylops_gpu/basicoperators/Restriction.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pylops_gpu.LinearOperator import LinearOperator 5 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype 6 | 7 | 8 | class Restriction(LinearOperator): 9 | r"""Restriction (or sampling) operator. 10 | 11 | Extract subset of values from input vector at locations ``iava`` 12 | in forward mode and place those values at locations ``iava`` 13 | in an otherwise zero vector in adjoint mode. 14 | 15 | Parameters 16 | ---------- 17 | M : :obj:`int` 18 | Number of samples in model. 19 | iava : :obj:`list` or :obj:`numpy.ndarray` 20 | Integer indices of available samples for data selection. 21 | dims : :obj:`list` 22 | Number of samples for each dimension 23 | (``None`` if only one dimension is available) 24 | dir : :obj:`int`, optional 25 | Direction along which restriction is applied. 26 | inplace : :obj:`bool`, optional 27 | Work inplace (``True``) or make a new copy (``False``). By default, 28 | data is a reference to the model (in forward) and model is a reference 29 | to the data (in adjoint).
30 | device : :obj:`str`, optional 31 | Device to be used 32 | togpu : :obj:`tuple`, optional 33 | Move model and data from cpu to gpu prior to applying ``matvec`` and 34 | ``rmatvec``, respectively (only when ``device='gpu'``) 35 | tocpu : :obj:`tuple`, optional 36 | Move data and model from gpu to cpu after applying ``matvec`` and 37 | ``rmatvec``, respectively (only when ``device='gpu'``) 38 | dtype : :obj:`torch.dtype`, optional 39 | Type of elements in input array. 40 | 41 | 42 | Attributes 43 | ---------- 44 | shape : :obj:`tuple` 45 | Operator shape 46 | explicit : :obj:`bool` 47 | Operator contains a matrix that can be solved 48 | explicitly (``True``) or not (``False``) 49 | 50 | Notes 51 | ----- 52 | Refer to :class:`pylops.basicoperators.Restriction` for 53 | implementation details. 54 | 55 | """ 56 | def __init__(self, M, iava, dims=None, dir=0, inplace=True, 57 | device='cpu', togpu=(False, False), tocpu=(False, False), 58 | dtype=torch.float32): 59 | self.M = M 60 | self.dir = dir 61 | self.iava = iava 62 | if dims is None: 63 | self.N = len(iava) 64 | self.dims = (self.M, ) 65 | self.reshape = False 66 | else: 67 | if np.prod(dims) != self.M: 68 | raise ValueError('product of dims must equal M!') 69 | else: 70 | self.dims = dims # model dimensions 71 | self.dimsd = list(dims) # data dimensions 72 | self.dimsd[self.dir] = len(iava) 73 | self.iavareshape = [1] * self.dir + [len(self.iava)] + \ 74 | [1] * (len(self.dims) - self.dir - 1) 75 | self.N = np.prod(self.dimsd) 76 | self.reshape = True 77 | self.inplace = inplace 78 | self.shape = (self.N, self.M) 79 | self.device = device 80 | self.togpu = togpu 81 | self.tocpu = tocpu 82 | self.dtype = torchtype_from_numpytype(dtype) 83 | self.explicit = True 84 | self.Op = None 85 | 86 | def _matvec(self, x): 87 | if not self.inplace: 88 | x = x.clone() 89 | if not self.reshape: 90 | y = x[self.iava] 91 | else: 92 | raise NotImplementedError('Restriction currently works only on ' 93 | '1d arrays') 94 | # x = torch.reshape(x, self.dims) 95 | # y = torch.take(x, self.iava, axis=self.dir) 96 | # y = y.view(-1) 97 | return y 98 | 99 | def _rmatvec(self, x): 100 | if not self.inplace: 101 | x = x.clone() 102 | if not self.reshape: 103 | y = torch.zeros(self.dims, dtype=self.dtype).to(self.device) 104 | y[self.iava] = x 105 | else: 106 | raise NotImplementedError('Restriction currently works only on ' 107 | '1d arrays') 108 | # x = torch.reshape(x, self.dimsd) 109 | # y = torch.zeros(self.dims, dtype=self.dtype) 110 | # torch.put_along_axis(y, torch.reshape(self.iava, self.iavareshape), 111 | # x, axis=self.dir) 112 | # y = y.view(-1) 113 | return y 114 | -------------------------------------------------------------------------------- /pylops_gpu/optimization/cg.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from pytorch_complex_tensor import ComplexTensor 4 | from pylops_gpu.utils.complex import divide 5 | #from pylops_gpu import LinearOperator, aslinearoperator 6 | 7 | 8 | def cg(A, y, x=None, niter=10, tol=1e-10): 9 | r"""Conjugate gradient 10 | 11 | Solve a system of equations given the square operator ``A`` and data ``y`` 12 | using conjugate gradient iterations.
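    A quick sketch of typical usage (illustrative only; ``A`` must be
    square, e.g. a normal-equations operator ``Aop.H * Aop``)::

        >>> import torch
        >>> from pylops_gpu import MatrixMult
        >>> from pylops_gpu.optimization.cg import cg
        >>> Aop = MatrixMult(2. * torch.eye(4))
        >>> x, iiter = cg(Aop, torch.ones(4), niter=4)  # x -> 0.5 * ones(4)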
13 | 14 | Parameters 15 | ---------- 16 | A : :obj:`pylops_gpu.LinearOperator` 17 | Operator to invert of size :math:`[N \times N]` 18 | y : :obj:`torch.Tensor` 19 | Data of size :math:`[N \times 1]` 20 | x : :obj:`torch.Tensor`, optional 21 | Initial guess 22 | niter : :obj:`int`, optional 23 | Number of iterations 24 | tol : :obj:`float`, optional 25 | Residual norm tolerance 26 | 27 | Returns 28 | ------- 29 | x : :obj:`torch.Tensor` 30 | Estimated model 31 | iiter : :obj:`int` 32 | Number of executed iterations 33 | 34 | """ 35 | complex_problem = True if isinstance(y, ComplexTensor) else False 36 | #if not isinstance(A, LinearOperator): 37 | # A = aslinearoperator(A) 38 | if x is None: 39 | if complex_problem: 40 | x = ComplexTensor(torch.zeros((2 * y.shape[-1], 1), 41 | dtype=y.dtype)).t() 42 | else: 43 | x = torch.zeros_like(y) 44 | r = y - A.matvec(x) 45 | c = r.clone() 46 | if complex_problem: 47 | c = ComplexTensor(c) 48 | kold = torch.sum(r * r) 49 | 50 | iiter = 0 51 | while iiter < niter and torch.abs(kold) > tol: 52 | Ac = A.matvec(c) 53 | cAc = (c * Ac).sum() if complex_problem else torch.sum(c * Ac) 54 | a = divide(kold, cAc) if complex_problem else kold / cAc 55 | x += a * c 56 | r -= a * Ac 57 | k = torch.sum(r * r) 58 | b = k / kold 59 | c = r + b * c 60 | kold = k 61 | iiter += 1 62 | return x, iiter 63 | 64 | 65 | def cgls(A, y, x=None, niter=10, damp=0., tol=1e-10): 66 | r"""Conjugate gradient least squares 67 | 68 | Solve an overdetermined system of equations given an operator ``A`` and 69 | data ``y`` using conjugate gradient iterations. 70 | 71 | Parameters 72 | ---------- 73 | A : :obj:`pylops_gpu.LinearOperator` 74 | Operator to invert of size :math:`[N \times M]` 75 | y : :obj:`torch.Tensor` 76 | Data of size :math:`[N \times 1]` 77 | x : :obj:`torch.Tensor`, optional 78 | Initial guess 79 | niter : :obj:`int`, optional 80 | Number of iterations 81 | damp : :obj:`float`, optional 82 | Damping coefficient 83 | tol : :obj:`float`, optional 84 | Residual norm tolerance 85 | 86 | Returns 87 | ------- 88 | x : :obj:`torch.Tensor` 89 | Estimated model 90 | iiter : :obj:`int` 91 | Number of executed iterations 92 | 93 | Notes 94 | ----- 95 | Minimize the following functional using conjugate gradient 96 | iterations: 97 | 98 | .. math:: 99 | J = || \mathbf{y} - \mathbf{Ax} ||^2 + \epsilon || \mathbf{x} ||^2 100 | 101 | where :math:`\epsilon` is the damping coefficient.
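    A minimal sketch of typical usage (illustrative only)::

        >>> import torch
        >>> from pylops_gpu import MatrixMult
        >>> from pylops_gpu.optimization.cg import cgls
        >>> Aop = MatrixMult(torch.randn(6, 4))
        >>> y = Aop * torch.ones(4)
        >>> x, iiter = cgls(Aop, y, niter=10)  # x is approximately ones(4)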
102 | """ 103 | # naive approach ## 104 | # Op = A.H * A 105 | # y = A.H * y 106 | # return cg(Op, y, x=x, niter=niter, tol=tol) 107 | 108 | complex_problem = True if isinstance(y, ComplexTensor) else False 109 | # if not isinstance(A, LinearOperator): 110 | # A = aslinearoperator(A) 111 | if x is None: 112 | if complex_problem: 113 | x = ComplexTensor(torch.zeros((2 * A.shape[1], 1), 114 | dtype=y.dtype)).t() 115 | else: 116 | x = torch.zeros(A.shape[1], dtype=y.dtype) 117 | s = y - A.matvec(x) 118 | r = A.rmatvec(s) - damp * x 119 | c = r.clone() 120 | if complex_problem: 121 | c = ComplexTensor(c) 122 | kold = torch.sum(r * r) 123 | q = A.matvec(c) 124 | iiter = 0 125 | while iiter < niter and torch.abs(kold) > tol: 126 | qq = (q * q).sum() 127 | a = divide(kold, qq) if complex_problem else kold / qq 128 | x += a * c 129 | s -= a * q 130 | r = A.rmatvec(s) - damp * x 131 | k = torch.sum(r * r) if complex_problem else torch.sum(r * r) 132 | b = k / kold 133 | c = r + b * c 134 | q = A.matvec(c) 135 | kold = k 136 | iiter += 1 137 | return x, iiter 138 | -------------------------------------------------------------------------------- /pylops_gpu/TorchOperator.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from torch.utils.dlpack import from_dlpack, to_dlpack 4 | from pylops.utils import deps 5 | 6 | if deps.cupy_enabled: 7 | import cupy as cp 8 | else: 9 | cp = None 10 | 11 | 12 | class _TorchOperator(torch.autograd.Function): 13 | """Wrapper class for PyLops operators into Torch functions 14 | 15 | The flag pylops is used to discriminate pylops and pylops-gpu operators; 16 | the former one requires the input to be converted into numpy.ndarray and 17 | the output to be converted back to torch.Tensor 18 | 19 | """ 20 | @staticmethod 21 | def forward(ctx, x, forw, adj, pylops, device): 22 | ctx.forw = forw 23 | ctx.adj = adj 24 | ctx.pylops = pylops 25 | ctx.device = device 26 | 27 | # prepare input 28 | if ctx.pylops: 29 | if ctx.device == 'cpu': 30 | # bring x to cpu and numpy 31 | x = x.cpu().detach().numpy() 32 | else: 33 | # pass x to cupy using DLPack 34 | x = cp.fromDlpack(to_dlpack(x)) 35 | 36 | # apply forward operator 37 | y = ctx.forw(x) 38 | 39 | # prepare output 40 | if ctx.pylops: 41 | if ctx.device == 'cpu': 42 | # move y to torch and device 43 | y = torch.from_numpy(y) 44 | else: 45 | # move y to torch and device 46 | y = from_dlpack(y.toDlpack()) 47 | return y 48 | 49 | @staticmethod 50 | def backward(ctx, y): 51 | # prepare input 52 | if ctx.pylops: 53 | if ctx.device == 'cpu': 54 | y = y.cpu().detach().numpy() 55 | else: 56 | # pass x to cupy using DLPack 57 | y = cp.fromDlpack(to_dlpack(y)) 58 | 59 | # apply adjoint operator 60 | x = ctx.adj(y) 61 | 62 | # prepare output 63 | if ctx.pylops: 64 | if ctx.device == 'cpu': 65 | x = torch.from_numpy(x) 66 | else: 67 | x = from_dlpack(x.toDlpack()) 68 | return x, None, None, None, None 69 | 70 | 71 | class TorchOperator(): 72 | """Wrap a PyLops operator into a Torch function. 73 | 74 | This class can be used to wrap a pylops (or pylops-gpu) operator into a 75 | torch function. Doing so, users can mix native torch functions (e.g. 76 | basic linear algebra operations, neural networks, etc.) and pylops 77 | operators. 78 | 79 | Since all operators in PyLops are linear operators, a Torch function is 80 | simply implemented by using the forward operator for its forward pass 81 | and the adjont operator for its backward (gradient) pass. 
82 | 83 | Parameters 84 | ---------- 85 | Op : :obj:`pylops_gpu.LinearOperator` or :obj:`pylops.LinearOperator` 86 | PyLops operator 87 | batch : :obj:`bool`, optional 88 | Input has a single sample (``False``) or a batch of samples (``True``). 89 | If ``batch==False`` the input must be a 1-d Torch tensor, 90 | if ``batch==True`` the input must be a 2-d Torch tensor with 91 | batches along the first dimension 92 | pylops : :obj:`bool`, optional 93 | ``Op`` is a pylops operator (``True``) or a pylops-gpu 94 | operator (``False``) 95 | device : :obj:`str`, optional 96 | Device to be used for output vectors when ``Op`` is a pylops operator 97 | 98 | Returns 99 | ------- 100 | y : :obj:`torch.Tensor` 101 | Output array resulting from the application of the operator to ``x``. 102 | 103 | """ 104 | def __init__(self, Op, batch=False, pylops=False, device='cpu'): 105 | self.pylops = pylops 106 | self.device = device 107 | if not batch: 108 | self.matvec = Op.matvec 109 | self.rmatvec = Op.rmatvec 110 | else: 111 | self.matvec = lambda x: Op.matmat(x, kfirst=True) 112 | self.rmatvec = lambda x: Op.rmatmat(x, kfirst=True) 113 | self.Top = _TorchOperator.apply 114 | 115 | def apply(self, x): 116 | """Apply forward pass to input vector 117 | 118 | Parameters 119 | ---------- 120 | x : :obj:`torch.Tensor` 121 | Input array 122 | 123 | Returns 124 | ------- 125 | y : :obj:`torch.Tensor` 126 | Output array resulting from the application of the operator to ``x``. 127 | 128 | """ 129 | return self.Top(x, self.matvec, self.rmatvec, 130 | self.pylops, self.device) 131 | -------------------------------------------------------------------------------- /pylops_gpu/utils/complex.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from pytorch_complex_tensor import ComplexTensor 4 | from pytorch_complex_tensor.complex_scalar import ComplexScalar 5 | 6 | 7 | def complextorch_fromnumpy(x): 8 | r"""Convert complex numpy array into torch ComplexTensor 9 | 10 | Parameters 11 | ---------- 12 | x : :obj:`numpy.ndarray` 13 | Numpy complex multi-dimensional array 14 | 15 | Returns 16 | ------- 17 | xt : :obj:`pytorch_complex_tensor.ComplexTensor` 18 | Torch ComplexTensor multi-dimensional array 19 | 20 | """ 21 | xt = ComplexTensor(np.vstack((np.real(x), np.imag(x)))) 22 | return xt 23 | 24 | 25 | def complexnumpy_fromtorch(xt): 26 | r"""Convert torch ComplexTensor into complex numpy array 27 | 28 | Parameters 29 | ---------- 30 | xt : :obj:`pytorch_complex_tensor.ComplexTensor` 31 | Torch ComplexTensor 32 | 33 | Returns 34 | ------- 35 | x : :obj:`numpy.ndarray` 36 | Numpy complex multi-dimensional array 37 | 38 | """ 39 | x = xt.numpy() 40 | xrows = x.shape[0] 41 | x = x[:xrows//2] + 1j*x[xrows//2:] 42 | return x.squeeze() 43 | 44 | 45 | def complexscalar_fromtorchscalar(xt): 46 | r"""Convert torch ComplexScalar into complex number 47 | 48 | Parameters 49 | ---------- 50 | xt : :obj:`pytorch_complex_scalar.ComplexScalar` 51 | Torch ComplexScalar 52 | 53 | Returns 54 | ------- 55 | x : :obj:`complex` 56 | Complex scalar 57 | 58 | """ 59 | x = xt.real.item() + 1j*xt.imag.item() 60 | return x 61 | 62 | 63 | def conj(x): 64 | r"""Apply complex conjugation to torch ComplexTensor 65 | 66 | Parameters 67 | ---------- 68 | x : :obj:`pytorch_complex_tensor.ComplexTensor` 69 | Torch ComplexTensor 70 | 71 | Returns 72 | ------- 73 | xc : :obj:`pytorch_complex_tensor.ComplexTensor` 74 | Complex conjugated Torch ComplexTensor 75 | 76 | """ 77 | xc =
x.__graph_copy__(x.real, -x.imag) 78 | return xc 79 | 80 | 81 | def divide(x, y): 82 | r"""Element-wise division of torch Tensor and torch ComplexTensor. 83 | 84 | Divide each element of ``x`` by the corresponding element of ``y``, 85 | where one or both of them can contain complex numbers. 86 | 87 | Parameters 88 | ---------- 89 | x : :obj:`pytorch_complex_tensor.ComplexTensor` or :obj:`torch.Tensor` 90 | Numerator 91 | y : :obj:`pytorch_complex_tensor.ComplexTensor` 92 | Denominator 93 | 94 | Returns 95 | ------- 96 | div : :obj:`pytorch_complex_tensor.ComplexTensor` 97 | Result of the element-wise division 98 | 99 | """ 100 | # convert to numpy 101 | if isinstance(x, ComplexTensor): 102 | xn = complexnumpy_fromtorch(x) 103 | elif isinstance(x, ComplexScalar): 104 | xn = complexscalar_fromtorchscalar(x) 105 | else: 106 | xn = x.cpu().numpy() 107 | if isinstance(y, ComplexTensor): 108 | yn = complexnumpy_fromtorch(y) 109 | elif isinstance(y, ComplexScalar): 110 | yn = complexscalar_fromtorchscalar(y) 111 | else: 112 | yn = y.cpu().numpy() 113 | # divide 114 | divn = xn / yn 115 | # convert back to torch 116 | if divn.size == 1: 117 | divn = divn.item() 118 | else: 119 | if np.iscomplexobj(divn): 120 | divn = complextorch_fromnumpy(divn) 121 | else: 122 | divn = torch.from_numpy(divn) 123 | return divn 124 | 125 | 126 | def reshape(x, shape): 127 | r"""Reshape torch ComplexTensor 128 | 129 | Parameters 130 | ---------- 131 | x : :obj:`pytorch_complex_tensor.ComplexTensor` 132 | Torch ComplexTensor 133 | shape : :obj:`tuple` 134 | New shape 135 | 136 | Returns 137 | ------- 138 | xreshaped : :obj:`pytorch_complex_tensor.ComplexTensor` 139 | Reshaped Torch ComplexTensor 140 | 141 | """ 142 | xreshaped = x.reshape([2] + list(shape)) 143 | xreshaped = ComplexTensor(np.vstack((xreshaped[0], xreshaped[1]))) 144 | return xreshaped 145 | 146 | 147 | def flatten(x): 148 | r"""Flatten torch ComplexTensor 149 | 150 | Parameters 151 | ---------- 152 | x : :obj:`pytorch_complex_tensor.ComplexTensor` 153 | Torch ComplexTensor 154 | 155 | Returns 156 | ------- 157 | xflattened : :obj:`pytorch_complex_tensor.ComplexTensor` 158 | Flattened Torch ComplexTensor 159 | 160 | """ 161 | xflattened = ComplexTensor(np.vstack((x.real.view(-1), 162 | x.imag.view(-1)))) 163 | return xflattened 164 | -------------------------------------------------------------------------------- /pylops_gpu/basicoperators/Identity.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pytorch_complex_tensor import ComplexTensor 5 | from pylops_gpu import LinearOperator 6 | from pylops_gpu.utils.torch2numpy import numpytype_from_torchtype 7 | from pylops_gpu.utils.complex import complextorch_fromnumpy 8 | 9 | _complextypes = (torch.complex32, torch.complex64, torch.complex128) 10 | 11 | 12 | class Identity(LinearOperator): 13 | r"""Identity operator. 14 | 15 | Simply move model to data in forward mode and vice versa in adjoint mode if 16 | :math:`M = N`. If :math:`M > N` removes last :math:`M - N` elements from 17 | model in forward and pads with :math:`0` in adjoint. If :math:`N > M` 18 | removes last :math:`N - M` elements from data in adjoint and pads with 19 | :math:`0` in forward. 20 | 21 | Parameters 22 | ---------- 23 | N : :obj:`int` 24 | Number of samples in data (and model, if ``M`` is not provided). 25 | M : :obj:`int`, optional 26 | Number of samples in model. 27 | inplace : :obj:`bool`, optional 28 | Work inplace (``True``) or make a new copy (``False``).
By default, 29 | data is a reference to the model (in forward) and model is a reference 30 | to the data (in adjoint). 31 | complex : :obj:`bool`, optional 32 | Input model and data are complex arrays 33 | device : :obj:`str`, optional 34 | Device to be used 35 | togpu : :obj:`tuple`, optional 36 | Move model and data from cpu to gpu prior to applying ``matvec`` and 37 | ``rmatvec``, respectively (only when ``device='gpu'``) 38 | tocpu : :obj:`tuple`, optional 39 | Move data and model from gpu to cpu after applying ``matvec`` and 40 | ``rmatvec``, respectively (only when ``device='gpu'``) 41 | dtype : :obj:`torch.dtype`, optional 42 | Type of elements in input array (if ``complex=True``, provide the 43 | type of the real component of the array) 44 | 45 | Attributes 46 | ---------- 47 | shape : :obj:`tuple` 48 | Operator shape 49 | explicit : :obj:`bool` 50 | Operator contains a matrix that can be solved explicitly (``True``) or 51 | not (``False``) 52 | 53 | Notes 54 | ----- 55 | Refer to :class:`pylops.basicoperators.Identity` for implementation 56 | details. 57 | 58 | """ 59 | def __init__(self, N, M=None, inplace=True, complex=False, device='cpu', 60 | togpu=(False, False), tocpu=(False, False), 61 | dtype=torch.float32): 62 | M = N if M is None else M 63 | self.inplace = inplace 64 | self.shape = (N, M) 65 | self.device = device 66 | self.togpu = togpu 67 | self.tocpu = tocpu 68 | self.dtype = dtype 69 | self.npdtype = numpytype_from_torchtype(self.dtype) 70 | self.complex = complex 71 | self.explicit = False 72 | self.Op = None 73 | 74 | def _matvec(self, x): 75 | if not self.inplace: 76 | if self.complex: 77 | x = x.__graph_copy__(x.real, x.imag) 78 | else: 79 | x = x.clone() 80 | if self.shape[0] == self.shape[1]: 81 | y = x 82 | elif self.shape[0] < self.shape[1]: 83 | if self.complex: 84 | y = x[:, :self.shape[0]] 85 | else: 86 | y = x[:self.shape[0]] 87 | else: 88 | if self.complex: 89 | y = complextorch_fromnumpy(np.zeros(self.shape[0], 90 | dtype=self.npdtype)) 91 | y[:, :self.shape[1]] = x 92 | else: 93 | y = torch.zeros(self.shape[0], dtype=self.dtype) 94 | y[:self.shape[1]] = x 95 | return y 96 | 97 | def _rmatvec(self, x): 98 | if not self.inplace: 99 | if self.complex: 100 | x = x.__graph_copy__(x.real, x.imag) 101 | else: 102 | x = x.clone() 103 | if self.shape[0] == self.shape[1]: 104 | y = x 105 | elif self.shape[0] < self.shape[1]: 106 | if self.complex: 107 | y = complextorch_fromnumpy(np.zeros(self.shape[1], 108 | dtype=self.npdtype)) 109 | y[:, :self.shape[0]] = x 110 | else: 111 | y = torch.zeros(self.shape[1], dtype=self.dtype) 112 | y[:self.shape[0]] = x 113 | else: 114 | if self.complex: 115 | y = x[:, :self.shape[1]] 116 | else: 117 | y = x[:self.shape[1]] 118 | return y 119 | -------------------------------------------------------------------------------- /pylops_gpu/basicoperators/Diagonal.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pytorch_complex_tensor import ComplexTensor 5 | from pylops_gpu import LinearOperator 6 | from pylops_gpu.utils.complex import conj, flatten, reshape, \ 7 | complextorch_fromnumpy 8 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype 9 | 10 | 11 | class Diagonal(LinearOperator): 12 | r"""Diagonal operator. 13 | 14 | Applies element-wise multiplication of the input vector with the vector 15 | ``diag`` in forward and with its complex conjugate in adjoint mode. 
16 | 
17 |     This operator can also broadcast; in this case the input vector is
18 |     reshaped into its dimensions ``dims`` and the element-wise multiplication
19 |     with ``diag`` is performed along the direction ``dir``. Note that the
20 |     vector ``diag`` will need to have size equal to ``dims[dir]``.
21 | 
22 |     Parameters
23 |     ----------
24 |     diag : :obj:`numpy.ndarray` or :obj:`torch.Tensor` or :obj:`pytorch_complex_tensor.ComplexTensor`
25 |         Vector to be used for element-wise multiplication.
26 |     dims : :obj:`list`, optional
27 |         Number of samples for each dimension
28 |         (``None`` if only one dimension is available)
29 |     dir : :obj:`int`, optional
30 |         Direction along which multiplication is applied.
31 |     device : :obj:`str`, optional
32 |         Device to be used
33 |     togpu : :obj:`tuple`, optional
34 |         Move model and data from cpu to gpu prior to applying ``matvec`` and
35 |         ``rmatvec``, respectively (only when ``device='gpu'``)
36 |     tocpu : :obj:`tuple`, optional
37 |         Move data and model from gpu to cpu after applying ``matvec`` and
38 |         ``rmatvec``, respectively (only when ``device='gpu'``)
39 |     dtype : :obj:`torch.dtype`, optional
40 |         Type of elements in input array.
41 | 
42 |     Attributes
43 |     ----------
44 |     shape : :obj:`tuple`
45 |         Operator shape
46 |     explicit : :obj:`bool`
47 |         Operator contains a matrix that can be solved explicitly (``True``) or
48 |         not (``False``)
49 | 
50 |     Notes
51 |     -----
52 |     Refer to :class:`pylops.basicoperators.Diagonal` for implementation
53 |     details.
54 | 
55 |     """
56 |     def __init__(self, diag, dims=None, dir=0, device='cpu',
57 |                  togpu=(False, False), tocpu=(False, False),
58 |                  dtype=torch.float32):
59 |         if not isinstance(diag, (torch.Tensor, ComplexTensor)):
60 |             self.complex = True if np.iscomplexobj(diag) else False
61 |             self.diag = \
62 |                 complextorch_fromnumpy(diag.flatten()) if self.complex \
63 |                 else torch.from_numpy(diag.flatten())
64 |         else:
65 |             self.complex = True if isinstance(diag, ComplexTensor) else False
66 |             self.diag = flatten(diag) if self.complex else diag.flatten()
67 |         if dims is None:
68 |             self.shape = (len(self.diag), len(self.diag))
69 |             self.dims = None
70 |             self.reshape = False
71 |         else:
72 |             diagdims = [1] * len(dims)
73 |             diagdims[dir] = dims[dir]
74 |             self.diag = reshape(self.diag, diagdims) if self.complex \
75 |                 else self.diag.reshape(diagdims)
76 |             self.shape = (np.prod(dims), np.prod(dims))
77 |             self.dims = dims
78 |             self.reshape = True
79 |         self.device = device
80 |         self.togpu = togpu
81 |         self.tocpu = tocpu
82 |         self.dtype = torchtype_from_numpytype(dtype)
83 |         self.explicit = False
84 |         self.Op = None
85 | 
86 |     def _matvec(self, x):
87 |         if not self.reshape:
88 |             y = self.diag * x
89 |         else:
90 |             if self.complex:
91 |                 x = reshape(x, self.dims)
92 |                 y = flatten(self.diag * x)
93 |             else:
94 |                 x = x.reshape(self.dims)
95 |                 y = (self.diag * x).view(-1)
96 |         return y
97 | 
98 |     def _rmatvec(self, x):
99 |         if self.complex:
100 |             diagadj = conj(self.diag)
101 |         else:
102 |             diagadj = self.diag
103 |         if not self.reshape:
104 |             y = diagadj * x
105 |         else:
106 |             if self.complex:
107 |                 x = reshape(x, self.dims)
108 |                 y = flatten(diagadj * x)
109 |             else:
110 |                 x = x.reshape(self.dims)
111 |                 y = (diagadj * x).view(-1)
112 |         return y
113 | 
114 |     def matrix(self):
115 |         """Return diagonal matrix as dense :obj:`torch.Tensor`
116 | 
117 |         Returns
118 |         -------
119 |         densemat : :obj:`torch.Tensor`
120 |             Dense matrix.
121 | 
122 |         """
123 |         densemat = torch.diag(self.diag.squeeze())
124 |         return densemat
125 | 
--------------------------------------------------------------------------------
/examples/plot_derivative.py:
--------------------------------------------------------------------------------
1 | """
2 | Derivatives
3 | ===========
4 | This example shows how to use the suite of derivative operators, namely
5 | :py:class:`pylops_gpu.FirstDerivative`, :py:class:`pylops_gpu.SecondDerivative`
6 | and :py:class:`pylops_gpu.Laplacian`.
7 | 
8 | The derivative operators are very useful when the model to be inverted for
9 | is expected to be smooth in one or more directions. These operators
10 | can in fact be used as part of the regularization term to obtain a smooth
11 | solution.
12 | """
13 | import torch
14 | import numpy as np
15 | import matplotlib.pyplot as plt
16 | import pylops_gpu
17 | 
18 | from pylops_gpu.utils.backend import device
19 | 
20 | dev = device()
21 | print('PyLops-gpu working on %s...' % dev)
22 | plt.close('all')
23 | 
24 | 
25 | ###############################################################################
26 | # Let's start by looking at a simple first-order centered derivative. We
27 | # compute it by means of the :py:class:`pylops_gpu.FirstDerivative` operator.
28 | nx = 10
29 | x = torch.zeros(nx, dtype=torch.float32)
30 | x[int(nx/2)] = 1
31 | 
32 | D1op = pylops_gpu.FirstDerivative(nx, dtype=torch.float32)
33 | 
34 | y_lop = D1op*x
35 | xadj_lop = D1op.H*y_lop
36 | 
37 | fig, axs = plt.subplots(3, 1, figsize=(13, 8))
38 | axs[0].stem(np.arange(nx), x, basefmt='k', linefmt='k',
39 |             markerfmt='ko', use_line_collection=True)
40 | axs[0].set_title('Input', size=20, fontweight='bold')
41 | axs[1].stem(np.arange(nx), y_lop, basefmt='k', linefmt='k',
42 |             markerfmt='ko', use_line_collection=True)
43 | axs[1].set_title('Forward', size=20, fontweight='bold')
44 | axs[2].stem(np.arange(nx), xadj_lop, basefmt='k', linefmt='k',
45 |             markerfmt='ko', use_line_collection=True)
46 | axs[2].set_title('Adjoint', size=20, fontweight='bold')
47 | plt.tight_layout()
48 | 
49 | #############################################
50 | # Let's move onto applying the same first derivative to a 2d array in
51 | # the first direction
52 | nx, ny = 11, 21
53 | A = torch.zeros((nx, ny), dtype=torch.float32)
54 | A[nx//2, ny//2] = 1.
55 | 
56 | D1op = pylops_gpu.FirstDerivative(nx * ny, dims=(nx, ny),
57 |                                   dir=0, dtype=torch.float32)
58 | B = torch.reshape(D1op * A.flatten(), (nx, ny))
59 | 
60 | fig, axs = plt.subplots(1, 2, figsize=(10, 3))
61 | fig.suptitle('First Derivative in 1st direction', fontsize=12,
62 |              fontweight='bold', y=0.95)
63 | im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
64 | axs[0].axis('tight')
65 | axs[0].set_title('x')
66 | plt.colorbar(im, ax=axs[0])
67 | im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
68 | axs[1].axis('tight')
69 | axs[1].set_title('y')
70 | plt.colorbar(im, ax=axs[1])
71 | plt.tight_layout()
72 | plt.subplots_adjust(top=0.8)
73 | 
74 | ###############################################################################
75 | # We can now do the same for the second derivative
76 | A = torch.zeros((nx, ny), dtype=torch.float32)
77 | A[nx//2, ny//2] = 1.
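# Before computing the second derivative, we can also verify that forward
# and adjoint of ``D1op`` are consistent with the dot-test utility shipped
# in ``pylops_gpu.utils`` (the tolerance below is only an indicative choice)
from pylops_gpu.utils import dottest
assert dottest(D1op, nx * ny, nx * ny, tol=1e-4)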
78 | 
79 | D2op = pylops_gpu.SecondDerivative(nx * ny, dims=(nx, ny),
80 |                                    dir=0, dtype=torch.float32)
81 | B = torch.reshape(D2op * A.flatten(), (nx, ny))
82 | 
83 | fig, axs = plt.subplots(1, 2, figsize=(10, 3))
84 | fig.suptitle('Second Derivative in 1st direction', fontsize=12,
85 |              fontweight='bold', y=0.95)
86 | im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
87 | axs[0].axis('tight')
88 | axs[0].set_title('x')
89 | plt.colorbar(im, ax=axs[0])
90 | im = axs[1].imshow(B, interpolation='nearest', cmap='rainbow')
91 | axs[1].axis('tight')
92 | axs[1].set_title('y')
93 | plt.colorbar(im, ax=axs[1])
94 | plt.tight_layout()
95 | plt.subplots_adjust(top=0.8)
96 | 
97 | 
98 | ###############################################################################
99 | # And finally we use the symmetrical Laplacian operator as well
100 | # as an asymmetrical version of it (by adding more weight to the
101 | # derivative along one direction)
102 | 
103 | # symmetrical
104 | L2symop = pylops_gpu.Laplacian(dims=(nx, ny), weights=(1, 1),
105 |                                dtype=torch.float32)
106 | 
107 | # asymmetrical
108 | L2asymop = pylops_gpu.Laplacian(dims=(nx, ny), weights=(3, 1),
109 |                                 dtype=torch.float32)
110 | 
111 | Bsym = torch.reshape(L2symop * A.flatten(), (nx, ny))
112 | Basym = torch.reshape(L2asymop * A.flatten(), (nx, ny))
113 | 
114 | fig, axs = plt.subplots(1, 3, figsize=(10, 3))
115 | fig.suptitle('Laplacian', fontsize=12,
116 |              fontweight='bold', y=0.95)
117 | im = axs[0].imshow(A, interpolation='nearest', cmap='rainbow')
118 | axs[0].axis('tight')
119 | axs[0].set_title('x')
120 | plt.colorbar(im, ax=axs[0])
121 | im = axs[1].imshow(Bsym, interpolation='nearest', cmap='rainbow')
122 | axs[1].axis('tight')
123 | axs[1].set_title('y sym')
124 | plt.colorbar(im, ax=axs[1])
125 | im = axs[2].imshow(Basym, interpolation='nearest', cmap='rainbow')
126 | axs[2].axis('tight')
127 | axs[2].set_title('y asym')
128 | plt.colorbar(im, ax=axs[2])
129 | plt.tight_layout()
130 | plt.subplots_adjust(top=0.8)
131 | 
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 | import os
4 | import datetime
5 | import sphinx_rtd_theme
6 | import sphinx_gallery
7 | from sphinx_gallery.sorting import ExampleTitleSortKey
8 | from pylops_gpu import __version__
9 | 
10 | # Sphinx needs to be able to import the package to use autodoc and get the version number
11 | sys.path.insert(0, os.path.abspath('../../pylops_gpu'))
12 | 
13 | extensions = [
14 |     'sphinx.ext.autodoc',
15 |     'sphinx.ext.autosummary',
16 |     'sphinx.ext.coverage',
17 |     'sphinx.ext.mathjax',
18 |     'sphinx.ext.doctest',
19 |     'sphinx.ext.viewcode',
20 |     'sphinx.ext.extlinks',
21 |     "sphinx.ext.intersphinx",
22 |     'matplotlib.sphinxext.plot_directive',
23 |     'numpydoc',
24 |     'nbsphinx',
25 |     'sphinx_gallery.gen_gallery',
26 |     #'sphinx.ext.napoleon',
27 | ]
28 | 
29 | # intersphinx configuration
30 | intersphinx_mapping = {
31 |     "python": ("https://docs.python.org/3/", None),
32 |     "numpy": ("https://docs.scipy.org/doc/numpy/", None),
33 |     "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
34 |     "sklearn": ("http://scikit-learn.org/stable/", None),
35 |     "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
36 |     "matplotlib": ("https://matplotlib.org/", None),
37 |     "pyfftw": ("https://pyfftw.readthedocs.io/en/latest/", None),
38 |     "spgl1": ("https://spgl1.readthedocs.io/en/latest/", None),
39 |     "pytorch": ("https://pytorch.org/docs/stable/", None),
40 |     "pylops": ("https://pylops.readthedocs.io/en/latest/", None),
41 | }
42 | 
43 | ## Generate autodoc stubs with summaries from code
44 | autosummary_generate = True
45 | 
46 | ## Include Python objects as they appear in source files
47 | autodoc_member_order = 'bysource'
48 | 
49 | ## Default flags used by autodoc directives
50 | autodoc_default_flags = ['members']
51 | 
52 | ## Mocking packages that are not installed when building doc
53 | #autodoc_mock_imports = ["torch"]
54 | 
55 | numpydoc_show_class_members = False
56 | numpydoc_show_inherited_class_members = False
57 | numpydoc_class_members_toctree = False
58 | 
59 | 
60 | sphinx_gallery_conf = {
61 |     # path to your examples scripts
62 |     'examples_dirs': ['../../examples', '../../tutorials',],
63 |     # path where to save gallery generated examples
64 |     'gallery_dirs': ['gallery', 'tutorials'],
65 |     'filename_pattern': r'\.py',
66 |     # Remove the "Download all examples" button from the top level gallery
67 |     'download_all_examples': False,
68 |     # Sort gallery example by file name instead of number of lines (default)
69 |     'within_subsection_order': ExampleTitleSortKey,
70 |     # directory where function granular galleries are stored
71 |     'backreferences_dir': 'api/generated/backreferences',
72 |     # Modules for which function level galleries are created.
73 |     'doc_module': 'pylops_gpu',
74 |     # Insert links to documentation of objects in the examples
75 |     'reference_url': {'pylops_gpu': None}
76 | }
77 | 
78 | 
79 | # Always show the source code that generates a plot
80 | plot_include_source = True
81 | plot_formats = ['png']
82 | 
83 | # Sphinx project configuration
84 | templates_path = ['_templates']
85 | exclude_patterns = ['_build', '**.ipynb_checkpoints']
86 | source_suffix = '.rst'
87 | # The encoding of source files.
88 | source_encoding = 'utf-8-sig'
89 | master_doc = 'index'
90 | 
91 | # General information about the project
92 | year = datetime.date.today().year
93 | project = 'PyLops-GPU'
94 | copyright = '{}, Matteo Ravasi'.format(year)
95 | 
96 | # Version
97 | version = __version__
98 | if len(version.split('+')) > 1 or version == 'unknown':
99 |     version = 'dev'
100 | 
101 | # These enable substitutions using |variable| in the rst files
102 | rst_epilog = """
103 | ..
|year| replace:: {year} 104 | """.format(year=year) 105 | 106 | html_last_updated_fmt = '%b %d, %Y' 107 | html_title = 'PyLops-gpu' 108 | html_short_title = 'PyLops-gpu' 109 | html_logo = '_static/g-pylops.png' 110 | html_static_path = ['_static'] 111 | html_extra_path = [] 112 | pygments_style = 'default' 113 | add_function_parentheses = False 114 | html_show_sourcelink = False 115 | html_show_sphinx = True 116 | html_show_copyright = True 117 | 118 | # Theme config 119 | html_theme = "sphinx_rtd_theme" 120 | html_theme_options = { 121 | 'logo_only': True, 122 | 'display_version': True, 123 | } 124 | html_context = { 125 | 'menu_links_name': 'Repository', 126 | 'menu_links': [ 127 | (' Source Code', 'https://github.com/PyLops/pylops-gpu'), 128 | (' Contributing', 'https://github.com/PyLops/pylops-gpu/blob/master/CONTRIBUTING.md'), 129 | ], 130 | # Custom variables to enable "Improve this page"" and "Download notebook" 131 | # links 132 | 'doc_path': 'docs/source', 133 | 'galleries': sphinx_gallery_conf['gallery_dirs'], 134 | 'gallery_dir': dict(zip(sphinx_gallery_conf['gallery_dirs'], 135 | sphinx_gallery_conf['examples_dirs'])), 136 | 'github_project': 'PyLops', 137 | 'github_repo': 'pylops-gpu', 138 | 'github_version': 'master', 139 | } 140 | 141 | 142 | # Load the custom CSS files (needs sphinx >= 1.6 for this to work) 143 | def setup(app): 144 | app.add_stylesheet("style.css") -------------------------------------------------------------------------------- /pylops_gpu/basicoperators/MatrixMult.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from pytorch_complex_tensor import ComplexTensor 5 | from pylops_gpu.LinearOperator import LinearOperator 6 | from pylops_gpu.utils.complex import conj, reshape, flatten 7 | from pylops_gpu.utils.torch2numpy import numpytype_from_torchtype, \ 8 | torchtype_from_numpytype 9 | 10 | 11 | class MatrixMult(LinearOperator): 12 | r"""Matrix multiplication. 13 | 14 | Simple wrapper to :py:func:`torch.matmul` for 15 | an input matrix :math:`\mathbf{A}`. 16 | 17 | Parameters 18 | ---------- 19 | A : :obj:`torch.Tensor` or :obj:`pytorch_complex_tensor.ComplexTensor` or :obj:`numpy.ndarray` 20 | Matrix. 21 | dims : :obj:`tuple`, optional 22 | Number of samples for each other dimension of model 23 | (model/data will be reshaped and ``A`` applied multiple times 24 | to each column of the model/data). 25 | device : :obj:`str`, optional 26 | Device to be used 27 | togpu : :obj:`tuple`, optional 28 | Move model and data from cpu to gpu prior to applying ``matvec`` and 29 | ``rmatvec``, respectively (only when ``device='gpu'``) 30 | tocpu : :obj:`tuple`, optional 31 | Move data and model from gpu to cpu after applying ``matvec`` and 32 | ``rmatvec``, respectively (only when ``device='gpu'``) 33 | dtype : :obj:`torch.dtype` or :obj:`np.dtype`, optional 34 | Type of elements in input array. 35 | 36 | Attributes 37 | ---------- 38 | shape : :obj:`tuple` 39 | Operator shape 40 | explicit : :obj:`bool` 41 | Operator contains a matrix that can be solved explicitly 42 | (``True``) or not (``False``) 43 | 44 | Notes 45 | ----- 46 | Refer to :class:`pylops.basicoperators.MatrixMult` for 47 | implementation details. 
48 | 
49 |     """
50 |     def __init__(self, A, dims=None, device='cpu',
51 |                  togpu=(False, False), tocpu=(False, False),
52 |                  dtype=torch.float32):
53 |         # convert A to torch tensor if provided as numpy array
54 |         if not isinstance(A, (torch.Tensor, ComplexTensor)):
55 |             dtype = numpytype_from_torchtype(dtype)
56 |             self.A = \
57 |                 torch.from_numpy(A.astype(dtype)).to(device)
58 |             self.complex = True if np.iscomplexobj(A) else False
59 |         else:
60 |             self.complex = True if isinstance(A, ComplexTensor) else False
61 |             self.A = A
62 |         if dims is None:
63 |             self.reshape = False
64 |             self.shape = A.shape
65 |         else:
66 |             if isinstance(dims, int):
67 |                 dims = (dims, )
68 |             self.reshape = True
69 |             self.dims = np.array(dims, dtype=int)
70 |             self.shape = (A.shape[0]*np.prod(self.dims),
71 |                           A.shape[1]*np.prod(self.dims))
72 |             self.newshape = \
73 |                 (tuple(np.insert([np.prod(self.dims)], 0, self.A.shape[1])),
74 |                  tuple(np.insert([np.prod(self.dims)], 0, self.A.shape[0])))
75 | 
76 |         # note: self.complex has already been set above for both input types
77 |         if self.complex:
78 |             self.Ac = conj(A).t()
79 |         self.device = device
80 |         self.togpu = togpu
81 |         self.tocpu = tocpu
82 |         self.dtype = torchtype_from_numpytype(dtype)
83 |         self.explicit = True
84 |         self.Op = None
85 | 
86 |     def _matvec(self, x):
87 |         if self.reshape:
88 |             x = reshape(x, self.newshape[0]) if self.complex else \
89 |                 torch.reshape(x, self.newshape[0])
90 |         else:
91 |             if self.complex:
92 |                 x = x.t()
93 |         if self.complex:
94 |             y = self.A.mm(x)
95 |             if not self.reshape:
96 |                 y = y.t()
97 |         else:
98 |             y = self.A.matmul(x)
99 |         if self.reshape:
100 |             y = flatten(y) if self.complex else y.view(-1)
101 |         return y
102 | 
103 |     def _rmatvec(self, x):
104 |         if self.reshape:
105 |             x = reshape(x, self.newshape[1]) if self.complex else \
106 |                 torch.reshape(x, self.newshape[1])
107 |         else:
108 |             if self.complex:
109 |                 x = x.t()
110 |         if self.complex:
111 |             y = self.Ac.mm(x)
112 |             if not self.reshape:
113 |                 y = y.t()
114 |         else:
115 |             y = self.A.t().matmul(x)
116 |         if self.reshape:
117 |             y = flatten(y) if self.complex else y.view(-1)
118 |         return y
119 | 
120 |     def inv(self):
121 |         r"""Return the inverse of :math:`\mathbf{A}`.
122 | 
123 |         Returns
124 |         -------
125 |         Ainv : :obj:`torch.Tensor`
126 |             Inverse matrix.
127 | 
128 |         """
129 |         Ainv = torch.inverse(self.A)
130 |         return Ainv
131 | 
132 | 
133 | def aslinearoperator(A, device='cpu'):
134 |     """Return A as a LinearOperator.
135 | 
136 |     ``A`` may already be a :class:`pylops_gpu.LinearOperator` or a
137 |     :obj:`torch.Tensor`.
138 | 139 | """ 140 | if isinstance(A, LinearOperator): 141 | return A 142 | else: 143 | return MatrixMult(A, device=device) 144 | -------------------------------------------------------------------------------- /pytests/test_convolve.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import numpy as np 4 | import torch 5 | 6 | from numpy.testing import assert_array_equal, assert_array_almost_equal 7 | from scipy.signal import triang 8 | from pylops.signalprocessing import Convolve1D 9 | 10 | from pylops_gpu.utils.backend import device 11 | from pylops_gpu.utils import dottest 12 | from pylops_gpu.signalprocessing import Convolve1D as gConvolve1D 13 | from pylops_gpu.optimization.cg import cg 14 | 15 | # filters 16 | nfilt = (5, 7) 17 | h1 = torch.from_numpy(triang(nfilt[0], sym=True).astype(np.float32)) 18 | h2 = torch.from_numpy(np.outer(triang(nfilt[0], sym=True), 19 | triang(nfilt[1], sym=True)).astype(np.float32)) 20 | 21 | par1_1d = {'nz': 21, 'ny': 51, 'nx': 51, 22 | 'offset': nfilt[0] // 2, 'dir': 0} # zero phase, first direction 23 | par2_1d = {'nz': 21, 'ny': 51, 'nx': 51, 24 | 'offset': 2, 'dir': 0} # non-zero phase, first direction 25 | par3_1d = {'nz': 21, 'ny': 51, 'nx': 51, 26 | 'offset': nfilt[0] // 2, 'dir': 1} # zero phase, second direction 27 | par4_1d = {'nz': 21, 'ny': 51, 'nx': 51, 28 | 'offset': nfilt[0] // 2 - 1, 29 | 'dir': 1} # non-zero phase, second direction 30 | par5_1d = {'nz': 21, 'ny': 51, 'nx': 51, 31 | 'offset': nfilt[0] // 2, 'dir': 1} # zero phase, second direction 32 | par6_1d = {'nz': 21, 'ny': 61, 'nx': 51, 33 | 'offset': nfilt[0] // 2 - 1, 34 | 'dir': 2} # non-zero phase, third direction 35 | 36 | dev = device() 37 | np.random.seed(0) 38 | torch.manual_seed(0) 39 | 40 | 41 | @pytest.mark.parametrize("par", [(par1_1d), (par2_1d), (par3_1d), (par4_1d)]) 42 | def test_Convolve1D(par): 43 | """Dot-test, comparison with pylops and inversion for Convolve1D 44 | operator 45 | """ 46 | np.random.seed(10) 47 | 48 | #1D 49 | if par['dir'] == 0: 50 | gCop = gConvolve1D(par['nx'], h=h1, offset=par['offset'], 51 | dtype=torch.float32) 52 | assert dottest(gCop, par['nx'], par['nx'], tol=1e-3) 53 | 54 | x = torch.zeros((par['nx']), dtype=torch.float32) 55 | x[par['nx']//2] = 1. 56 | 57 | # comparison with pylops 58 | Cop = Convolve1D(par['nx'], h=h1.cpu().numpy(), offset=par['offset'], 59 | dtype='float32') 60 | assert_array_almost_equal(gCop * x, Cop * x.cpu().numpy(), decimal=3) 61 | #assert_array_equal(gCop * x, Cop * x.cpu().numpy()) 62 | 63 | # inversion 64 | if par['offset'] == nfilt[0]//2: 65 | # zero phase 66 | xcg = cg(gCop, gCop * x, niter=100)[0] 67 | else: 68 | # non-zero phase 69 | xcg = cg(gCop.H * gCop, gCop.H * (gCop * x), niter=100)[0] 70 | assert_array_almost_equal(x, xcg, decimal=1) 71 | 72 | # 1D on 2D 73 | gCop = gConvolve1D(par['ny'] * par['nx'], h=h1, offset=par['offset'], 74 | dims=(par['ny'], par['nx']), dir=par['dir'], 75 | dtype=torch.float32) 76 | assert dottest(gCop, par['ny'] * par['nx'], 77 | par['ny'] * par['nx'], tol=1e-3) 78 | 79 | x = torch.zeros((par['ny'], par['nx']), dtype=torch.float32) 80 | x[int(par['ny'] / 2 - 3):int(par['ny'] / 2 + 3), 81 | int(par['nx'] / 2 - 3):int(par['nx'] / 2 + 3)] = 1. 
82 |     x = x.flatten()
83 | 
84 |     # comparison with pylops
85 |     Cop = Convolve1D(par['ny'] * par['nx'], h=h1.cpu().numpy(),
86 |                      offset=par['offset'],
87 |                      dims=(par['ny'], par['nx']), dir=par['dir'],
88 |                      dtype='float32')
89 |     assert_array_almost_equal(gCop * x, Cop * x.cpu().numpy(), decimal=3)
90 |     # assert_array_equal(gCop * x, Cop * x.cpu().numpy())
91 | 
92 |     # inversion
93 |     if par['offset'] == nfilt[0] // 2:
94 |         # zero phase
95 |         xcg = cg(gCop, gCop * x, niter=100)[0]
96 |     else:
97 |         # non-zero phase
98 |         xcg = cg(gCop.H * gCop, gCop.H * (gCop * x), niter=100)[0]
99 |     assert_array_almost_equal(x, xcg, decimal=1)
100 | 
101 |     # 1D on 3D
102 |     gCop = gConvolve1D(par['nz'] * par['ny'] * par['nx'], h=h1,
103 |                        offset=par['offset'],
104 |                        dims=(par['nz'], par['ny'], par['nx']), dir=par['dir'],
105 |                        dtype=torch.float32)
106 |     assert dottest(gCop, par['nz'] * par['ny'] * par['nx'],
107 |                    par['nz'] * par['ny'] * par['nx'], tol=1e-3)
108 | 
109 |     x = torch.zeros((par['nz'], par['ny'], par['nx']), dtype=torch.float32)
110 |     x[int(par['nz'] / 2 - 3):int(par['nz'] / 2 + 3),
111 |       int(par['ny'] / 2 - 3):int(par['ny'] / 2 + 3),
112 |       int(par['nx'] / 2 - 3):int(par['nx'] / 2 + 3)] = 1.
113 |     x = x.flatten()
114 | 
115 |     # comparison with pylops
116 |     Cop = Convolve1D(par['nz'] * par['ny'] * par['nx'], h=h1.cpu().numpy(),
117 |                      offset=par['offset'],
118 |                      dims=(par['nz'], par['ny'], par['nx']), dir=par['dir'],
119 |                      dtype='float32')
120 |     assert_array_almost_equal(gCop * x, Cop * x.cpu().numpy(), decimal=3)
121 | 
122 |     # inversion
123 |     if par['offset'] == nfilt[0] // 2:
124 |         # zero phase
125 |         xcg = cg(gCop, gCop * x, niter=100)[0]
126 |     else:
127 |         # non-zero phase
128 |         xcg = cg(gCop.H * gCop, gCop.H * (gCop * x), niter=100)[0]
129 |     assert_array_almost_equal(x, xcg, decimal=1)
130 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![PyLops-gpu](https://github.com/PyLops/pylops-gpu/blob/master/docs/source/_static/g-pylops_b.png)
2 | 
3 | [![PyPI version](https://badge.fury.io/py/pylops-gpu.svg)](https://badge.fury.io/py/pylops-gpu)
4 | [![Build Status](https://travis-ci.com/PyLops/pylops-gpu.svg?branch=master)](https://travis-ci.com/PyLops/pylops-gpu)
5 | [![AzureDevOps Status](https://dev.azure.com/matteoravasi/PyLops/_apis/build/status/PyLops.pylops-gpu?branchName=master)](https://dev.azure.com/matteoravasi/PyLops/_build/latest?definitionId=7&branchName=master)
6 | [![Documentation Status](https://readthedocs.org/projects/pylops-gpu/badge/?version=latest)](https://pylops-gpu.readthedocs.io/en/latest/?badge=latest)
7 | [![OS-support](https://img.shields.io/badge/OS-linux,osx-850A8B.svg)](https://github.com/PyLops/pylops-gpu)
8 | [![Slack Status](https://img.shields.io/badge/chat-slack-green.svg)](https://pylops.slack.com)
9 | 
10 | 
11 | :vertical_traffic_light: :vertical_traffic_light: This library is unmaintained!
12 | If interested in running PyLops on GPUs, please use the cupy backend from the PyLops main library :vertical_traffic_light: :vertical_traffic_light:
13 | 
14 | ## Objective
15 | This library is an extension of [PyLops](https://pylops.readthedocs.io/en/latest/)
16 | to run operators on GPUs.
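Throughout the snippets below, the `pylops_gpu.utils.backend.device` helper is used to select the computing device: it returns an identifier that points to the GPU when one is available and falls back to the CPU otherwise, and the returned value can be passed directly to `Tensor.to`. A minimal sketch of the pattern (the tensor size here is purely illustrative):

```python
import torch
from pylops_gpu.utils.backend import device

dev = device()  # GPU if available, CPU otherwise
x = torch.ones(10, dtype=torch.float32).to(dev)
print('PyLops-gpu working on %s...' % dev)
```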
17 | 18 | As much as [numpy](http://www.numpy.org) and [scipy](http://www.scipy.org/scipylib/index.html) lie 19 | at the core of the parent project PyLops, PyLops-GPU heavily builds on top of 20 | [PyTorch](http://pytorch.org) and takes advantage of the same optimized 21 | tensor computations used in PyTorch for deep learning using GPUs and CPUs. 22 | 23 | Doing so, linear operators can be computed on GPUs. 24 | 25 | Here is a simple example showing how a diagonal operator can be created, 26 | applied and inverted using PyLops: 27 | ```python 28 | import numpy as np 29 | from pylops import Diagonal 30 | 31 | n = int(1e6) 32 | x = np.ones(n) 33 | d = np.arange(n) + 1. 34 | 35 | Dop = Diagonal(d) 36 | 37 | # y = Dx 38 | y = Dop*x 39 | ``` 40 | 41 | and similarly using PyLops-gpu: 42 | ```python 43 | import numpy as np 44 | import torch 45 | from pylops_gpu.utils.backend import device 46 | from pylops_gpu import Diagonal 47 | 48 | dev = device() 49 | 50 | n = int(1e6) 51 | x = torch.ones(n, dtype=torch.float64).to(dev) 52 | d = (torch.arange(0, n, dtype=torch.float64) + 1.).to(dev) 53 | 54 | Dop = Diagonal(d, device=dev) 55 | 56 | # y = Dx 57 | y = Dop*x 58 | ``` 59 | 60 | Running these two snippets of code in Google Colab with GPU enabled gives a 50+ 61 | speed up for the forward pass. 62 | 63 | As a by-product of implementing PyLops linear operators in PyTorch, we can easily 64 | chain our operators with any nonlinear mathematical operation (e.g., log, sin, tan, pow, ...) 65 | as well as with operators from the ``torch.nn`` submodule and obtain *Automatic 66 | Differentiation* (AD) for the entire chain. Since the gradient of a linear 67 | operator is simply its *adjoint*, we have implemented a single class, 68 | `pylops_gpu.TorchOperator`, which can wrap any linear operator 69 | from PyLops and PyLops-gpu libraries and return a `torch.autograd.Function` object. 70 | 71 | 72 | ## Project structure 73 | This repository is organized as follows: 74 | * **pylops_gpu**: python library containing various GPU-powered linear operators and auxiliary routines 75 | * **pytests**: set of pytests 76 | * **testdata**: sample datasets used in pytests and documentation 77 | * **docs**: sphinx documentation 78 | * **examples**: set of python script examples for each linear operator to be embedded in documentation using sphinx-gallery 79 | * **tutorials**: set of python script tutorials to be embedded in documentation using sphinx-gallery 80 | 81 | ## Getting started 82 | 83 | You need **Python 3.5 or greater**. 84 | 85 | #### From PyPi 86 | 87 | If you want to use PyLops-gpu within your codes, 88 | install it in your Python-gpu environment by typing the following command in your terminal: 89 | 90 | ``` 91 | pip install pylops-gpu 92 | ``` 93 | 94 | Open a python terminal and type: 95 | 96 | ``` 97 | import pylops_gpu 98 | ``` 99 | 100 | If you do not see any error, you should be good to go, enjoy! 101 | 102 | **Note**: you may see an error if `pytorch-complex-tensor` has not been 103 | previously installed. In that case first run 104 | `pip install pytorch-complex-tensor` and then install pylops-gpu 105 | 106 | #### From Github 107 | 108 | You can also directly install from the master node 109 | 110 | ``` 111 | pip install git+https://git@github.com/PyLops/pylops-gpu.git@master 112 | ``` 113 | 114 | ## Contributing 115 | *Feel like contributing to the project? 
Adding new operators or tutorials?*
116 | 
117 | Follow the instructions from [PyLops official documentation](https://pylops.readthedocs.io/en/latest/contributing.html).
118 | 
119 | ## Documentation
120 | The official documentation of PyLops-gpu is available [here](https://pylops-gpu.readthedocs.io/).
121 | 
122 | Visit this page to get started learning about different operators and their applications as well as how to
123 | create new operators yourself and make it to the ``Contributors`` list.
124 | 
125 | Moreover, if you have installed PyLops-gpu using the *developer environment* you can also build the documentation locally by
126 | typing the following command:
127 | ```
128 | make doc
129 | ```
130 | Once the documentation is created, you can make any change to the source code and rebuild the documentation by
131 | simply typing
132 | ```
133 | make docupdate
134 | ```
135 | Note that if a new example or tutorial is created (and if any change is made to a previously available example or tutorial)
136 | you are required to rebuild the entire documentation before your changes will be visible.
137 | 
138 | 
139 | ## History
140 | PyLops-GPU was initially written and is currently maintained by [Equinor](https://www.equinor.com).
141 | It is an extension of [PyLops](https://pylops.readthedocs.io/en/latest/) for large-scale optimization with
142 | *GPU-driven* linear operators that can be tailored to our needs, and a contribution to the free software community.
143 | 
144 | 
145 | ## Contributors
146 | * Matteo Ravasi, mrava87
147 | * Francesco Picetti, fpicetti
--------------------------------------------------------------------------------
/pylops_gpu/signalprocessing/Convolve1D.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | 
4 | from torch.nn.functional import pad
5 | from pylops_gpu import LinearOperator
6 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype
7 | 
8 | 
9 | class Convolve1D(LinearOperator):
10 |     r"""1D convolution operator.
11 | 
12 |     Apply one-dimensional convolution with a compact filter to model (and data)
13 |     along a specific direction of a multi-dimensional array depending on the
14 |     choice of ``dir``.
15 | 
16 |     Parameters
17 |     ----------
18 |     N : :obj:`int`
19 |         Number of samples in model.
20 |     h : :obj:`torch.Tensor` or :obj:`numpy.ndarray`
21 |         1d compact filter to be convolved to input signal
22 |     offset : :obj:`int`
23 |         Index of the center of the compact filter
24 |     dims : :obj:`tuple`
25 |         Number of samples for each dimension
26 |         (``None`` if only one dimension is available)
27 |     dir : :obj:`int`, optional
28 |         Direction along which convolution is applied
29 |     zero_edges : :obj:`bool`, optional
30 |         Zero output at edges (``True``) or not (``False``)
31 |     device : :obj:`str`, optional
32 |         Device to be used
33 |     togpu : :obj:`tuple`, optional
34 |         Move model and data from cpu to gpu prior to applying ``matvec`` and
35 |         ``rmatvec``, respectively (only when ``device='gpu'``)
36 |     tocpu : :obj:`tuple`, optional
37 |         Move data and model from gpu to cpu after applying ``matvec`` and
38 |         ``rmatvec``, respectively (only when ``device='gpu'``)
39 |     dtype : :obj:`torch.dtype`, optional
40 |         Type of elements in input array.
41 | 42 | Attributes 43 | ---------- 44 | shape : :obj:`tuple` 45 | Operator shape 46 | explicit : :obj:`bool` 47 | Operator contains a matrix that can be solved 48 | explicitly (``True``) or not (``False``) 49 | 50 | Notes 51 | ----- 52 | Refer to :class:`pylops.signalprocessing.Convolve1D` for implementation 53 | details. 54 | 55 | """ 56 | def __init__(self, N, h, offset=0, dims=None, dir=0, zero_edges=False, 57 | device='cpu', togpu=(False, False), tocpu=(False, False), 58 | dtype=torch.float32): 59 | # convert dtype to torch.dtype 60 | if not isinstance(dtype, torch.dtype): 61 | dtype = torchtype_from_numpytype(dtype) 62 | 63 | # convert h to torch if numpy 64 | if not isinstance(h, torch.Tensor): 65 | h = torch.from_numpy(h).to(device) 66 | self.nh = h.size()[0] 67 | self.h = h.reshape(1, 1, self.nh) 68 | self.offset = 2*(self.nh // 2 - int(offset)) 69 | if self.offset != 0: 70 | self.h = pad(self.h, (self.offset if self.offset > 0 else 0, 71 | -self.offset if self.offset < 0 else 0), 72 | mode='constant') 73 | self.padding = int(self.nh // 2 + np.abs(self.offset) // 2) 74 | self.dir = dir 75 | if dims is None: 76 | self.dims = (N, ) 77 | self.reshape = False 78 | elif len(dims) == 1: 79 | self.dims = dims 80 | self.reshape = False 81 | else: 82 | if np.prod(dims) != N: 83 | raise ValueError('product of dims must equal N!') 84 | else: 85 | self.dims = tuple(dims) 86 | self.otherdims = list(dims) 87 | self.otherdims.pop(self.dir) 88 | self.otherdims_prod = np.prod(self.dims) // self.dims[self.dir] 89 | self.dims_permute = list(self.dims) 90 | self.dims_permute[self.dir], self.dims_permute[-1] = \ 91 | self.dims_permute[-1], self.dims_permute[self.dir] 92 | self.dims_permute = tuple(self.dims_permute) 93 | self.permute = np.arange(0, len(self.dims)) 94 | self.permute[self.dir], self.permute[-1] = \ 95 | self.permute[-1], self.permute[self.dir] 96 | self.permute = tuple(self.permute) 97 | self.reshape = True 98 | self.shape = (np.prod(self.dims), np.prod(self.dims)) 99 | self.zero_edges = zero_edges 100 | self.device = device 101 | self.togpu = togpu 102 | self.tocpu = tocpu 103 | self.dtype = dtype 104 | self.explicit = False 105 | self.Op = None 106 | 107 | def _matvec(self, x): 108 | if not self.reshape: 109 | x = x.reshape(1, 1, self.dims[0]) 110 | y = torch.torch.conv_transpose1d(x, self.h, padding=self.padding) 111 | if self.zero_edges: 112 | y[..., :self.nh // 2] = 0 113 | y[..., -self.nh // 2 + 1:] = 0 114 | else: 115 | x1 = x.clone() # need to clone to avoid modifying x 116 | x1 = torch.reshape(x1, self.dims).permute(self.permute) 117 | y = torch.torch.conv_transpose1d(x1.reshape(self.otherdims_prod, 1, 118 | self.dims[self.dir]), 119 | self.h, padding=self.padding) 120 | if self.zero_edges: 121 | y[..., :self.nh // 2] = 0 122 | y[..., -self.nh // 2 + 1:] = 0 123 | y = y.reshape(self.dims_permute).permute(self.permute) 124 | y = y.flatten() 125 | return y 126 | 127 | def _rmatvec(self, x): 128 | if not self.reshape: 129 | x = x.reshape(1, 1, self.dims[0]) 130 | if self.zero_edges: 131 | x[..., :self.nh // 2] = 0 132 | x[..., -self.nh // 2 + 1:] = 0 133 | y = torch.torch.conv1d(x, self.h, padding=self.padding) 134 | 135 | else: 136 | x1 = x.clone() # need to clone to avoid modifying x 137 | x1 = torch.reshape(x1, self.dims).permute(self.permute) 138 | if self.zero_edges: 139 | x1[..., :self.nh // 2] = 0 140 | x1[..., -self.nh // 2 + 1:] = 0 141 | y = torch.torch.conv1d(x1.reshape(self.otherdims_prod, 1, 142 | self.dims[self.dir]), 143 | self.h, padding=self.padding) 144 | y = 
y.reshape(self.dims_permute).permute(self.permute) 145 | y = y.flatten() 146 | return y 147 | -------------------------------------------------------------------------------- /pytests/test_poststack.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | import torch 4 | 5 | from numpy.testing import assert_array_almost_equal 6 | from scipy.signal import filtfilt 7 | 8 | from pylops.utils.wavelets import ricker 9 | from pylops_gpu.utils.backend import device 10 | from pylops_gpu.utils import dottest 11 | from pylops_gpu.avo.poststack import PoststackLinearModelling, PoststackInversion 12 | 13 | dev = device() 14 | np.random.seed(10) 15 | torch.manual_seed(0) 16 | 17 | # params 18 | dt0 = 0.004 19 | ntwav = 41 20 | nsmooth = 50 21 | 22 | # 1d model 23 | nt0 = 201 24 | t0 = np.arange(nt0) * dt0 25 | vp = 1200 + np.arange(nt0) + \ 26 | filtfilt(np.ones(5)/5., 1, np.random.normal(0, 80, nt0)) 27 | rho = 1000 + vp + filtfilt(np.ones(5)/5., 1, np.random.normal(0, 30, nt0)) 28 | 29 | m = np.log(vp*rho) 30 | mback = filtfilt(np.ones(nsmooth)/float(nsmooth), 1, m) 31 | 32 | # 2d model 33 | inputfile = 'testdata/avo/poststack_model.npz' 34 | model = np.load(inputfile) 35 | x, z, m2d = model['x'][::3], model['z'][::3], \ 36 | np.log(model['model'][::3, ::3]) 37 | nx, nz = len(x), len(z) 38 | 39 | mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m2d, axis=0) 40 | mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, mback2d, axis=1) 41 | 42 | # stationary wavelet 43 | wav = ricker(t0[:ntwav//2+1], 20)[0] 44 | 45 | # non-stationary wavelet 46 | f0s = np.flip(np.arange(nt0) * 0.05 + 3) 47 | wavs = np.array([ricker(t0[:ntwav], f0)[0] for f0 in f0s]) 48 | wavc = np.argmax(wavs[0]) 49 | 50 | # convert to torch 51 | wav = torch.from_numpy(wav.astype(np.float32)).to(dev) 52 | wavs = torch.from_numpy(wavs.astype(np.float32)).to(dev) 53 | m = torch.from_numpy(m.astype(np.float32)).to(dev) 54 | mback = torch.from_numpy(mback.astype(np.float32)).to(dev) 55 | m2d = torch.from_numpy(m2d.astype(np.float32)).to(dev) 56 | mback2d = torch.from_numpy(mback2d.astype(np.float32)).to(dev) 57 | 58 | 59 | par1 = {'epsR': None, 'epsRL1': None, 'epsI': None, 60 | 'simultaneous': False} # unregularized 61 | par2 = {'epsR': 1e-4, 'epsRL1': None, 'epsI': 1e-6, 62 | 'simultaneous': False} # regularized 63 | par3 = {'epsR': None, 'epsRL1': None, 'epsI': None, 64 | 'simultaneous': True} # unregularized, simultaneous 65 | par4 = {'epsR': 1e-4, 'epsRL1': None, 'epsI': 1e-6, 66 | 'simultaneous': True} # regularized, simultaneous 67 | #par5 = {'epsR': 1e-4, 'epsRL1': 1e-1, 'epsI': 1e-6, 68 | # 'simultaneous': True} # blocky, simultaneous 69 | 70 | 71 | @pytest.mark.parametrize("par", [(par1), (par2)]) 72 | def test_PoststackLinearModelling1d(par): 73 | """Dot-test, comparison of dense vs lop implementation and 74 | inversion for PoststackLinearModelling in 1d with stationary wavelet 75 | """ 76 | # Dense 77 | PPop_dense = PoststackLinearModelling(wav, nt0=nt0, explicit=True) 78 | assert dottest(PPop_dense, nt0, nt0, tol=1e-4) 79 | 80 | # Linear operator 81 | PPop = PoststackLinearModelling(wav, nt0=nt0, explicit=False) 82 | assert dottest(PPop, nt0, nt0, tol=1e-4) 83 | 84 | # Compare data 85 | d = PPop * m.flatten() 86 | d_dense = PPop_dense * m.t().flatten() 87 | assert_array_almost_equal(d.numpy(), d_dense.numpy(), decimal=4) 88 | 89 | # Inversion 90 | for explicit in [True, False]: 91 | if par['epsR'] is None: 92 | dict_inv = {} 93 | else: 94 | 
dict_inv = dict(niter=200) 95 | minv = PoststackInversion(d, wav, m0=mback, explicit=explicit, 96 | epsR=par['epsR'], epsI=par['epsI'], 97 | simultaneous=par['simultaneous'], 98 | **dict_inv)[0] 99 | assert np.linalg.norm(m-minv) / np.linalg.norm(minv) < 1e-2 100 | 101 | 102 | @pytest.mark.parametrize("par", [(par1), (par2)]) 103 | def test_PoststackLinearModelling1d_nonstationary(par): 104 | """Dot-test, comparison of dense vs lop implementation and 105 | inversion for PoststackLinearModelling in 1d with nonstationary wavelet 106 | """ 107 | # Dense 108 | PPop_dense = PoststackLinearModelling(wavs, nt0=nt0, explicit=True) 109 | assert dottest(PPop_dense, nt0, nt0, tol=1e-4) 110 | 111 | # Linear operator 112 | PPop = PoststackLinearModelling(wavs, nt0=nt0, explicit=False) 113 | assert dottest(PPop, nt0, nt0, tol=1e-4) 114 | 115 | # Compare data 116 | d = PPop * m.flatten() 117 | d_dense = PPop_dense * m.t().flatten() 118 | assert_array_almost_equal(d.numpy(), d_dense.numpy(), decimal=4) 119 | 120 | # Inversion 121 | for explicit in [True, False]: 122 | if par['epsR'] is None: 123 | dict_inv = {} 124 | else: 125 | dict_inv = dict(niter=80) 126 | minv = PoststackInversion(d, wavs, m0=mback, explicit=explicit, 127 | epsR=par['epsR'], epsI=par['epsI'], 128 | simultaneous=par['simultaneous'], 129 | **dict_inv)[0] 130 | assert np.linalg.norm(m-minv) / np.linalg.norm(minv) < 1e-2 131 | 132 | 133 | @pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)]) 134 | def test_PoststackLinearModelling2d(par): 135 | """Dot-test and inversion for PoststackLinearModelling in 2d 136 | """ 137 | 138 | # Dense 139 | PPop_dense = PoststackLinearModelling(wav, nt0=nz, spatdims=nx, 140 | explicit=True) 141 | assert dottest(PPop_dense, nz * nx, nz * nx, tol=1e-4) 142 | 143 | # Linear operator 144 | PPop = PoststackLinearModelling(wav, nt0=nz, spatdims=nx, 145 | explicit=False) 146 | assert dottest(PPop, nz * nx, nz * nx, tol=1e-4) 147 | 148 | # Compare data 149 | d = (PPop * m2d.flatten()).reshape(nz, nx) 150 | d_dense = (PPop_dense * m2d.flatten()).reshape(nz, nx) 151 | assert_array_almost_equal(d, d_dense, decimal=4) 152 | 153 | # Inversion 154 | for explicit in [True, False]: 155 | if explicit and not par['simultaneous'] and par['epsR'] is None: 156 | dict_inv = {} 157 | elif explicit and not par['simultaneous'] and par['epsR'] is not None: 158 | dict_inv = dict(niter=10) 159 | else: 160 | dict_inv = dict(niter=10) 161 | minv2d = \ 162 | PoststackInversion(d, wav, m0=mback2d, explicit=explicit, 163 | epsI=par['epsI'], epsR=par['epsR'], 164 | epsRL1=par['epsRL1'], 165 | simultaneous=par['simultaneous'], 166 | **dict_inv)[0] 167 | assert np.linalg.norm(m2d - minv2d) / np.linalg.norm(m2d) < 1e-1 168 | -------------------------------------------------------------------------------- /pylops_gpu/optimization/leastsquares.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from pylops_gpu import Diagonal, VStack 4 | from pylops_gpu.optimization.cg import cg 5 | 6 | 7 | def NormalEquationsInversion(Op, Regs, data, Weight=None, dataregs=None, 8 | epsI=0, epsRs=None, x0=None, returninfo=False, 9 | device='cpu', **kwargs_cg): 10 | r"""Inversion of normal equations. 
11 | 
12 |     Solve the regularized normal equations for a system of equations
13 |     given the operator ``Op``, a data weighting operator ``Weight`` and
14 |     a list of regularization terms ``Regs``.
15 | 
16 |     Parameters
17 |     ----------
18 |     Op : :obj:`pylops_gpu.LinearOperator`
19 |         Operator to invert
20 |     Regs : :obj:`list`
21 |         Regularization operators (``None`` to avoid adding regularization)
22 |     data : :obj:`torch.Tensor`
23 |         Data
24 |     Weight : :obj:`pylops_gpu.LinearOperator`, optional
25 |         Weight operator
26 |     dataregs : :obj:`list`, optional
27 |         Regularization data (must have the same number of elements
28 |         as ``Regs``)
29 |     epsI : :obj:`float`, optional
30 |         Tikhonov damping
31 |     epsRs : :obj:`list`, optional
32 |         Regularization dampings (must have the same number of elements
33 |         as ``Regs``)
34 |     x0 : :obj:`torch.Tensor`, optional
35 |         Initial guess
36 |     returninfo : :obj:`bool`, optional
37 |         Return info of CG solver
38 |     device : :obj:`str`, optional
39 |         Device to be used
40 |     **kwargs_cg
41 |         Arbitrary keyword arguments for
42 |         :py:func:`pylops_gpu.optimization.cg.cg` solver
43 | 
44 |     Returns
45 |     -------
46 |     xinv : :obj:`torch.Tensor`
47 |         Inverted model.
48 | 
49 |     Notes
50 |     -----
51 |     Refer to :class:`pylops.optimization.leastsquares.NormalEquationsInversion`
52 |     for implementation details.
53 | 
54 |     """
55 |     dtype = data.dtype
56 | 
57 |     # store adjoint
58 |     OpH = Op.H
59 | 
60 |     # create dataregs and epsRs if not provided
61 |     if dataregs is None and Regs is not None:
62 |         dataregs = \
63 |             [torch.zeros(Op.shape[1], dtype=dtype).to(device)] * len(Regs)
64 | 
65 |     if epsRs is None and Regs is not None:
66 |         epsRs = [1] * len(Regs)
67 | 
68 |     # Normal equations
69 |     if Weight is not None:
70 |         y_normal = OpH * Weight * data
71 |     else:
72 |         y_normal = OpH * data
73 |     if Weight is not None:
74 |         Op_normal = OpH * Weight * Op
75 |     else:
76 |         Op_normal = OpH * Op
77 | 
78 |     # Add regularization terms
79 |     if epsI > 0:
80 |         Op_normal += epsI ** 2 * Diagonal(torch.ones(Op.shape[1]).to(device))
81 | 
82 |     if Regs is not None:
83 |         for epsR, Reg, datareg in zip(epsRs, Regs, dataregs):
84 |             RegH = Reg.H
85 |             y_normal += epsR ** 2 * RegH * datareg
86 |             Op_normal += epsR ** 2 * RegH * Reg
87 | 
88 |     # CG solver
89 |     if x0 is not None:
90 |         y_normal = y_normal - Op_normal * x0
91 |     xinv, istop = cg(Op_normal, y_normal, **kwargs_cg)
92 |     if x0 is not None:
93 |         xinv = x0 + xinv
94 | 
95 |     if returninfo:
96 |         return xinv, istop
97 |     else:
98 |         return xinv
99 | 
100 | def RegularizedOperator(Op, Regs, epsRs=(1,)):
101 |     r"""Regularized operator.
102 | 
103 |     Creates a regularized operator given the operator ``Op``
104 |     and a list of regularization terms ``Regs``.
105 | 
106 |     Parameters
107 |     ----------
108 |     Op : :obj:`pylops_gpu.LinearOperator`
109 |         Operator to invert
110 |     Regs : :obj:`tuple` or :obj:`list`
111 |         Regularization operators
112 |     epsRs : :obj:`tuple` or :obj:`list`, optional
113 |         Regularization dampings
114 | 
115 |     Returns
116 |     -------
117 |     OpReg : :obj:`pylops_gpu.LinearOperator`
118 |         Regularized operator
119 | 
120 |     See Also
121 |     --------
122 |     RegularizedInversion: Regularized inversion
123 | 
124 |     Notes
125 |     -----
126 |     Refer to :class:`pylops.optimization.leastsquares.RegularizedOperator` for
127 |     implementation details.
128 | 
129 |     """
130 |     OpReg = VStack([Op] + [epsR * Reg for epsR, Reg in zip(epsRs, Regs)],
131 |                    dtype=Op.dtype)
132 |     return OpReg
133 | 
134 | 
135 | def RegularizedInversion(Op, Regs, data, Weight=None, dataregs=None,
136 |                          epsRs=None, x0=None, **kwargs_cg):
137 |     r"""Regularized inversion.
138 | 
139 |     Solve a system of regularized equations given the operator ``Op``,
140 |     a data weighting operator ``Weight``, and a list of regularization
141 |     terms ``Regs``.
142 | 
143 |     Parameters
144 |     ----------
145 |     Op : :obj:`pylops_gpu.LinearOperator`
146 |         Operator to invert
147 |     Regs : :obj:`list`
148 |         Regularization operators (``None`` to avoid adding regularization)
149 |     data : :obj:`torch.Tensor`
150 |         Data
151 |     Weight : :obj:`pylops_gpu.LinearOperator`, optional
152 |         Weight operator
153 |     dataregs : :obj:`list`, optional
154 |         Regularization data (if ``None`` a zero data will be used for every
155 |         regularization operator in ``Regs``)
156 |     epsRs : :obj:`list`, optional
157 |         Regularization dampings
158 |     x0 : :obj:`torch.Tensor`, optional
159 |         Initial guess
160 |     **kwargs_cg
161 |         Arbitrary keyword arguments for
162 |         :py:func:`pylops_gpu.optimization.cg.cg` solver
163 | 
164 |     Returns
165 |     -------
166 |     xinv : :obj:`torch.Tensor`
167 |         Inverted model
168 |     niter : :obj:`int`
169 |         Iteration number upon termination
170 | 
171 |     See Also
172 |     --------
173 |     RegularizedOperator: Regularized operator
174 |     NormalEquationsInversion: Normal equations inversion
175 | 
176 |     Notes
177 |     -----
178 |     Refer to :class:`pylops.optimization.leastsquares.RegularizedInversion`
179 |     for implementation details.
180 | 
181 |     """
182 |     # create regularization data
183 |     if dataregs is None and Regs is not None:
184 |         dataregs = \
185 |             [torch.zeros(Op.shape[1], dtype=data.dtype)] * len(Regs)
186 | 
187 |     if epsRs is None and Regs is not None:
188 |         epsRs = [1] * len(Regs)
189 | 
190 |     # create regularization operators
191 |     if Weight is not None:
192 |         if Regs is None:
193 |             RegOp = Weight * Op
194 |         else:
195 |             RegOp = RegularizedOperator(Weight * Op, Regs, epsRs=epsRs)
196 |     else:
197 |         if Regs is None:
198 |             RegOp = Op
199 |         else:
200 |             RegOp = RegularizedOperator(Op, Regs, epsRs=epsRs)
201 | 
202 |     # augmented data
203 |     if Weight is not None:
204 |         datatot = Weight * data
205 |     else:
206 |         datatot = data.clone()
207 | 
208 |     # augmented operator
209 |     if Regs is not None:
210 |         for epsR, datareg in zip(epsRs, dataregs):
211 |             datatot = torch.cat((datatot, epsR*datareg), dim=0)
212 | 
213 |     # CG solver
214 |     if x0 is not None:
215 |         datatot = datatot - RegOp * x0
216 |     xinv, niter = cg(RegOp.H * RegOp,
217 |                      RegOp.H * datatot, **kwargs_cg)
218 |     if x0 is not None:
219 |         xinv = x0 + xinv
220 | 
221 |     return xinv, niter
222 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                    GNU LESSER GENERAL PUBLIC LICENSE
2 |                        Version 3, 29 June 2007
3 | 
4 |  Copyright (C) 2007 Free Software Foundation, Inc.
5 |  Everyone is permitted to copy and distribute verbatim copies
6 |  of this license document, but changing it is not allowed.
7 | 
8 | 
9 |   This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 | 
13 |   0. Additional Definitions.
14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 
80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 
150 | 
151 |   Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 | 
161 |   If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 | 
--------------------------------------------------------------------------------
/tutorials/poststack.py:
--------------------------------------------------------------------------------
1 | r"""
2 | 02. Post-stack inversion
3 | ========================
4 | This tutorial focuses on extending post-stack seismic inversion to GPU
5 | processing. We refer to the equivalent `PyLops tutorial `_
6 | for a more detailed description of the theory.
7 | 
8 | """
9 | # sphinx_gallery_thumbnail_number = 2
10 | import numpy as np
11 | import torch
12 | import matplotlib.pyplot as plt
13 | from scipy.signal import filtfilt
14 | from pylops.utils.wavelets import ricker
15 | 
16 | import pylops_gpu
17 | from pylops_gpu.utils.backend import device
18 | 
19 | dev = device()
20 | plt.close('all')
21 | np.random.seed(10)
22 | torch.manual_seed(10)
23 | 
24 | ###############################################################################
25 | # We first consider a 1d example. A synthetic profile of acoustic impedance
26 | # is created and data are modelled using both the dense and the linear
27 | # operator version of the
28 | # :py:class:`pylops_gpu.avo.poststack.PoststackLinearModelling` operator.
29 | # Both model and wavelet are created as numpy arrays and converted into torch
30 | # tensors (note that we enforce ``float32`` for optimal performance on GPU).
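###############################################################################
# As a minimal sketch of the conversion pattern used throughout this tutorial
# (the small array below is an illustrative placeholder, not part of the
# example itself), numpy arrays are cast to ``float32`` and then wrapped,
# without any copy, into torch tensors via ``torch.from_numpy``:

_a = np.zeros(3, dtype='float32')  # placeholder numpy array
_t = torch.from_numpy(_a)          # torch tensor sharing the same memory
assert _t.dtype == torch.float32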
31 | 
32 | # model
33 | nt0 = 301
34 | dt0 = 0.004
35 | t0 = np.arange(nt0)*dt0
36 | vp = 1200 + np.arange(nt0) + \
37 |      filtfilt(np.ones(5)/5., 1, np.random.normal(0, 80, nt0))
38 | rho = 1000 + vp + \
39 |       filtfilt(np.ones(5)/5., 1, np.random.normal(0, 30, nt0))
40 | vp[131:] += 500
41 | rho[131:] += 100
42 | m = np.log(vp*rho)
43 | 
44 | # smooth model
45 | nsmooth = 100
46 | mback = filtfilt(np.ones(nsmooth)/float(nsmooth), 1, m)
47 | 
48 | # wavelet
49 | ntwav = 41
50 | wav, twav, wavc = ricker(t0[:ntwav//2+1], 20)
51 | 
52 | # convert to torch tensors
53 | m = torch.from_numpy(m.astype('float32'))
54 | mback = torch.from_numpy(mback.astype('float32'))
55 | wav = torch.from_numpy(wav.astype('float32'))
56 | 
57 | # dense operator
58 | PPop_dense = \
59 |     pylops_gpu.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nt0,
60 |                                                       explicit=True)
61 | 
62 | # lop operator
63 | PPop = pylops_gpu.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nt0)
64 | 
65 | # data
66 | d_dense = PPop_dense * m.flatten()
67 | d = PPop * m.flatten()
68 | 
69 | # add noise
70 | dn_dense = d_dense + \
71 |     torch.from_numpy(np.random.normal(0, 2e-2, d_dense.shape).astype('float32'))
72 | 
73 | ###############################################################################
74 | # We can now estimate the acoustic profile from band-limited data using
75 | # either the dense or the linear operator.
76 | 
77 | # solve dense
78 | minv_dense = \
79 |     pylops_gpu.avo.poststack.PoststackInversion(d_dense, wav / 2, m0=mback,
80 |                                                 explicit=True, simultaneous=False)[0]
81 | 
82 | # solve lop
83 | minv = \
84 |     pylops_gpu.avo.poststack.PoststackInversion(d, wav / 2, m0=mback,
85 |                                                 explicit=False,
86 |                                                 simultaneous=False,
87 |                                                 **dict(niter=500))[0]
88 | 
89 | # solve noisy
90 | mn = \
91 |     pylops_gpu.avo.poststack.PoststackInversion(dn_dense, wav / 2, m0=mback,
92 |                                                 explicit=True, epsI=1e-4,
93 |                                                 epsR=1e0, **dict(niter=100))[0]
94 | 
95 | 
96 | fig, axs = plt.subplots(1, 2, figsize=(6, 7), sharey=True)
97 | axs[0].plot(d_dense, t0, 'k', lw=4, label='Dense')
98 | axs[0].plot(d, t0, '--r', lw=2, label='Lop')
99 | axs[0].plot(dn_dense, t0, '-.g', lw=2, label='Noisy')
100 | axs[0].set_title('Data')
101 | axs[0].invert_yaxis()
102 | axs[0].axis('tight')
103 | axs[0].legend(loc=1)
104 | axs[1].plot(m, t0, 'k', lw=4, label='True')
105 | axs[1].plot(mback, t0, '--b', lw=4, label='Back')
106 | axs[1].plot(minv_dense, t0, '--m', lw=2, label='Inv Dense')
107 | axs[1].plot(minv, t0, '--r', lw=2, label='Inv Lop')
108 | axs[1].plot(mn, t0, '--g', lw=2, label='Inv Noisy')
109 | axs[1].set_title('Model')
110 | axs[1].axis('tight')
111 | axs[1].legend(loc=1)
112 | 
113 | 
114 | ###############################################################################
115 | # We now move to a 2d example. First of all, the model is loaded and
116 | # data are generated.
117 | 
118 | # model
119 | inputfile = '../testdata/avo/poststack_model.npz'
120 | 
121 | model = np.load(inputfile)
122 | m = np.log(model['model'][:, ::3])
123 | x, z = model['x'][::3]/1000., model['z']/1000.
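# note that the model is decimated along the x axis (``[::3]``) simply to
# keep this example fast to run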
124 | nx, nz = len(x), len(z)
125 | 
126 | 
127 | # smooth model
128 | nsmoothz, nsmoothx = 60, 50
129 | mback = filtfilt(np.ones(nsmoothz)/float(nsmoothz), 1, m, axis=0)
130 | mback = filtfilt(np.ones(nsmoothx)/float(nsmoothx), 1, mback, axis=1)
131 | 
132 | # convert to torch tensors
133 | m = torch.from_numpy(m.astype('float32'))
134 | mback = torch.from_numpy(mback.astype('float32'))
135 | 
136 | 
137 | # dense operator
138 | PPop_dense = \
139 |     pylops_gpu.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nz,
140 |                                                       spatdims=nx, explicit=True)
141 | 
142 | # lop operator
143 | PPop = pylops_gpu.avo.poststack.PoststackLinearModelling(wav / 2, nt0=nz,
144 |                                                          spatdims=nx)
145 | 
146 | # data
147 | d = (PPop_dense * m.flatten()).reshape(nz, nx)
148 | n = torch.from_numpy(np.random.normal(0, 1e-1, d.shape).astype('float32'))
149 | dn = d + n
150 | 
151 | ###############################################################################
152 | # Finally, we perform different types of inversion.
153 | 
154 | # dense inversion with noise-free data
155 | minv_dense = \
156 |     pylops_gpu.avo.poststack.PoststackInversion(d, wav / 2, m0=mback,
157 |                                                 explicit=True,
158 |                                                 simultaneous=False)[0]
159 | 
160 | # dense inversion with noisy data
161 | minv_dense_noisy = \
162 |     pylops_gpu.avo.poststack.PoststackInversion(dn, wav / 2, m0=mback,
163 |                                                 explicit=True, epsI=4e-2,
164 |                                                 simultaneous=False)[0]
165 | 
166 | # spatially regularized lop inversion with noisy data
167 | minv_lop_reg = \
168 |     pylops_gpu.avo.poststack.PoststackInversion(dn, wav / 2, m0=minv_dense_noisy,
169 |                                                 explicit=False,
170 |                                                 epsR=5e1, epsI=1e-2,
171 |                                                 **dict(niter=80))[0]
172 | 
173 | fig, axs = plt.subplots(2, 4, figsize=(15, 9))
174 | axs[0][0].imshow(d, cmap='gray',
175 |                  extent=(x[0], x[-1], z[-1], z[0]),
176 |                  vmin=-0.4, vmax=0.4)
177 | axs[0][0].set_title('Data')
178 | axs[0][0].axis('tight')
179 | axs[0][1].imshow(dn, cmap='gray',
180 |                  extent=(x[0], x[-1], z[-1], z[0]),
181 |                  vmin=-0.4, vmax=0.4)
182 | axs[0][1].set_title('Noisy Data')
183 | axs[0][1].axis('tight')
184 | axs[0][2].imshow(m, cmap='gist_rainbow',
185 |                  extent=(x[0], x[-1], z[-1], z[0]),
186 |                  vmin=m.min(), vmax=m.max())
187 | axs[0][2].set_title('Model')
188 | axs[0][2].axis('tight')
189 | axs[0][3].imshow(mback, cmap='gist_rainbow',
190 |                  extent=(x[0], x[-1], z[-1], z[0]),
191 |                  vmin=m.min(), vmax=m.max())
192 | axs[0][3].set_title('Smooth Model')
193 | axs[0][3].axis('tight')
194 | axs[1][0].imshow(minv_dense, cmap='gist_rainbow',
195 |                  extent=(x[0], x[-1], z[-1], z[0]),
196 |                  vmin=m.min(), vmax=m.max())
197 | axs[1][0].set_title('Noise-free Inversion')
198 | axs[1][0].axis('tight')
199 | axs[1][1].imshow(minv_dense_noisy, cmap='gist_rainbow',
200 |                  extent=(x[0], x[-1], z[-1], z[0]),
201 |                  vmin=m.min(), vmax=m.max())
202 | axs[1][1].set_title('Trace-by-trace Noisy Inversion')
203 | axs[1][1].axis('tight')
204 | axs[1][2].imshow(minv_lop_reg, cmap='gist_rainbow',
205 |                  extent=(x[0], x[-1], z[-1], z[0]),
206 |                  vmin=m.min(), vmax=m.max())
207 | axs[1][2].set_title('Regularized Noisy Inversion - lop')
208 | axs[1][2].axis('tight')
209 | 
210 | fig, ax = plt.subplots(1, 1, figsize=(3, 7))
211 | ax.plot(m[:, nx//2], z, 'k', lw=4, label='True')
212 | ax.plot(mback[:, nx//2], z, '--r', lw=4, label='Back')
213 | ax.plot(minv_dense[:, nx//2], z, '--b', lw=2, label='Inv Dense')
214 | ax.plot(minv_dense_noisy[:, nx//2], z, '--m', lw=2, label='Inv Dense noisy')
215 | ax.plot(minv_lop_reg[:, nx//2], z, '--g', lw=2, label='Inv Lop regularized')
216 | ax.set_title('Model')
217 | ax.invert_yaxis()
218 | ax.axis('tight')
219 | ax.legend()
220 | plt.tight_layout()
221 | 
222 | ###############################################################################
223 | # Finally, if you want to run this code on GPUs, take a look at the following `notebook
224 | # `_
225 | # and observe how the speed-up grows with the size of the problem.
226 | 
227 | 
228 | 
229 | 
230 | 
--------------------------------------------------------------------------------
/pylops_gpu/avo/poststack.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import torch
3 | import numpy as np
4 | 
5 | from scipy.sparse import csc_matrix
6 | from pylops import MatrixMult, FirstDerivative
7 | from pylops.utils.signalprocessing import convmtx, nonstationary_convmtx
8 | from pylops.signalprocessing import Convolve1D
9 | from pylops.avo.poststack import _PoststackLinearModelling
10 | 
11 | from pylops_gpu.utils import dottest as Dottest
12 | from pylops_gpu.utils.torch2numpy import torchtype_from_numpytype
13 | from pylops_gpu import MatrixMult as gMatrixMult
14 | from pylops_gpu import FirstDerivative as gFirstDerivative
15 | from pylops_gpu import SecondDerivative as gSecondDerivative
16 | from pylops_gpu import Laplacian as gLaplacian
17 | from pylops_gpu.signalprocessing import Convolve1D as gConvolve1D
18 | from pylops_gpu.optimization.cg import cg
19 | from pylops_gpu.optimization.leastsquares import RegularizedInversion
20 | 
21 | logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
22 | 
23 | """
24 | def _PoststackLinearModelling(wav, nt0, spatdims=None, explicit=False,
25 |                               sparse=False, _MatrixMult=MatrixMult,
26 |                               _Convolve1D=Convolve1D,
27 |                               _FirstDerivative=FirstDerivative,
28 |                               args_MatrixMult={}, args_Convolve1D={},
29 |                               args_FirstDerivative={}):
30 |     # define dtype to be used (ensure wav.dtype rules that of operator)
31 |     dtype = torchtype_from_numpytype(wav.dtype)
32 | 
33 |     if len(wav.shape) == 2 and wav.shape[0] != nt0:
34 |         raise ValueError('Provide 1d wavelet or 2d wavelet composed of nt0 '
35 |                          'wavelets')
36 | 
37 |     # organize dimensions
38 |     if spatdims is None:
39 |         dims = (nt0,)
40 |         spatdims = None
41 |     elif isinstance(spatdims, int):
42 |         dims = (nt0, spatdims)
43 |         spatdims = (spatdims,)
44 |     else:
45 |         dims = (nt0,) + spatdims
46 | 
47 |     if explicit:
48 |         # Create derivative operator
49 |         D = np.diag(0.5 * np.ones(nt0 - 1), k=1) - \
50 |             np.diag(0.5 * np.ones(nt0 - 1), -1)
51 |         D[0] = D[-1] = 0
52 | 
53 |         # Create wavelet operator
54 |         if len(wav.shape) == 1:
55 |             C = convmtx(wav, nt0)[:, len(wav) // 2:-len(wav) // 2 + 1]
56 |         else:
57 |             C = nonstationary_convmtx(wav, nt0, hc=wav.shape[1] // 2,
58 |                                       pad=(nt0, nt0))
59 |         # Combine operators
60 |         M = np.dot(C, D)
61 |         if sparse:
62 |             M = csc_matrix(M)
63 |         Pop = _MatrixMult(M, dims=spatdims, dtype=dtype, **args_MatrixMult)
64 |     else:
65 |         # Create wavelet operator
66 |         if len(wav.shape) == 1:
67 |             Cop = _Convolve1D(np.prod(np.array(dims)), h=wav,
68 |                               offset=len(wav) // 2, dir=0, dims=dims,
69 |                               dtype=dtype, **args_Convolve1D)
70 |         else:
71 |             Cop = _MatrixMult(nonstationary_convmtx(wav, nt0,
72 |                                                     hc=wav.shape[1] // 2,
73 |                                                     pad=(nt0, nt0)),
74 |                               dims=spatdims, dtype=dtype,
75 |                               **args_MatrixMult)
76 |         # Create derivative operator
77 |         Dop = _FirstDerivative(np.prod(np.array(dims)), dims=dims,
78 |                                dir=0, sampling=1., dtype=dtype,
79 |                                **args_FirstDerivative)
80 | 
81 |         Pop = Cop * Dop
82 |     return Pop
83 | """
84 | 
85 | 
86 | def PoststackLinearModelling(wav, nt0,
spatdims=None, explicit=False, 87 | device='cpu', togpu=(False, False), 88 | tocpu=(False, False)): 89 | r"""Post-stack linearized seismic modelling operator. 90 | 91 | Create operator to be applied to an elastic parameter trace (or stack of 92 | traces) for generation of band-limited seismic post-stack data. The input 93 | model and data have shape :math:`[n_{t0} (\times n_x \times n_y)]`. 94 | 95 | Parameters 96 | ---------- 97 | wav : :obj:`torch.Tensor` or :obj:`np.ndarray` 98 | Wavelet in time domain (must have odd number of elements 99 | and centered to zero) 100 | nt0 : :obj:`int` 101 | Number of samples along time axis 102 | spatdims : :obj:`int` or :obj:`tuple`, optional 103 | Number of samples along spatial axis (or axes) 104 | (``None`` if only one dimension is available) 105 | explicit : :obj:`bool`, optional 106 | Create a chained linear operator (``False``, preferred for large data) 107 | or a ``MatrixMult`` linear operator with dense matrix (``True``, 108 | preferred for small data) 109 | device : :obj:`str`, optional 110 | Device to be used 111 | togpu : :obj:`tuple`, optional 112 | Move model and data from cpu to gpu prior to applying ``matvec`` and 113 | ``rmatvec``, respectively (only when ``device='gpu'``) 114 | tocpu : :obj:`tuple`, optional 115 | Move data and model from gpu to cpu after applying ``matvec`` and 116 | ``rmatvec``, respectively (only when ``device='gpu'``) 117 | 118 | Returns 119 | ------- 120 | Pop : :obj:`LinearOperator` 121 | post-stack modelling operator. 122 | 123 | Notes 124 | ----- 125 | Refer to :class:`pylops.avo.poststack.PoststackLinearModelling` for 126 | implementation details. 127 | 128 | """ 129 | # ensure wav is always numpy, it will be converted later back to torch 130 | if isinstance(wav, torch.Tensor): 131 | wav = wav.cpu().numpy() 132 | return _PoststackLinearModelling(wav, nt0, spatdims=spatdims, 133 | explicit=explicit, sparse=False, 134 | _MatrixMult=gMatrixMult, 135 | _Convolve1D=gConvolve1D, 136 | _FirstDerivative=gFirstDerivative, 137 | args_MatrixMult={'device':device, 138 | 'togpu':(togpu[0], togpu[1]), 139 | 'tocpu':(tocpu[0], tocpu[1])}, 140 | args_Convolve1D={'device':device, 141 | 'togpu':(False, togpu[1]), 142 | 'tocpu':(tocpu[0], False)}, 143 | args_FirstDerivative={'device':device, 144 | 'togpu':(togpu[0], False), 145 | 'tocpu':(False, tocpu[1])}) 146 | 147 | 148 | def PoststackInversion(data, wav, m0=None, explicit=False, 149 | simultaneous=False, epsI=None, epsR=None, 150 | dottest=False, epsRL1=None, 151 | device='cpu', togpu=(False, False), 152 | tocpu=(False, False), **kwargs_solver): 153 | r"""Post-stack linearized seismic inversion. 154 | 155 | Invert post-stack seismic operator to retrieve an acoustic 156 | impedance profile from band-limited seismic post-stack data. 157 | Depending on the choice of input parameters, inversion can be 158 | trace-by-trace with explicit operator or global with either 159 | explicit or linear operator. 160 | 161 | Parameters 162 | ---------- 163 | data : :obj:`np.ndarray` 164 | Band-limited seismic post-stack data of size 165 | :math:`[n_{t0} (\times n_x \times n_y)]` 166 | wav : :obj:`np.ndarray` 167 | Wavelet in time domain (must have odd number of elements 168 | and centered to zero). If 1d, assume stationary wavelet for the entire 169 | time axis. 
If 2d of size :math:`[n_{t0} \times n_h]` use as
170 |         non-stationary wavelet
171 |     m0 : :obj:`np.ndarray`, optional
172 |         Background model of size :math:`[n_{t0} (\times n_x \times n_y)]`
173 |     explicit : :obj:`bool`, optional
174 |         Create a chained linear operator (``False``, preferred for large data)
175 |         or a ``MatrixMult`` linear operator with dense matrix
176 |         (``True``, preferred for small data)
177 |     simultaneous : :obj:`bool`, optional
178 |         Simultaneously invert entire data (``True``) or invert
179 |         trace-by-trace (``False``) when using ``explicit`` operator
180 |         (note that the entire data is always inverted when working
181 |         with linear operator)
182 |     epsI : :obj:`float`, optional
183 |         Damping factor for Tikhonov regularization term
184 |     epsR : :obj:`float`, optional
185 |         Damping factor for additional Laplacian regularization term
186 |     dottest : :obj:`bool`, optional
187 |         Apply dot-test
188 |     epsRL1 : :obj:`float`, optional
189 |         Damping factor for additional blockiness regularization term
190 |     device : :obj:`str`, optional
191 |         Device to be used
192 |     togpu : :obj:`tuple`, optional
193 |         Move model and data from cpu to gpu prior to applying ``matvec`` and
194 |         ``rmatvec``, respectively (only when ``device='gpu'``)
195 |     tocpu : :obj:`tuple`, optional
196 |         Move data and model from gpu to cpu after applying ``matvec`` and
197 |         ``rmatvec``, respectively (only when ``device='gpu'``)
198 |     **kwargs_solver
199 |         Arbitrary keyword arguments passed to the
200 |         :py:func:`pylops_gpu.optimization.cg.cg` solver (used whenever the
201 |         inversion is performed simultaneously and/or with a linear operator;
202 |         ignored by the direct trace-by-trace solves)
203 | 
204 |     Returns
205 |     -------
206 |     minv : :obj:`np.ndarray`
207 |         Inverted model of size :math:`[n_{t0} (\times n_x \times n_y)]`
208 |     datar : :obj:`np.ndarray`
209 |         Residual data (i.e., data - background data) of
210 |         size :math:`[n_{t0} (\times n_x \times n_y)]`
211 | 
212 |     Notes
213 |     -----
214 |     Refer to :class:`pylops.avo.poststack.PoststackInversion` for
215 |     implementation details.
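
    Examples
    --------
    A minimal usage sketch (``d`` and ``wav`` are illustrative placeholders
    for a band-limited data tensor and a wavelet):

    >>> minv, datar = PoststackInversion(d, wav, m0=None, explicit=True,
    ...                                  simultaneous=False)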
216 | 
217 |     """
218 |     # check if background model and data have same shape
219 |     if m0 is not None and data.shape != m0.shape:
220 |         raise ValueError('data and m0 must have same shape')
221 | 
222 |     # find out dimensions
223 |     if len(data.shape) == 1:
224 |         dims = 1
225 |         nt0 = data.shape[0]
226 |         nspat = None
227 |         nspatprod = nx = 1
228 |     elif len(data.shape) == 2:
229 |         dims = 2
230 |         nt0, nx = data.shape
231 |         nspat = (nx,)
232 |         nspatprod = nx
233 |     else:
234 |         dims = 3
235 |         nt0, nx, ny = data.shape
236 |         nspat = (nx, ny)
237 |         nspatprod = nx * ny
238 |     data = data.reshape(nt0, nspatprod)
239 | 
240 |     # create operator
241 |     PPop = PoststackLinearModelling(wav, nt0=nt0, spatdims=nspat,
242 |                                     explicit=explicit, tocpu=tocpu,
243 |                                     togpu=togpu, device=device)
244 |     if dottest:
245 |         Dottest(PPop, nt0 * nspatprod, nt0 * nspatprod,
246 |                 raiseerror=True, verb=True)
247 | 
248 |     # create and remove background data from original data
249 |     datar = data.flatten() if m0 is None else \
250 |         data.flatten() - PPop * m0.flatten()
251 |     # invert model
252 |     if epsR is None:
253 |         # inversion without spatial regularization
254 |         if explicit:
255 |             if epsI is None and not simultaneous:
256 |                 # solve unregularized equations independently trace-by-trace
257 |                 minv = torch.solve(datar.reshape(nt0, nspatprod),
258 |                                    PPop.A.reshape(nt0, nt0) +
259 |                                    1e-3 * torch.eye(nt0, dtype=torch.float32)).solution
260 |             elif epsI is None and simultaneous:
261 |                 # solve unregularized equations simultaneously
262 |                 minv = cg(PPop.H * PPop, PPop.H * datar, **kwargs_solver)[0]
263 |             elif epsI is not None:
264 |                 # create regularized normal equations
265 |                 PP = torch.matmul(PPop.A.t(), PPop.A) + \
266 |                      epsI * torch.eye(nt0, dtype=torch.float32)
267 |                 datarn = torch.matmul(PPop.A.t(), datar.reshape(nt0, nspatprod))
268 |                 if not simultaneous:
269 |                     # solve regularized normal eqs. trace-by-trace
270 |                     minv = torch.solve(datarn.reshape(nt0, nspatprod),
271 |                                        PP).solution
272 |                 else:
273 |                     # solve regularized normal equations simultaneously
274 |                     PPop_reg = gMatrixMult(PP, dims=nspatprod, device=device,
275 |                                            togpu=togpu, tocpu=tocpu)
276 |                     minv = cg(PPop_reg.H * PPop_reg,
277 |                               PPop_reg.H * datar.flatten(),
278 |                               **kwargs_solver)[0]
279 |             else:
280 |                 # create regularized normal eqs.
and solve them simultaneously 281 | PP = np.dot(PPop.A.T, PPop.A) + epsI * np.eye(nt0) 282 | datarn = PPop.A.T * datar.reshape(nt0, nspatprod) 283 | PPop_reg = gMatrixMult(PP, dims=nspatprod, device=device, 284 | togpu=togpu, tocpu=tocpu) 285 | minv = torch.solve(datarn.reshape(nt0, nspatprod), 286 | PPop_reg.A).solution 287 | else: 288 | # solve unregularized normal equations simultaneously with lop 289 | minv = cg(PPop.H * PPop, PPop.H * datar, **kwargs_solver)[0] 290 | else: 291 | if epsRL1 is None: 292 | # L2 inversion with spatial regularization 293 | if dims == 1: 294 | Regop = gSecondDerivative(nt0, device=device, 295 | togpu=togpu, tocpu=tocpu, 296 | dtype=PPop.dtype) 297 | elif dims == 2: 298 | Regop = gLaplacian((nt0, nx), device=device, 299 | togpu=togpu, tocpu=tocpu, 300 | dtype=PPop.dtype) 301 | else: 302 | Regop = gLaplacian((nt0, nx, ny), dirs=(1, 2), device=device, 303 | togpu=togpu, tocpu=tocpu, dtype=PPop.dtype) 304 | 305 | minv = RegularizedInversion(PPop, [Regop], data.flatten(), 306 | x0=None if m0 is None else m0.flatten(), 307 | epsRs=[epsR], **kwargs_solver)[0] 308 | else: 309 | # Blockiness-promoting inversion with spatial regularization 310 | raise NotImplementedError('SplitBregman not available...') 311 | 312 | # compute residual 313 | if epsR is None: 314 | datar -= PPop * minv.flatten() 315 | else: 316 | datar = data.flatten() - PPop * minv.flatten() 317 | 318 | # reshape inverted model and residual data 319 | if dims == 1: 320 | minv = minv.squeeze() 321 | datar = datar.squeeze() 322 | elif dims == 2: 323 | minv = minv.reshape(nt0, nx) 324 | datar = datar.reshape(nt0, nx) 325 | else: 326 | minv = minv.reshape(nt0, nx, ny) 327 | datar = datar.reshape(nt0, nx, ny) 328 | 329 | if m0 is not None and epsR is None: 330 | minv = minv + m0 331 | 332 | return minv, datar 333 | -------------------------------------------------------------------------------- /pytests/test_derivative.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | import torch 4 | 5 | from numpy.testing import assert_array_equal, assert_array_almost_equal 6 | from pylops.basicoperators import FirstDerivative, SecondDerivative, Laplacian 7 | 8 | from pylops_gpu.utils.backend import device 9 | from pylops_gpu.utils import dottest 10 | from pylops_gpu import FirstDerivative as gFirstDerivative 11 | from pylops_gpu import SecondDerivative as gSecondDerivative 12 | from pylops_gpu import Laplacian as gLaplacian 13 | 14 | 15 | par1 = {'nz': 10, 'ny': 30, 'nx': 40, 16 | 'dz': 1., 'dy': 1., 'dx': 1.} # even with unitary sampling 17 | par2 = {'nz': 10, 'ny': 30, 'nx': 40, 18 | 'dz': 0.4, 'dy': 2., 'dx': 0.5} # even with non-unitary sampling 19 | par3 = {'nz': 11, "ny": 51, 'nx': 61, 20 | 'dz': 1., 'dy': 1., 'dx': 1.} # odd with unitary sampling 21 | par4 = {'nz': 11, "ny": 51, 'nx': 61, 22 | 'dz': 0.4, 'dy': 2., 'dx': 0.5} # odd with non-unitary sampling 23 | par1e = {'nz': 10, 'ny': 30, 'nx': 40, 24 | 'dz': 1., 'dy': 1., 'dx': 1.} # even with unitary sampling 25 | par2e = {'nz': 10, 'ny': 30, 'nx': 40, 26 | 'dz': 0.4, 'dy': 2., 'dx': 0.5} # even with non-unitary sampling 27 | par3e = {'nz': 11, "ny": 51, 'nx': 61, 28 | 'dz': 1., 'dy': 1., 'dx': 1.} # odd with unitary sampling 29 | par4e = {'nz': 11, "ny": 51, 'nx': 61, 30 | 'dz': 0.4, 'dy': 2., 'dx': 0.5} # odd with non-unitary sampling 31 | 32 | dev = device() 33 | np.random.seed(0) 34 | torch.manual_seed(0) 35 | 36 | 37 | @pytest.mark.parametrize("par", [(par1), (par2), (par3), 
(par4),
38 |                                  (par1e), (par2e), (par3e), (par4e)])
39 | def test_FirstDerivative(par):
40 |     """Dot-test and forward for FirstDerivative operator
41 |     """
42 |     # 1d
43 |     gD1op = gFirstDerivative(par['nx'], sampling=par['dx'],
44 |                              dtype=torch.float32)
45 |     assert dottest(gD1op, par['nx'], par['nx'], tol=1e-3)
46 | 
47 |     x = torch.from_numpy((par['dx']*np.arange(par['nx'],
48 |                                               dtype='float32')) ** 2)
49 |     D1op = FirstDerivative(par['nx'], sampling=par['dx'],
50 |                            dtype='float32')
51 |     assert_array_equal((gD1op * x)[1:-1], (D1op * x.cpu().numpy())[1:-1])
52 | 
53 |     # 2d - derivative on 1st direction
54 |     gD1op = gFirstDerivative(par['ny']*par['nx'], dims=(par['ny'], par['nx']),
55 |                              dir=0, sampling=par['dy'], dtype=torch.float32)
56 |     assert dottest(gD1op, par['ny']*par['nx'], par['ny']*par['nx'], tol=1e-3)
57 | 
58 |     x = torch.from_numpy((np.outer((par['dy']*np.arange(par['ny']))**2,
59 |                                    np.ones(par['nx']))).astype(dtype='float32'))
60 |     D1op = FirstDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
61 |                            dir=0, sampling=par['dy'], dtype='float32')
62 |     gy = (gD1op * x.view(-1)).reshape(par['ny'], par['nx']).cpu().numpy()
63 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['ny'], par['nx'])
64 |     assert_array_equal(gy[1:-1], y[1:-1])
65 | 
66 |     # 2d - derivative on 2nd direction
67 |     gD1op = gFirstDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
68 |                              dir=1, sampling=par['dy'], dtype=torch.float32)
69 |     assert dottest(gD1op, par['ny'] * par['nx'],
70 |                    par['ny'] * par['nx'], tol=1e-3)
71 | 
72 |     x = torch.from_numpy((np.outer((par['dy'] * np.arange(par['ny'])) ** 2,
73 |                                    np.ones(par['nx']))).astype(dtype='float32'))
74 |     D1op = FirstDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
75 |                            dir=1, sampling=par['dy'], dtype='float32')
76 |     gy = (gD1op * x.view(-1)).reshape(par['ny'], par['nx']).cpu().numpy()
77 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['ny'], par['nx'])
78 |     assert_array_equal(gy[:, 1:-1], y[:, 1:-1])
79 | 
80 |     # 3d - derivative on 1st direction
81 |     gD1op = gFirstDerivative(par['nz'] * par['ny'] * par['nx'],
82 |                              dims=(par['nz'], par['ny'], par['nx']),
83 |                              dir=0, sampling=par['dz'], dtype=torch.float32)
84 |     assert dottest(gD1op, par['nz'] * par['ny'] * par['nx'],
85 |                    par['nz'] * par['ny'] * par['nx'], tol=1e-3)
86 | 
87 |     x = torch.from_numpy((np.outer((par['dz']*np.arange(par['nz']))**2,
88 |                                    np.ones((par['ny'],
89 |                                             par['nx']))).astype(dtype='float32')))
90 |     D1op = FirstDerivative(par['nz'] * par['ny'] * par['nx'],
91 |                            dims=(par['nz'], par['ny'], par['nx']),
92 |                            dir=0, sampling=par['dz'], dtype='float32')
93 | 
94 |     gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'], par['nx']).cpu().numpy()
95 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'],
96 |                                                   par['nx'])
97 |     assert_array_almost_equal(gy[1:-1], y[1:-1], decimal=5)
98 | 
99 |     # 3d - derivative on 2nd direction
100 |     gD1op = gFirstDerivative(par['nz'] * par['ny'] * par['nx'],
101 |                              dims=(par['nz'], par['ny'], par['nx']),
102 |                              dir=1, sampling=par['dy'], dtype=torch.float32)
103 |     assert dottest(gD1op, par['nz']*par['ny']*par['nx'],
104 |                    par['nz']*par['ny']*par['nx'], tol=1e-3)
105 | 
106 |     x = torch.from_numpy((np.outer((par['dz'] * np.arange(par['nz'])) ** 2,
107 |                                    np.ones((par['ny'],
108 |                                             par['nx']))).reshape(par['nz'],
109 |                                                                  par['ny'],
110 |                                                                  par['nx'])).astype(dtype='float32'))
111 |     D1op = FirstDerivative(par['nz'] * par['ny'] * par['nx'],
112 |                            dims=(par['nz'], par['ny'], par['nx']),
113 |                            dir=1, sampling=par['dy'], dtype='float32')
114 | 
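    # compare the GPU result with the CPU reference (edge samples are
    # excluded in the assertions below)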
115 |     gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'],
116 |                                       par['nx']).cpu().numpy()
117 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'],
118 |                                                   par['nx'])
119 |     assert_array_almost_equal(gy[1:-1], y[1:-1],
120 |                               decimal=5)
121 | 
122 |     # 3d - derivative on 3rd direction
123 |     gD1op = gFirstDerivative(par['nz']*par['ny']*par['nx'],
124 |                              dims=(par['nz'], par['ny'], par['nx']),
125 |                              dir=2, sampling=par['dx'], dtype=torch.float32)
126 |     assert dottest(gD1op, par['nz']*par['ny']*par['nx'],
127 |                    par['nz']*par['ny']*par['nx'], tol=1e-3)
128 | 
129 |     x = torch.from_numpy((np.outer((par['dz'] * np.arange(par['nz'])) ** 2,
130 |                                    np.ones((par['ny'],
131 |                                             par['nx']))).reshape(par['nz'],
132 |                                                                  par['ny'],
133 |                                                                  par['nx'])).astype(dtype='float32'))
134 | 
135 |     D1op = FirstDerivative(par['nz'] * par['ny'] * par['nx'],
136 |                            dims=(par['nz'], par['ny'], par['nx']),
137 |                            dir=2, sampling=par['dx'], dtype='float32')
138 | 
139 |     gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'],
140 |                                       par['nx']).cpu().numpy()
141 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'],
142 |                                                   par['nx'])
143 |     assert_array_almost_equal(gy[1:-1], y[1:-1], decimal=5)
144 | 
145 | 
146 | 
147 | @pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4),
148 |                                  (par1e), (par2e), (par3e), (par4e)])
149 | def test_SecondDerivative(par):
150 |     """Dot-test and forward for SecondDerivative operator
151 |     """
152 |     # 1d
153 |     gD1op = gSecondDerivative(par['nx'], sampling=par['dx'],
154 |                               dtype=torch.float32)
155 |     assert dottest(gD1op, par['nx'], par['nx'], tol=1e-3)
156 | 
157 |     x = torch.from_numpy((par['dx']*np.arange(par['nx'],
158 |                                               dtype='float32')) ** 2)
159 |     D1op = SecondDerivative(par['nx'], sampling=par['dx'],
160 |                             dtype='float32')
161 |     assert_array_equal((gD1op * x)[1:-1], (D1op * x.cpu().numpy())[1:-1])
162 | 
163 |     # 2d - derivative on 1st direction
164 |     gD1op = gSecondDerivative(par['ny']*par['nx'], dims=(par['ny'], par['nx']),
165 |                               dir=0, sampling=par['dy'], dtype=torch.float32)
166 |     assert dottest(gD1op, par['ny']*par['nx'], par['ny']*par['nx'], tol=1e-3)
167 | 
168 |     x = torch.from_numpy((np.outer((par['dy']*np.arange(par['ny']))**2,
169 |                                    np.ones(par['nx']))).astype(dtype='float32'))
170 |     D1op = SecondDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
171 |                             dir=0, sampling=par['dy'], dtype='float32')
172 |     gy = (gD1op * x.view(-1)).reshape(par['ny'], par['nx']).cpu().numpy()
173 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['ny'], par['nx'])
174 |     assert_array_equal(gy[1:-1], y[1:-1])
175 | 
176 |     # 2d - derivative on 2nd direction
177 |     gD1op = gSecondDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
178 |                               dir=1, sampling=par['dy'], dtype=torch.float32)
179 |     assert dottest(gD1op, par['ny'] * par['nx'],
180 |                    par['ny'] * par['nx'], tol=1e-3)
181 | 
182 |     x = torch.from_numpy((np.outer((par['dy'] * np.arange(par['ny'])) ** 2,
183 |                                    np.ones(par['nx']))).astype(dtype='float32'))
184 |     D1op = SecondDerivative(par['ny'] * par['nx'], dims=(par['ny'], par['nx']),
185 |                             dir=1, sampling=par['dy'], dtype='float32')
186 |     gy = (gD1op * x.view(-1)).reshape(par['ny'], par['nx']).cpu().numpy()
187 |     y = (D1op * x.view(-1).cpu().numpy()).reshape(par['ny'], par['nx'])
188 |     assert_array_equal(gy[:, 1:-1], y[:, 1:-1])
189 | 
190 |     # 3d - derivative on 1st direction
191 |     gD1op = gSecondDerivative(par['nz'] * par['ny'] * par['nx'],
192 |                               dims=(par['nz'], par['ny'], par['nx']),
193 |                               dir=0, sampling=par['dz'], dtype=torch.float32)
194 |     assert
dottest(gD1op, par['nz'] * par['ny'] * par['nx'], 195 | par['nz'] * par['ny'] * par['nx'], tol=1e-3) 196 | 197 | x = torch.from_numpy((np.outer((par['dz']*np.arange(par['nz']))**2, 198 | np.ones((par['ny'], 199 | par['nx']))).astype(dtype='float32'))) 200 | D1op = SecondDerivative(par['nz'] * par['ny'] * par['nx'], 201 | dims=(par['nz'], par['ny'], par['nx']), 202 | dir=0, sampling=par['dz'], dtype='float32') 203 | 204 | gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'], par['nx']).cpu().numpy() 205 | y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'], 206 | par['nx']) 207 | assert_array_almost_equal(gy[1:-1], y[1:-1], decimal=5) 208 | 209 | # 3d - derivative on 2nd direction 210 | gD1op = gSecondDerivative(par['nz'] * par['ny'] * par['nx'], 211 | dims=(par['nz'], par['ny'], par['nx']), 212 | dir=1, sampling=par['dy'], dtype=torch.float32) 213 | assert dottest(gD1op, par['nz']*par['ny']*par['nx'], 214 | par['nz']*par['ny']*par['nx'], tol=1e-3) 215 | 216 | x = torch.from_numpy((np.outer((par['dz'] * np.arange(par['nz'])) ** 2, 217 | np.ones((par['ny'], 218 | par['nx']))).reshape(par['nz'], 219 | par['ny'], 220 | par['nx'])).astype(dtype='float32')) 221 | D1op = SecondDerivative(par['nz'] * par['ny'] * par['nx'], 222 | dims=(par['nz'], par['ny'], par['nx']), 223 | dir=1, sampling=par['dy'], dtype='float32') 224 | 225 | gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'], 226 | par['nx']).cpu().numpy() 227 | y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'], 228 | par['nx']) 229 | assert_array_almost_equal(gy[1:-1], y[1:-1], 230 | decimal=5) 231 | 232 | # 3d - derivative on 3rd direction 233 | gD1op = gSecondDerivative(par['nz']*par['ny']*par['nx'], 234 | dims=(par['nz'], par['ny'], par['nx']), 235 | dir=2, sampling=par['dx'], dtype=torch.float32) 236 | assert dottest(gD1op, par['nz']*par['ny']*par['nx'], 237 | par['nz']*par['ny']*par['nx'], tol=1e-3) 238 | 239 | x = torch.from_numpy((np.outer((par['dz'] * np.arange(par['nz'])) ** 2, 240 | np.ones((par['ny'], 241 | par['nx']))).reshape(par['nz'], 242 | par['ny'], 243 | par['nx'])).astype(dtype='float32')) 244 | 245 | D1op = SecondDerivative(par['nz'] * par['ny'] * par['nx'], 246 | dims=(par['nz'], par['ny'], par['nx']), 247 | dir=2, sampling=par['dx'], dtype='float32') 248 | 249 | gy = (gD1op * x.view(-1)).reshape(par['nz'], par['ny'], 250 | par['nx']).cpu().numpy() 251 | y = (D1op * x.view(-1).cpu().numpy()).reshape(par['nz'], par['ny'], 252 | par['nx']) 253 | assert_array_almost_equal(gy[1:-1], y[1:-1], decimal=5) 254 | 255 | 256 | @pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), 257 | (par1e), (par2e), (par3e), (par4e)]) 258 | def test_Laplacian(par): 259 | """Dot-test for Laplacian operator 260 | """ 261 | # 2d - symmetrical 262 | Dlapop = gLaplacian((par['ny'], par['nx']), dirs=(0, 1), weights=(1, 1), 263 | sampling=(par['dy'], par['dx']), dtype=torch.float32) 264 | assert dottest(Dlapop, par['ny'] * par['nx'], par['ny'] * par['nx'], 265 | tol=1e-3) 266 | 267 | # 2d - asymmetrical 268 | Dlapop = gLaplacian((par['ny'], par['nx']), dirs=(0, 1), weights=(1, 2), 269 | sampling=(par['dy'], par['dx']), dtype=torch.float32) 270 | assert dottest(Dlapop, par['ny'] * par['nx'], par['ny'] * par['nx'], 271 | tol=1e-3) 272 | 273 | # 3d - symmetrical on 1st and 2nd direction 274 | Dlapop = gLaplacian((par['nz'], par['ny'], par['nx']), dirs=(0, 1), 275 | weights=(1, 1), sampling=(par['dy'], par['dx']), 276 | dtype=torch.float32) 277 | assert dottest(Dlapop, par['nz'] * par['ny'] * par['nx'], 278 
| par['nz'] * par['ny'] * par['nx'], tol=1e-3)
279 | 
280 |     # 3d - symmetrical on 2nd and 3rd direction
281 |     Dlapop = gLaplacian((par['nz'], par['ny'], par['nx']), dirs=(1, 2),
282 |                         weights=(1, 1), sampling=(par['dy'], par['dx']),
283 |                         dtype=torch.float32)
284 |     assert dottest(Dlapop, par['nz'] * par['ny'] * par['nx'],
285 |                    par['nz'] * par['ny'] * par['nx'], tol=1e-3)
286 | 
--------------------------------------------------------------------------------