├── README.md ├── workshops ├── 2022-10-05_specfem_users │ ├── additional_material │ │ ├── day_1c │ │ │ ├── meshfem3D_files │ │ │ │ ├── example_A │ │ │ │ │ ├── interface1.txt │ │ │ │ │ ├── interfaces.txt │ │ │ │ │ └── Mesh_Par_file │ │ │ │ ├── example_B │ │ │ │ │ ├── interface2.txt │ │ │ │ │ ├── interface1.txt │ │ │ │ │ ├── interfaces.txt │ │ │ │ │ └── Mesh_Par_file │ │ │ │ └── example_C │ │ │ │ │ ├── interface2.txt │ │ │ │ │ ├── interface1.txt │ │ │ │ │ ├── interfaces.txt │ │ │ │ │ └── Mesh_Par_file │ │ │ └── figures │ │ │ │ ├── mesh │ │ │ │ ├── example_A.png │ │ │ │ ├── example_B.png │ │ │ │ └── example_C.png │ │ │ │ └── source_station_geometry │ │ │ │ └── sr.png │ │ ├── README.md │ │ └── container_git_hashes.md │ ├── README.md │ ├── day_0_container_testing.ipynb │ └── day_2b_kernels_exercise.ipynb ├── 2024-05-21_scoped_uw │ ├── exercise_solutions │ │ ├── solution_2_forward_simulations.ipynb │ │ └── solution_5_intro_seisflows.ipynb │ ├── readme.md │ └── 4_intro_specfem3d.ipynb └── 2025-08-04_cig-tng │ ├── readme.md │ └── 2_forward_simulations.ipynb ├── posters ├── chow_etal_ssa_tomography_2022.pdf └── chow_etal_agu_2022.txt ├── research ├── README.md ├── seisflows_submit.md └── northern_alaska_tomography.md ├── readmes ├── how_to_version_release.md ├── contributing_to_adjdocs.md ├── manual_install_instructions.md └── docker_image_install.md └── .gitignore /README.md: -------------------------------------------------------------------------------- 1 | # adjDocs: adjTomo Documentation 2 | Teaching material including Jupyter Notebooks, slideshows and examples 3 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_A/interface1.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 4 | 0 5 | -------------------------------------------------------------------------------- 
/workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_B/interface2.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 4 | 0 5 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_C/interface2.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 0 3 | 0 4 | 0 5 | -------------------------------------------------------------------------------- /posters/chow_etal_ssa_tomography_2022.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adjtomo/adjdocs/HEAD/posters/chow_etal_ssa_tomography_2022.pdf -------------------------------------------------------------------------------- /posters/chow_etal_agu_2022.txt: -------------------------------------------------------------------------------- 1 | agu2022fallmeeting-agu.ipostersessions.com/Default.aspx?s=30-0A-56-DE-4A-C3-56-96-1B-5D-F2-C9-F4-D5-66-30 2 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_B/interface1.txt: -------------------------------------------------------------------------------- 1 | -15000 2 | -15000 3 | -15000 4 | -15000 5 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_C/interface1.txt: -------------------------------------------------------------------------------- 1 | -15000 2 | -15000 3 | -15000 4 | -15000 5 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_A.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/adjtomo/adjdocs/HEAD/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_A.png -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_B.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adjtomo/adjdocs/HEAD/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_B.png -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_C.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adjtomo/adjdocs/HEAD/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_C.png -------------------------------------------------------------------------------- /research/README.md: -------------------------------------------------------------------------------- 1 | # Research 2 | 3 | Catch all directory which is meant to contain miscellaneous documents related 4 | to research which leverage adjTomo software. These may include scripts, 5 | walkthroughs, papers etc.. 
6 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/source_station_geometry/sr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adjtomo/adjdocs/HEAD/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/source_station_geometry/sr.png -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/README.md: -------------------------------------------------------------------------------- 1 | For any additional workshop material (pre-completed data files, slides etc.) 2 | that participants may or may not need access to during the workshop. Files 3 | should be labelled with the correct Day to avoid any confusion. 4 | Please do not upload large files here. 5 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_A/interfaces.txt: -------------------------------------------------------------------------------- 1 | # number of interfaces 2 | 1 3 | # 4 | # We describe each interface below, structured as a 2D-grid, with several parameters : 5 | # number of points along XI and ETA, minimal XI ETA coordinates 6 | # and spacing between points which must be constant. 7 | # Then the records contain the Z coordinates of the NXI x NETA points. 8 | # 9 | # interface number 1 (topography, top of the mesh) 10 | .true. 
2 2 0.0 0.0 1000.0 1000.0 11 | interface1.txt 12 | # 13 | # for each layer, we give the number of spectral elements in the vertical direction 14 | # 15 | # layer number 1 (top layer) 16 | 16 17 | -------------------------------------------------------------------------------- /research/seisflows_submit.md: -------------------------------------------------------------------------------- 1 | # SeisFlows Submit 2 | 3 | What happens when you run `seisflows submit`? 4 | 5 | 1. Load `system` module 6 | 2. `system.submit()` 7 | - `config.import_seisflows`: loads parameters, sets up logger, imports all modules 8 | - `workflow.check()`: [module.check() for module in modules], checks parameter acceptability, fails if parameters do not pass certain criteria 9 | - `workflow.setup()`: [module.setup() for module in modules], setup includes mkdir's, loading checkpoints, assigning internal parameters 10 | - `workflow.run()`: [task() for task in workflow.task_list], runs through all functions defined by the workflow 11 | 3. Example Forward workflow 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_B/interfaces.txt: -------------------------------------------------------------------------------- 1 | # number of interfaces 2 | 2 3 | # 4 | # We describe each interface below, structured as a 2D-grid, with several parameters : 5 | # number of points along XI and ETA, minimal XI ETA coordinates 6 | # and spacing between points which must be constant. 7 | # Then the records contain the Z coordinates of the NXI x NETA points. 8 | # 9 | # interface number 1 10 | .true. 2 2 0.0 0.0 1000.0 1000.0 11 | interface1.txt 12 | # 13 | # interface number 2 (topography, top of the mesh) 14 | .true. 
2 2 0.0 0.0 1000.0 1000.0 15 | interface2.txt 16 | # 17 | # for each layer, we give the number of spectral elements in the vertical direction 18 | # 19 | # layer number 1 20 | 12 21 | # layer number 2 (top layer) 22 | 4 23 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_C/interfaces.txt: -------------------------------------------------------------------------------- 1 | # number of interfaces 2 | 2 3 | # 4 | # We describe each interface below, structured as a 2D-grid, with several parameters : 5 | # number of points along XI and ETA, minimal XI ETA coordinates 6 | # and spacing between points which must be constant. 7 | # Then the records contain the Z coordinates of the NXI x NETA points. 8 | # 9 | # interface number 1 10 | .true. 2 2 0.0 0.0 1000.0 1000.0 11 | interface1.txt 12 | # 13 | # interface number 2 (topography, top of the mesh) 14 | .true. 2 2 0.0 0.0 1000.0 1000.0 15 | interface2.txt 16 | # 17 | # for each layer, we give the number of spectral elements in the vertical direction 18 | # 19 | # layer number 1 20 | 6 21 | # layer number 2 (top layer) 22 | 4 23 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/README.md: -------------------------------------------------------------------------------- 1 | # SPECFEM USERS (VIRTUAL) WORKSHOP 2 | ### October 5-7, 2022 3 | 4 | [Official workshop link](https://sites.google.com/alaska.edu/carltape/home/research/specfem2022?authuser=0) 5 | 6 | Material in this directory pertains to the SPECFEM User's workshop, held 7 | virtually from October 5-7, 2022. Day 0 material (October 4) is used for 8 | workshop and container testing. 
9 | 10 | Workshop materials is composed of interactive Jupyter Notebooks which participants are 11 | able to follow along to run/understand SPECFEM2D/3D examples, as well as small 2D inversion 12 | problems using the adjTomo software suite. 13 | 14 | Completed notebooks are available for those that are unable to run notebooks on their own. 15 | Some days also have exercises where Users can put their knowledge to work by setting up and 16 | running their own simulation problems. Exercise solutions are also available. 17 | 18 | All notebooks **must** be run inside a Docker container running Jupyter Lab. 19 | See instructions for installing the Docker container [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md). 20 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/container_git_hashes.md: -------------------------------------------------------------------------------- 1 | # adjTomo Docker Container 2 | For the SPECFEM Users Workshop (Oct. 5-7, 2022) 3 | 4 | This is a reference file that lists the Git hashes for all the packages that are installed in the 5 | adjTomo Docker Container. Although the static image itself should be the direct record of which 6 | versions are installed, I think it's useful to have a separate record of exactly which software 7 | was used so that we can **always** re-run the workshop material in the future, despite upgrades or 8 | changes in the underlying software. 9 | 10 | >__Note:__ adjDocs was being constantly updated throughout the workshop. The commit shown here 11 | represents the latest version which contains all updates and any bug fixes that happenend 12 | after the workshop was finished. 
13 | 14 | -------------- 15 | 16 | ## adjTomo Software Suite 17 | - adjDocs: [be7b0525e7f6d2a5e1ae453fa3c8e81b7b77a9dd](https://github.com/adjtomo/adjdocs/tree/be7b0525e7f6d2a5e1ae453fa3c8e81b7b77a9dd) 18 | - SeisFlows: [ba9646506d934547a61c5b9c0cc87f76359e5a87](https://github.com/adjtomo/seisflows/tree/ba9646506d934547a61c5b9c0cc87f76359e5a87) 19 | - Pyatoa: [2db6687bc4973c15f150da9e89376ab6a0945e49](https://github.com/adjtomo/pyatoa/tree/2db6687bc4973c15f150da9e89376ab6a0945e49) 20 | - PySEP: [9e1204588c060f71982b4d4bece6382908b88c20](https://github.com/uafgeotools/pysep/tree/9e1204588c060f71982b4d4bece6382908b88c20) 21 | 22 | ## SPECFEM 23 | - SPECFEM2D: [604f766101c9ece3d588d61e6275d81d50dd8e65](https://github.com/geodynamics/specfem2d/tree/604f766101c9ece3d588d61e6275d81d50dd8e65) 24 | - SPECFEM3D_Cartesian: [4981f1df9ab78f4202a81fa541ed7b33c94fd8c0](https://github.com/geodynamics/specfem3d/tree/4981f1df9ab78f4202a81fa541ed7b33c94fd8c0) -------------------------------------------------------------------------------- /readmes/how_to_version_release.md: -------------------------------------------------------------------------------- 1 | # adjTomo Package Version Release Checklist 2 | 3 | Checklist for any version release relating to semantic version number 4 | incrementation. Please include in any PR to master and make sure all points 5 | are checked off when incrementing any of the version numbers (major, minor, 6 | patch), or provide reason in PR for why certain points are not checked. 
7 | 8 | ## Prior to PR merge: 9 | - [ ] Merge `devel` -> `master` 10 | - [ ] Bump version number `pyproject.toml` 11 | - [ ] Ensure all tests still pass, fix broken tests 12 | - [ ] Update `CHANGELOG` to include all major changes since last version 13 | 14 | ## Following PR merge: 15 | - [ ] Create GitHub version release 16 | - [ ] Publish latest version on PyPi 17 | - [ ] Post on adjTomo Discussion for major and minor version releases 18 | 19 | ## Publishing Package on PyPi 20 | *Useful link: https://realpython.com/pypi-publish-python-package/* 21 | 22 | 1. Ensure your `pyproject.toml` file is set up properly; required fields are name and version 23 | 2. Set dependencies, do **not** pin exact versions but allow for upper and lower bounds; only list direct dependencies 24 | 3. Include `tests/`, `docs/`, license, and MANIFEST files (MANIFIST used for including non-source code material 25 | 4. Ensure you have an account on PyPi and TestPyPi (for testing publishing) 26 | 5. Install `twine` and `build` which are used to build and push packages to PyPi 27 | 6. Build your packages locally, which creates the `.tar.gz` and `.whl` dist files 28 | ```bash 29 | python -m build 30 | ``` 31 | 6. Check that files in your .whl (zip file) are as expected (including everything in 3) 32 | 7. Check dist files with: 33 | ```bash 34 | twine check dist/* 35 | ``` 36 | 8. Upload test package (note: requires TestPyPi account) 37 | ```bash 38 | twine upload -r testpypi dist/* 39 | ``` 40 | 9. 
Upload real package (note: requires PyPi account) 41 | ```bash 42 | twine upload dist/* 43 | ``` 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_A/Mesh_Par_file: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------- 2 | # 3 | # Meshing input parameters 4 | # 5 | #----------------------------------------------------------- 6 | 7 | # coordinates of mesh block in latitude/longitude and depth in km 8 | LATITUDE_MIN = 0.0 9 | LATITUDE_MAX = 134000.0 10 | LONGITUDE_MIN = 0.0 11 | LONGITUDE_MAX = 134000.0 12 | DEPTH_BLOCK_KM = 60.d0 13 | UTM_PROJECTION_ZONE = 11 14 | SUPPRESS_UTM_PROJECTION = .true. 
15 | 16 | # file that contains the interfaces of the model / mesh 17 | INTERFACES_FILE = interfaces.txt 18 | 19 | # file that contains the cavity 20 | CAVITY_FILE = no_cavity.dat 21 | 22 | # number of elements at the surface along edges of the mesh at the surface 23 | # (must be 8 * multiple of NPROC below if mesh is not regular and contains mesh doublings) 24 | # (must be multiple of NPROC below if mesh is regular) 25 | NEX_XI = 36 26 | NEX_ETA = 36 27 | 28 | # number of MPI processors along xi and eta (can be different) 29 | NPROC_XI = 2 30 | NPROC_ETA = 2 31 | 32 | #----------------------------------------------------------- 33 | # 34 | # Doubling layers 35 | # 36 | #----------------------------------------------------------- 37 | 38 | # Regular/irregular mesh 39 | USE_REGULAR_MESH = .true. 40 | # Only for irregular meshes, number of doubling layers and their position 41 | NDOUBLINGS = 0 42 | # NZ_DOUBLING_1 is the parameter to set up if there is only one doubling layer 43 | # (more doubling entries can be added if needed to match NDOUBLINGS value) 44 | NZ_DOUBLING_1 = 40 45 | NZ_DOUBLING_2 = 48 46 | 47 | #----------------------------------------------------------- 48 | # 49 | # Visualization 50 | # 51 | #----------------------------------------------------------- 52 | 53 | # create mesh files for visualisation or further checking 54 | CREATE_ABAQUS_FILES = .false. 55 | CREATE_DX_FILES = .false. 56 | CREATE_VTK_FILES = .true. 
57 | 58 | # path to store the databases files 59 | LOCAL_PATH = ./OUTPUT_FILES/DATABASES_MPI 60 | 61 | #----------------------------------------------------------- 62 | # 63 | # CPML 64 | # 65 | #----------------------------------------------------------- 66 | 67 | # CPML perfectly matched absorbing layers 68 | THICKNESS_OF_X_PML = 12.3d0 69 | THICKNESS_OF_Y_PML = 12.3d0 70 | THICKNESS_OF_Z_PML = 12.3d0 71 | 72 | #----------------------------------------------------------- 73 | # 74 | # Domain materials 75 | # 76 | #----------------------------------------------------------- 77 | 78 | # number of materials 79 | NMATERIALS = 1 80 | # define the different materials in the model as: 81 | # #material_id #rho #vp #vs #Q_Kappa #Q_mu #anisotropy_flag #domain_id 82 | # Q_Kappa : Q_Kappa attenuation quality factor 83 | # Q_mu : Q_mu attenuation quality factor 84 | # anisotropy_flag : 0 = no anisotropy / 1,2,... check the implementation in file aniso_model.f90 85 | # domain_id : 1 = acoustic / 2 = elastic 86 | 1 2300.0 2800.0 1500.0 2444.4 300.0 0 2 87 | 88 | #----------------------------------------------------------- 89 | # 90 | # Domain regions 91 | # 92 | #----------------------------------------------------------- 93 | 94 | # number of regions 95 | NREGIONS = 1 96 | # define the different regions of the model as : 97 | #NEX_XI_BEGIN #NEX_XI_END #NEX_ETA_BEGIN #NEX_ETA_END #NZ_BEGIN #NZ_END #material_id 98 | 1 36 1 36 1 16 1 99 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_C/Mesh_Par_file: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------- 2 | # 3 | # Meshing input parameters 4 | # 5 | #----------------------------------------------------------- 6 | 7 | # coordinates of mesh block in latitude/longitude and depth in km 8 | LATITUDE_MIN = 0.0 9 | 
LATITUDE_MAX = 134000.0 10 | LONGITUDE_MIN = 0.0 11 | LONGITUDE_MAX = 134000.0 12 | DEPTH_BLOCK_KM = 60.d0 13 | UTM_PROJECTION_ZONE = 11 14 | SUPPRESS_UTM_PROJECTION = .true. 15 | 16 | # file that contains the interfaces of the model / mesh 17 | INTERFACES_FILE = interfaces.txt 18 | 19 | # file that contains the cavity 20 | CAVITY_FILE = no_cavity.dat 21 | 22 | # number of elements at the surface along edges of the mesh at the surface 23 | # (must be 8 * multiple of NPROC below if mesh is not regular and contains mesh doublings) 24 | # (must be multiple of NPROC below if mesh is regular) 25 | NEX_XI = 32 26 | NEX_ETA = 32 27 | 28 | # number of MPI processors along xi and eta (can be different) 29 | NPROC_XI = 2 30 | NPROC_ETA = 2 31 | 32 | #----------------------------------------------------------- 33 | # 34 | # Doubling layers 35 | # 36 | #----------------------------------------------------------- 37 | 38 | # Regular/irregular mesh 39 | USE_REGULAR_MESH = .false. 40 | # Only for irregular meshes, number of doubling layers and their position 41 | NDOUBLINGS = 1 42 | # NZ_DOUBLING_1 is the parameter to set up if there is only one doubling layer 43 | # (more doubling entries can be added if needed to match NDOUBLINGS value) 44 | NZ_DOUBLING_1 = 6 45 | 46 | #----------------------------------------------------------- 47 | # 48 | # Visualization 49 | # 50 | #----------------------------------------------------------- 51 | 52 | # create mesh files for visualisation or further checking 53 | CREATE_ABAQUS_FILES = .false. 54 | CREATE_DX_FILES = .false. 55 | CREATE_VTK_FILES = .true. 
56 | 57 | # path to store the databases files 58 | LOCAL_PATH = ./OUTPUT_FILES/DATABASES_MPI 59 | 60 | #----------------------------------------------------------- 61 | # 62 | # CPML 63 | # 64 | #----------------------------------------------------------- 65 | 66 | # CPML perfectly matched absorbing layers 67 | THICKNESS_OF_X_PML = 12.3d0 68 | THICKNESS_OF_Y_PML = 12.3d0 69 | THICKNESS_OF_Z_PML = 12.3d0 70 | 71 | #----------------------------------------------------------- 72 | # 73 | # Domain materials 74 | # 75 | #----------------------------------------------------------- 76 | 77 | # number of materials 78 | NMATERIALS = 2 79 | # define the different materials in the model as: 80 | # #material_id #rho #vp #vs #Q_Kappa #Q_mu #anisotropy_flag #domain_id 81 | # Q_Kappa : Q_Kappa attenuation quality factor 82 | # Q_mu : Q_mu attenuation quality factor 83 | # anisotropy_flag : 0 = no anisotropy / 1,2,... check the implementation in file aniso_model.f90 84 | # domain_id : 1 = acoustic / 2 = elastic 85 | 1 2800.0 4000.0 2100.0 2444.4 300.0 0 2 86 | 2 2300.0 2800.0 1500.0 2444.4 300.0 0 2 87 | 88 | #----------------------------------------------------------- 89 | # 90 | # Domain regions 91 | # 92 | #----------------------------------------------------------- 93 | 94 | # number of regions 95 | NREGIONS = 2 96 | # define the different regions of the model as : 97 | #NEX_XI_BEGIN #NEX_XI_END #NEX_ETA_BEGIN #NEX_ETA_END #NZ_BEGIN #NZ_END #material_id 98 | 1 32 1 32 1 6 1 99 | 1 32 1 32 7 10 2 100 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/additional_material/day_1c/meshfem3D_files/example_B/Mesh_Par_file: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------- 2 | # 3 | # Meshing input parameters 4 | # 5 | #----------------------------------------------------------- 6 | 7 | # coordinates of mesh block in 
latitude/longitude and depth in km 8 | LATITUDE_MIN = 0.0 9 | LATITUDE_MAX = 134000.0 10 | LONGITUDE_MIN = 0.0 11 | LONGITUDE_MAX = 134000.0 12 | DEPTH_BLOCK_KM = 60.d0 13 | UTM_PROJECTION_ZONE = 11 14 | SUPPRESS_UTM_PROJECTION = .true. 15 | 16 | # file that contains the interfaces of the model / mesh 17 | INTERFACES_FILE = interfaces.txt 18 | 19 | # file that contains the cavity 20 | CAVITY_FILE = no_cavity.dat 21 | 22 | # number of elements at the surface along edges of the mesh at the surface 23 | # (must be 8 * multiple of NPROC below if mesh is not regular and contains mesh doublings) 24 | # (must be multiple of NPROC below if mesh is regular) 25 | NEX_XI = 36 26 | NEX_ETA = 36 27 | 28 | # number of MPI processors along xi and eta (can be different) 29 | NPROC_XI = 2 30 | NPROC_ETA = 2 31 | 32 | #----------------------------------------------------------- 33 | # 34 | # Doubling layers 35 | # 36 | #----------------------------------------------------------- 37 | 38 | # Regular/irregular mesh 39 | USE_REGULAR_MESH = .true. 40 | # Only for irregular meshes, number of doubling layers and their position 41 | NDOUBLINGS = 0 42 | # NZ_DOUBLING_1 is the parameter to set up if there is only one doubling layer 43 | # (more doubling entries can be added if needed to match NDOUBLINGS value) 44 | NZ_DOUBLING_1 = 40 45 | NZ_DOUBLING_2 = 48 46 | 47 | #----------------------------------------------------------- 48 | # 49 | # Visualization 50 | # 51 | #----------------------------------------------------------- 52 | 53 | # create mesh files for visualisation or further checking 54 | CREATE_ABAQUS_FILES = .false. 55 | CREATE_DX_FILES = .false. 56 | CREATE_VTK_FILES = .true. 
57 | 58 | # path to store the databases files 59 | LOCAL_PATH = ./OUTPUT_FILES/DATABASES_MPI 60 | 61 | #----------------------------------------------------------- 62 | # 63 | # CPML 64 | # 65 | #----------------------------------------------------------- 66 | 67 | # CPML perfectly matched absorbing layers 68 | THICKNESS_OF_X_PML = 12.3d0 69 | THICKNESS_OF_Y_PML = 12.3d0 70 | THICKNESS_OF_Z_PML = 12.3d0 71 | 72 | #----------------------------------------------------------- 73 | # 74 | # Domain materials 75 | # 76 | #----------------------------------------------------------- 77 | 78 | # number of materials 79 | NMATERIALS = 2 80 | # define the different materials in the model as: 81 | # #material_id #rho #vp #vs #Q_Kappa #Q_mu #anisotropy_flag #domain_id 82 | # Q_Kappa : Q_Kappa attenuation quality factor 83 | # Q_mu : Q_mu attenuation quality factor 84 | # anisotropy_flag : 0 = no anisotropy / 1,2,... check the implementation in file aniso_model.f90 85 | # domain_id : 1 = acoustic / 2 = elastic 86 | 1 2800.0 4000.0 2100.0 2444.4 300.0 0 2 87 | 2 2300.0 2800.0 1500.0 2444.4 300.0 0 2 88 | 89 | #----------------------------------------------------------- 90 | # 91 | # Domain regions 92 | # 93 | #----------------------------------------------------------- 94 | 95 | # number of regions 96 | NREGIONS = 2 97 | # define the different regions of the model as : 98 | #NEX_XI_BEGIN #NEX_XI_END #NEX_ETA_BEGIN #NEX_ETA_END #NZ_BEGIN #NZ_END #material_id 99 | 1 36 1 36 1 12 1 100 | 1 36 1 36 13 16 2 101 | -------------------------------------------------------------------------------- /readmes/contributing_to_adjdocs.md: -------------------------------------------------------------------------------- 1 | # Contributing to adjDocs using the adjTomo container 2 | 3 | This document serves to show contributors how to push changes to the adjDocs repo using a combination of their local machine (for GitHub interface) and the adjTomo software container (to develop changes). 
The motivation here is to:
Wherever you store repositories, you can run 43 | 44 | ```bash 45 | git pull git@github.com:adjtomo/adjdocs.git 46 | # OR 47 | git pull https://github.com/adjtomo/adjdocs.git 48 | ``` 49 | 50 | #### 4) `docker run` 51 | 52 | We want to mount our adjDocs repo **inside** the container so that your local and container are sharing the same repo 53 | 54 | For Mac Intel Chip, Linux and Windows: 55 | ```bash 56 | cd adjdocs 57 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/adjdocs --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04 58 | ``` 59 | 60 | For Mac M1 Chip: 61 | ```bash 62 | cd adjdocs 63 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/adjdocs --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 64 | ``` 65 | 66 | **Note**: if your adjDocs repo seems out of date compared to whats on GitHub, its likely because we haven't rebuilt the container since changes have been pushed. See Note A and pulling fresh changes from inside the container. 67 | 68 | **Note**: if you want to save the work you did on the container to your local, see Note C below. 69 | 70 | #### 5) Edit adjDocs (inside container) 71 | 72 | From **inside** the container, you can make edits to adjDocs using the internal Jupyter Notebook system. Any changes you are making will also be made to your local repository. Please be sure the **clear all output** when saving notebooks to the workshop directory. Probably best practice to **run all** before pushing to make sure that there are no bugs. 73 | 74 | #### 6) `git push` (outside container) 75 | 76 | After completing your changes to adjDocs, go back to your local terminal and push the changes. We do this because the inside of the container doesn't know your Git information, and it doesn't store changes permanently. BUT your local machine doesn't know the containers file system, nor does it have the same software suite installed. 
77 | 78 | ```bash 79 | cd adjdocs # if you're not already here 80 | git push 81 | ``` 82 | 83 | Any questions, feel free to email: `bhchow@alaska.edu` 84 | 85 | 86 | ### Note A: Pulling adjDocs inside the container 87 | 88 | If the adjTomo software suite hasn't changed since your last `docker pull`, but the adjDocs repo has, you can save yourself some time and hassle by just running a `git pull` inside the adjDocs repo, inside the container. That is, from inside the container you can run: 89 | 90 | ```bash 91 | cd /home/scoped/adjdocs 92 | git pull 93 | ``` 94 | 95 | ### Note B: Removing old Docker images 96 | 97 | These Images are large (>5GB), and every time you run `docker pull`, you download another copy. It is often useful then to remove old images. You can check what current images you have by running: 98 | 99 | ```bash 100 | docker images 101 | ``` 102 | 103 | If you have multiple copies of the adjTomo Image, you are free to delete them. Some of these may have **Containers** associated with them (live or exited sessions which are spawned from the Image itself), which will need to be closed first. If you do not have any currently running Containers that you want to preserve, the fastest way to close **all** containers is to run: 104 | 105 | ```bash 106 | docker ps -a # view all open/exited containers 107 | docker container prune # delete all exited containers 108 | ``` 109 | 110 | Once all containers are closed, you can remove unused images by running `docker rmi <IMAGE_ID>`, where the Image ID is listed next to the name of the Image when running `docker images`.
For example, to **delete** an image on my machine, I would run 111 | 112 | ```bash 113 | [~] $ docker images 114 | REPOSITORY TAG IMAGE ID CREATED SIZE 115 | ghcr.io/seisscoped/adjtomo ubuntu20.04 7de5769b1ba7 2 days ago 5.53GB 116 | [~] $ docker rmi 7de5769b1ba7 117 | ``` 118 | 119 | ### Note C: Bind mounting other directories 120 | 121 | If you also want to save results of your development, you can bind mount a work directory to the container. All of the workshop material is saved (inside the container) at `/home/scoped/work`, so this will be our mount target. On your local, find a working directory `<PATH_TO_WORKDIR>` before `docker run`ning your container. You'll also need to replace the path to the adjDocs repository `<PATH_TO_ADJDOCS>` on your local in the command below 122 | 123 | ```bash 124 | docker run -p 8888:8888 \ 125 | --mount type=bind,source=<PATH_TO_ADJDOCS>,target=/home/scoped/adjdocs \ 126 | --mount type=bind,source=<PATH_TO_WORKDIR>,target=/home/scoped/work \ 127 | --shm-size=1gb \ 128 | ghcr.io/seisscoped/adjtomo:ubuntu20.04 129 | ``` 130 | -------------------------------------------------------------------------------- /research/northern_alaska_tomography.md: -------------------------------------------------------------------------------- 1 | # An (ongoing) Inversion Walkthrough for northern Alaska 2 | 3 | ## Motivation 4 | As I step into my own inversion project for northern Alaska, I'd like to keep track of all my steps in a sort of diagrammatic walkthrough. 5 | This would be something like a flowchart with each of the tasks I had to complete (from data gathering to model updates), and the packages or code snippets I used to create them. 6 | 7 | It is currently difficult to determine which package should be used where, and how all the adjTomo suite is meant to work in conjunction, so my hope is that a real world example, and the documentation that falls out of it, can provide users a point of reference when using the suite for their own research.
8 | 9 | For now I will list the steps that I have taken or will take, and the package used to tackle it. I hope in the future each of these tasks has a set of hyperlinks to docs pages or code snippets that show exactly how these things happened. 10 | 11 | ## Tasks 12 | 13 | ### A. Run a Forward Simulation 14 | 15 | #### 1. Decide study region, determine domain boundaries [manually] 16 | - Our original proposal was to study northeastern Alaska so this decision was previously made 17 | - I use a bounding box tool to determine the geographic extent of the region in question (e.g., https://boundingbox.klokantech.com/) 18 | >__Boundary Note:__ Try to add some buffer boundary so that events and stations are not right on the edge of your mesh. 19 | I'm not sure there is an accepted value, but I try to add a few wavelengths. 20 | 21 | #### 2. Generate hexahedral mesh [meshfem3D] 22 | - Due to the scale of the problem (continental) we choose to use SPECFEM3D_GLOBE 23 | - We use the SPECFEM internal mesher (Meshfem3D) to generate our truncated one chunk mesh 24 | - ETOPO4 is used for topography (already available in SPECFEM3D_GLOBE) 25 | 26 | #### 3. Choose velocity model [specfem3d_globe] 27 | - At the moment I have chosen to use a model present in the software (Crust2.0 + S20RTS with ETOPO4 topography) 28 | - In the future we may choose more heterogeneous/complex 3D models of our study region 29 | 30 | #### 4. Determine example source, generate CMTSOLUTION [[PySEP](https://github.com/adjtomo/pysep)] 31 | - We need an appropriate earthquake to run a simulation 32 | - The [Kaktovic earthquake](https://earthquake.usgs.gov/earthquakes/eventpage/ak20076877#moment-tensor) comes to mind as a large event in the region. 
Perhaps too large to use a point source approximation, but aftershocks should be reasonable 33 | - Searching through the [GCMT catalogue](https://www.globalcmt.org/), I find an appropriate event of M5.1 (201808121602B) 34 | - I can directly copy-paste the CMTSOLUTION from their website, or use PySEP to grab it for me given an origin time. This will be useful in the future when grabbing an entire catalog of earthquakes 35 | 36 | ```python 37 | from pysep.utils.mt import get_gcmt_moment_tensors 38 | cat = get_gcmt_moment_tensors(origintime="2018-08-12T16:02:09", magnitude=5.1) 39 | cat.write('CMTSOLUTION_201808121602B',format="CMTSOLUTION") 40 | ``` 41 | 42 | #### 5. Gather stations in the region [[PySEP](https://github.com/adjtomo/pysep)] 43 | - Now I need a STATIONS file for SPECFEM, defining existing broadband seismic stations in this region 44 | - We can use ObsPy to gather station information from IRIS and PySEP to write them out into the STATIONS format expected by SPECFEM 45 | - https://github.com/adjtomo/pysep/wiki/6.-Cookbook#create-stations-file-for-specfem 46 | 47 | #### 6. Run simulation 48 | 49 | - Now we can run meshfem and specfem to generate synthetic seismograms 50 | - Visualize the mesh and model using ParaView 51 | - Make adjustments to mesh, model or receivers based on the result of this example forward simulation 52 | 53 | ### B. Set Up Inversion 54 | 55 | #### 1. 
Gather catalog of moment tensors [[PySEP](https://github.com/adjtomo/pysep) + ObsPy] 56 | 57 | - We can use PySEP to gather moment tensors for our specified region 58 | 59 | ```python 60 | from obspy import UTCDateTime, Catalog 61 | from obspy.clients.fdsn import Client 62 | from pysep.utils.mt import get_usgs_moment_tensor, get_gcmt_moment_tensor 63 | 64 | c = Client("IRIS") 65 | cat = c.get_events(starttime=UTCDateTime("2000-01-01"), 66 | endtime=UTCDateTime("2022-12-31"), 67 | minmagnitude=4.0, maxmagnitude=6.0, 68 | minlatitude=66., maxlatitude=71., 69 | minlongitude=-168., maxlongitude=-140., 70 | maxdepth=60., orderby="magnitude-asc", 71 | includeallorigins=False, 72 | includeallmagnitudes=False, 73 | ) 74 | 75 | # This recovers 90 USGS moment tensors 76 | usgs_cat = Catalog() 77 | for event in cat: 78 | mt = get_usgs_moment_tensor(event) 79 | if mt: 80 | usgs_cat.extend(mt) 81 | 82 | for event in cat_w_mt: 83 | tag = event.preferred_origin().resource_id.id.split("/")[3] 84 | if hasattr(event, "focal_mechanisms") and event.focal_mechanisms: 85 | try: 86 | event.write(f"CMTSOLUTION_{tag}", format="CMTSOLUTION") 87 | except Exception as e: 88 | continue 89 | 90 | # This recovers 30 GCMT moment tensors 91 | gcmt_cat = Catalog() 92 | for event in cat: 93 | mt = get_gcmt_moment_tensor(event) 94 | gcmt_cat.extend(mt) 95 | 96 | for event in gcmt_cat: 97 | tag = event.resource_id.id.split("/")[2] 98 | try: 99 | event.write(f"CMTSOLUTION_{tag}", format="CMTSOLUTION") 100 | except Exception: 101 | continue 102 | ``` 103 | 104 | #### 2. 
(Optional) Visualize moment tensors [[based_alaska](https://github.com/bch0w/based_alaska)] 105 | 106 | - Using the collected QUAKEML and STATIONS file, Based Alaska can plot a basemap using PyGMT 107 | ![nalaska_map](https://user-images.githubusercontent.com/23055374/206031355-ffb2cc75-096b-4abb-bc32-09f0a8b6ac08.png) 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /readmes/manual_install_instructions.md: -------------------------------------------------------------------------------- 1 | # Manual Software Install for SPECFEM Users Workshop Oct 5-7, 2022 2 | 3 | - These instructions are **use at your own risk**, and meant for advanced Users 4 | - Manual install instructions are aimed at those running M1 Macs that cannot find alternatives 5 | - Participants who are able should use the Docker container provided in the workshop email 6 | - The following instructions must be run from your terminal, on your **local** machine 7 | - Following along the the notebooks will require you to make edits to the root directory of the adjDocs notebooks 8 | - If you do not follow instructions exactly, there is a good chance that the workshop material will not work as expected. 
9 | - **NOTE:** You will likely need ~10GB of free disk space for this installation and the workshop 10 | 11 | 12 | ## Step 1: Download/Install Miniconda 13 | 14 | https://docs.conda.io/en/latest/miniconda.html 15 | 16 | Please follow along the instructions below and install Conda for your OS 17 | 18 | ## Step 2: Create a Conda environment 19 | - Our container runs Python version 3.10 20 | - All our Python-based tools are installed into a Conda environment 21 | - Conda environments help preserve root environments 22 | 23 | ```bash 24 | conda create -n adjtomo python=3.10 25 | conda activate adjtomo 26 | ``` 27 | 28 | ## Step 3: Download required software from GitHub 29 | - Create an empty working directory where all workshop files are stored 30 | - Here we choose a directory called 'scoped', created in our Home directory 31 | - Remember where this directory is, you will need it during the workshop 32 | - Your SPECFEM2D and SPECFEM3D repositories will differ from the workshop versions. This is OK. 
33 | 34 | ```bash 35 | # Create an empty working directory under the Home directory 36 | mkdir -p $HOME/scoped 37 | cd $HOME/scoped 38 | 39 | # Download the adjTomo software suite 40 | git clone https://github.com/adjtomo/adjdocs 41 | git clone --branch devel https://github.com/adjtomo/seisflows 42 | git clone --branch devel https://github.com/adjtomo/pyatoa 43 | git clone https://github.com/uafgeotools/pysep.git 44 | 45 | # Download SPECFEM2D and SPECFEM3D_Cartesian 46 | git clone --branch devel https://github.com/geodynamics/specfem2d 47 | git clone --branch devel --depth=1 https://github.com/geodynamics/specfem3d 48 | ``` 49 | 50 | ## Step 4: Install the adjTomo software suite 51 | - These steps install the adjTomo software and all their dependencies 52 | - **Run lines one-by-one** in case something fails, so you know where the failure point is 53 | - Make sure that you have run the 'conda activate' command in Step 2 54 | - To confirm this, you should see the prefix '(adjtomo)' on your command line prompt 55 | 56 | ### Pip Check Note 57 | - It is possible that Pip can point to your local Pip and not the Conda environment's version 58 | - If this is the case, you will need to manually call the Conda environment's version 59 | - To check your Pip installation, use the 'which' command 60 | 61 | ```bash 62 | which pip 63 | ``` 64 | 65 | - If the path does **not** include the word 'conda' somewhere, you will need to **manually** set your Pip path 66 | - i.e., if 'which pip' returned */usr/bin/pip*, then see 'Manual Pip Location Note' 67 | - If 'which pip' returned something like *~/miniconda3/envs/adjtomo/bin/pip*, then you are OK and can **skip** to 'Install adjTomo Software' 68 | 69 | ### Manual Pip Location Note 70 | - A quick way of pointing to your Conda environment's version of Pip is to do: 71 | 72 | ```bash 73 | which python 74 | ``` 75 | 76 | - The result should point you towards your Conda install 77 | - e.g., it should look something like
'~/miniconda3/envs/adjtomo/bin/python' 78 | - **Replace** 'python' with 'pip' 79 | - e.g., ~/miniconda3/envs/adjtomo/bin/**pip** 80 | - Anytime Pip is called in the 'Install adjTomo Software' section, replace the word 'pip' with the full path to your Conda environment's Pip 81 | - *Thanks to Andrea Escandon for troubleshooting this* 82 | 83 | 84 | ### Install adjTomo Software 85 | 86 | ```bash 87 | cd $HOME/scoped/pyatoa 88 | conda install -n adjtomo --file requirements.txt -y 89 | pip install -e . # <- See 'Pip Check Note' before running this 90 | 91 | cd $HOME/scoped/seisflows 92 | conda install -n adjtomo --file requirements.txt -y 93 | pip install -e . # <- See 'Pip Check Note' before running this 94 | 95 | cd $HOME/scoped/pysep 96 | conda install -n adjtomo --file requirements.txt -y 97 | pip install -e . # <- See 'Pip Check Note' before running this 98 | 99 | # We need Jupyter to run the adjDocs workshop notebooks 100 | conda install -n adjtomo jupyter -y 101 | ``` 102 | 103 | - To verify your installation, run the following command to list out downloaded packages 104 | - Check that 'seisflows', 'pyatoa', 'pysep' and 'jupyter' are all on this list 105 | 106 | ```bash 107 | conda list 108 | ``` 109 | 110 | ## Step 5: Install SPECFEM 111 | - The following steps configure and compile SPECFEM2D and SPECFEM3D with MPI 112 | - Your computer will need a FORTRAN compiler and MPI (see Compiler Note 1 if you are unsure) 113 | - Mac users see 'Apple Xcode Note' below 114 | - If you do not have these, the install will likely fail. 
See 'Compiler Note 2' below 115 | 116 | ```bash 117 | cd $HOME/scoped/specfem2d 118 | ./configure FC=gfortran CC=gcc CXX=mpicxx MPIFC=mpif90 --with-mpi 119 | make all 120 | 121 | cd $HOME/scoped/specfem3d 122 | ./configure FC=gfortran CC=gcc CXX=mpicxx MPIFC=mpif90 --with-mpi 123 | make all 124 | ``` 125 | 126 | ### Apple XCode Note 127 | - **Apple Mac** users will need Xcode to have access to the GNU Compiler Collection (GCC) 128 | - To check whether you have Xcode or not, you can do 129 | 130 | ```bash 131 | which xcodebuild 132 | ``` 133 | 134 | - If this command does not return anything, you will need to install Xcode 135 | - Xcode can be found here: https://developer.apple.com/xcode/ 136 | 137 | 138 | ### Compiler Note 1 139 | - To check if you have the required compilers, run the following 140 | - Each statement should return a path to an existing compiler 141 | - If any return nothing (blank line or immediate return to prompt), see 'Compiler Note 2' 142 | 143 | ```bash 144 | which gcc 145 | which gfortran 146 | which mpif90 147 | ``` 148 | 149 | ### Compiler Note 2 150 | - Only required if you do not have the required FORTRAN compiler or MPI necessary for Step 5 151 | - We use Mac's native 'Homebrew' to install a FORTRAN compiler and OpenMPI 152 | - First run the steps here in Note 1 and then **re-attempt** Step 5 153 | - If things **still** do not work, attempt 'Compiler Note 3' 154 | 155 | ```bash 156 | brew update 157 | brew upgrade 158 | brew install gcc 159 | brew install gfortran 160 | brew install openmpi 161 | ``` 162 | 163 | ### Compiler Note 3 164 | - If your FORTRAN compiler **still** does not work you may try removing CommandLineTools 165 | - Some relevant links on this procedure in case you are worried 166 | - https://www.cocoanetics.com/2012/07/you-dont-need-the-xcode-command-line-tools/ 167 | - https://apple.stackexchange.com/questions/308943/how-do-i-uninstall-the-command-line-tools-for-xcode 168 | 169 | ```bash 170 | sudo rm -r
/Library/Developer/CommandLineTools 171 | xcode-select --install 172 | ``` 173 | 174 | ## Step 6: Check Installation 175 | - Run the Day 0 Notebook inside Jupyter to make sure things have worked properly 176 | 177 | ```bash 178 | cd $HOME/scoped/adjdocs/workshops/2022-10-05_specfem_users/ 179 | jupyter notebook 180 | ``` 181 | 182 | - The 'Jupyter' command will have opened up an interface in your web browser 183 | - **Click** on `day_0_container_testing.ipynb` 184 | - **Click** `Run All` which is the $\blacktriangleright\blacktriangleright$ button at the top 185 | -------------------------------------------------------------------------------- /workshops/2024-05-21_scoped_uw/exercise_solutions/solution_2_forward_simulations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "347bc0f7-6dbe-454c-a95a-8469d8efe1c2", 6 | "metadata": {}, 7 | "source": [ 8 | "# 2024 SCOPED Workshop — Wavefield Simulations Using SPECFEM\n", 9 | "## Notebook 2: Forward Simulations — Exercise Solutions\n", 10 | "\n", 11 | "- Here we build upon material learned in Notebook 1\n", 12 | "- This notebook allows Users to play around with their own SPECFEM2D homogeneous halfspace example in an exercise \n", 13 | "- **Objective:** Familiarize Users with setting `SOURCE` and `STATION` attributes, adjusting velocity model parameters, and assessing simulation results.\n", 14 | "- These instructions should be run from inside a Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)). 
\n", 15 | "\n", 16 | "-----------\n", 17 | "\n", 18 | "**Relevant Links:** \n", 19 | "- This Notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2024-5-21_scoped_uw/2_forward_simulations.ipynb\n", 20 | "\n", 21 | "**Jupyter Quick Tips:**\n", 22 | "\n", 23 | "- **Run cells** one-by-one by hitting the $\\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 24 | "- **Run all cells** by hitting the $\\blacktriangleright\\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 25 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 26 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 27 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 28 | "- Commands that start with `%` are Jupyter Magic commands.\n", 29 | "- To time a task, put a `%time` before the command (e.g., `%time ! 
ls`)\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "id": "8f91e266-8d06-4ef5-ad68-acf30b1e3c16", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# Python packages required for this notebook\n", 40 | "import numpy as np\n", 41 | "import matplotlib.pyplot as plt\n", 42 | "from IPython.display import Image" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "6fcefb08-43e0-4599-b98f-96bdab8e0f24", 48 | "metadata": {}, 49 | "source": [ 50 | "---------------------\n", 51 | "## 4) Forward Simulation Exercise\n", 52 | "\n", 53 | "- Participants will now be asked to edit simulation parameters to run their own simulation \n", 54 | "- Some things that you are asked to try include:\n", 55 | " 1) Change the parameters of the homogeneous halfspace model defined in the `Par_file` \n", 56 | " 2) Define a *STATIONS* file with a uniform grid of stations to record synthetics throughout the domain\n", 57 | " 3) Choose a different source, or increase the energy released by the source (using the moment tensor)\n", 58 | " 4) Re-run the mesher and solver to get new synthetics\n", 59 | " 5) Analyze the new results in comparison to the old results\n", 60 | "- First we set up a working directory for you " 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "id": "6b9a5277-4a40-42aa-8fca-9b8046f0e84a", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "! rm -rf /home/scoped/work/exercise_1\n", 71 | "! mkdir -p /home/scoped/work/exercise_1\n", 72 | "%cd /home/scoped/work/exercise_1\n", 73 | "\n", 74 | "# Symlink the executables, copy example DATA/, create empty OUTPUT_FILES\n", 75 | "! ln -s /home/scoped/specfem2d/bin .\n", 76 | "! cp -r /home/scoped/specfem2d/EXAMPLES/Tape2007/DATA .\n", 77 | "! mkdir OUTPUT_FILES\n", 78 | "\n", 79 | "# Set the Par_file\n", 80 | "! cp DATA/Par_file_Tape2007_onerec DATA/Par_file\n", 81 | "\n", 82 | "! 
ls" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "id": "8cd26365-87e4-40bd-800f-868afbd69676", 88 | "metadata": { 89 | "tags": [] 90 | }, 91 | "source": [ 92 | "#### Task 1: Edit the Velocity Model\n", 93 | "- Change the velocity model parameters in the homogeneous halfspace model\n", 94 | "- Remember, the velocity model is defined in the `Par_file`\n", 95 | "- Try **increasing** seismic velocity (Vp and Vs) by 10\\%\n", 96 | "- You can use Python, Bash, `seisflows sempar` or a Text Editor to do this " 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "id": "652544c7-a0aa-4865-b55c-e47d879a99d4", 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "# Figure out current values\n", 107 | "! seisflows sempar -P DATA/Par_file velocity_model" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "id": "9601785b-282e-4f0d-93e6-a7785c4d24e1", 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "# Values figured out from the Par_file, increased by 10%\n", 118 | "vp = 5800. * 1.1\n", 119 | "vs = 3500. * 1.1\n", 120 | "\n", 121 | "print(f\"vp={vp:.2f}; vs={vs:.2f}\")" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "id": "a6a2fbff-96c1-436e-9c9f-5fecf13ec865", 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "# Overwrite the current velocity model with a +10% model\n", 132 | "! 
seisflows sempar -P DATA/Par_file velocity_model \"1 1 2600.d0 6380.d0 3850.0d0 0 0 10.d0 10.d0 0 0 0 0 0 0\"" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "id": "6c61455a-91a7-49fd-a1c4-68da1576bd64", 138 | "metadata": {}, 139 | "source": [ 140 | "#### Task 2: Create a New STATIONS File\n", 141 | "- Define a STATIONS file that covers the **entire** domain with a uniform grid spacing of: \n", 142 | " - dx = 80km \n", 143 | " - dz = 80km\n", 144 | " - x_start = 0km\n", 145 | " - z_start = 0km\n", 146 | "- **Or** Create your own station configuration. Some examples: spiral, concentric rings, dense linear array (like DAS)\n", 147 | "- You can find the X and Z dimensions of the mesh in the `Par_file` and the `interfaces` file, respectively \n", 148 | "- Use Python/NumPy to loop values, or simply write out a text file manually with the text editor\n", 149 | "- *Look* at *DATA/STATIONS_checker* for an example of how the file should look\n", 150 | "- **NOTE**: The last two columns (burial, elevation) can be set to 0 " 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "id": "87933592-9a21-4469-a137-55aba4623735", 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "# Par_file defines the 'X' dimension of the mesh\n", 161 | "! head -293 DATA/Par_file | tail -n 3" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": null, 167 | "id": "9f410ffc-a8d1-472d-9f35-aca9cbf1d327", 168 | "metadata": {}, 169 | "outputs": [], 170 | "source": [ 171 | "# Interfaces defines the 'Z' dimension of the mesh\n", 172 | "! 
cat DATA/interfaces_Tape2007.dat" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "id": "aa600cf4-0c81-4cad-acdc-5ab811f7eb47", 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [ 182 | "# Loop through X and Z discretizations of 40km\n", 183 | "i = 0\n", 184 | "with open(\"DATA/STATIONS\", \"w\") as f:\n", 185 | " for x in range(0, 480000 + 1, 80000):\n", 186 | " for z in range(0, 480000 + 1, 80000):\n", 187 | " f.write(f\"S{i:0>6} AA {x:9.2f} {z:9.2f} 0. 0.\\n\")\n", 188 | " i += 1\n", 189 | " \n", 190 | "! cat DATA/STATIONS" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "id": "b1b6e9be-74d5-4362-9fe6-ab424f2a47c6", 196 | "metadata": {}, 197 | "source": [ 198 | "#### Task 3: Choose and edit a SOURCE file\n", 199 | "\n", 200 | "- Use one of the original sources as a template for your new source\n", 201 | "- **Set** the location of your source in the exact **middle** of your domain (or a location of your choice!) \n", 202 | "- **Set** the moment tensor (Mxx, Mzz, Mxz) of your event to make this an **explosive** source (or a mechanism of your choice!) \n", 203 | "- Don't change the scaling on the moment tensor " 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": null, 209 | "id": "95d2adb6-09f0-4a01-91f9-958c3c94907c", 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "# 008 is roughly middle. Figure out what the current moment tensor components are\n", 214 | "! cp DATA/SOURCE_008 DATA/SOURCE\n", 215 | "! 
head -51 DATA/SOURCE | tail -n 3" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "id": "683dd391-72df-4a63-8c59-570ac8e86120", 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [ 225 | "lines = open(\"DATA/SOURCE\", \"r\").readlines()\n", 226 | "for i, line in enumerate(lines):\n", 227 | " if \"xs\" in line:\n", 228 | " lines[i] = \"xs = 240000\\n\"\n", 229 | " elif \"zs\" in line: \n", 230 | " lines[i] = \"zs = 240000\\n\"\n", 231 | " else:\n", 232 | " for value in [\"Mxx\", \"Mzz\", \"Mxz\"]:\n", 233 | " if value in line:\n", 234 | " lines[i] = f\"{value} = 1.0\\n\"\n", 235 | "\n", 236 | "with open(\"DATA/SOURCE\", \"w\") as f:\n", 237 | " f.writelines(lines)\n", 238 | " \n", 239 | "# Check the file\n", 240 | "! head -5 DATA/SOURCE\n", 241 | "! echo\n", 242 | "! head -51 DATA/SOURCE | tail -n 3" 243 | ] 244 | }, 245 | { 246 | "cell_type": "markdown", 247 | "id": "c5f9886a-46bf-4834-8a5f-099c9da6aefd", 248 | "metadata": {}, 249 | "source": [ 250 | "#### Task 4: Run the Solver and Analyze Outputs\n", 251 | "\n", 252 | "- Run the mesher and solver with your new experimental setup and 4 MPI processes \n", 253 | "- **Remember** to tell SPECFEM to use your `STATIONS` file and not its internal representation of stations\n", 254 | "- **Remember** to tell SPECFEM that we want to run this with 4 processors\n", 255 | "- Look at the source images to see if your explosion makes sense\n", 256 | "- Plot waveforms output from your gridded stations" 257 | ] 258 | }, 259 | { 260 | "cell_type": "code", 261 | "execution_count": null, 262 | "id": "eb64e05c-d47a-49eb-a2a8-95df8e9189ae", 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [ 266 | "! seisflows sempar -P DATA/Par_file nproc 4\n", 267 | "! seisflows sempar -P DATA/Par_file use_existing_stations .true.\n", 268 | "\n", 269 | "! mpirun -n 4 bin/xmeshfem2D > OUTPUT_FILES/output_mesher.txt\n", 270 | "! 
mpirun -n 4 bin/xspecfem2D > OUTPUT_FILES/output_solver.txt\n", 271 | "\n", 272 | "! tail OUTPUT_FILES/output_solver.txt" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": null, 278 | "id": "26c2ad29-00bc-45d0-9ba5-4f9e1c7eb871", 279 | "metadata": {}, 280 | "outputs": [], 281 | "source": [ 282 | "# Check your station configuration and source location\n", 283 | "Image(\"OUTPUT_FILES/forward_image000001200.jpg\")" 284 | ] 285 | } 286 | ], 287 | "metadata": { 288 | "kernelspec": { 289 | "display_name": "Python 3 (ipykernel)", 290 | "language": "python", 291 | "name": "python3" 292 | }, 293 | "language_info": { 294 | "codemirror_mode": { 295 | "name": "ipython", 296 | "version": 3 297 | }, 298 | "file_extension": ".py", 299 | "mimetype": "text/x-python", 300 | "name": "python", 301 | "nbconvert_exporter": "python", 302 | "pygments_lexer": "ipython3", 303 | "version": "3.12.3" 304 | } 305 | }, 306 | "nbformat": 4, 307 | "nbformat_minor": 5 308 | } 309 | -------------------------------------------------------------------------------- /workshops/2025-08-04_cig-tng/readme.md: -------------------------------------------------------------------------------- 1 | # CIG:TNG 2025 Specfem/SeisFlows Workshop Material 2 | 3 | Welcome! In this repository you will find SPECFEM/SeisFlows workshop material for the [CIG:TNG Community Meeting](https://geodynamics.org/events/details/349) to be held August 04, 2025. Here you will find the workshop notebooks and below you will find installation instructions for the Docker container that hosts the software needed for this material. 
4 | 5 | ## Docker Image Installation Instructions 6 | 7 | - First you will need to install Docker: https://www.docker.com/ 8 | - The following instructions are meant to **install** a Docker Image, which contains all the software you need to participate 9 | - This Docker Image is ~3.98 GB, workshop material will create another ~1GB; please ensure you have sufficient disk space 10 | - See the troubleshooting notes at the bottom for common issues you might encounter 11 | - If you have any issues running the steps below that are not solved by the troubleshooting notes, feel free to open up a [GitHub issue](https://github.com/adjtomo/adjdocs/issues) 12 | 13 | ### 1) Open a Terminal 14 | - The commands in the following sections will need to be run from your computer's terminal program 15 | - To open the terminal on your **local machine**: 16 | - Mac: Type 'terminal' into Spotlight 17 | - Windows: Type 'powershell' in the system search bar 18 | - Linux: Type 'terminal' in the system search bar 19 | 20 | ### 2) Start Docker 21 | - Docker will need to be running in the background before we can use it from the command line 22 | - On **Windows or Mac**, use the search bar to find and open `Docker` `Docker.app` or `Docker Desktop` 23 | - On **Linux**, run the following from the command line: 24 | ```bash 25 | systemctl start docker 26 | ``` 27 | 28 | 29 | ### 3) Pull Docker Image 30 | 31 | - This will download the Docker Image from the SeisSCOPED GitHub repository 32 | - Mac Intel Chip, Windows and Linux Users (AMD architecture) please follow instructions in A 33 | - Mac Silicon Chip (ARM architecture; M1, M2, M3...) 
please follow instructions in B 34 | 35 | #### 3A) Docker pull for Mac Intel, Windows, Linux 36 | ```bash 37 | docker pull --platform amd64 ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 38 | ``` 39 | 40 | #### 3B) Docker pull for Mac Silicon 41 | 42 | Installs the Docker Image from GitHub 43 | ```bash 44 | docker pull --platform arm64 ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 45 | ``` 46 | 47 | ### 4) Make Empty Working Directory 48 | 49 | To save the results we obtain from inside our container, we will need to mount our local filesystem. 50 | **> Please `cd` (change directory) to an empty working directory (example below)** 51 | 52 | ```bash 53 | # NOTE: This is only an EXAMPLE code snippet. Please create an 54 | # appropriate empty working directory on your machine 55 | mkdir -p ~/Work/cig_tng_2025 56 | cd ~/Work/cig_tng_2025 57 | ``` 58 | 59 | ### 5) Run Container 60 | 61 | Now run the container to open a JupyterLab instance (see notes below for explanation of this command) 62 | ```bash 63 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 64 | ``` 65 | 66 | ### 6) Open JupyterLab 67 | 68 | - After running the `docker run` command, you will see some output that ends with a web address, e.g,. 69 | 70 | ```bash 71 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 72 | $ docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 73 | [I 2024-05-16 15:59:20.338 ServerApp] jupyter_lsp | extension was successfully linked. 74 | [I 2024-05-16 15:59:20.341 ServerApp] jupyter_server_terminals | extension was successfully linked. 75 | [I 2024-05-16 15:59:20.343 ServerApp] jupyterlab | extension was successfully linked. 76 | [I 2024-05-16 15:59:20.346 ServerApp] notebook | extension was successfully linked. 
77 | [I 2024-05-16 15:59:20.346 ServerApp] Writing Jupyter server cookie secret to /home/scoped/.local/share/jupyter/runtime/jupyter_cookie_secret 78 | [I 2024-05-16 15:59:20.518 ServerApp] notebook_shim | extension was successfully linked. 79 | [I 2024-05-16 15:59:20.529 ServerApp] notebook_shim | extension was successfully loaded. 80 | [I 2024-05-16 15:59:20.530 ServerApp] jupyter_lsp | extension was successfully loaded. 81 | [I 2024-05-16 15:59:20.530 ServerApp] jupyter_server_terminals | extension was successfully loaded. 82 | [I 2024-05-16 15:59:20.532 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.10/site-packages/jupyterlab 83 | [I 2024-05-16 15:59:20.532 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab 84 | [I 2024-05-16 15:59:20.532 LabApp] Extension Manager is 'pypi'. 85 | [I 2024-05-16 15:59:20.539 ServerApp] jupyterlab | extension was successfully loaded. 86 | [I 2024-05-16 15:59:20.540 ServerApp] notebook | extension was successfully loaded. 87 | [I 2024-05-16 15:59:20.541 ServerApp] Serving notebooks from local directory: /home/scoped 88 | [I 2024-05-16 15:59:20.541 ServerApp] Jupyter Server 2.14.0 is running at: 89 | [I 2024-05-16 15:59:20.541 ServerApp] http://c80bdafc9ee2:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 90 | [I 2024-05-16 15:59:20.541 ServerApp] http://127.0.0.1:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 91 | [I 2024-05-16 15:59:20.541 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation). 
92 | [C 2024-05-16 15:59:20.543 ServerApp] 93 | 94 | To access the server, open this file in a browser: 95 | file:///home/scoped/.local/share/jupyter/runtime/jpserver-8-open.html 96 | Or copy and paste one of these URLs: 97 | http://c80bdafc9ee2:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 98 | http://127.0.0.1:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 99 | [I 2024-05-16 15:59:20.806 ServerApp] Skipped non-installed server(s): bash-language-server, dockerfile-language-server-nodejs, javascript-typescript-langserver, jedi-language-server, julia-language-server, pyright, python-language-server, python-lsp-server, r-languageserver, sql-language-server, texlab, typescript-language-server, unified-language-server, vscode-css-languageserver-bin, vscode-html-languageserver-bin, vscode-json-languageserver-bin, yaml-language-server 100 | ``` 101 | 102 | - Please **open** the bottom link (starting with http://127.0.0.1:8888) with a web browser 103 | - You will be met with the following looking web page 104 | 105 | ![JupyterLab](https://user-images.githubusercontent.com/23055374/193501549-8f0d9429-1414-40c7-ad4d-0bdcf8ad6e55.png) 106 | 107 | ### 7) Run Workshop Material 108 | 109 | - Using the navigation bar on the left, click through the following directories 110 | - *adjdocs -> workshops -> 2025-08-04_cig-tng* 111 | - **Clicking** any of the workshop notebooks (e.g., *1_intro_specfem2d.ipynb*) will open a Jupyter Notebook 112 | - See 'Jupyter Quick Tips' at the top of any notebook for information on how to run through a notebook 113 | - Run cells one-by-one and sequentially, read along with text to follow workshop material 114 | 115 | --------------- 116 | 117 | > Great! You are ready to run the workshop material, please follow along with the material in each of the notebooks. 
The material below is for when you are done with the workshop (Workshop Shut Down Procedures), or if you have trouble with any of the above install instructions (Troubleshooting Notes) 118 | 119 | --------------- 120 | 121 | ## Workshop Shut Down Procedures 122 | 123 | - These instructions are for when you are finished with the workshop and want to shut down the container and free up space 124 | - Before you approach any of these shut down procedures, please be sure to save any work you might have created 125 | - 'Created work' may include exercise notebooks that you filled out on your own, or files that you may have generated on your own 126 | 127 | ### a) Removing Files from the *work/* Directory 128 | 129 | - If you want to free up memory taken up by our workshop materials, the easiest way is to do it from **inside** the container 130 | - **WARNING** This will delete all files created by our workshop notebooks. You will not be able to recover these files 131 | - From the *Terminal* **inside** your container, you can run the following commands to remove all the workshop-created files: 132 | 133 | ```bash 134 | # Run the following from inside your container's terminal, not on your local machine 135 | rm -r /home/scoped/work 136 | ``` 137 | 138 | ### b) Closing a Running Container 139 | 140 | - From inside the JupyterLab interface, you can **click** `File -> Shut Down` in the top navigation bar 141 | - **OR** from the terminal where you ran the `docker run` command (which should still be running), you can type `Ctrl + c` on your keyboard to stop a running session 142 | 143 | ### c) Re-opening a Closed Container 144 | 145 | - It is possible to re-open a closed container, however we did not do this during the workshop to keep things simpler 146 | - You may follow this documentation if you want to re-open a closed container (https://docs.docker.com/engine/reference/commandline/container_start/) 147 | 148 | ### d) Deleting Closed Containers 149 | 150 | - Although you 
have closed your container, each container still occupies some memory on your local machine 151 | - This can be freed up if you no longer want to access the container's contents 152 | - Run the following command to free up memory associated with all closed containers 153 | 154 | ```bash 155 | docker container prune 156 | ``` 157 | 158 | ### e) Removing Workshop Docker Image 159 | 160 | - If you want to **remove** our workshop's Docker Image (which takes up ~3.98GB), you will need to identify the Image ID 161 | - To do that, you can run 162 | 163 | ```bash 164 | docker images 165 | ``` 166 | 167 | - You will be met with a list of available Docker Images. Ours has Repository name: *'ghcr.io/seisscoped/adjtomo'*, and Tag: *'ubuntu20.04_jupyterlab'* 168 | - You will need to identify the hash value listed under **IMAGE ID** column and run the following command 169 | 170 | ```bash 171 | docker rmi 172 | ``` 173 | 174 | - **For Example**: To do this on my own machine I would run: 175 | 176 | ```bash 177 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 178 | REPOSITORY TAG IMAGE ID CREATED SIZE 179 | ghcr.io/seisscoped/adjtomo ubuntu20.04_jupyterlab 3a6ddce01b76 20 hours ago 3.99GB 180 | bchow@tern [~] $ docker rmi 3a6ddce01b76 181 | ``` 182 | 183 | -------------- 184 | 185 | > If everything worked and you have successfully completed the steps above, congratulations! Please disregard everything below. If you are having trouble, please see the 'Troubleshooting Notes' section below. 
186 | 187 | -------------- 188 | 189 | ## Troubleshooting Notes 190 | 191 | >__ERROR:__ $(pwd) not recognized in `docker run` command of Step 1 192 | 193 | - *$(pwd)* is a Linux command that might not be recognized by all operating systems 194 | - Please change the *$(pwd)* to the full path to your current working directory, e.g., 195 | 196 | ```bash 197 | # Note that you will need to substitute with your own path 198 | docker run -p 8888:8888 --mount type=bind,source=,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 199 | ``` 200 | 201 | for example on my own computer this might look like: 202 | ```bash 203 | # Do NOT copy-paste this, it is just an example and will not work on your computer 204 | docker run -p 8888:8888 --mount type=bind,source=/Users/Chow/Work/specfem_users_workshop,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 205 | ``` 206 | 207 | >__ERROR:__ *Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?* 208 | 209 | - You need to start Docker or Docker Desktop on your machine. 210 | - On **Windows or Mac**, use the search bar to find 'Docker' or 'Docker Desktop'. 
Open this program and try again 211 | - On **Linux** try running: 212 | ```bash 213 | systemctl start docker 214 | ``` 215 | 216 | >__ERROR:__ Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Post "...": dial unix /var/run/docker.sock: connect: permission denied 217 | - Your user does not have the correct permission to access the Docker daemon 218 | - If you have root privilege on your machine, try running the following: 219 | ```bash 220 | sudo chmod 666 /var/run/docker.sock 221 | ``` 222 | - You may also need to add your user to a Docker group which has the correct privileges ([see this link](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user)) 223 | 224 | >__ERROR__: docker: Error response from daemon: driver failed programming external connectivity on endpoint hopeful_haslett (c3e93dc3530d474e5152cf7ec58d69030da2be534f47fc94bcc6da19177f60f4): Bind for 0.0.0.0:8888 failed: port is already allocated. 225 | - You likely have another task (e.g., Jupyter Notebooks) running on the port we are trying to specify (8888:8888) 226 | - Please change the port map to something else, e.g., *docker run -p 8889:8888*... and try again 227 | - **Note** that you will also have to change the web address that you open in your browser to the new port number (e.g., http://127.0.0.1:8889/lab...) 228 | 229 | 230 | ## Docker Command Explanation 231 | 232 | ```bash 233 | docker pull ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 234 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 235 | ``` 236 | 237 | - `docker pull` downloads the Docker Image from GitHub 238 | - `docker run` launches the container.
The flags in the `run` command are: 239 | - `--mount`: binds our local filesystem (in the current working directory) with the **container's internal filesystem** (at location */home/scoped/work* which resides **inside** the container) 240 | - `--shm-size`: tells Docker to give us 1 Gigabyte of shared memory, which is required for MPI processes 241 | - Note that the *$(pwd)* argument may not be recognized by your operating system. If so, please see the troubleshooting notes below 242 | - You may substitute any directory for *$(pwd)*, even remote filesystems, as long as they are accessible by your current machine 243 | 244 | 245 | 246 | ## Version Tracking 247 | Keeping track of versions used for the workshop. These will also be pinned to the container. 248 | 249 | - SPECFEM2D 8.1.0: baeb71d 250 | - SPECFEM3D 4.1.1: ee3b095 251 | - SeisFlows v3.2.1: e96e888c 252 | - PySEP v0.6.0: 9e77ad3 253 | - Pyatoa v0.4.0: 1a6b86a 254 | -------------------------------------------------------------------------------- /workshops/2024-05-21_scoped_uw/readme.md: -------------------------------------------------------------------------------- 1 | # SCOPED Specfem/SeisFlows Workshop Material 2 | 3 | Welcome! In this repository you will find SPECFEM/SeisFlows workshop material for the [2024 SCOPED UW Workshop](https://seisscoped.org/workshop-2024/) to be held May 20-24, 2024. Here you will find the workshop notebooks and below you will find installation instructions for the Docker container that hosts the software needed for this material.
4 | 5 | ## Docker Image Installation Instructions 6 | 7 | - First you will need to install Docker: https://www.docker.com/get-started/ 8 | - The following instructions are meant to **install** a Docker Image, which contains all the software you need to participate 9 | - This Docker Image is ~3.98 GB, workshop material will create another ~1GB; please ensure you have sufficient disk space 10 | - See the troubleshooting notes at the bottom for common issues you might encounter 11 | - If you have any issues running the steps below that are not solved by the troubleshooting notes, feel free to open up a [GitHub issue](https://github.com/adjtomo/adjdocs/issues) 12 | 13 | ### 1) Open a Terminal 14 | - The commands in the following sections will need to be run from your computer's terminal program 15 | - To open the terminal on your **local machine**: 16 | - Mac: Type 'terminal' into Spotlight 17 | - Windows: Type 'powershell' in the system search bar 18 | - Linux: Type 'terminal' in the system search bar 19 | 20 | ### 2) Start Docker 21 | - Docker will need to be running in the background before we can use it from the command line 22 | - On **Windows or Mac**, use the search bar to find and open `Docker` `Docker.app` or `Docker Desktop` 23 | - On **Linux**, run the following from the command line: 24 | ```bash 25 | systemctl start docker 26 | ``` 27 | 28 | 29 | ### 3) Pull Docker Image 30 | 31 | - This will download the Docker Image from the SeisSCOPED GitHub repository 32 | - Mac Intel Chip, Windows and Linux Users (AMD architecture) please follow instructions in A 33 | - Mac Silicon Chip (ARM architecture; M1, M2, M3...) 
please follow instructions in B 34 | 35 | #### 3A) Docker pull for Mac Intel, Windows, Linux 36 | ```bash 37 | docker pull --platform amd64 ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 38 | ``` 39 | 40 | #### 3B) Docker pull for Mac Silicon 41 | 42 | Installs the Docker Image from GitHub 43 | ```bash 44 | docker pull --platform arm64 ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 45 | ``` 46 | 47 | ### 4) Make Empty Working Directory 48 | 49 | To save the results we obtain from inside our container, we will need to mount our local filesystem. 50 | **> Please `cd` (change directory) to an empty working directory (example below)** 51 | 52 | ```bash 53 | # NOTE: This is only an EXAMPLE code snippet. Please create an 54 | # appropriate empty working directory on your machine 55 | mkdir -p ~/Work/scoped_uw_2024 56 | cd ~/Work/scoped_uw_2024 57 | ``` 58 | 59 | ### 5) Run Container 60 | 61 | Now run the container to open a JupyterLab instance (see notes below for explanation of this command) 62 | ```bash 63 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 64 | ``` 65 | 66 | ### 6) Open JupyterLab 67 | 68 | - After running the `docker run` command, you will see some output that ends with a web address, e.g,. 69 | 70 | ```bash 71 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 72 | $ docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 73 | [I 2024-05-16 15:59:20.338 ServerApp] jupyter_lsp | extension was successfully linked. 74 | [I 2024-05-16 15:59:20.341 ServerApp] jupyter_server_terminals | extension was successfully linked. 75 | [I 2024-05-16 15:59:20.343 ServerApp] jupyterlab | extension was successfully linked. 76 | [I 2024-05-16 15:59:20.346 ServerApp] notebook | extension was successfully linked. 
77 | [I 2024-05-16 15:59:20.346 ServerApp] Writing Jupyter server cookie secret to /home/scoped/.local/share/jupyter/runtime/jupyter_cookie_secret 78 | [I 2024-05-16 15:59:20.518 ServerApp] notebook_shim | extension was successfully linked. 79 | [I 2024-05-16 15:59:20.529 ServerApp] notebook_shim | extension was successfully loaded. 80 | [I 2024-05-16 15:59:20.530 ServerApp] jupyter_lsp | extension was successfully loaded. 81 | [I 2024-05-16 15:59:20.530 ServerApp] jupyter_server_terminals | extension was successfully loaded. 82 | [I 2024-05-16 15:59:20.532 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.10/site-packages/jupyterlab 83 | [I 2024-05-16 15:59:20.532 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab 84 | [I 2024-05-16 15:59:20.532 LabApp] Extension Manager is 'pypi'. 85 | [I 2024-05-16 15:59:20.539 ServerApp] jupyterlab | extension was successfully loaded. 86 | [I 2024-05-16 15:59:20.540 ServerApp] notebook | extension was successfully loaded. 87 | [I 2024-05-16 15:59:20.541 ServerApp] Serving notebooks from local directory: /home/scoped 88 | [I 2024-05-16 15:59:20.541 ServerApp] Jupyter Server 2.14.0 is running at: 89 | [I 2024-05-16 15:59:20.541 ServerApp] http://c80bdafc9ee2:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 90 | [I 2024-05-16 15:59:20.541 ServerApp] http://127.0.0.1:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 91 | [I 2024-05-16 15:59:20.541 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation). 
92 | [C 2024-05-16 15:59:20.543 ServerApp] 93 | 94 | To access the server, open this file in a browser: 95 | file:///home/scoped/.local/share/jupyter/runtime/jpserver-8-open.html 96 | Or copy and paste one of these URLs: 97 | http://c80bdafc9ee2:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 98 | http://127.0.0.1:8888/lab?token=6e70b595bd7d62bde09db852053971b28f2f012574ca0a95 99 | [I 2024-05-16 15:59:20.806 ServerApp] Skipped non-installed server(s): bash-language-server, dockerfile-language-server-nodejs, javascript-typescript-langserver, jedi-language-server, julia-language-server, pyright, python-language-server, python-lsp-server, r-languageserver, sql-language-server, texlab, typescript-language-server, unified-language-server, vscode-css-languageserver-bin, vscode-html-languageserver-bin, vscode-json-languageserver-bin, yaml-language-server 100 | ``` 101 | 102 | - Please **open** the bottom link (starting with http://127.0.0.1:8888) with a web browser 103 | - You will be met with the following looking web page 104 | 105 | ![JupyterLab](https://user-images.githubusercontent.com/23055374/193501549-8f0d9429-1414-40c7-ad4d-0bdcf8ad6e55.png) 106 | 107 | ### 7) Run Workshop Material 108 | 109 | - Using the navigation bar on the left, click through the following directories 110 | - *adjdocs -> workshops -> 2024-05-21_scoped_uw* 111 | - **Clicking** any of the workshop notebooks (e.g., *1_intro_specfem2d.ipynb*) will open a Jupyter Notebook 112 | - See 'Jupyter Quick Tips' at the top of any notebook for information on how to run through a notebook 113 | - Run cells one-by-one and sequentially, read along with text to follow workshop material 114 | 115 | --------------- 116 | 117 | > Great! You are ready to run the workshop material, please follow along with the material in each of the notebooks. 
The material below is for when you are done with the workshop (Workshop Shut Down Procedures), or if you have trouble with any of the above install instructions (Troubleshooting Notes) 118 | 119 | --------------- 120 | 121 | ## Workshop Shut Down Procedures 122 | 123 | - These instructions are for when you are finished with the workshop and want to shut down the container and free up space 124 | - Before you approach any of these shut down procedures, please be sure to save any work you might have created 125 | - 'Created work' may include exercise notebooks that you filled out on your own, or files that you may have generated on your own 126 | 127 | ### a) Removing Files from the *work/* Directory 128 | 129 | - If you want to free up memory taken up by our workshop materials, the easiest way is to do it from **inside** the container 130 | - **WARNING** This will delete all files created by our workshop notebooks. You will not be able to recover these files 131 | - From the *Terminal* **inside** your container, you can run the following commands to remove all the workshop-created files: 132 | 133 | ```bash 134 | # Run the following from inside your container's terminal, not on your local machine 135 | rm -r /home/scoped/work 136 | ``` 137 | 138 | ### b) Closing a Running Container 139 | 140 | - From inside the JupyterLab interface, you can **click** `File -> Shut Down` in the top navigation bar 141 | - **OR** from the terminal where you ran the `docker run` command (which should still be running), you can type `Ctrl + c` on your keyboard to stop a running session 142 | 143 | ### c) Re-opening a Closed Container 144 | 145 | - It is possible to re-open a closed container, however we did not do this during the workshop to keep things simpler 146 | - You may follow this documentation if you want to re-open a closed container (https://docs.docker.com/engine/reference/commandline/container_start/) 147 | 148 | ### d) Deleting Closed Containers 149 | 150 | - Although you 
have closed your container, each container still occupies some memory on your local machine 151 | - This can be freed up if you no longer want to access the container's contents 152 | - Run the following command to free up memory associated with all closed containers 153 | 154 | ```bash 155 | docker container prune 156 | ``` 157 | 158 | ### e) Removing Workshop Docker Image 159 | 160 | - If you want to **remove** our workshop's Docker Image (which takes up ~3.98GB), you will need to identify the Image ID 161 | - To do that, you can run 162 | 163 | ```bash 164 | docker images 165 | ``` 166 | 167 | - You will be met with a list of available Docker Images. Ours has Repository name: *'ghcr.io/seisscoped/adjtomo'*, and Tag: *'ubuntu20.04_jupyterlab'* 168 | - You will need to identify the hash value listed under **IMAGE ID** column and run the following command 169 | 170 | ```bash 171 | docker rmi 172 | ``` 173 | 174 | - **For Example**: To do this on my own machine I would run: 175 | 176 | ```bash 177 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 178 | REPOSITORY TAG IMAGE ID CREATED SIZE 179 | ghcr.io/seisscoped/adjtomo ubuntu20.04_jupyterlab 3a6ddce01b76 20 hours ago 3.99GB 180 | bchow@tern [~] $ docker rmi 3a6ddce01b76 181 | ``` 182 | 183 | -------------- 184 | 185 | > If everything worked and you have successfully completed the steps above, congratulations! Please disregard everything below. If you are having trouble, please see the 'Troubleshooting Notes' section below. 
186 | 187 | -------------- 188 | 189 | ## Troubleshooting Notes 190 | 191 | >__ERROR:__ $(pwd) not recognized in `docker run` command of Step 1 192 | 193 | - *$(pwd)* is a Linux command that might not be recognized by all operating systems 194 | - Please change the *$(pwd)* to the full path to your current working directory, e.g., 195 | 196 | ```bash 197 | # Note that you will need to substitute with your own path 198 | docker run -p 8888:8888 --mount type=bind,source=,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 199 | ``` 200 | 201 | for example on my own computer this might look like: 202 | ```bash 203 | # Do NOT copy-paste this, it is just an example and will not work on your computer 204 | docker run -p 8888:8888 --mount type=bind,source=/Users/Chow/Work/specfem_users_workshop,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 205 | ``` 206 | 207 | >__ERROR:__ *Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?* 208 | 209 | - You need to start Docker or Docker Desktop on your machine. 210 | - On **Windows or Mac**, use the search bar to find 'Docker' or 'Docker Desktop'. 
Open this program and try again 211 | - On **Linux** try running: 212 | ```bash 213 | systemctl start docker 214 | ``` 215 | 216 | >__ERROR:__ Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Post "...": dial unix /var/run/docker.sock: connect: permission denied 217 | - Your user does not have the correct permission to access the Docker daemon 218 | - If you have root privilege on your machine, try running the following: 219 | ```bash 220 | sudo chmod 666 /var/run/docker.sock 221 | ``` 222 | - You may also need to add your user to a Docker group which has the correct privileges ([see this link](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user)) 223 | 224 | >__ERROR__: docker: Error response from daemon: driver failed programming external connectivity on endpoint hopeful_haslett (c3e93dc3530d474e5152cf7ec58d69030da2be534f47fc94bcc6da19177f60f4): Bind for 0.0.0.0:8888 failed: port is already allocated. 225 | - You likely have another task (e.g., Jupyter Notebooks) running on the port we are trying to specify (8888:8888) 226 | - Please change the port map to something else, e.g., *docker run -p 8889:8888*... and try again 227 | - **Note** that you will also have to change the web address that you open in your browser to the new port number (e.g., http://127.0.0.1:8889/lab...) 228 | 229 | 230 | ## Docker Command Explanation 231 | 232 | ```bash 233 | docker pull ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 234 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04_jupyterlab 235 | ``` 236 | 237 | - `docker pull` downloads the Docker Image from GitHub 238 | - `docker run` launches the container.
The flags in the `run` command are: 239 | - `--mount`: binds our local filesystem (in the current working directory) with the **container's internal filesystem** (at location */home/scoped/work* which resides **inside** the container) 240 | - `--shm-size`: tells Docker to give us 1 Gigabyte of shared memory, which is required for MPI processes 241 | - Note that the *$(pwd)* argument may not be recognized by your operating system. If so, please see the troubleshooting notes below 242 | - You may substitute any directory for *$(pwd)*, even remote filesystems, as long as they are accessible by your current machine 243 | 244 | 245 | 246 | ## Version Tracking 247 | Keeping track of versions used for the workshop. These will also be pinned to the container. 248 | 249 | - SPECFEM2D 8.1.0: baeb71d 250 | - SPECFEM3D 4.1.1: ee3b095 251 | - SeisFlows v3.2.1: e96e888c 252 | - PySEP v0.6.0: 9e77ad3 253 | - Pyatoa v0.4.0: 1a6b86a 254 | -------------------------------------------------------------------------------- /readmes/docker_image_install.md: -------------------------------------------------------------------------------- 1 | # Docker Image Installation Instructions 2 | For the SPECFEM Users Workshop (Oct.
5-7, 2022) 3 | 4 | - The following instructions are meant to **install** a Docker Image, which contains all the software you will need to participate in the workshop 5 | - This Docker Image is ~5.5 GB, workshop material ~2.5 GB; please ensure you have sufficient disk space (10 GB free to be safe) 6 | - See the troubleshooting notes at the bottom for common issues you might encounter 7 | - If you have any issues running the steps below that are not solved by the troubleshooting notes, feel free to open up a [GitHub issue](https://github.com/adjtomo/adjdocs/issues) 8 | 9 | ## 0) Open a Terminal 10 | - The commands in the following sections will need to be run from your computer's terminal program 11 | - To open the terminal on your **local machine**: 12 | - Mac: Type 'terminal' into Spotlight 13 | - Windows: Type 'powershell' in the system search bar 14 | - Linux: Type 'terminal' in the system search bar 15 | 16 | 17 | ## 1) Pull Docker Image 18 | 19 | - This will download the Docker Image from GitHub 20 | - Mac Intel Chip, Windows and Linux Users (AMD architecture) please follow instructions in 1A 21 | - Mac M1 Chip (ARM architecture) please follow instructions in 1B 22 | 23 | 24 | ### 1A) Docker pull for Mac Intel, Windows, Linux 25 | ```bash 26 | docker pull --platform amd64 ghcr.io/seisscoped/adjtomo:workshop2022 27 | ``` 28 | 29 | ### 1B) Docker pull for Mac M1 30 | 31 | Installs the Docker Image from GitHub 32 | ```bash 33 | docker pull --platform arm64 ghcr.io/seisscoped/adjtomo:workshop2022 34 | ``` 35 | 36 | ## 2) Make Empty Working Directory 37 | 38 | To save the results we obtain from inside our container, we will need to mount our local filesystem. 39 | **> Please `cd` (change directory) to an empty working directory (example below)** 40 | 41 | ```bash 42 | # NOTE: This is only an EXAMPLE code snippet. 
Please create an 43 | # appropriate empty working directory on your machine 44 | mkdir -p ~/Work/specfem_users_workshop 45 | cd ~/Work/specfem_users_workshop 46 | ``` 47 | 48 | ## 3) Run Container 49 | 50 | Now run the container to open a JupyterLab instance (see notes below for explanation of this command) 51 | ```bash 52 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:workshop2022 53 | ``` 54 | 55 | ## 4) Open JupyterLab 56 | 57 | - After running the `docker run` command, you will see some output that ends with a web address, e.g,. 58 | 59 | ```bash 60 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 61 | [bchow@blackbox specfem_users_workshop]$ docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work ghcr.io/seisscoped/adjtomo:workshop2022 62 | [I 2022-10-03 02:45:25.543 ServerApp] jupyterlab | extension was successfully linked. 63 | [I 2022-10-03 02:45:25.548 ServerApp] nbclassic | extension was successfully linked. 64 | [I 2022-10-03 02:45:25.549 ServerApp] Writing Jupyter server cookie secret to /home/scoped/.local/share/jupyter/runtime/jupyter_cookie_secret 65 | [I 2022-10-03 02:45:25.704 ServerApp] notebook_shim | extension was successfully linked. 66 | [I 2022-10-03 02:45:25.714 ServerApp] notebook_shim | extension was successfully loaded. 67 | [I 2022-10-03 02:45:25.715 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.10/site-packages/jupyterlab 68 | [I 2022-10-03 02:45:25.715 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab 69 | [I 2022-10-03 02:45:25.717 ServerApp] jupyterlab | extension was successfully loaded. 70 | [I 2022-10-03 02:45:25.719 ServerApp] nbclassic | extension was successfully loaded. 
71 | [I 2022-10-03 02:45:25.719 ServerApp] Serving notebooks from local directory: /home/scoped 72 | [I 2022-10-03 02:45:25.719 ServerApp] Jupyter Server 1.18.1 is running at: 73 | [I 2022-10-03 02:45:25.719 ServerApp] http://4196322a7bd0:8888/lab?token=864237545761973aa9f70fd458c19d40d6b3b52549dafc6e 74 | [I 2022-10-03 02:45:25.719 ServerApp] or http://127.0.0.1:8888/lab?token=864237545761973aa9f70fd458c19d40d6b3b52549dafc6e 75 | [I 2022-10-03 02:45:25.719 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation). 76 | [C 2022-10-03 02:45:25.721 ServerApp] 77 | 78 | To access the server, open this file in a browser: 79 | file:///home/scoped/.local/share/jupyter/runtime/jpserver-13-open.html 80 | Or copy and paste one of these URLs: 81 | http://4196322a7bd0:8888/lab?token=864237545761973aa9f70fd458c19d40d6b3b52549dafc6e 82 | or http://127.0.0.1:8888/lab?token=864237545761973aa9f70fd458c19d40d6b3b52549dafc6e 83 | ``` 84 | 85 | - Please **open** the bottom link (starting with http://127.0.0.1:8888) with a web browser 86 | - You will be met with the following looking web page 87 | 88 | ![JupyterLab](https://user-images.githubusercontent.com/23055374/193501549-8f0d9429-1414-40c7-ad4d-0bdcf8ad6e55.png) 89 | 90 | ## 5) Update adjDocs 91 | 92 | - To get the latest copy of the workshop material we will need to update adjDocs, our documentation repository 93 | - Please **double click** Terminal (`$_` icon in the 'Other' section) to open up the JupyterLab terminal 94 | - Run the following commands inside the terminal to update adjDocs 95 | 96 | ```bash 97 | cd ~/adjdocs 98 | git pull 99 | ``` 100 | 101 | A successful 'git pull' should result in output that looks something like this: 102 | 103 | ```bash 104 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 105 | root@9613bff72a41:~/adjdocs# git pull 106 | remote: Enumerating objects: 345, done. 107 | remote: Counting objects: 100% (173/173), done. 
108 | remote: Compressing objects: 100% (93/93), done. 109 | remote: Total 345 (delta 119), reused 120 (delta 80), pack-reused 172 110 | Receiving objects: 100% (345/345), 29.07 MiB | 22.27 MiB/s, done. 111 | Resolving deltas: 100% (204/204), completed with 11 local objects. 112 | From https://github.com/adjtomo/adjdocs 113 | 17378b6..77b2fe6 main -> origin/main 114 | Updating 17378b6..77b2fe6 115 | Fast-forward 116 | ... 117 | root@2579daf64918:~/adjdocs# 118 | ``` 119 | 120 | >__NOTE:__ If you have made any changes to the adjDocs repository, the `git pull` command may fail. The easiest way to resolve this is to stop your current container and start a new one. Alternatively you may run `git stash` to hide any changes you have made, before running `git pull` 121 | 122 | ## 6) Run Workshop Material 123 | 124 | - Using the navigation bar on the left, click through the following directories 125 | - *adjdocs -> workshops -> 2022-10-05_specfem_users* 126 | - **Clicking** any of the workshop notebooks (e.g., *day_1a_intro_specfem2d.ipynb*) will open a Jupyter Notebook 127 | - See 'Jupyter Quick Tips' at the top of any notebook for information on how to run through a notebook 128 | - Run cells one-by-one and sequentially, read along with text to follow workshop material 129 | - If you encounter any issues running the notebooks, please open up a [GitHub issue](https://github.com/adjtomo/adjdocs/issues) 130 | 131 | 132 | -------------- 133 | 134 | > If everything worked and you have successfully completed the steps above, congratulations! Please disregard everything below. If you are having trouble, please see the 'Troubleshooting Notes' section below. 135 | 136 | -------------- 137 | 138 | ## Docker Command Explanation 139 | 140 | ```bash 141 | docker pull ghcr.io/seisscoped/adjtomo:workshop2022 142 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:workshop2022 143 | ``` 144 | 145 | - `docker pull` downloads the Docker Image from GitHub 146 | - `docker run` launches the container. The flags in the `run` command are: 147 | - `--mount`: binds our local filesystem (in the current working directory) with the **container's internal filesystem** (at location */home/scoped/work* which resides **inside** the container) 148 | - `--shm-size`: tells Docker to give us 1 Gigabyte of shared memory, which is required for MPI processes 149 | - Note that the *$(pwd)* argument may not be recognized by your operating system. If so, please see the troubleshooting notes below 150 | - You may substitute any directory for *$(pwd)*, even remote filesystems, as long as they are accessible by your current machine 151 | 152 | 153 | ## Troubleshooting Notes 154 | 155 | >__ERROR:__ $(pwd) not recognized in `docker run` command of Step 1 156 | 157 | - *$(pwd)* is a Linux command that might not be recognized by all operating systems 158 | - Please change the *$(pwd)* to the full path to your current working directory, e.g., 159 | 160 | ```bash 161 | # Note that you will need to substitute with your own path 162 | docker run -p 8888:8888 --mount type=bind,source=,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:workshop2022 163 | ``` 164 | 165 | for example on my own computer this might look like: 166 | ```bash 167 | # Do NOT copy-paste this, it is just an example and will not work on your computer 168 | docker run -p 8888:8888 --mount type=bind,source=/Users/Chow/Work/specfem_users_workshop,target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:workshop2022 169 | ``` 170 | 171 | >__ERROR:__
*Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?* 183 | 184 | - You need to start Docker or Docker Desktop on your machine. 185 | - On **Windows or Mac**, use the search bar to find 'Docker' or 'Docker Desktop'. Open this program and try again 186 | - On **Linux** try running: 187 | ```bash 188 | systemctl start docker 189 | ``` 190 | 191 | >__ERROR:__ Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Post "...": dial unix /var/run/docker.sock: connect: permission denied 192 | - Your user does not have the correct permission to access the Docker daemon 193 | - If you have root privilege on your machine, try running the following: 194 | ```bash 195 | sudo chmod 666 /var/run/docker.sock 196 | ``` 197 | - You may also need to add your user to a Docker group which has the correct privileges ([see this link](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user)) 198 | 199 | >__ERROR__: docker: Error response from daemon: driver failed programming external connectivity on endpoint hopeful_haslett (c3e93dc3530d474e5152cf7ec58d69030da2be534f47fc94bcc6da19177f60f4): Bind for 0.0.0.0:8888 failed: port is already allocated. 200 | - You likely have another task (e.g., Jupyter Notebooks) running on the port we are trying to specify (8888:8888) 201 | - Please change the port map to something else, e.g., *docker run -p 8889:8888*... and try again 202 | - **Note** that you will also have to change the web address that you open in your browser to the new port number (e.g., http://127.0.0.1:8889/lab...)
203 | 204 | 205 | --------------- 206 | 207 | # Workshop Days 1-3 Startup Procedure 208 | 209 | - Please make sure Docker is **running** before executing the following instructions 210 | - From your **local** machine, please run the following commands to **start** the Docker Container 211 | 212 | ## 1) Start Docker Container 213 | You may replace the path given (*~/Work/specfem_users_workshop*) with any **empty** working directory of your choice 214 | ```bash 215 | mkdir -p ~/Work/specfem_users_workshop 216 | cd ~/Work/specfem_users_workshop 217 | docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:workshop2022 218 | ``` 219 | 220 | Now please **open** the link starting with http://127.0.0.1:8888 in a web browser 221 | 222 | ## 2) Update adjDocs 223 | 224 | - To get the latest copy of the workshop material we will need to update adjDocs, our documentation repository 225 | - Please **double click** Terminal (`$_` icon in the 'Other' section) to open up the JupyterLab terminal 226 | - Run the following commands inside the terminal to update adjDocs 227 | 228 | ```bash 229 | cd ~/adjdocs 230 | git pull 231 | ``` 232 | 233 | --------------- 234 | 235 | # Workshop Shut Down Procedures 236 | 237 | - These instructions are for when you are finished with the workshop material and want to shut down the container and/or free up space 238 | - Before you approach any of these shut down procedures, please be sure to save any work you might have created 239 | - 'Created work' may include exercise notebooks that you filled out on your own, or files that you may have generated on your own 240 | 241 | ### a) Removing Files from the *work/* Directory 242 | 243 | - If you want to free up memory taken up by our workshop materials, the easiest way is to do it from **inside** the container 244 | - **WARNING** This will delete all files created by our workshop notebooks. 
You will not be able to recover these files 245 | - From the *Terminal* **inside** your container, you can run the following commands to remove all the workshop-created files: 246 | 247 | ```bash 248 | # Run the following from inside your container's terminal, not on your local machine 249 | rm -r /home/scoped/work 250 | ``` 251 | 252 | ### b) Closing a Running Container 253 | 254 | - From inside the JupyterLab interface, you can **click** `File -> Shut Down` in the top navigation bar 255 | - **OR** from the terminal where you ran the `docker run` command (which should still be running), you can type `Ctrl + c` on your keyboard to stop a running session 256 | 257 | ### c) Re-opening a Closed Container 258 | 259 | - It is possible to re-open a closed container, however we did not do this during the workshop to keep things simpler 260 | - You may follow this documentation if you want to re-open a closed container (https://docs.docker.com/engine/reference/commandline/container_start/) 261 | 262 | ### d) Deleting Closed Containers 263 | 264 | - Although you have closed your container, each container still occupies some memory on your local machine 265 | - This can be freed up if you no longer want to access the container's contents 266 | - Run the following command to free up memory associated with all closed containers 267 | 268 | ```bash 269 | docker container prune 270 | ``` 271 | 272 | ### e) Removing Workshop Docker Image 273 | 274 | - If you want to **remove** our workshop's Docker Image (which takes up ~5.5GB), you will need to identify the Image ID 275 | - To do that, you can run 276 | 277 | ```bash 278 | docker images 279 | ``` 280 | 281 | - You will be met with a list of available Docker Images. 
Ours has Repository name: *'ghcr.io/seisscoped/adjtomo'*, and Tag: *'workshop2022'* 282 | - You will need to identify the hash value listed under **IMAGE ID** column and run the following command 283 | 284 | ```bash 285 | docker rmi 286 | ``` 287 | 288 | - **For Example**: To do this on my own machine I would run: 289 | 290 | ```bash 291 | # DO NOT COPY THE CODE BLOCK BELOW, it is just an example 292 | bchow@tern [~] $ docker images 293 | REPOSITORY TAG IMAGE ID CREATED SIZE 294 | ghcr.io/seisscoped/adjtomo workshop2022 6849be2bcfb8 3 days ago 5.56GB 295 | bchow@tern [~] $ docker rmi 6849be2bcfb8 296 | ``` 297 | 298 | -------------------------------------------------------------------------------- /workshops/2024-05-21_scoped_uw/exercise_solutions/solution_5_intro_seisflows.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "347bc0f7-6dbe-454c-a95a-8469d8efe1c2", 6 | "metadata": {}, 7 | "source": [ 8 | "# 2024 SCOPED Workshop — Wavefield Simulations Using SPECFEM\n", 9 | "\n", 10 | "## Notebook 5: Introduction to SeisFlows — Exercise Solutions\n", 11 | "\n", 12 | "- In this notebook we will introduce two open-source Python packages for facilitating/automating seismic imaging \n", 13 | "- **Objective**: To introduce and tour around SeisFlows and Pyatoa, and see how they can be used to simplify working with SPECFEM \n", 14 | "- These instructions should be run from inside a Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)). 
\n", 15 | "-----------\n", 16 | "\n", 17 | "**Relevant Links:** \n", 18 | "- This Notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2024-5-21_scoped_uw/5_intro_seisflows.ipynb\n", 19 | "\n", 20 | "**adjTomo Software Suite:** \n", 21 | "- adjTomo: https://github.com/adjtomo\n", 22 | "- SeisFlows GitHub Page: https://github.com/adjtomo/seisflows\n", 23 | "- SeisFlows Documentation: https://seisflows.readthedocs.io/en/latest/\n", 24 | "\n", 25 | "\n", 26 | "**Jupyter Quick Tips:**\n", 27 | "\n", 28 | "- **Run cells** one-by-one by hitting the $\\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 29 | "- **Run all cells** by hitting the $\\blacktriangleright\\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 30 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 31 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 32 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 33 | "- Commands that start with `%` are Jupyter Magic commands.\n", 34 | "----------" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "id": "93bfc4ea-a94a-4d4d-9453-a7e11c0201d4", 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "# Required Python packages for today's notebook\n", 45 | "import os\n", 46 | "import shutil\n", 47 | "import numpy as np\n", 48 | "from glob import glob\n", 49 | "from pyasdf import ASDFDataSet\n", 50 | "from pyatoa import Inspector\n", 51 | "from seisflows.tools import unix\n", 52 | "from IPython.display import Image" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "id": "796a3507-d06c-46a5-aad0-9f68af4aef01", 58 | "metadata": {}, 59 | "source": [ 60 | "-----------\n", 61 | "## 2) Exercise: Run an Inversion w/ SeisFlows\n", 62 | "\n", 63 | "- Okay, now that we 
have solved the forward problem, we can tackle the inverse problem\n", 64 | "- We will take our current working directory and make adjustments to the required modules to run an inversion\n", 65 | "- First we'll clean up our working directory prior to getting started" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "id": "5a736c33-7deb-42ee-9cf3-b1feadc2a1c7", 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "# Move to the SeisFlows working directory\n", 76 | "%cd /home/scoped/work/intro_seisflows\n", 77 | "! ls " 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "id": "0c3cda3f-bf6f-49cb-a47d-084d7868689f", 83 | "metadata": {}, 84 | "source": [ 85 | "------------\n", 86 | "In order to run our inversion, we will need a few components we did not have in the Forward problem, \n", 87 | "these tasks will help guide you into setting up your inversion. Much of the code you will need is available\n", 88 | "in previous notebooks. \n", 89 | "\n", 90 | "### Task 1) Create 'Data'\n", 91 | "\n", 92 | "#### Background\n", 93 | "- To run an inversion, we need some kind of 'data' to compare to our synthetics, the data-synthetic differences (i.e., **misfit**) will guide the inversion.\n", 94 | "- Often tomographers will run **synthetic inversions**, where are data consist of synthetic waveforms generated using a **target model**.\n", 95 | "- In this example, we will take the data we just created in our forward simulations to use as our **target synthetics**.\n", 96 | "\n", 97 | "> NOTE: Seisflows has a `unix` module that allows you to run unix commands through python. 
For example `unix.cp` mimics the `cp` command\n", 98 | "\n", 99 | "#### Exercise Tasks\n", 100 | "1) Identify `path_data` in the 'parameters.yaml' file, this is where SeisFlows expects waveform data \n", 101 | " - You can open the file with the file manager, or use `seisflows par`\n", 102 | "2) Create the required directory structure in `path_data`, which follows the format `{path_data}/{event_id}/` \n", 103 | " - Each source requires its own sub-directory\n", 104 | " - Follow the source naming convention we covered earlier\n", 105 | " - Check parameter `ntask` to determine how many sources will be used\n", 106 | "3) Move or copy the synthetics generated by the forward problem we just ran into the directories you created in (2) \n", 107 | " - Remember that synthetics are stored in: `scratch/solver/{event_id}/traces/syn/*` \n", 108 | " - You can do this manually, with bash commands or with Python*)\n", 109 | "4) Confirm that you have `ntask` sub-directories in `path_data`, each containing synthetic waveform data" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "id": "28e53e23-75c9-4d18-afee-b8b283722de4", 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "# 1. Figure out where data are to be stored\n", 120 | "! seisflows par path_data" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "id": "15697542-84e6-48c9-8100-85367ff5cda7", 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "# 2. Create the correct directory structure\n", 131 | "! seisflows par ntask # how many sources will we be using\n", 132 | "! echo\n", 133 | "! ls scratch/solver" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "id": "296b131a-37d1-4639-b4ce-97dc60264c61", 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "# 2. 
Generate source directories\n", 144 | "for i in range(1, 11):\n", 145 | " os.mkdir(f\"waveforms/{i:0>3}\")\n", 146 | "! ls waveforms" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "id": "be1020ab-884b-4ebe-a426-a7b1513e4364", 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# 3. Copy the \"data\" into these directories\n", 157 | "for i in range(1, 11):\n", 158 | " for src in glob(f\"scratch/solver/{i:0>3}/traces/syn/*\"):\n", 159 | " dst = f\"waveforms/{i:0>3}\"\n", 160 | " unix.cp(src, dst)" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "id": "a00f698a-24f1-40b9-bb55-6fc5e423b575", 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "! ls waveforms/001" 171 | ] 172 | }, 173 | { 174 | "cell_type": "markdown", 175 | "id": "a4ac0db5-1724-4b93-a453-a06f29669f53", 176 | "metadata": {}, 177 | "source": [ 178 | "---------------\n", 179 | "### Task 2) Generate a new 'Starting Model'\n", 180 | "\n", 181 | "- Because our 'data' was generated using the checkerboard model shown above, we need a new 'starting model'\n", 182 | "- If we do not change our starting model, the synthetics we generate will be the same as our **target synthetics**, resulting in 0 misfit\n", 183 | "- Let's modify the model located in `specfem2d_workdir`, there are two approaches with (1) being easier than (2). \n", 184 | "\n", 185 | "#### Exercise Tasks\n", 186 | "\n", 187 | "**Option 1 (Homogeneous Halfspace):**\n", 188 | "1) Change the value of parameter `Model` in `specfem2d_workdir/DATA/Par_file` from `gll` -> `Default`\n", 189 | " - You can do this manually or use `seisflows sempar`\n", 190 | " - This will tell the internal mesher to use the parameter file definition of the model, which is a homogeneous halfspace\n", 191 | "2) Rerun `xmeshfem2D` and `xspecfem2D` to generate the required Model files. 
You can find the syntax for running these commands in previous notebooks.\n", 192 | "3) Reset `Model` parameter to `gll` for the inversion\n", 193 | " - We do this because the actual inversion uses this option to be able to update model parameters\n", 194 | "\n", 195 | "**Option 2 (Checkerboard Perturbation):**\n", 196 | ">Warning: This requires some Python skill\n", 197 | "1) Change the value of parameter `Model` in `specfem2d_workdir/DATA/Par_file` from `gll` -> `legacy`\n", 198 | " - This will tell the internal mesher to read model values from the file `model_velocity.dat_input`\n", 199 | "2) Find the file that defines the legacy model values in `specfem2d_workdir/DATA` \n", 200 | "3) Modify this file in order to perturb the checkerboard model\n", 201 | " - The easiest thing to do is increase or decrease P and S-wave velocity structure by some percentage of their original value (5%?)\n", 202 | " - The column structure of this file is: `index, x-coordinate [m], y-coordinate [m], density, Vp [m/s], Vs [m/s]`\n", 203 | " - Probably best to use Python to read, write and modify the file (e.g., with NumPy `loadtxt` and `savetxt`)\n", 204 | "5) Rerun `xmeshfem2D` and `xspecfem2D` to generate the required Model files. You can find these commands in previous notebooks.\n", 205 | "6) Reset `Model` parameter to `gll` for the inversion\n", 206 | " - We do this because the actual inversion uses this option to be able to update model parameters\n" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "id": "e9c58d55-fb17-4e84-804f-297fc9a32b3d", 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [ 216 | "# Option 1: \n", 217 | "# Change parameter type\n", 218 | "%cd /home/scoped/work/intro_seisflows/specfem2d_workdir\n", 219 | "! 
seisflows sempar -P DATA/Par_file model default" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": null, 225 | "id": "1f003102-7152-445d-9c64-d7741776d8c4", 226 | "metadata": {}, 227 | "outputs": [], 228 | "source": [ 229 | "# Run mesher and simulation code\n", 230 | "! mpirun -n 1 bin/xmeshfem2D > OUTPUT_FILES/output_meshfem.txt\n", 231 | "! mpirun -n 1 bin/xspecfem2D > OUTPUT_FILES/output_solver.txt" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": null, 237 | "id": "d4b4e954-b3e8-4973-a777-16c1b757ac4d", 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "# Move model to the correct location\n", 242 | "! mv DATA/proc000000_*.bin OUTPUT_FILES" 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "id": "b7770b5c-b817-44d0-8e8c-e64e20232524", 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "# Reset model parameter for inversion\n", 253 | "%cd /home/scoped/work/intro_seisflows\n", 254 | "! seisflows sempar -P specfem2d_workdir/DATA/Par_file model gll" 255 | ] 256 | }, 257 | { 258 | "cell_type": "markdown", 259 | "id": "43131b02-a321-4162-bbec-c162598ee3f6", 260 | "metadata": {}, 261 | "source": [ 262 | "-----------" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "id": "883d9507-c9d4-410a-a1a8-00dd24e87b9a", 269 | "metadata": {}, 270 | "outputs": [], 271 | "source": [ 272 | "# Option 2: Change parameter type\n", 273 | "%cd /home/scoped/work/intro_seisflows/specfem2d_workdir\n", 274 | "! seisflows sempar -P DATA/Par_file model legacy" 275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": null, 280 | "id": "5d5a1949-8e83-402b-a3ff-955c0baeae19", 281 | "metadata": {}, 282 | "outputs": [], 283 | "source": [ 284 | "# Identify file\n", 285 | "! 
ls DATA/*.dat_input" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "id": "47995410-00ba-491b-a09e-2c2f009a62a4", 292 | "metadata": {}, 293 | "outputs": [], 294 | "source": [ 295 | "# Look at the input model\n", 296 | "! echo \" index x[m] y[m] density Vp [m/s] Vs [m/s]\"\n", 297 | "! echo\n", 298 | "! head -5 DATA/proc000000_model_velocity.dat_input" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": null, 304 | "id": "5b9e1b17-2d47-4964-8a42-3d1193ccb4e9", 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "# Modify the input checkerboard model\n", 309 | "# Make sure we keep the original file incase we make a mistake\n", 310 | "! cp DATA/proc000000_model_velocity.dat_input DATA/proc000000_model_velocity.dat_input_original \n", 311 | "data = np.loadtxt(\"DATA/proc000000_model_velocity.dat_input\")\n", 312 | "print(data)\n", 313 | "\n", 314 | "# Modify velocity structure\n", 315 | "data[:, 4] *= 1.05 # Increase Vp by 5%\n", 316 | "data[:, 5] *= 1.05 # Increase Vs by 5%" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": null, 322 | "id": "da77a496-1e67-4b43-9914-48f78f47b331", 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "# Overwrite the existing file\n", 327 | "np.savetxt(\"DATA/proc000000_model_velocity.dat_input\", data, fmt=\"%10.4f\", delimiter=\"\\t\")\n", 328 | "! head DATA/proc000000_model_velocity.dat_input" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "id": "233862e2-4e2a-4fd9-836a-ec22ff82fc64", 335 | "metadata": {}, 336 | "outputs": [], 337 | "source": [ 338 | "# Run mesher and simulation code\n", 339 | "! mpirun -n 1 bin/xmeshfem2D > OUTPUT_FILES/output_meshfem.txt\n", 340 | "! 
mpirun -n 1 bin/xspecfem2D > OUTPUT_FILES/output_solver.txt" 341 | ] 342 | }, 343 | { 344 | "cell_type": "code", 345 | "execution_count": null, 346 | "id": "7fb82b51-98ff-4c38-bd46-dce99d507e94", 347 | "metadata": {}, 348 | "outputs": [], 349 | "source": [ 350 | "! seisflows sempar -P specfem2d_workdir/DATA/Par_file model gll" 351 | ] 352 | }, 353 | { 354 | "cell_type": "markdown", 355 | "id": "572618ca-8d9f-4545-9571-9e65de33aa0c", 356 | "metadata": {}, 357 | "source": [ 358 | "----------------\n", 359 | "### Task 3) Set up your SeisFlows Parameter File\n", 360 | "\n", 361 | "- Now we need to modify our existing parameter file to switch our workflow from Forward simulations to Inversion\n", 362 | "- Inversion workflows require additional modules for `preprocess` for data-synthetic comparisons, \n", 363 | "- They also require an `optimize` module which is in charge of model updates\n", 364 | "- We will use the `seisflows swap` command which swaps in the set of parameters associated with a given module\n", 365 | "- You can use the command `seisflows print modules` to check the available choices for each module\n", 366 | "\n", 367 | "#### Exercise Tasks\n", 368 | "\n", 369 | "1) `Swap` the `preprocess` module to option: `default`\n", 370 | " - SeisFlows currently has two preprocessing modules, 'Default' and 'Pyaflowa'\n", 371 | " - Both modules perform similar functionality, but Pyaflowa provides richer features such as windowing, improved data storage, and plotting\n", 372 | "2) `Swap` the `optimize` module to option: `gradient`\n", 373 | " - The optimize module takes care of gradient regularization and model updates\n", 374 | " - Other optimization modules include L-BFGS and Nonlinear Conjugate Gradient (NLCG)\n", 375 | "3) `Swap` the `workflow` module to option: `inversion`\n", 376 | " - The `inversion` submodule builds upon the forward simulation and adds in functionality for generating kernels and updating models\n", 377 | " - Other workflow modules include: 
Forward, Migration (for generating kernels), and NoiseInversion (for ambient noise adjoint tomography)\n", 378 | "4) Change the location of `path_model_init` which points to your starting model. \n", 379 | " - Note: in (2) we generated a starting model in `specfem2d_workdir/DATA` (this is specific to SPECFEM2D, model files \n", 380 | " - You might use the command `seisflows par` to change parameters from the command line, or do this manually\n", 381 | "\n", 382 | "#### Optional Tasks\n", 383 | "- Have a look through the remainder of the parameter file, are there parameters you think would be useful to change?\n", 384 | "- You can run the Inversion as is, but advanced Users may play around with filtering (preprocess module) and smoothing (solver module) ." 385 | ] 386 | }, 387 | { 388 | "cell_type": "code", 389 | "execution_count": null, 390 | "id": "78493896-593c-4cea-ae3e-4d5a73a6ee9c", 391 | "metadata": {}, 392 | "outputs": [], 393 | "source": [ 394 | "%cd /home/scoped/work/intro_seisflows\n", 395 | "\n", 396 | "! seisflows swap preprocess default\n", 397 | "! seisflows swap optimize gradient\n", 398 | "! seisflows swap workflow inversion\n", 399 | "! seisflows par path_model_init /home/scoped/work/intro_seisflows/specfem2d_workdir/OUTPUT_FILES" 400 | ] 401 | }, 402 | { 403 | "cell_type": "markdown", 404 | "id": "ff733799-0b2d-453f-8154-f57f7c367dfc", 405 | "metadata": {}, 406 | "source": [ 407 | "--------------\n", 408 | "### Task 4) Clean Up The Working Directory\n", 409 | "\n", 410 | "- Run `seisflows clean` to delete all of the files from the previous Forward simulation, getting ready for our inversion.\n", 411 | "- You can use the `-f/--force` option to skip over any 'are you sure about that?' prompts." 412 | ] 413 | }, 414 | { 415 | "cell_type": "code", 416 | "execution_count": null, 417 | "id": "299c7c0c-02a5-4e67-ab94-5d274a126a02", 418 | "metadata": {}, 419 | "outputs": [], 420 | "source": [ 421 | "! 
seisflows clean -f" 422 | ] 423 | }, 424 | { 425 | "cell_type": "markdown", 426 | "id": "0e0c733f-f522-4617-9c73-36ab12fb1551", 427 | "metadata": {}, 428 | "source": [ 429 | "-------------\n", 430 | "### Task 5) Ready to Run? Check and See!\n", 431 | "\n", 432 | "- When your data are ready, and your parameter file is setup, you can perform a sanity check \n", 433 | "- Run `seisflows check` to perform a number of internal checks that makes sure paths and parameters are set properly \n", 434 | "- If you receive any error messages from `seisflows check`, please fix them and re-run `seisflows check` to see if new errors pop up." 435 | ] 436 | }, 437 | { 438 | "cell_type": "code", 439 | "execution_count": null, 440 | "id": "a70479e6-b751-4a1b-a977-f404a44dfc62", 441 | "metadata": {}, 442 | "outputs": [], 443 | "source": [ 444 | "! seisflows check" 445 | ] 446 | }, 447 | { 448 | "cell_type": "markdown", 449 | "id": "16a4d650-8c4f-4935-a68e-a2f650d8be88", 450 | "metadata": {}, 451 | "source": [ 452 | "### Task 6) Let's go!\n", 453 | "\n", 454 | "If you think you're ready, run `seisflows submit` to start your inversion. " 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": null, 460 | "id": "1226fa1f-2836-4eb0-a501-64c8d587881d", 461 | "metadata": {}, 462 | "outputs": [], 463 | "source": [ 464 | "! 
seisflows submit" 465 | ] 466 | } 467 | ], 468 | "metadata": { 469 | "kernelspec": { 470 | "display_name": "Python 3 (ipykernel)", 471 | "language": "python", 472 | "name": "python3" 473 | }, 474 | "language_info": { 475 | "codemirror_mode": { 476 | "name": "ipython", 477 | "version": 3 478 | }, 479 | "file_extension": ".py", 480 | "mimetype": "text/x-python", 481 | "name": "python", 482 | "nbconvert_exporter": "python", 483 | "pygments_lexer": "ipython3", 484 | "version": "3.12.3" 485 | } 486 | }, 487 | "nbformat": 4, 488 | "nbformat_minor": 5 489 | } 490 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/day_0_container_testing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "306acd82", 6 | "metadata": {}, 7 | "source": [ 8 | "# SPECFEM Users Workshop (Oct. 5-7, 2022)\n", 9 | "-------------------------\n", 10 | "\n", 11 | "## Day 0: Container Testing Instructions\n", 12 | "\n", 13 | "- Day 0 instructions are meant for Users to test out their Docker container to ensure that things will go smoothly during the workshop. \n", 14 | "- Estimated times are provided next to each task to provide a benchmark on how fast thing should run. \n", 15 | "- Benchmarks run on a 2015 Macbook Pro (OSX 10.14.6) with 3.1GHz Intel Core i7, and 16Gb of ram. 
\n", 16 | "- If you run into any issues, please copy the error message, make note of the cell, and open a GitHub issue (https://github.com/adjtomo/adjdocs/issues).\n", 17 | "- These instructions should be run from inside a Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)).\n", 18 | "----------------\n", 19 | "\n", 20 | "**Docker Preamble:** \n", 21 | "\n", 22 | "To open this container you will need the latest Docker Image: \n", 23 | "**> Please run the following from your computer's Terminal**\n", 24 | "\n", 25 | "```bash\n", 26 | "docker pull ghcr.io/seisscoped/adjtomo:ubuntu20.04\n", 27 | "```\n", 28 | "\n", 29 | "To save the results we obtain from inside our container, we will need to mount our local filesystem. \n", 30 | "**> Please `cd` (change directory) to am empty working directory before running the following command:**\n", 31 | "\n", 32 | "```bash\n", 33 | "docker run -p 8888:8888 --mount type=bind,source=$(pwd),target=/home/scoped/work --shm-size=1gb ghcr.io/seisscoped/adjtomo:ubuntu20.04 \n", 34 | "```\n", 35 | "\n", 36 | ">__Docker commands explanation__: `pull` downloads the Docker Image from GitHub. `run` launches the container. `--mount` binds our local filesystem (in the current working directory, pwd) with the container's internal filesytem (at location /home/scoped/work). `--shm-size` tells Docker to give us 1 Gigabyte of shared memory, which is required for MPI processes.\n", 37 | "\n", 38 | ">__M1 Macs__: Our Docker container is not well optimized for Macs running M1 chips, as compared to those running Intel chips. This results in Docker running >10x slower on M1 Macs. 
If you have an M1 we suggest you find an alternative for the workshop (Intel Mac, Windows, Linux) but if that is not possible, you can optimize your container slightly by adding the flag `--platform linux/amd64` to the 'docker run' command above (e.g., `docker run -p 8888:8888 --platform linux/amd64 ...`). With that said, please be prepared for overall slower run times, we will try to accomodate this as much as possible. \n", 39 | "\n", 40 | ">__M1 Mac manual install__: For those adamant about using their M1 Mac, you may follow along these instructions to manually install software required. Use at your own risk! \n", 41 | "https://github.com/adjtomo/adjdocs/blob/main/readmes/manual_install_instructions.md\n", 42 | "\n", 43 | ">__SIGBUS Error__: If you are receiving the error message *Program received signal SIGBUS: Access to an undefined portion of a memory object.*, please `File -> Shut Down` the current container and reopen with a larger value for the `--shm-size` flag above. e.g., `--shm-size=2gb`. \n", 44 | "\n", 45 | "--------------------\n", 46 | "\n", 47 | "**Jupyter Quick Tips:**\n", 48 | "\n", 49 | "- **Run cells** one-by-one by hitting the $\\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 50 | "- **Run all cells** by hitting the $\\blacktriangleright\\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 51 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 52 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 53 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 54 | "- Commands that start with `%` are Jupyter Magic commands.\n", 55 | "- To time a task, put a `%time` before the command (e.g., `%time ! 
ls`)\n", 56 | "\n", 57 | "\n", 58 | "**Relevant Links:** \n", 59 | "- Today's notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/day_0_container_testing.ipynb\n", 60 | "- Today's completed notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_0_container_testing.ipynb\n", 61 | "- Workshop announcement: https://sites.google.com/alaska.edu/carltape/home/research/specfem2022?authuser=0\n", 62 | "- adjTomo Container: https://github.com/SeisSCOPED/adjtomo\n", 63 | "- adjTomo Org Page: https://github.com/adjTomo\n", 64 | "- SeisFlows docs: https://seisflows.readthedocs.io/en/devel/\n", 65 | "- Pyatoa docs: https://pyatoa.readthedocs.io/en/devel/\n", 66 | "- PySEP readme: https://github.com/uafgeotools/pysep#readme\n", 67 | "\n", 68 | "--------------" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "id": "5145f5d3-28e6-4865-bcb4-d8f5f2f713de", 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "# Python import for in-notebook visualizations\n", 79 | "from IPython.display import Image" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "acddac2f", 85 | "metadata": {}, 86 | "source": [ 87 | "## 1) Testing SPECFEM2D\n", 88 | "\n", 89 | "We want to make sure we can run SPECFEM2D natively by running the example problem using MPI. \n", 90 | "**Benchmark time for cell w/ 4 MPI processes: ~60s**" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "id": "73f9208c", 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "%cd /home/scoped/specfem2d\n", 101 | "\n", 102 | "# Convenience function to edit the SPECFEM Par_file from the command line\n", 103 | "! seisflows sempar -P DATA/Par_file nproc 4 # change number of processors from 1 to 4\n", 104 | "\n", 105 | "! mpirun -n 4 bin/xmeshfem2D\n", 106 | "! 
mpirun -n 4 bin/xspecfem2D" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "id": "bf06615e", 112 | "metadata": { 113 | "tags": [] 114 | }, 115 | "source": [ 116 | "Using the navigation menu on the left, navigate to:\n", 117 | "\n", 118 | "`home -> specfem2d -> OUTPUT_FILES`\n", 119 | "\n", 120 | "**Open** any of the `forward_image*.jpg` files and see that the wavefront makes sense. It should look like a layered halfspace with topography and 3 layers.\n", 121 | "One such figure is shown below." 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "id": "af68efa0-2af0-48dc-91d1-55d0d38d4037", 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "Image(\"OUTPUT_FILES/forward_image000000200.jpg\")" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "id": "a55e46f3-6993-4527-8c72-1baf8c09fad5", 137 | "metadata": { 138 | "tags": [] 139 | }, 140 | "source": [ 141 | "### Testing Record Section (RecSec) Tool with SPECMFE2D Synthetics\n", 142 | "- Make sure we can plot record sections (using PySEP) from SPECFEM2D synthetic waveforms.\n", 143 | "- Must point RecSec at 1) synthetics, 2) station metadata, 3) source metadata\n", 144 | "- The `--cartesian` flag tells RecSec that the SPECFEM2D domain is cartesian (not geographic)" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "25ad5406-5501-4daa-bd91-2d5b4dca8709", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "# Try RecSec with SPECFEM2D synthetics\n", 155 | "! 
recsec --syn_path OUTPUT_FILES/ --cmtsolution DATA/SOURCE --stations DATA/STATIONS --cartesian" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "id": "e00ae407-4121-42a6-995a-9a4cdba416db", 161 | "metadata": {}, 162 | "source": [ 163 | "Using the navigation menu on the left, navigate to:\n", 164 | "\n", 165 | "`home -> work -> specfem2d`\n", 166 | "\n", 167 | "**Open** the `record_section.png` file to look at the created record section.\n" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "id": "64f6b625-ae67-4f98-9923-27d770398e5d", 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "Image(\"record_section.png\")" 178 | ] 179 | }, 180 | { 181 | "cell_type": "markdown", 182 | "id": "0bf0fdcc", 183 | "metadata": {}, 184 | "source": [ 185 | "## 2) SeisFlows Setup\n", 186 | "\n", 187 | "Some directory bookkeeping to make sure we keep the container's `/home` directory free of clutter" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "id": "958b638f", 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "%cd /home/scoped/work \n", 198 | "\n", 199 | "! 
mkdir example_1 example_2 example_2a example_3 " 200 | ] 201 | }, 202 | { 203 | "cell_type": "markdown", 204 | "id": "545960a8-21c2-47a9-b1e4-75df12038980", 205 | "metadata": {}, 206 | "source": [ 207 | ">__NOTE:__ Successfully run examples will end with the following log message \n", 208 | "\n", 209 | "```\n", 210 | "================================================================================\n", 211 | "EXAMPLE COMPLETED SUCCESFULLY\n", 212 | "================================================================================\n", 213 | "```" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "id": "3ad7e88b", 219 | "metadata": {}, 220 | "source": [ 221 | "## 3) Example 1: Homogeneous Halfspace Inversion\n", 222 | "\n", 223 | "- This example runs a 1 iteration inversion for a single source-receiver pair, using two homogeneous halfspace models. \n", 224 | "- See [SeisFlows Example \\#1 docs page](https://seisflows.readthedocs.io/en/devel/specfem2d_example.html#example-1-homogenous-halfspace-inversion) for guidance on what is going on \n", 225 | "- **Benchmark time for cell w/ 2 MPI processes: ~1m45s**" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": null, 231 | "id": "dcfc795b", 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "# Run Example 1 with 2 processors\n", 236 | "%cd /home/scoped/work/example_1\n", 237 | "! seisflows examples run 1 -r /home/scoped/specfem2d --with_mpi --nproc 2" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": null, 243 | "id": "ab2c4287", 244 | "metadata": {}, 245 | "outputs": [], 246 | "source": [ 247 | "# Plots the initial and final models, as well as the gradient \n", 248 | "! seisflows plot2d MODEL_INIT vs --savefig m_init_vs.png\n", 249 | "! seisflows plot2d MODEL_TRUE vs --savefig m_true_vs.png\n", 250 | "! seisflows plot2d GRADIENT_01 vs_kernel --savefig g_01_vs.png\n", 251 | "! 
seisflows plot2d MODEL_01 vs --savefig m_01_vs.png" 252 | ] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "id": "90b452d4", 257 | "metadata": {}, 258 | "source": [ 259 | "Using the navigation menu on the left, navigate to:\n", 260 | "\n", 261 | "`home -> work -> example_1`\n", 262 | "\n", 263 | "and **open** each of the .png files that were created to look at the results of this simple inversion. They should match the figures shown in the documentation page." 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": null, 269 | "id": "cc295ba5-b920-4821-aa3c-9b71346b6582", 270 | "metadata": {}, 271 | "outputs": [], 272 | "source": [ 273 | "Image(\"m_init_vs.png\")" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": null, 279 | "id": "bcbbcde7-b063-4769-91c4-ea866834611a", 280 | "metadata": {}, 281 | "outputs": [], 282 | "source": [ 283 | "Image(\"m_true_vs.png\")" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "id": "e8baa59c-f129-4b2f-b89c-b4555dacaacb", 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "Image(\"g_01_vs.png\")" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "id": "079c903d-a44e-449f-88cb-2f120ec46207", 300 | "metadata": {}, 301 | "outputs": [], 302 | "source": [ 303 | "Image(\"m_01_vs.png\")" 304 | ] 305 | }, 306 | { 307 | "cell_type": "markdown", 308 | "id": "f597bb4c", 309 | "metadata": {}, 310 | "source": [ 311 | "## 4) Example 2a: Re-create a Kernel from Tape et al. 2007\n", 312 | "\n", 313 | "- This Example runs Example 2, for 1 event and 32 stations, comparing a homogeneous halfspace model with a checkerboard model. It generate a gradient and updated model. \n", 314 | "- See [SeisFlows Example \\#2a docs page](https://seisflows.readthedocs.io/en/devel/specfem2d_example.html#re-creating-kernels-from-tape-et-al-2007) for guidance on what is going on. 
\n", 315 | "- **NOTE**: You can choose which event ID you're running by changing the integer after `--event_id`. Tape et al. show results for Event IDs 1 through 7.\n", 316 | "- **Benchmark time for cell: ~4m**" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": null, 322 | "id": "a5366661", 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "# Run Example 2 for only 1 iteration, and for a given event ID\n", 327 | "%cd /home/scoped/work/example_2a\n", 328 | "! seisflows examples run 2 -r /home/scoped/specfem2d --with_mpi --niter 1 --event_id 7 # <-- Choose your event by changing this value" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "id": "161cffbc", 335 | "metadata": {}, 336 | "outputs": [], 337 | "source": [ 338 | "# Plots the target and final models, as well as the gradient \n", 339 | "! seisflows plot2d MODEL_TRUE vs --savefig m_true_vs.png\n", 340 | "! seisflows plot2d MODEL_01 vs --savefig m_01_vs.png\n", 341 | "! seisflows plot2d GRADIENT_01 vs_kernel --savefig g_01_vs.png" 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "id": "f09852ba", 347 | "metadata": {}, 348 | "source": [ 349 | "Using the navigation menu on the left, navigate to:\n", 350 | "\n", 351 | "`home -> work -> example_2a`\n", 352 | "\n", 353 | "and **open** the .png file that was created. 
Make sure that the kernel looks like one of the panels provided in [Figure 9 of Tape et al.](https://seisflows.readthedocs.io/en/devel/specfem2d_example.html#re-creating-kernels-from-tape-et-al-2007)" 354 | ] 355 | }, 356 | { 357 | "cell_type": "code", 358 | "execution_count": null, 359 | "id": "804989f5-6c84-461b-8c06-bf3b4c156df2", 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "Image(\"m_true_vs.png\")" 364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": null, 369 | "id": "951fcef6-6e42-443e-b957-2bd46c8ae9d6", 370 | "metadata": {}, 371 | "outputs": [], 372 | "source": [ 373 | "Image(\"g_01_vs.png\")" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": null, 379 | "id": "2a2f0b94-ac80-4192-ac72-37cbdd0f1027", 380 | "metadata": {}, 381 | "outputs": [], 382 | "source": [ 383 | "Image(\"m_01_vs.png\")" 384 | ] 385 | }, 386 | { 387 | "cell_type": "markdown", 388 | "id": "49d08213-97fa-45a7-9fab-75864b4db90c", 389 | "metadata": {}, 390 | "source": [ 391 | "### Waveform Figures\n", 392 | "\n", 393 | "We can also look at waveform figures generated by `Pyatoa`, a misfit quantification package used by SeisFlows to determine data-synthetic misfit. These can be found be navigating to\n", 394 | "\n", 395 | "`home -> work -> example_2a -> output -> pyaflowa -> figures`\n", 396 | "\n", 397 | "There is one .pdf file in there, corresponding to waveforms generated during the first iteration (i01) and first step count (s01). If you open the PDF you'll see waveform comparison figures showing time shifts between data and synthetics.\n", 398 | "\n", 399 | "\n", 400 | "### Re-running Examples\n", 401 | "\n", 402 | "If you want to **re-run this example** to generate a new event kernel, you will have to remove the SeisFlows state file. \n", 403 | "This bypasses SeisFlows' checkpointing feature, which will not allow you to run another workflow within an existing workflow directory." 
404 | ] 405 | }, 406 | { 407 | "cell_type": "code", 408 | "execution_count": null, 409 | "id": "6e9b451b", 410 | "metadata": {}, 411 | "outputs": [], 412 | "source": [ 413 | "! rm /home/scoped/work/example_2a/sfstate.txt\n", 414 | "! seisflows examples run 2 -r /home/scoped/specfem2d --with_mpi --niter 1 --event_id 5" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "id": "01e238be", 420 | "metadata": { 421 | "tags": [] 422 | }, 423 | "source": [ 424 | "## 5) Example 3: En-masse Forward Simulations\n", 425 | "\n", 426 | "- This Example runs forward simulations for 250 source-receiver pairs (10 events, 25 stations). \n", 427 | "- See [SeisFlows Example \\#3 docs page](https://seisflows.readthedocs.io/en/devel/specfem2d_example.html#example-3-en-masse-forward-simulations) for guidance on what is going on. \n", 428 | "- **Benchmark time for cell: ~4m**" 429 | ] 430 | }, 431 | { 432 | "cell_type": "code", 433 | "execution_count": null, 434 | "id": "c72ed098", 435 | "metadata": {}, 436 | "outputs": [], 437 | "source": [ 438 | "# Run Example 3\n", 439 | "%cd /home/scoped/work/example_3\n", 440 | "! seisflows examples run 3 -r /home/scoped/specfem2d --with_mpi --nproc 4" 441 | ] 442 | }, 443 | { 444 | "cell_type": "code", 445 | "execution_count": null, 446 | "id": "673351b9", 447 | "metadata": {}, 448 | "outputs": [], 449 | "source": [ 450 | "# Plots one synthetic waveform exported by the Solver\n", 451 | "! seisflows plotst output/solver/001/syn/AA.S000000.BXY.semd --savefig AA.S000000.BXY.semd.png\n", 452 | "\n", 453 | "# Plots multiple synthetic waveforms exported by the Solver\n", 454 | "! 
seisflows plotst output/solver/001/syn/AA.S00000?.BXY.semd --savefig AA.S00000n.BXY.semd.png" 455 | ] 456 | }, 457 | { 458 | "cell_type": "markdown", 459 | "id": "0b4a4282", 460 | "metadata": {}, 461 | "source": [ 462 | "Using the navigation menu on the left, navigate to:\n", 463 | "\n", 464 | "`home -> work -> example_3`\n", 465 | "\n", 466 | "and **open** the .png files that were created to look at the waveforms. The waveforms should be relatively simple since this is a homogeneous halfspace model." 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": null, 472 | "id": "8f1054a2-7f80-42f4-9f37-3fb6f142c7bc", 473 | "metadata": {}, 474 | "outputs": [], 475 | "source": [ 476 | "Image(\"AA.S000000.BXY.semd.png\")" 477 | ] 478 | }, 479 | { 480 | "cell_type": "code", 481 | "execution_count": null, 482 | "id": "94bd79f5-0558-43ff-9004-e7ed4cacbaa8", 483 | "metadata": {}, 484 | "outputs": [], 485 | "source": [ 486 | "Image(\"AA.S00000n.BXY.semd.png\")" 487 | ] 488 | }, 489 | { 490 | "cell_type": "markdown", 491 | "id": "dd0c981f", 492 | "metadata": {}, 493 | "source": [ 494 | "## 6) Testing SPECFEM3D\n", 495 | "\n", 496 | "- We want to make sure we can run SPECFEM3D natively by running the homogeneous halfspace example problem using MPI.\n", 497 | "- The cell first sets up the example directory, and then runs a 3D example problem with 4 processors. \n", 498 | "- We can not easily visualize SPECFEM3D results, so we plot synthetic waveforms as a check.\n", 499 | "- **Benchmark time for cell: ~9m**" 500 | ] 501 | }, 502 | { 503 | "cell_type": "code", 504 | "execution_count": null, 505 | "id": "620e2692", 506 | "metadata": {}, 507 | "outputs": [], 508 | "source": [ 509 | "%cd /home/scoped/specfem3d/EXAMPLES/homogeneous_halfspace\n", 510 | "\n", 511 | "# Setup example problem\n", 512 | "! ln -s /home/scoped/specfem3d/bin .\n", 513 | "! cp -r ./meshfem3D_files ./DATA/meshfem3D_files\n", 514 | "! 
mkdir -p OUTPUT_FILES/DATABASES_MPI\n", 515 | "\n", 516 | "# Run SPECFEM3D exectuables\n", 517 | "! mpirun -n 4 bin/xmeshfem3D\n", 518 | "! mpirun -n 4 bin/xgenerate_databases\n", 519 | "! mpirun -n 4 bin/xspecfem3D" 520 | ] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": null, 525 | "id": "40c560ea", 526 | "metadata": {}, 527 | "outputs": [], 528 | "source": [ 529 | "# Plots synthetic waveforms as a check\n", 530 | "! seisflows plotst OUTPUT_FILES/*semd --savefig specfem3d_synthetics.png" 531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "id": "2fc42d5b", 536 | "metadata": {}, 537 | "source": [ 538 | "Using the navigation menu on the left, navigate to:\n", 539 | "\n", 540 | "`home -> specfem3d -> EXAMPLES -> homogeneous_halfspace`\n", 541 | "\n", 542 | "and **open** the `specfem3d_synthetics.png` file to look at the waveforms. We can see the directivity of the moment tensor in the variation of amplitude of the given waveforms." 543 | ] 544 | }, 545 | { 546 | "cell_type": "code", 547 | "execution_count": null, 548 | "id": "52cfc39d-8537-4ed0-90ec-9ae9096fb814", 549 | "metadata": {}, 550 | "outputs": [], 551 | "source": [ 552 | "Image(\"specfem3d_synthetics.png\")" 553 | ] 554 | }, 555 | { 556 | "cell_type": "markdown", 557 | "id": "5a07e6bc-08a1-4b6f-a01a-f65d5b42813f", 558 | "metadata": {}, 559 | "source": [ 560 | "```python\n", 561 | "All done. 
Thanks :)\n", 562 | "```" 563 | ] 564 | } 565 | ], 566 | "metadata": { 567 | "kernelspec": { 568 | "display_name": "Python 3 (ipykernel)", 569 | "language": "python", 570 | "name": "python3" 571 | }, 572 | "language_info": { 573 | "codemirror_mode": { 574 | "name": "ipython", 575 | "version": 3 576 | }, 577 | "file_extension": ".py", 578 | "mimetype": "text/x-python", 579 | "name": "python", 580 | "nbconvert_exporter": "python", 581 | "pygments_lexer": "ipython3", 582 | "version": "3.10.6" 583 | } 584 | }, 585 | "nbformat": 4, 586 | "nbformat_minor": 5 587 | } 588 | -------------------------------------------------------------------------------- /workshops/2024-05-21_scoped_uw/4_intro_specfem3d.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "347bc0f7-6dbe-454c-a95a-8469d8efe1c2", 6 | "metadata": {}, 7 | "source": [ 8 | "# 2024 SCOPED Workshop — Wavefield Simulations Using SPECFEM\n", 9 | "## Notebook 3: Intro to SPECFEM3D\n", 10 | "\n", 11 | "- SPECFEM3D_Cartesian follows very similar practices as SPECFEM2D. \n", 12 | "- In this short introduction, we will explore the SPECFEM3D repository, and then run a small example homogeneous halfspace example. 
\n", 13 | "- These instructions should be run from inside the Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)).\n", 14 | "\n", 15 | "-----------\n", 16 | "\n", 17 | "**Relevant Links:** \n", 18 | "- Today's Notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2024-5-21_scoped_uw/4_intro_specfem3d.ipynb \n", 19 | "- SPECFEM3D_Cartesian User Manual: https://github.com/geodynamics/specfem3d/blob/devel/doc/USER_MANUAL/manual_SPECFEM3D_Cartesian.pdf\n", 20 | "- SPECFEM3D_Cartesian GitHub Repository: https://github.com/geodynamics/specfem3d/tree/devel \n", 21 | "\n", 22 | "**Jupyter Quick Tips:**\n", 23 | "\n", 24 | "- **Run cells** one-by-one by hitting the $\\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 25 | "- **Run all cells** by hitting the $\\blacktriangleright\\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 26 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 27 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 28 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 29 | "- Commands that start with `%` are Jupyter Magic commands.\n", 30 | "- To time a task, put a `%time` before the command (e.g., `%time ! ls`)" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "id": "b5326085-4028-4446-9c9b-2c3438a2c222", 36 | "metadata": { 37 | "tags": [] 38 | }, 39 | "source": [ 40 | "------------\n", 41 | "## 1) Package Exploration\n", 42 | "\n", 43 | "Let's get started by looking at the SPECFEM3D repository." 
44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "3e541fc3-732e-4e4c-9da9-90a227cdb857", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# Python packages we will need for this notebook\n", 54 | "import matplotlib.pyplot as plt\n", 55 | "import numpy as np\n", 56 | "from IPython.display import Image" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "id": "2bf4298c-bfdd-428d-adbe-6423749c8880", 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "# Go to the SPECFEM3D directory\n", 67 | "%cd /home/scoped/specfem3d\n", 68 | "\n", 69 | "# Exploring the SPECFEM3D directory\n", 70 | "! ls" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "id": "e4331a55-bdf0-466c-bc28-37f824c853d7", 76 | "metadata": {}, 77 | "source": [ 78 | "Among the many folders (and files) listed above, the most notable ones are -\n", 79 | "- `bin/`\n", 80 | "- `DATA/`\n", 81 | "- `OUTPUT_FILES/`\n", 82 | "- `EXAMPLES/`\n", 83 | "- `src/`\n", 84 | "\n", 85 | "Let's look at these folders one by one.\n", 86 | "\n", 87 | "### 1a) ' bin/ ' directory" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "id": "1cf19452-e3c8-4f42-b2c8-660f22675df4", 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "! 
ls bin" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "id": "881d5a1f-0f3d-4db5-8f76-1ab9421f9457", 103 | "metadata": {}, 104 | "source": [ 105 | "- The `bin/` folder contains binary executable files which are essentially linked compiled fortran code generated after compiling various programs available with this package \n", 106 | "- The most essential executables for a basic simulation are: \n", 107 | " - `xmeshfem3D`: also called SPECFEM3D's internal mesher, this program discretizes the simulation domain into small elements \n", 108 | " - `xgenerate_databases`: this database generation program assigns model parameter values to the elements \n", 109 | " - `xspecfem3D`: also called the solver, this program uses the mesh and the corresponding model parameter values assigned to the mesh elements, for numerically solving the wave equation \n", 110 | " \n", 111 | "### 1b) ' DATA/ ' directory\n" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "id": "f8c72c78-9042-4425-a297-a7497963e23f", 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "! ls DATA" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "id": "b018aafb-7650-4655-9b93-bf795e64d5e3", 127 | "metadata": {}, 128 | "source": [ 129 | "The `DATA/` folder is the input files folder and contains files and folders which describe the -\n", 130 | "- mesh: (`meshfem3D_files/`)\n", 131 | "- model: (`meshfem3D_files/`, `tomo_files/`)\n", 132 | "- source: (`CMTSOLUTION`, `FORCESOLUTION`)\n", 133 | "- station: (`STATIONS`)\n", 134 | "\n", 135 | "### 1c) ' OUTPUT_FILES/ ' directory\n" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": null, 141 | "id": "89522de3-ce38-4285-b4b4-2e715122da6a", 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [ 145 | "! 
ls OUTPUT_FILES" 146 | ] 147 | }, 148 | { 149 | "cell_type": "markdown", 150 | "id": "1afb1742-f509-4c38-84b8-35cb43627d61", 151 | "metadata": {}, 152 | "source": [ 153 | "- This `OUTPUT_FILES/` folder contains the output files of any SPECFEM3D job. \n", 154 | "- The `DATABASES_MPI/` folder in the `OUTPUT_FILES/` folder contains the database files generated as a result of a meshing or database generation job. The database files can be very large in size. \n", 155 | "- Other job files like log files for the various programs as well as the output seismograms are directly generated in the `OUTPUT_FILES/` folder.\n", 156 | "\n", 157 | "### 1d) ' src/ ' directory" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "id": "b5bb6206-a0bd-4c50-a7c1-b3057627793a", 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "! ls src" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "id": "fe6dcda2-3e3f-493a-8b9b-9748e0fcf37c", 173 | "metadata": {}, 174 | "source": [ 175 | "- The `src/` folder contains the source code for the various programs in SPECFEM3D. \n", 176 | "- If you want to add to or modify some of SPECFEM3D's features, you need to modify one or more of the files in the 'src/' subfolders. \n", 177 | "- If you think the features you added would be useful to the broader SPECFEM3D community, please consider making a pull request to the SPECFEM3D github [repository](https://github.com/geodynamics/specfem3d/tree/devel) so that it can be reviewed and integrated to the package. \n", 178 | "\n", 179 | "### 1e) ' EXAMPLES/ ' directory" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "id": "fe2f60b2-7036-4f90-b6c7-9194bf0484d8", 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "! 
ls EXAMPLES" 190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "id": "250ae152-71ee-4cec-87ea-abaf771132bb", 195 | "metadata": {}, 196 | "source": [ 197 | "- The `EXAMPLES/` folder contains a variety of examples to provide a quick start to the user on how to use SPECFEM3D and its various features. \n", 198 | "- The README files within the example folders within `EXAMPLES/` guide the user through the steps to run each example. \n", 199 | "- Some of the highly recommended examples to start with, depending on your need to run SPECFEM3D, are:\n", 200 | " - `homogeneous_halfspace/` - a simple homogeneous halfspace model based simulation\n", 201 | " - `meshfem3D_examples/socal1D/` - a layered model based simulation\n", 202 | " - `sensitivity_kernels_liutromp2006/` - adjoint simulations to generate the kernels in [Liu&Tromp 2006](https://pubs.geoscienceworld.org/ssa/bssa/article/96/6/2383/146674/Finite-Frequency-Kernels-Based-on-Adjoint-Methods)\n", 203 | "\n", 204 | ">__NOTE:__ README files, with steps to run these examples, should not undermine the detailing provided in the [manual](https://github.com/geodynamics/specfem3d/blob/master/doc/USER_MANUAL) on how to run SPECFEM3D.\n", 205 | "\n" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "id": "f8f4422f-78e7-4624-82be-ebc3f3f9d59b", 211 | "metadata": {}, 212 | "source": [ 213 | "- Here we will start with the `homogeneous halfspace/` example, and then move on to running simulations with layered models. \n", 214 | "- We will also discuss some strategies to design meshes using SPECFEM3D's internal mesher. 
" 215 | ] 216 | }, 217 | { 218 | "cell_type": "markdown", 219 | "id": "96b11bb2-b7a4-4f1c-8deb-54af4fcea387", 220 | "metadata": {}, 221 | "source": [ 222 | "-----------" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "id": "3b47774a-5cba-4e9c-b859-ba5a3e1caffa", 228 | "metadata": { 229 | "tags": [] 230 | }, 231 | "source": [ 232 | "## 2) Setting Up Simulations\n", 233 | "\n", 234 | "- It is often desirable to run SPECFEM outside of the cloned SPECFEM repository, in order to keep files and outputs manageable. \n", 235 | "- SPECFEM3D only requires the following 3 directories for a successful run -\n", 236 | " - `bin/` (with compiled executables)\n", 237 | " - `DATA/` (with the necessary input files)\n", 238 | " - `OUTPUT_FILES/`\n", 239 | "\n", 240 | "In this section we will set up a separate SPECFEM3D working directory to work with.\n", 241 | "\n", 242 | ">__NOTE:__ The following cells assume that we are in the directory `/home/scoped/work/day_1/specfem3d_workdir`, so we must evaluate the '%cd' command (if needed) to ensure that cells work as expected." 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "id": "d00e2ce8-2613-45e8-a115-64915c95ed45", 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "# Create separate working directory for SPECFEM3D\n", 253 | "! mkdir -p /home/scoped/work/day_1/specfem3d_workdir\n", 254 | "\n", 255 | "# Go to the SPECFEM3D working directory\n", 256 | "%cd /home/scoped/work/day_1/specfem3d_workdir\n", 257 | "\n", 258 | "# Symlink the binary files, and copy the relevant DATA/ directory\n", 259 | "! ln -s /home/scoped/specfem3d/bin .\n", 260 | "! mkdir -p OUTPUT_FILES/DATABASES_MPI\n", 261 | "\n", 262 | "# Look at the work directory\n", 263 | "! 
ls" 264 | ] 265 | }, 266 | { 267 | "cell_type": "markdown", 268 | "id": "143a9beb-d574-456c-b9e9-b805347caa3f", 269 | "metadata": {}, 270 | "source": [ 271 | "- The work directory now has the `bin/` and the `OUTPUT_FILES/` folders.\n", 272 | "- The `DATA/` folder will be added in the subsequent sections depending on the example we want to run." 273 | ] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "id": "4f4b47bb-0b30-4cc0-ad31-57afd3213a45", 278 | "metadata": {}, 279 | "source": [ 280 | "-----------" 281 | ] 282 | }, 283 | { 284 | "cell_type": "markdown", 285 | "id": "8049c01a-3857-40ae-a7cc-357288b8621c", 286 | "metadata": {}, 287 | "source": [ 288 | "## 3) Homogeneous Halfspace Simulations\n", 289 | "\n", 290 | "A SPECFEM3D simulation primarily involves three steps -\n", 291 | "- meshing\n", 292 | "- database generation\n", 293 | "- solving the wave equation\n", 294 | "\n", 295 | "We will go through these steps in the following example." 296 | ] 297 | }, 298 | { 299 | "cell_type": "markdown", 300 | "id": "0098f1e7-43ab-4dea-8e43-f058bbe031a7", 301 | "metadata": {}, 302 | "source": [ 303 | "This example creates a homogeneous halfspace, i.e. a single volume block with a\n", 304 | "constant elastic material property, using SPECFEM3D's internal mesher, and runs a\n", 305 | "forward simulation." 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "id": "4be55063-bd72-49b6-8d4f-fc2be913286e", 312 | "metadata": {}, 313 | "outputs": [], 314 | "source": [ 315 | "# Copy necessary input files for the homogeneous halfspace example\n", 316 | "! cp -r /home/scoped/specfem3d/EXAMPLES/homogeneous_halfspace/DATA .\n", 317 | "! cp -r /home/scoped/specfem3d/EXAMPLES/homogeneous_halfspace/meshfem3D_files DATA/.\n", 318 | "\n", 319 | "# Look at the DATA directory\n", 320 | "! 
ls DATA" 321 | ] 322 | }, 323 | { 324 | "cell_type": "markdown", 325 | "id": "3e990f07-68c0-4869-a102-481a229d55d4", 326 | "metadata": {}, 327 | "source": [ 328 | "### Step 1) Meshing\n", 329 | "\n", 330 | "Here we generate the numerical mesh, or the discretization of our domain into finite elements" 331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": null, 336 | "id": "ddb2179b-6c6d-410d-bb4f-993512f140cd", 337 | "metadata": {}, 338 | "outputs": [], 339 | "source": [ 340 | "# Explore the mesh files\n", 341 | "! ls DATA/meshfem3D_files" 342 | ] 343 | }, 344 | { 345 | "cell_type": "code", 346 | "execution_count": null, 347 | "id": "fe4420a6-35dd-4297-8fe6-ba479173ddac", 348 | "metadata": {}, 349 | "outputs": [], 350 | "source": [ 351 | "# Check the Mesh_Par_file\n", 352 | "! cat DATA/meshfem3D_files/Mesh_Par_file" 353 | ] 354 | }, 355 | { 356 | "cell_type": "code", 357 | "execution_count": null, 358 | "id": "c0f8e1ca-5b13-4278-8cd3-fc7519551533", 359 | "metadata": {}, 360 | "outputs": [], 361 | "source": [ 362 | "# Run mesher (xmeshfem3D)\n", 363 | "! mpiexec -np 4 ./bin/xmeshfem3D\n", 364 | "\n", 365 | "# Look at the generated mesh files\n", 366 | "! ls OUTPUT_FILES/DATABASES_MPI" 367 | ] 368 | }, 369 | { 370 | "cell_type": "markdown", 371 | "id": "5b4b34d4-63ea-41de-b145-1841bfeabf55", 372 | "metadata": {}, 373 | "source": [ 374 | "### Step 2) Database Generation: Model assignment to mesh\n", 375 | "\n", 376 | "We generate database files which stores the GLL model to be used for simulations." 377 | ] 378 | }, 379 | { 380 | "cell_type": "code", 381 | "execution_count": null, 382 | "id": "6eb71586-61bd-4cba-b7e3-7eacfea9cc82", 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "# Run database generator (xgenerate_databases)\n", 387 | "! mpiexec -np 4 ./bin/xgenerate_databases\n", 388 | "\n", 389 | "# Explore the generated database files\n", 390 | "! 
ls OUTPUT_FILES/DATABASES_MPI" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": null, 396 | "id": "d1257259-6537-4e5e-b015-82d470e3d726", 397 | "metadata": {}, 398 | "outputs": [], 399 | "source": [ 400 | "# View the meshing and the Vp model\n", 401 | "# The following image was generated using Paraview\n", 402 | "! cp /home/scoped/adjdocs/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/mesh/example_A.png .\n", 403 | "Image(\"example_A.png\")" 404 | ] 405 | }, 406 | { 407 | "cell_type": "markdown", 408 | "id": "8580bcb8-ff37-4c3e-862b-7fed57e80721", 409 | "metadata": {}, 410 | "source": [ 411 | "**Fig.** The homogeneous halfspace velocity (Vp) model and mesh. The model spans 134 km in the 'X' and 'Y' directions, and 60 km in the 'Z' direction. The mesh has 36 elements in the 'X' and 'Y' directions, and has 16 elements in the 'Z' direction. The halfspace has a Vp value of 2.8 km/s." 412 | ] 413 | }, 414 | { 415 | "cell_type": "markdown", 416 | "id": "1a72ccd8-8dd0-4193-9ec3-24b52dcbd14f", 417 | "metadata": {}, 418 | "source": [ 419 | "### Step 3) Simulation: Solving the wave equation\n", 420 | "\n", 421 | "Finally, we run the numerical solver to simulate seismic wave propagation within our mesh and model" 422 | ] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": null, 427 | "id": "01919eeb-04b1-4f45-a0c4-c2b3a81148e0", 428 | "metadata": {}, 429 | "outputs": [], 430 | "source": [ 431 | "# Check the source definition\n", 432 | "! cat DATA/CMTSOLUTION" 433 | ] 434 | }, 435 | { 436 | "cell_type": "markdown", 437 | "id": "f59a185a-4c13-40ed-a280-f55095ce3290", 438 | "metadata": {}, 439 | "source": [ 440 | "Read more about the CMTSOLUTION files [here](https://www.sciencedirect.com/science/article/pii/S0031920112000696?via%3Dihub)." 
441 | ] 442 | }, 443 | { 444 | "cell_type": "code", 445 | "execution_count": null, 446 | "id": "411d7c9e-dce4-41e1-8dc2-14a7c57a10a0", 447 | "metadata": {}, 448 | "outputs": [], 449 | "source": [ 450 | "# Check the stations file\n", 451 | "! cat DATA/STATIONS" 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": null, 457 | "id": "8913c3d1-43dd-4571-8829-9825eb1430d8", 458 | "metadata": {}, 459 | "outputs": [], 460 | "source": [ 461 | "# View the meshing and the Vp model (figure pre-generated using ParaView)\n", 462 | "! cp /home/scoped/adjdocs/workshops/2022-10-05_specfem_users/additional_material/day_1c/figures/source_station_geometry/sr.png .\n", 463 | "Image(\"sr.png\")" 464 | ] 465 | }, 466 | { 467 | "cell_type": "markdown", 468 | "id": "f131bf10-e1a8-4f58-8782-998f7279d852", 469 | "metadata": {}, 470 | "source": [ 471 | "**Fig.** The source station geometry, in the homogeneous halfspace domain with the 'Y' dimension cut in half. The source and stations are placed on the plane of the slice. The source is at the center of the grid, while the stations are equally spaced on a section along the surface." 472 | ] 473 | }, 474 | { 475 | "cell_type": "code", 476 | "execution_count": null, 477 | "id": "07e0b96c-470b-4f37-a005-20b369afed36", 478 | "metadata": {}, 479 | "outputs": [], 480 | "source": [ 481 | "# Check the Par_file\n", 482 | "! head -30 DATA/Par_file" 483 | ] 484 | }, 485 | { 486 | "cell_type": "code", 487 | "execution_count": null, 488 | "id": "06090c90-d08c-45a1-8cb4-32c7b2963514", 489 | "metadata": {}, 490 | "outputs": [], 491 | "source": [ 492 | "! head -80 DATA/Par_file | tail -n 22" 493 | ] 494 | }, 495 | { 496 | "cell_type": "code", 497 | "execution_count": null, 498 | "id": "11ef3e3c-5193-422b-bb76-2fd46896d434", 499 | "metadata": {}, 500 | "outputs": [], 501 | "source": [ 502 | "# Run the solver (xspecfem3D)\n", 503 | "%time ! 
mpiexec -np 4 ./bin/xspecfem3D" 504 | ] 505 | }, 506 | { 507 | "cell_type": "markdown", 508 | "id": "c5c5fb5e-3580-4b0f-b37c-b880506afded", 509 | "metadata": {}, 510 | "source": [ 511 | ">__NOTE:__ solver runs should take ~5 minutes or less on a recent (as of 2022) laptop.\n", 512 | "\n", 513 | "Note the wall time for the simulation." 514 | ] 515 | }, 516 | { 517 | "cell_type": "code", 518 | "execution_count": null, 519 | "id": "39ad16d7-67bd-4e42-a606-0a07a2e0868d", 520 | "metadata": {}, 521 | "outputs": [], 522 | "source": [ 523 | "# Look at the simulation output files\n", 524 | "! ls OUTPUT_FILES/" 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": null, 530 | "id": "7dfa0b35-0a7b-47cf-a908-69239701fc29", 531 | "metadata": {}, 532 | "outputs": [], 533 | "source": [ 534 | "# Explore the seismograms\n", 535 | "! head -25 OUTPUT_FILES/DB.X50.BXZ.semd " 536 | ] 537 | }, 538 | { 539 | "cell_type": "code", 540 | "execution_count": null, 541 | "id": "20a6903b-6beb-4984-aefe-213a38acca6e", 542 | "metadata": {}, 543 | "outputs": [], 544 | "source": [ 545 | "# Plot the seismograms\n", 546 | "X20_Z = np.genfromtxt(\"OUTPUT_FILES/DB.X20.BXZ.semd\", dtype=None, names=(\"time\",\"BXZ\"))\n", 547 | "X30_Z = np.genfromtxt(\"OUTPUT_FILES/DB.X30.BXZ.semd\", dtype=None, names=(\"time\",\"BXZ\"))\n", 548 | "X40_Z = np.genfromtxt(\"OUTPUT_FILES/DB.X40.BXZ.semd\", dtype=None, names=(\"time\",\"BXZ\"))\n", 549 | "X50_Z = np.genfromtxt(\"OUTPUT_FILES/DB.X50.BXZ.semd\", dtype=None, names=(\"time\",\"BXZ\"))\n", 550 | "\n", 551 | "t = X20_Z[\"time\"]\n", 552 | "\n", 553 | "plt.title(\"Seismograms (Z - component)\")\n", 554 | "plt.xlabel(\"---- time -->\")\n", 555 | "plt.ylabel(\"---- displacement -->\")\n", 556 | "\n", 557 | "plt.plot(t,X20_Z[\"BXZ\"],label=\"X20\")\n", 558 | "plt.plot(t,X30_Z[\"BXZ\"],label=\"X30\")\n", 559 | "plt.plot(t,X40_Z[\"BXZ\"],label=\"X40\")\n", 560 | "plt.plot(t,X50_Z[\"BXZ\"],label=\"X50\")\n", 561 | "\n", 562 | 
"plt.legend(title=\"Station\")\n", 563 | "plt.savefig(\"seis.png\")" 564 | ] 565 | }, 566 | { 567 | "cell_type": "code", 568 | "execution_count": null, 569 | "id": "fc2fb761-2a0e-49ba-836b-fa482b7547ac", 570 | "metadata": {}, 571 | "outputs": [], 572 | "source": [ 573 | "# We can also use the RecordSection tool to plot our synthetics\n", 574 | "! recsec --syn_path OUTPUT_FILES --source DATA/CMTSOLUTION --stations DATA/STATIONS --components XYZ --scale_by normalize \n", 575 | "Image(\"record_section.png\")" 576 | ] 577 | }, 578 | { 579 | "cell_type": "code", 580 | "execution_count": null, 581 | "id": "8d676b79-52ce-4005-8fd3-3ca68e0df438", 582 | "metadata": {}, 583 | "outputs": [], 584 | "source": [ 585 | "# Let's remove the large sized database files to reduce storage requirements\n", 586 | "! rm -rf /home/scoped/work/day_1/specfem3d_workdir/OUTPUT_FILES/DATABASES_MPI" 587 | ] 588 | }, 589 | { 590 | "cell_type": "markdown", 591 | "id": "a58c718b-4a8d-4e9c-ba7c-ab9ffb0f1625", 592 | "metadata": {}, 593 | "source": [ 594 | "**Congratulations! 
You have reached your destination.**" 595 | ] 596 | } 597 | ], 598 | "metadata": { 599 | "kernelspec": { 600 | "display_name": "Python 3 (ipykernel)", 601 | "language": "python", 602 | "name": "python3" 603 | }, 604 | "language_info": { 605 | "codemirror_mode": { 606 | "name": "ipython", 607 | "version": 3 608 | }, 609 | "file_extension": ".py", 610 | "mimetype": "text/x-python", 611 | "name": "python", 612 | "nbconvert_exporter": "python", 613 | "pygments_lexer": "ipython3", 614 | "version": "3.12.3" 615 | } 616 | }, 617 | "nbformat": 4, 618 | "nbformat_minor": 5 619 | } 620 | -------------------------------------------------------------------------------- /workshops/2022-10-05_specfem_users/day_2b_kernels_exercise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "347bc0f7-6dbe-454c-a95a-8469d8efe1c2", 6 | "metadata": {}, 7 | "source": [ 8 | "# SPECFEM Users Workshop -- Day 2 (Oct. 6, 2022)\n", 9 | "## Day 2b: Kernel Exercise\n", 10 | "\n", 11 | "\n", 12 | "- In this notebook we will have participants run their own adjoint simulation using Day 2a as a guide \n", 13 | "- We will use two homogeneous halfspace models for simplicity, building on the exercise from Day 1b \n", 14 | "- Adjoint simulations are key for performing seismic imaging (Day 3) as their results guide iterative model updates during the inverse problem \n", 15 | "- These instructions should be run from inside a Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)). 
\n", 16 | "-----------\n", 17 | "\n", 18 | "**Relevant Links:** \n", 19 | "- Today's Notebook: https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/day_2b_kernels_exercise.ipynb\n", 20 | "- Completed Notebook https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_2b_kernels_exercise.ipynb\n", 21 | "- Day 0 Notebook (Container Testing): https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_0_container_testing.ipynb\n", 22 | "- Day 1A Notebook (Intro SPECFEM): https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_1a_intro_specfem2d.ipynb\n", 23 | "- Day 1B Notebook (Fwd. Simulations): https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_1b_forward_simulations.ipynb\n", 24 | "- Day 2A Notebook (Adj. Simulations): https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_2a_kernels.ipynb\n", 25 | "\n", 26 | "**Jupyter Quick Tips:**\n", 27 | "\n", 28 | "- **Run cells** one-by-one by hitting the $\\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 29 | "- **Run all cells** by hitting the $\\blacktriangleright\\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 30 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 31 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 32 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 33 | "- Commands that start with `%` are Jupyter Magic commands." 
34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "c0acd55c-e9b3-4448-a327-ea23b797d6a5", 39 | "metadata": { 40 | "tags": [] 41 | }, 42 | "source": [ 43 | "-----------\n", 44 | "## 0) Setting Up a SPECFEM2D Working Directory\n", 45 | "\n", 46 | "- Let's set up a clean working directory to run SPECFEM2D \n", 47 | "- We will be doing all our work in the directory `/home/scoped/work/day_2/exercise`, all the following cells assume that we are in this directory " 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "797fb508-998d-470a-8142-7e3e61a063c4", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "# Python packages we might use in this notebook\n", 58 | "import numpy as np\n", 59 | "import matplotlib.pyplot as plt\n", 60 | "from IPython.display import Image" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "id": "a7c386f6-0c75-4d44-b532-a1455eefd1cc", 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "# Make correct dir. and move there\n", 71 | "! mkdir -p /home/scoped/work/day_2/exercise\n", 72 | "%cd /home/scoped/work/day_2/exercise\n", 73 | "\n", 74 | "# Symlink the executables and copy the relevant DATA/ directory\n", 75 | "! ln -s /home/scoped/specfem2d/bin .\n", 76 | "! cp -r /home/scoped/specfem2d/EXAMPLES/Tape2007/DATA .\n", 77 | "! cp -f DATA/Par_file_Tape2007_onerec DATA/Par_file\n", 78 | "\n", 79 | "# Ensure that SPECFEM outputs required files for adjoint simulations\n", 80 | "! seisflows sempar -P DATA/Par_file save_model binary\n", 81 | "! seisflows sempar -P DATA/Par_file setup_with_binary_database 1\n", 82 | "\n", 83 | "! mkdir OUTPUT_FILES\n", 84 | "\n", 85 | "! 
ls" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "id": "e6404755-a37c-40bf-aab5-377f46f57e9d", 91 | "metadata": {}, 92 | "source": [ 93 | "------------\n", 94 | "## 1) Target Model Forward Simulations\n", 95 | "\n", 96 | "- We'll use the **homogeneous halfspace** defined by default in the example as our target model\n", 97 | "- Remember that the model spans X = [0, 480000]m and Z = [0, 480000]m \n", 98 | "- Also remember that there are 40 elements in X and Z, corresponding to element sizes of 12000m \n", 99 | "\n", 100 | "### 1a) STATIONS\n", 101 | "- Using what you learned in Day 1, generate your own STATIONS file with an interesting configuration\n", 102 | "- Look at *'STATIONS_checker'* to get a refresher of how the STATIONS file should be configured \n", 103 | "- *Remember* that eventually SPECFEM will be looking for a file called *'DATA/STATIONS'*\n", 104 | "\n", 105 | "#### Choose your adventure: \n", 106 | "- **Easier approach**: Use the first 25 stations in the *'STATIONS_checker'* file \n", 107 | "- **Moderate approach**: Generate a horizontal or vertical line of 25 stations across your domain \n", 108 | "- **Advanced approach**: Use up to 100 stations to design an array configuration of your choice \n", 109 | " - Potential configurations you could choose that might mimic a real world seismic array: \n", 110 | " a) Cross shaped linear array \n", 111 | " b) Uniform, dense gridded array \n", 112 | " c) Spiral \n", 113 | " d) Concentric rings \n", 114 | " e) Dense linear array mimicing a DAS sensor " 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "id": "11028ffe-ad9f-4dea-8d08-38906d900f85", 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "id": "fba99890-1233-463b-a579-693f48b0644c", 128 | "metadata": {}, 129 | "source": [ 130 | "### 1b) SOURCE\n", 131 | "- Let's create a SOURCE for our simulation\n", 132 | "- *Remember* that SPECFEM2D is 
expecting a file called *'DATA/SOURCE'* \n", 133 | "\n", 134 | "\n", 135 | "#### Choose your adventure: \n", 136 | "- **Easier approach**: Use **one** of the available 25 *'SOURCE_???'* files located in the 'DATA' directory \n", 137 | "- **Moderate approach**: Use one of the available *'SOURCE_???'* files as a template, but change its location to the center of the domain \n", 138 | "- **Advanced approach**: Can you think of an interesting moment tensor configuration for your source? Explosions are always fun! Can you place the SOURCE somewhere it would be interesting based on your station configuration? e.g., \n", 139 | " - at the center of a spiral or concentric rings \n", 140 | " - normal to a linear array or grid \n", 141 | " - at one end of a linear array" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "id": "ffd0a5cc-d0a5-405c-83a4-e4a3aeb3c195", 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [] 151 | }, 152 | { 153 | "cell_type": "markdown", 154 | "id": "19d32129-3855-4ece-b649-1690b3661c74", 155 | "metadata": {}, 156 | "source": [ 157 | "### 1c) Run the Mesher and Solver \n", 158 | "\n", 159 | "1) *Remember* to tell SPECFEM to use your STATIONS file, and not its internal definition of stations (see e.g., Day 2a, Section 2b)\n", 160 | "2) Run your simulation in **parallel** using 4 cores \n", 161 | " - *Remember* that you need to tell both SPECFEM and MPI that you are planning to run 4 processes \n", 162 | "\n", 163 | "The remainder of the `Par_file` should already be set up appropriately" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "id": "6f33370d-7095-4072-9937-74175e411fcd", 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [] 173 | }, 174 | { 175 | "cell_type": "markdown", 176 | "id": "b6732e2f-3474-4439-b133-d568d023b977", 177 | "metadata": {}, 178 | "source": [ 179 | "### 1d) Save Your Results\n", 180 | "- Make sure you **save the seismograms** 
output by SPECFEM somewhere safe \n", 181 | "- *Remember* that subsequent simulations will **overwrite** files in the DATA/ and OUTPUT_FILES/ directory \n", 182 | "- Remember that displacement seismograms are stored in the *OUTPUT_FILES/* directory with the extension '*.semd*'" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": null, 188 | "id": "4859243d-3be1-40fc-afa6-fd11debac2c2", 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "id": "211041b7-0881-4958-adcb-9cf31f7d590e", 196 | "metadata": {}, 197 | "source": [ 198 | "------------\n", 199 | "## 2) Initial Model Forward Simulations\n", 200 | "\n", 201 | "- Let's edit the current model definition to create a separate initial or 'starting' model\n", 202 | "- The starting model will also be a homogeneous halfspace, but with slightly different velocities \n", 203 | "- We'll use what we learned in the [Day 1B exercise](https://github.com/adjtomo/adjdocs/blob/main/workshops/2022-10-05_specfem_users/completed_notebooks/day_1b_forward_simulations.ipynb) to change the model parameters \n", 204 | "\n", 205 | "### 2a) Edit Velocity Model\n", 206 | "- *Remember* that the velocity model is defined in the `Par_file`\n", 207 | "- Let's **decrease** the velocity values (Vp and Vs) of the starting model by 20\\% \n", 208 | "- In other words $V_p \\rightarrow V_p - V_p \\times 0.2$\n", 209 | "- *Remember* from Day 1B that we can use SeisFlows to view and change the velocity model parameters, you can also use a text editor " 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "id": "21d8fc69-075f-426f-a0bb-d6f53433b41b", 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [] 219 | }, 220 | { 221 | "cell_type": "markdown", 222 | "id": "7896be89-0e76-4b90-a448-2afcd49ef2f4", 223 | "metadata": {}, 224 | "source": [ 225 | "### 2b) Set the `Par_file` for a new Forward Simulation\n", 226 | "\n", 227 
| "1) Tell SPECFEM to **save the forward wavefield** after the simulation \n", 228 | "2) Tell SPECFEM to **output binary database files** (as opposed to ASCII files) \n", 229 | "3) Tell SPECFEM to **save the model** in binary format" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "id": "17e52cb5-3a02-467e-a695-b424d6a082d8", 236 | "metadata": {}, 237 | "outputs": [], 238 | "source": [] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "id": "1446249a-c2c8-4e18-b854-1fa00c80a6df", 243 | "metadata": {}, 244 | "source": [ 245 | "### 2c) Run the Mesher, Solver and Save Results \n", 246 | "\n", 247 | "1) Run your simulation in **parallel** using 4 cores\n", 248 | "2) After your simulation, **save the seismograms** output by SPECFEM somewhere safe " 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "id": "2595543a-6ee9-4264-9ccc-b8752149bfab", 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [] 258 | }, 259 | { 260 | "cell_type": "markdown", 261 | "id": "b925ff0a-3c24-4ac3-9344-a5eb868ddcc7", 262 | "metadata": {}, 263 | "source": [ 264 | "### 2d) Optional: Visualize Waveforms\n", 265 | "\n", 266 | "- To make sure the two models were different, we can check our waveforms against one another \n", 267 | "- Use Python to plot matching seismograms against one another \n", 268 | "- Alternatively, you can use RecSec to plot *both* sets of synthetics, you'll need the following flags \n", 269 | " - `--pysep_path`: path to tell RecSec where your 'data' is \n", 270 | " - `--syn_path`: path to tell RecSec where your 'synthetics' are \n", 271 | " - `--cmtsolution`: path to your 'SOURCE' file \n", 272 | " - `--stations`: path to your 'STATIONS' file \n", 273 | " - `--components`: the components of your seismograms. 
These are listed in the filenames (e.g., AA.S000099.BXY.semd is component 'Y') \n", 274 | " - `--synsyn`: flag to tell RecSec that we are plotting two sets of synthetics (not actual data) \n", 275 | " - `--cartesian`: flag to tell RecSec that our domain is cartesian (not geographic) " 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": null, 281 | "id": "cbc9bd90-4aad-4e15-b975-364b9cc92b32", 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "id": "3fa4b06e-7cb6-4d62-965f-d42842c9b703", 289 | "metadata": {}, 290 | "source": [ 291 | "---------------\n", 292 | "## 3) Quantify Misfit, Generate Adjoint Sources\n", 293 | "\n", 294 | "- You should now have **two sets of synthetics**, one generated by your initial model, another by your target model\n", 295 | "- We now want to generate adjoint sources for each pair of synthetics\n", 296 | "\n", 297 | "#### Choose your adventure: \n", 298 | "- **Easier/Moderate approach**: Define a waveform difference misfit function as in Day 2a, Section 3b \n", 299 | "- **Advanced approach**: Head to Section 3B to try and define a cross-correlation traveltime misfit function \n", 300 | "\n", 301 | "### 3a_1) Easier/Moderate Approach: Waveform Misfit\n", 302 | "\n", 303 | "- Waveform misfit adjoint source: $f^\\dagger (t) = s(t) - d(t)$ \n", 304 | "- You can use the following as a template for defining your waveform misfit function\n", 305 | "\n", 306 | "---------\n", 307 | "Start with the following template and try to follow steps above:\n", 308 | "```python\n", 309 | "from scipy.integrate import simps\n", 310 | "\n", 311 | "\n", 312 | "def waveform_misfit(d, s):\n", 313 | " \"\"\"\n", 314 | " Define a waveform misft adjoint source\n", 315 | " \n", 316 | " :type d: np.array\n", 317 | " :param d: data array\n", 318 | " :type s: np.array\n", 319 | " :param s: synthetic array\n", 320 | " :rtype adj_src: np.array\n", 321 | " :return adj_src: adjoint 
source array\n", 322 | " \"\"\"\n", 323 | " # follow steps 1-5 here\n", 324 | " # ...\n", 325 | " return adj_src\n", 326 | "```" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "id": "80e60445-5b5c-4bd8-93d6-d7f9ce7ac811", 333 | "metadata": {}, 334 | "outputs": [], 335 | "source": [] 336 | }, 337 | { 338 | "cell_type": "markdown", 339 | "id": "151cfecc-e6ac-424d-a7f1-57cdaa473fc1", 340 | "metadata": {}, 341 | "source": [ 342 | "--------------------\n", 343 | "### 3a_2) Advanced Approach: Define a Cross-Correlation Traveltime Misfit Function\n", 344 | "\n", 345 | "- Let's use a cross correlation traveltime misfit function to define our adjoint source \n", 346 | "- The cross correlation misfit is defined: $\\chi (\\mathbf{m}) = \\frac{1}{2} \\left[ T^{obs} - T(\\mathbf{m}) \\right] ^ 2$, \n", 347 | "- Where $T^{obs}$ is the observed traveltime, and $T(\\mathbf{m})$ is the\n", 348 | "predicted traveltime in Earth model $m$ \n", 349 | "- **Alternatively**, you can use the waveform difference objective function we say in Day 2 Section 3A \n", 350 | "\n", 351 | ">__Adjoint Source Equation:__ $f^{\\dagger}(t) = - \\left[ T^{obs} - T(\\mathbf{m}) \\right] ~ \\frac{1}{N} ~\n", 352 | " \\partial_t \\mathbf{s}(T - t, \\mathbf{m})$\n", 353 | " \n", 354 | "Complete the function below using the following steps:\n", 355 | "\n", 356 | "1) Calculate the time shift $\\left[ T^{obs} - T(\\mathbf{m})\\right]$ using [ObsPy's cross correlation function](https://docs.obspy.org/master/packages/autogen/obspy.signal.cross_correlation.correlate.html) \n", 357 | " - The correlate function returns an array of correlation values\n", 358 | " - Use [xcorr_max](https://docs.obspy.org/master/packages/autogen/obspy.signal.cross_correlation.xcorr_max.html#obspy.signal.cross_correlation.xcorr_max) to find the time shift related to the peak cross correlation\n", 359 | " - The time shift should be a **single value**\n", 360 | "2) Differentiate the synthetic 
waveform, $\\partial_t \\mathbf{s}(t, \\mathbf{m})$, using [NumPy gradient](https://numpy.org/doc/stable/reference/generated/numpy.gradient.html) \n", 361 | "3) Set the normalization factor $N$ as: $N = \\int_0^T ~ \\mathbf{s}(t, \\mathbf{m}) ~ \\partial^2_t \\mathbf{s}(t, \\mathbf{m}) dt$ \n", 362 | " - Where T is the total seismogram time\n", 363 | " - Use [SciPy's Simpson's rule](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.integrate.simps.html) to integrate \n", 364 | "4) Time reverse the differentiated synthetic waveform $\\partial_t \\mathbf{s}(T - t, \\mathbf{m})$ \n", 365 | "5) Return an adjoint source that combines parts 1 through 4 that follows the **Adjoint Source Equation** above. Remember the -1!\n", 366 | "\n", 367 | "\n", 368 | "---------\n", 369 | "Start with the following template and try to follow steps above:\n", 370 | "```python\n", 371 | "from numpy import gradient\n", 372 | "from scipy.integrate import simps\n", 373 | "from obspy.signal.cross_correlation import correlate, xcorr_max\n", 374 | "\n", 375 | "\n", 376 | "def cc_traveltime_adjsrc(d, s):\n", 377 | " \"\"\"\n", 378 | " Define a cross-correlation traveltime adjoint source\n", 379 | " \n", 380 | " :type d: np.array\n", 381 | " :param d: data array\n", 382 | " :type s: np.array\n", 383 | " :param s: synthetic array\n", 384 | " :rtype adj_src: np.array\n", 385 | " :return adj_src: adjoint source array\n", 386 | " \"\"\"\n", 387 | " # follow steps 1-5 here\n", 388 | " # ...\n", 389 | " return adj_src\n", 390 | "```" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": null, 396 | "id": "338af557-2d63-4f21-9993-2102f9da8c02", 397 | "metadata": {}, 398 | "outputs": [], 399 | "source": [] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "id": "4a33fc98-5102-4095-aba9-95bd05a7f78f", 404 | "metadata": {}, 405 | "source": [ 406 | "### 3b) Generate Adjoint Sources\n", 407 | "\n", 408 | "1) **Loop** through all available data and synthetic 
seismogram files, make sure filenames match! \n", 409 | "1) **Load** in data and synthetic seismogram for a single station (use `numpy.loadtxt`; see Day 2A; Section 2C)\n", 410 | "2) **Apply** your adjoint source function from 3A to **output** an adjoint source array \n", 411 | "3) **Save** the corresponding adjoint source in the `SEM/` directory (using `numpy.savetxt`; see Day 2A; Section 3A)\n", 412 | " - *Remember* to format the adjoint source the same as the input synthetics\n", 413 | " - *Remember* that adjoint sources must mimic the synthetic filename, but end with *.adj*\n", 414 | " \n", 415 | "*Feel free to import Python modules required to file match and loop!* " 416 | ] 417 | }, 418 | { 419 | "cell_type": "code", 420 | "execution_count": null, 421 | "id": "1138f433-d70b-49df-8450-697a17d6a68b", 422 | "metadata": {}, 423 | "outputs": [], 424 | "source": [] 425 | }, 426 | { 427 | "cell_type": "markdown", 428 | "id": "ee6f48b1-68dd-404d-ab9a-b088530cf940", 429 | "metadata": {}, 430 | "source": [ 431 | "## 3c) Optional: Plot an adjoint source\n", 432 | "\n", 433 | "- It's always useful to look at adjoint sources before running an adjoint simulation \n", 434 | "- Plot **one** adjoint source next to its corresponding data and synthetic waveforms if you can \n", 435 | "- We should be able to immediately tell if the adjoint source looks appropriate \n", 436 | "- *Remember* that we plotted adjoint sources in Day 2a Section 3b" 437 | ] 438 | }, 439 | { 440 | "cell_type": "code", 441 | "execution_count": null, 442 | "id": "3d61d228-7ed8-4899-b500-501b0c28e028", 443 | "metadata": {}, 444 | "outputs": [], 445 | "source": [] 446 | }, 447 | { 448 | "cell_type": "markdown", 449 | "id": "6c88da47-7d2d-47ae-9a20-e97e34f3c1c4", 450 | "metadata": {}, 451 | "source": [ 452 | "------------\n", 453 | "## 4) Run Adjoint Simulations\n", 454 | "\n", 455 | "- *Remember* to tell SPECFEM that this is an adjoint simulation (not a forward simulation)\n", 456 | "- *Remember* to tell 
SPECFEM to **output** kernel files in FORTRAN Binary format\n", 457 | "- Make sure that your adjoint sources are stored in the `SEM/` directory (Step 3) \n", 458 | "- Make sure your DATABASE files are available in the *OUTPUT_FILES/* directory (Step 2) \n", 459 | "- Remember you do **not** need to run the mesher, only the solver \n", 460 | "- Check the output log file and kernel files to make sure you ran an adjoint simulation (not forward)" 461 | ] 462 | }, 463 | { 464 | "cell_type": "code", 465 | "execution_count": null, 466 | "id": "5efa727f-2973-4777-a0b5-e0340343f6b6", 467 | "metadata": {}, 468 | "outputs": [], 469 | "source": [] 470 | }, 471 | { 472 | "cell_type": "markdown", 473 | "id": "7b4b6172-d231-4ecf-b392-fcd3e1c6b977", 474 | "metadata": {}, 475 | "source": [ 476 | "## 5) Smooth Kernel\n", 477 | "- **Smooth** your Vp and Vs kernels by 10km in the horizontal direction and 10 km in the vertical\n", 478 | "- Make sure that SPECFEM can find the appropriate files (kernels, database and model files are all locatable in the same directory) \n", 479 | "- Look at Day 2A Section 4 if you need help calling the smoothing executable \n", 480 | "- Remember to run the smoother with 4 MPI processors " 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "id": "90a6d185-d8c3-4549-8449-680bc4bc750b", 487 | "metadata": {}, 488 | "outputs": [], 489 | "source": [] 490 | }, 491 | { 492 | "cell_type": "markdown", 493 | "id": "6f009532-97f8-4143-9d27-21ef83cee1f1", 494 | "metadata": {}, 495 | "source": [ 496 | "-------------\n", 497 | "## 6) Visualize Results\n", 498 | "- Use SeisFlows (see Day 2a, Section 4) or NumPy + Matplotlib to visualize your kernel results \n", 499 | "- Does your kernel make sense?\n", 500 | "- Can you plot the sources and stations on top of the kernel figure?\n", 501 | "- **NOTE:** If you use SeisFlows, you'll need to import the Model tool and change the names of the kernels from 'alpha' -> 'vp' and 'beta' -> 
'vs'\n", 502 | "\n", 503 | "```python \n", 504 | "from seisflows.tools.specfem import Model\n", 505 | "# OR\n", 506 | "from seisflows.tools.specfem import read_fortran_binary\n", 507 | "```" 508 | ] 509 | }, 510 | { 511 | "cell_type": "code", 512 | "execution_count": null, 513 | "id": "fa37fb45-a1bf-4448-9db1-f809e66aff6a", 514 | "metadata": {}, 515 | "outputs": [], 516 | "source": [] 517 | } 518 | ], 519 | "metadata": { 520 | "kernelspec": { 521 | "display_name": "Python 3 (ipykernel)", 522 | "language": "python", 523 | "name": "python3" 524 | }, 525 | "language_info": { 526 | "codemirror_mode": { 527 | "name": "ipython", 528 | "version": 3 529 | }, 530 | "file_extension": ".py", 531 | "mimetype": "text/x-python", 532 | "name": "python", 533 | "nbconvert_exporter": "python", 534 | "pygments_lexer": "ipython3", 535 | "version": "3.10.6" 536 | } 537 | }, 538 | "nbformat": 4, 539 | "nbformat_minor": 5 540 | } 541 | -------------------------------------------------------------------------------- /workshops/2025-08-04_cig-tng/2_forward_simulations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "347bc0f7-6dbe-454c-a95a-8469d8efe1c2", 6 | "metadata": {}, 7 | "source": [ 8 | "# Wavefield Simulations Using SPECFEM\n", 9 | "## Notebook 2: Forward Simulations\n", 10 | "\n", 11 | "- Here we build upon material learned in Notebook 1\n", 12 | "- This notebook allows Users to play around with their own SPECFEM2D homogeneous halfspace example in an exercise \n", 13 | "- **Objective:** Familiarize Users with setting `SOURCE` and `STATION` attributes, adjusting velocity model parameters, and assessing simulation results.\n", 14 | "- These instructions should be run from inside a Docker container, using Jupyter Lab (see instructions [here](https://github.com/adjtomo/adjdocs/blob/main/readmes/docker_image_install.md)). 
\n", 15 | "\n", 16 | "-----------\n", 17 | "\n", 18 | "**Jupyter Quick Tips:**\n", 19 | "\n", 20 | "- **Run cells** one-by-one by hitting the $\blacktriangleright$ button at the top, or by hitting `Shift + Enter`\n", 21 | "- **Run all cells** by hitting the $\blacktriangleright\blacktriangleright$ button at the top, or by running `Run -> Run All Cells`\n", 22 | "- **Currently running cells** that are still processing will have a `[*]` symbol next to them\n", 23 | "- **Finished cells** will have a `[1]` symbol next to them. The number inside the brackets represents what order this cell has been run in.\n", 24 | "- Commands that start with `!` are Bash commands (i.e., commands you would run from the terminal)\n", 25 | "- Commands that start with `%` are Jupyter Magic commands.\n", 26 | "- To time a task, put a `%time` before the command (e.g., `%time ! ls`)\n" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "id": "17fed39d-3309-4957-b254-66c67ca131be", 32 | "metadata": {}, 33 | "source": [ 34 | "------\n", 35 | "## 1) Set Up a SPECFEM Working Directory\n", 36 | "\n", 37 | "- It is often desirable to run SPECFEM outside of the cloned repository, in order to keep files and outputs manageable \n", 38 | "- The trick here is that SPECFEM **only** requires 3 components for a successful simulation: `bin/`, `DATA/`, and `OUTPUT_FILES/` " 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "id": "112187f5-8bbc-400a-ae1e-2e73927b8832", 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "# Python packages required for this notebook\n", 49 | "import numpy as np\n", 50 | "import matplotlib.pyplot as plt\n", 51 | "from IPython.display import Image" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "id": "a7c386f6-0c75-4d44-b532-a1455eefd1cc", 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "# Create the correct dir. and move there\n", 62 | "! 
mkdir -p /home/scoped/work/specfem2d_workdir\n", 63 | "%cd /home/scoped/work/specfem2d_workdir\n", 64 | "\n", 65 | "# Symlink the executables, copy example DATA/, create empty OUTPUT_FILES\n", 66 | "! ln -s /home/scoped/specfem2d/bin .\n", 67 | "! cp -r /home/scoped/specfem2d/EXAMPLES/Tape2007/DATA .\n", 68 | "! mkdir OUTPUT_FILES\n", 69 | "\n", 70 | "! ls" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "id": "e6404755-a37c-40bf-aab5-377f46f57e9d", 76 | "metadata": {}, 77 | "source": [ 78 | "----------\n", 79 | "## 2) Experimental Setup: Tape et al. 2007 Example Problem\n", 80 | "\n", 81 | "- We will be working with an Example problem from the [Tape et al. 2007 GJI publication](https://academic.oup.com/gji/article/168/3/1105/929373)\n", 82 | "- [GitHub repository location of the example problem](https://github.com/geodynamics/specfem2d/tree/devel/EXAMPLES/Tape2007)\n", 83 | "- This example pre-defines two models (homogeneous halfspace, checkerboard), multiple (25) seismic sources, and multiple (132) stations \n", 84 | "- In this section we will use the homogeneous halfspace model \n" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "id": "1a6f77cb-7fe9-4904-8b67-9c4cc01fa5c2", 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "! 
ls DATA/" 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "id": "5d25b21b-88bd-49ad-a004-2546a25033c7", 100 | "metadata": {}, 101 | "source": [ 102 | "#### DATA/ Directory\n", 103 | "- `Par_file` for a homogeneous halfspace model in `Par_file_Tape2007_onerec`\n", 104 | "- `Par_file` for a checkerboard model in `Par_file_Tape2007_132rec_checker`\n", 105 | "- `Mesh` files in: *interfaces_Tape2007.dat* and the `Par_file_*`\n", 106 | "- `Model` files in: *model_velocity.dat_checker*\n", 107 | "- `Source` files in: the 25 *SOURCE_???* files\n", 108 | "- `Stations`: in the *STATIONS_checker* file" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "id": "45306979-343e-4c4e-b789-883b68f9c13d", 114 | "metadata": {}, 115 | "source": [ 116 | "### 2a) The Homogeneous Halfspace Model\n", 117 | "\n", 118 | "- The homogeneous halfspace model in this example is defined internally, in the `Par_file`\n", 119 | "- A homogeneous halfspace defines single set of properties for the entire domain \n", 120 | "- We can also use the utility `seisflows sempar velocity_model` command to look at model values " 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "id": "750276b8-24a9-48df-98f9-fdfee1ccbc68", 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "# Bash commands to look at the Par_file\n", 131 | "! head -295 DATA/Par_file_Tape2007_onerec | tail -28\n", 132 | "! echo \"...\"\n", 133 | "\n", 134 | "# SeisFlows utility function to look at the Par_file\n", 135 | "! 
seisflows sempar -P DATA/Par_file_Tape2007_onerec velocity_model" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "id": "60e680ca-b9d9-4782-8e75-12cf3f8ecc7a", 141 | "metadata": {}, 142 | "source": [ 143 | "#### Understanding the Velocity Model \n", 144 | "According to the `Par_file` comments, the model parameter values represent the following: \n", 145 | "\n", 146 | "```bash\n", 147 | "model_number 1 rho Vp Vs 0 0 QKappa Qmu 0 0 0 0 0 0\n", 148 | "1 1 2600.d0 5800.d0 3500.0d0 0 0 10.d0 10.d0 0 0 0 0 0 0\n", 149 | "```\n", 150 | "\n", 151 | "- The homogeneous halfspace model defines a region with P-wave velocity $V_p=5.8$km/s and S-wave velocity $V_s=3.5$km/s.\n", 152 | "- The halfspace is also defined by density and attenuation\n", 153 | "- We can understand the structure of the mesh by looking at the `Par_file` and the `interfaces_Tape2007.dat` file. " 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": null, 159 | "id": "d37b7b2c-f83c-4719-9ce1-ffd50656f1eb", 160 | "metadata": {}, 161 | "outputs": [], 162 | "source": [ 163 | "# Look at Mesh parameters to view the size of the domain\n", 164 | "! head -310 DATA/Par_file_Tape2007_onerec | tail -n 13" 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": null, 170 | "id": "782e1f9f-58df-4f81-b744-dbd67896f806", 171 | "metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "# Look at interface parameters \n", 175 | "! cat DATA/interfaces_Tape2007.dat" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "id": "3f1ed3bc-e50a-4942-89be-34acd56077ea", 181 | "metadata": {}, 182 | "source": [ 183 | "#### Understanding the Mesh parameters\n", 184 | "\n", 185 | "- From the files above, we can see that the X and Z dimensions of our mesh range from 0 to 480000.0m\n", 186 | "- Each dimension is separated into 40 elements (defined by `nxmin`, `nxmax` etc. 
in the `Par_file` and defined by the layer numbers in the interfaces file) \n", 187 | "- That means each spectral element in our 2D mesh spans: 480000m / 40 elements = 12000m / element (or 12km / element)\n", 188 | "- Also note that above we previously learned that the $V_s$ model has a homogeneous value of 3.5 km/s\n", 189 | "\n", 190 | "#### Visualizing the Model\n", 191 | "We can make a simple plot using Matplotlib to illustrate what our mesh might look like" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "id": "0e8a6c6c-373b-431a-a3ca-5e7e3915d5df", 198 | "metadata": {}, 199 | "outputs": [], 200 | "source": [ 201 | "def plot_homogeneous_halfspace():\n", 202 | " \"\"\"Plots a representation of the SPECFEM2D homogeneous halfspace model\"\"\"\n", 203 | " # Sets the X and Z dimensions of our mesh\n", 204 | " x = np.arange(0, 480001, 4000)\n", 205 | " z = np.arange(0, 480001, 4000)\n", 206 | " \n", 207 | " # Reformat the 1D arrays into 2D\n", 208 | " xv, zv = np.meshgrid(x, z)\n", 209 | "\n", 210 | " # Set a homogeneous value of Vs=3.5km/s \n", 211 | " vs = 3.5 * np.ones(np.shape(xv))\n", 212 | "\n", 213 | " # Plot the arrays as a homogeneous halfspace\n", 214 | " plt.tricontourf(xv.flatten(), zv.flatten(), vs.flatten(), cmap=\"seismic_r\", vmin=3.1, vmax=4.)\n", 215 | " plt.colorbar(label=\"Vs [km/s]\", format=\"%.1f\")\n", 216 | " plt.title(\"2D Homogeneous Halfspace Model\\n Vs=3.5km/s\")\n", 217 | " \n", 218 | "# Calls the function we just defined\n", 219 | "plot_homogeneous_halfspace()\n", 220 | "\n", 221 | "# Plot grid lines representing each of the spectral elements\n", 222 | "for i in range(12000, 480000, 12000):\n", 223 | " plt.axvline(i, c=\"k\", lw=0.5)\n", 224 | " plt.axhline(i, c=\"k\", lw=0.5)" 225 | ] 226 | }, 227 | { 228 | "cell_type": "markdown", 229 | "id": "7dbbfc22-a140-4160-87b0-bda887572f8f", 230 | "metadata": {}, 231 | "source": [ 232 | "### 2b) Visualizing Source-Receiver Geometry\n", 233 | "\n", 234 
| "- We can similarly plot the SOURCES and STATIONS available to see what the experiemental setup looks like \n", 235 | "- This is the same Python-based approach we took in the Day 1A notebook" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "id": "b771c1f0-d11f-4087-9206-f9e146857106", 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "# Grab coordinates from STATIONS file\n", 246 | "sta_x, sta_z = np.genfromtxt(\"DATA/STATIONS_checker\", dtype=float, usecols=[2, 3]).T\n", 247 | "\n", 248 | "# Grab coordinates from each SOURCE file\n", 249 | "ev_x, ev_z = [], []\n", 250 | "for i in range(1, 26):\n", 251 | " source_file = f\"DATA/SOURCE_{i:0>3}\"\n", 252 | " with open(source_file, \"r\") as f:\n", 253 | " lines = f.readlines()\n", 254 | " # Trying to break apart the following line\n", 255 | " # 'xs = 299367.72 # source location x in meters\\n'\n", 256 | " xs = float(lines[2].split(\"=\")[1].split(\"#\")[0].strip())\n", 257 | " zs = float(lines[3].split(\"=\")[1].split(\"#\")[0].strip())\n", 258 | " \n", 259 | " ev_x.append(xs)\n", 260 | " ev_z.append(zs)\n", 261 | " \n", 262 | "# Plot SOURCES and STATIONS together. 
Annotate names\n", 263 | "plot_homogeneous_halfspace()\n", 264 | "plt.scatter(ev_x, ev_z, c=\"y\", marker=\"*\", s=100, edgecolor=\"k\")\n", 265 | "plt.scatter(sta_x, sta_z, c=\"c\", marker=\"v\", s=20, edgecolor=\"k\")\n", 266 | "plt.title(\"SOURCE-RECEIVER GEOMETRY\")" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "id": "b3fc21b9-2d3b-448e-894a-e5b24613b492", 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [ 276 | "# Plot SOURCES next to source names\n", 277 | "plot_homogeneous_halfspace()\n", 278 | "for i, (x, z) in enumerate(zip(ev_x, ev_z)):\n", 279 | " plt.scatter(ev_x, ev_z, c=\"y\", marker=\"*\", s=100, edgecolor=\"k\")\n", 280 | " plt.text(x, z, f\"{i+1:0>3}\") # SOURCE numbering starts at 1\n", 281 | "plt.title(f\"SOURCES; N={len(ev_x)}\")" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": null, 287 | "id": "76bbb1fc-143b-465c-a602-fecf18db2577", 288 | "metadata": { 289 | "tags": [] 290 | }, 291 | "outputs": [], 292 | "source": [ 293 | "# Plot STATIONS with their names\n", 294 | "# Because STATIONS are so close, numbers will be jumbled.\n", 295 | "plot_homogeneous_halfspace()\n", 296 | "for i, (x, z) in enumerate(zip(sta_x, sta_z)):\n", 297 | " plt.scatter(x, z, c=\"c\", marker=\"v\", s=12, edgecolor=\"k\")\n", 298 | " plt.text(x, z, f\"{i:0>3}\", fontsize=9)\n", 299 | "plt.title(f\"STATIONS; N={len(sta_x)}\")" 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "id": "608f10cc-ec85-4ef3-af58-6d272595a6e7", 305 | "metadata": {}, 306 | "source": [ 307 | "- Upside-down blue triangles represent the 132 receivers in this example\n", 308 | "- The 25 yellow stars are the sources. 
" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "id": "311aa16b-ea8b-4e4d-963f-be0f2934befd", 314 | "metadata": {}, 315 | "source": [ 316 | "----------\n", 317 | "## 3) Running SPECFEM2D\n", 318 | "\n", 319 | "- Before we run the example, we need to do some organizational bookkeeping \n", 320 | "- We will choose ONE of our source files to act as our source\n", 321 | "- We will choose ONE stations to act as our station file " 322 | ] 323 | }, 324 | { 325 | "cell_type": "markdown", 326 | "id": "3a108d69-3f38-4957-84a4-a1524cf2e363", 327 | "metadata": {}, 328 | "source": [ 329 | "### 3a) Choosing a Source file\n", 330 | "\n", 331 | "- SPECFEM2D will look for a file named `SOURCE` in the *DATA/* directory to define its source" 332 | ] 333 | }, 334 | { 335 | "cell_type": "code", 336 | "execution_count": null, 337 | "id": "ec99617a-eceb-47bb-a6dd-d1250baeb9d0", 338 | "metadata": {}, 339 | "outputs": [], 340 | "source": [ 341 | "# Choose SOURCE_001 as our SOURCE File\n", 342 | "! cp -f DATA/SOURCE_001 DATA/SOURCE\n", 343 | "\n", 344 | "# > Make sure that the SOURCE name printed below matches choice\n", 345 | "! head -1 DATA/SOURCE" 346 | ] 347 | }, 348 | { 349 | "cell_type": "markdown", 350 | "id": "8128b017-4b09-471e-82e1-e870163fdc4c", 351 | "metadata": {}, 352 | "source": [ 353 | "### 3b) Choosing Stations\n", 354 | "\n", 355 | "- SPECFEM2D will look for a file named `STATIONS` in the *DATA/* directory to define its stations\n", 356 | "- The `STATIONS_checker` file defines 132 different station locations, we only want **one** \n", 357 | "- *Remember*: Individual synthetic seismograms simply extract the simulated wavefield at a location, i.e., computational expense is not tied to the number of stations. 
" 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "id": "820a30c4-995a-40f4-af31-d191df9de2a3", 364 | "metadata": {}, 365 | "outputs": [], 366 | "source": [ 367 | "# Write out a NEW stations file by choosing station numbers\n", 368 | "! head -1 DATA/STATIONS_checker > DATA/STATIONS\n", 369 | "! cat DATA/STATIONS" 370 | ] 371 | }, 372 | { 373 | "cell_type": "markdown", 374 | "id": "434e7b88-c8df-445a-8b27-4aeca090ab79", 375 | "metadata": {}, 376 | "source": [ 377 | "### 3c) Setting up the `Par_file`\n", 378 | "\n", 379 | "- SPECFEM2D will look for a file called `Par_file` in the *DATA/* directory to set its parameters\n", 380 | "- We will copy over the `Par_file_Tape2007_onerec` to define our parameter file \n", 381 | "- We need to change a few key parameters in the `Par_file` to run SPECFEM2D with desired behavior \n", 382 | "- We use the `seisflows sempar` command to make the changes but this can be done with a text editor, Bash etc. " 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": null, 388 | "id": "bd334756-1b7f-42ee-9ab7-d5dfe4885924", 389 | "metadata": {}, 390 | "outputs": [], 391 | "source": [ 392 | "# Copy in the Example parameter file\n", 393 | "! cp -f DATA/Par_file_Tape2007_onerec DATA/Par_file\n", 394 | "\n", 395 | "# Set some necessary parameters for later in the Par_file\n", 396 | "! seisflows sempar -P DATA/Par_file nproc 4\n", 397 | "! seisflows sempar -P DATA/Par_file use_existing_stations .true." 398 | ] 399 | }, 400 | { 401 | "cell_type": "markdown", 402 | "id": "33f9725e-7961-4bfb-8e03-8c6fe182558a", 403 | "metadata": {}, 404 | "source": [ 405 | "#### Understanding Parameter Changes\n", 406 | "\n", 407 | "`NPROC`: Sets the number of MPI processors to partition the mesh and run the simulation with. 
This **must** match the value following `-n` in the MPI \n", 408 | "`use_existing_STATIONS`: Use the STATIONS file we created, as opposed to the `Par_file` definition of stations " 409 | ] 410 | }, 411 | { 412 | "cell_type": "markdown", 413 | "id": "b4e4902b-4f80-4649-8310-239c09160b7c", 414 | "metadata": {}, 415 | "source": [ 416 | "### 3d) Run SPECFEM\n", 417 | "\n", 418 | "- Now that we have set the `Par_file`, the `SOURCE` and `STATIONS` file, we are able to run `xmeshfem2D` and `xspecfem2D` to run our forward simulation.\n", 419 | "- We use 4 MPI processes to run this homogeneous halfspace simulation\n", 420 | "- We expect only **one** synthetic seismogram to be output from this simulation " 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": null, 426 | "id": "a7e9f27d-b9c7-4973-94b6-d70c18bb507d", 427 | "metadata": {}, 428 | "outputs": [], 429 | "source": [ 430 | "# Ensures we're running with a clean OUTPUT directory\n", 431 | "! rm -rf OUTPUT_FILES\n", 432 | "! mkdir OUTPUT_FILES\n", 433 | "\n", 434 | "! mpirun -n 4 bin/xmeshfem2D > OUTPUT_FILES/output_meshfem.txt\n", 435 | "! mpirun -n 4 bin/xspecfem2D > OUTPUT_FILES/output_solver.txt\n", 436 | "\n", 437 | "! tail OUTPUT_FILES/output_solver.txt" 438 | ] 439 | }, 440 | { 441 | "cell_type": "markdown", 442 | "id": "137539fe-a4ea-43de-a2dc-b5b9ba0efcee", 443 | "metadata": {}, 444 | "source": [ 445 | "### 3e) Examine Output Files\n", 446 | "\n", 447 | "- Let's confirm that we have created **one** displacement seismogram\n", 448 | "- Then we'll look at the forward simulation figures to see if things make sense\n" 449 | ] 450 | }, 451 | { 452 | "cell_type": "code", 453 | "execution_count": null, 454 | "id": "12fa8c43-0427-4e3e-a362-ff8b74bc49f9", 455 | "metadata": {}, 456 | "outputs": [], 457 | "source": [ 458 | "! ls OUTPUT_FILES/\n", 459 | "! echo\n", 460 | "! 
ls OUTPUT_FILES/*.semd" 461 | ] 462 | }, 463 | { 464 | "cell_type": "code", 465 | "execution_count": null, 466 | "id": "d6c7c0be-dc4a-40e8-b90c-b11a95a62b5e", 467 | "metadata": {}, 468 | "outputs": [], 469 | "source": [ 470 | "# We can use SeisFlows to plot our waveform\n", 471 | "! seisflows plotst OUTPUT_FILES/AA.S000000.BXY.semd --savefig AA.S000000.BXY.png\n", 472 | "Image(\"AA.S000000.BXY.png\")" 473 | ] 474 | }, 475 | { 476 | "cell_type": "code", 477 | "execution_count": null, 478 | "id": "ab3583a6-d390-4935-90ae-9daea4173032", 479 | "metadata": {}, 480 | "outputs": [], 481 | "source": [ 482 | "# We can also look at the wavefield snapshots\n", 483 | "Image(\"OUTPUT_FILES/forward_image000000800.jpg\")" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": null, 489 | "id": "10c412a1-4e03-479b-84f8-5fdbdf0a2815", 490 | "metadata": {}, 491 | "outputs": [], 492 | "source": [ 493 | "# We can also look at the wavefield snapshots\n", 494 | "Image(\"OUTPUT_FILES/forward_image000001200.jpg\")" 495 | ] 496 | }, 497 | { 498 | "cell_type": "code", 499 | "execution_count": null, 500 | "id": "c9ecc03a-4c45-412e-95ac-a8fe8cedc248", 501 | "metadata": {}, 502 | "outputs": [], 503 | "source": [ 504 | "# We can also look at the wavefield snapshots\n", 505 | "Image(\"OUTPUT_FILES/forward_image000002200.jpg\")" 506 | ] 507 | }, 508 | { 509 | "cell_type": "markdown", 510 | "id": "6fcefb08-43e0-4599-b98f-96bdab8e0f24", 511 | "metadata": {}, 512 | "source": [ 513 | "---------------------\n", 514 | "## 4) Forward Simulation Exercise\n", 515 | "\n", 516 | "- Participants will now be asked to edit simulation parameters to run their own simulation \n", 517 | "- Some things that you are asked to try include:\n", 518 | " 1) Change the parameters of the homogeneous halfspace model defined in the `Par_file` \n", 519 | " 2) Define a *STATIONS* file with a uniform grid of stations to record synthetics throughout the domain\n", 520 | " 3) Choose a different source, or 
increase the energy released by the source (using the moment tensor)\n", 521 | " 4) Re-run the mesher and solver to get new synthetics\n", 522 | " 5) Analyze the new results in comparison to the old results\n", 523 | "- First we set up a working directory for you " 524 | ] 525 | }, 526 | { 527 | "cell_type": "code", 528 | "execution_count": null, 529 | "id": "6b9a5277-4a40-42aa-8fca-9b8046f0e84a", 530 | "metadata": {}, 531 | "outputs": [], 532 | "source": [ 533 | "! rm -rf /home/scoped/work/exercise_1\n", 534 | "! mkdir -p /home/scoped/work/exercise_1\n", 535 | "%cd /home/scoped/work/exercise_1\n", 536 | "\n", 537 | "# Symlink the executables, copy example DATA/, create empty OUTPUT_FILES\n", 538 | "! ln -s /home/scoped/specfem2d/bin .\n", 539 | "! cp -r /home/scoped/specfem2d/EXAMPLES/Tape2007/DATA .\n", 540 | "! mkdir OUTPUT_FILES\n", 541 | "\n", 542 | "# Set the Par_file\n", 543 | "! cp DATA/Par_file_Tape2007_onerec DATA/Par_file\n", 544 | "\n", 545 | "! ls" 546 | ] 547 | }, 548 | { 549 | "cell_type": "markdown", 550 | "id": "8cd26365-87e4-40bd-800f-868afbd69676", 551 | "metadata": { 552 | "tags": [] 553 | }, 554 | "source": [ 555 | "#### Task 1: Edit the Velocity Model\n", 556 | "- Change the velocity model parameters in the homogeneous halfspace model\n", 557 | "- Remember, the velocity model is defined in the `Par_file`\n", 558 | "- Try **increasing** seismic velocity (Vp and Vs) by 10\\%\n", 559 | "- You can use Python, Bash, `seisflows sempar` or a Text Editor to do this " 560 | ] 561 | }, 562 | { 563 | "cell_type": "code", 564 | "execution_count": null, 565 | "id": "652544c7-a0aa-4865-b55c-e47d879a99d4", 566 | "metadata": {}, 567 | "outputs": [], 568 | "source": [] 569 | }, 570 | { 571 | "cell_type": "markdown", 572 | "id": "6c61455a-91a7-49fd-a1c4-68da1576bd64", 573 | "metadata": {}, 574 | "source": [ 575 | "#### Task 2: Create a New STATIONS File\n", 576 | "- Define a STATIONS file that covers the **entire** domain with a uniform grid spacing of: \n", 
577 | " - dx = 80km \n", 578 | " - dz = 80km\n", 579 | " - x_start = 0km\n", 580 | " - z_start = 0km\n", 581 | "- **Or** Create your own station configuration. Some examples: spiral, concentric rings, dense linear array (like DAS)\n", 582 | "- You can find the X and Z dimensions of the mesh in the `Par_file` and the `interfaces` file, respectively \n", 583 | "- Use Python/NumPy to loop values, or simply write out a text file manually with the text editor\n", 584 | "- *Look* at *DATA/STATIONS_checker* for an example of how the file should look\n", 585 | "- **NOTE**: The last two columns (burial, elevation) can be set to 0 " 586 | ] 587 | }, 588 | { 589 | "cell_type": "code", 590 | "execution_count": null, 591 | "id": "f3994090-d1cd-40e4-8533-f859af509020", 592 | "metadata": {}, 593 | "outputs": [], 594 | "source": [] 595 | }, 596 | { 597 | "cell_type": "markdown", 598 | "id": "b1b6e9be-74d5-4362-9fe6-ab424f2a47c6", 599 | "metadata": {}, 600 | "source": [ 601 | "#### Task 3: Choose and edit a SOURCE file\n", 602 | "\n", 603 | "- Use one of the original sources as a template for your new source\n", 604 | "- **Set** the location of your source in the exact **middle** of your domain (or a location of your choice!) \n", 605 | "- **Set** the moment tensor (Mxx, Mzz, Mxz) of your event to make this an **explosive** source (or a mechanism of your choice!) 
\n", 606 | "- Don't change the scaling on the moment tensor " 607 | ] 608 | }, 609 | { 610 | "cell_type": "code", 611 | "execution_count": null, 612 | "id": "95d2adb6-09f0-4a01-91f9-958c3c94907c", 613 | "metadata": {}, 614 | "outputs": [], 615 | "source": [] 616 | }, 617 | { 618 | "cell_type": "markdown", 619 | "id": "c5f9886a-46bf-4834-8a5f-099c9da6aefd", 620 | "metadata": {}, 621 | "source": [ 622 | "#### Task 4: Run the Solver and Analyze Outputs\n", 623 | "\n", 624 | "- Run the mesher and solver with your new experimental setup and 4 MPI processes \n", 625 | "- **Remember** to tell SPECFEM to use your `STATIONS` file and not its internal representation of stations\n", 626 | "- **Remember** to tell SPECFEM that we want to run this with 4 processors\n", 627 | "- Look at the source images to see if your explosion makes sense\n", 628 | "- Plot waveforms output from your gridded stations" 629 | ] 630 | }, 631 | { 632 | "cell_type": "code", 633 | "execution_count": null, 634 | "id": "eb64e05c-d47a-49eb-a2a8-95df8e9189ae", 635 | "metadata": {}, 636 | "outputs": [], 637 | "source": [] 638 | } 639 | ], 640 | "metadata": { 641 | "kernelspec": { 642 | "display_name": "Python 3 (ipykernel)", 643 | "language": "python", 644 | "name": "python3" 645 | }, 646 | "language_info": { 647 | "codemirror_mode": { 648 | "name": "ipython", 649 | "version": 3 650 | }, 651 | "file_extension": ".py", 652 | "mimetype": "text/x-python", 653 | "name": "python", 654 | "nbconvert_exporter": "python", 655 | "pygments_lexer": "ipython3", 656 | "version": "3.10.14" 657 | } 658 | }, 659 | "nbformat": 4, 660 | "nbformat_minor": 5 661 | } 662 | --------------------------------------------------------------------------------