├── images
│   ├── README.md
│   ├── EI_0.png
│   ├── EI_1.png
│   ├── EI_2.png
│   ├── EI_3.png
│   ├── EI_diff_0.png
│   ├── EI_diff_1.png
│   ├── EI_diff_2.png
│   ├── EI_diff_3.png
│   ├── EI_inv_0.png
│   ├── EI_inv_1.png
│   ├── EI_inv_2.png
│   ├── EI_inv_3.png
│   ├── Scatter_0.png
│   ├── Scatter_1.png
│   ├── Scatter_2.png
│   ├── Scatter_3.png
│   ├── EI_trace_3300m.png
│   └── EI_trace_8500m.png
├── LICENSE
├── .gitignore
├── core
│   ├── functions.py
│   └── models.py
├── README.md
└── main.py

--------------------------------------------------------------------------------
/images/README.md:
--------------------------------------------------------------------------------
Results from the paper

--------------------------------------------------------------------------------
/images/*.png:
--------------------------------------------------------------------------------
[Binary figure assets: estimated/true EI sections and their absolute differences (EI_*, EI_inv_*, EI_diff_*), scatter plots (Scatter_*), and sample traces (EI_trace_*); see "Sample Results" in README.md.]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2019 olivesgatech
If you intend to use the code solely for non-commercial research purposes, you are welcome to download the source code.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# Other:
data/
checkpoints/
output_images/

--------------------------------------------------------------------------------
/core/functions.py:
--------------------------------------------------------------------------------
import numpy as np
import torch


#%% Normalization
class Normalization:
    """Z-score normalization with stored mean/std so it can be inverted."""
    def __init__(self, mean_val=None, std_val=None):
        self.mean_val = mean_val
        self.std_val = std_val

    def normalize(self, x):
        return (x - self.mean_val) / self.std_val

    def unnormalize(self, x):
        return x * self.std_val + self.mean_val


#%% Metrics
def metrics(y, x):
    # x: reference signal
    # y: estimated signal
    if torch.is_tensor(x):
        if x.is_cuda:
            x = x.cpu()
        x = x.numpy()
    if torch.is_tensor(y):
        if y.is_cuda:
            y = y.cpu()
        y = y.numpy()

    # Pearson correlation coefficient along the time (last) axis
    x_mean = np.mean(x, axis=-1, keepdims=True)
    y_mean = np.mean(y, axis=-1, keepdims=True)
    x_std = np.std(x, axis=-1, keepdims=True)
    y_std = np.std(y, axis=-1, keepdims=True)
    corr = np.mean((x - x_mean) * (y - y_mean), axis=-1, keepdims=True) / (x_std * y_std)

    # coefficient of determination (r2)
    S_tot = np.sum((x - x_mean)**2, axis=-1, keepdims=True)
    S_res = np.sum((x - y)**2, axis=-1, keepdims=True)
    r2 = 1 - S_res / S_tot

    return torch.tensor(corr), torch.tensor(r2)


def display_results(loss, property_corr, property_r2, args, header):
    property_corr = torch.mean(torch.cat(property_corr), dim=0).squeeze()
    property_r2 = torch.mean(torch.cat(property_r2), dim=0).squeeze()
    loss = torch.mean(torch.tensor(loss))
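

# Illustrative self-check (not part of the original module): metrics() should
# return correlation and r2 close to 1 when the estimate is close to the
# reference. Run this file directly to execute it.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.normal(size=(5, 4, 100))                 # reference traces
    y_est = x_ref + 0.05 * rng.normal(size=x_ref.shape)  # slightly noisy estimate
    corr, r2 = metrics(y_est, x_ref)
    print("mean corr: {:.4f} | mean r2: {:.4f}".format(corr.mean().item(), r2.mean().item()))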
    corr_text = " | ".join([u"{:d}\xb0: {:.4f}".format(args.incident_angles[i], property_corr[i].squeeze()) for i in range(len(args.incident_angles))])
    r2_text = " | ".join([u"{:d}\xb0: {:.4f}".format(args.incident_angles[i], property_r2[i].squeeze()) for i in range(len(args.incident_angles))])
    print("loss: {:.4f}\nCorrelation: {:s}\nr2 Coeff. : {:s}".format(loss, corr_text, r2_text))

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Semi-Supervised Sequence Modeling for Elastic Impedance Inversion
[Motaz Alfarraj](http://www.motaz.me) and [Ghassan AlRegib](http://www.ghassanalregib.info)

This repository contains the code and data for the paper:

M. Alfarraj and G. AlRegib, "**Semi-Supervised Sequence Modeling for Elastic Impedance Inversion**," in *Interpretation*, Aug. 2019. [[ArXiv]](https://arxiv.org/pdf/1908.07849.pdf) [[SEG Digital Library]](https://library.seg.org/doi/abs/10.1190/int-2018-0250.1)

## Abstract
Recent applications of machine learning algorithms in the seismic domain have shown great potential in different areas such as seismic inversion and interpretation. However, such algorithms rarely enforce geophysical constraints, the lack of which might lead to undesirable results. To overcome this issue, we have developed a semisupervised sequence modeling framework based on recurrent neural networks for elastic impedance inversion from multiangle seismic data. Specifically, seismic traces and elastic impedance (EI) traces are modeled as time series. Then, a neural-network-based inversion model comprising convolutional and recurrent neural layers is used to invert seismic data for EI. The proposed workflow uses well-log data to guide the inversion. In addition, it uses seismic forward modeling to regularize the training and to serve as a geophysical constraint for the inversion. The proposed workflow achieves an average correlation of 98% between the estimated and target EI using 10 well logs for training on a synthetic data set.
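As implemented in `main.py`, training combines a supervised "property" loss at the well locations with an unsupervised "seismic" loss on unlabeled traces, for which the predicted EI is pushed through the forward model and compared against the observed seismic. Below is a minimal sketch of that objective (the variable names mirror those in `main.py`; this is illustrative, not a drop-in module):

```python
from torch import nn

def semi_supervised_loss(y_pred, y, x_u_rec, x_u, alpha=1.0, beta=1.0):
    """alpha-weighted property loss (at the training wells) plus beta-weighted
    seismic loss (reconstruction of unlabeled traces via the forward model)."""
    criterion = nn.MSELoss()
    property_loss = criterion(y_pred, y)    # predicted vs. true EI at the wells
    seismic_loss = criterion(x_u_rec, x_u)  # re-synthesized vs. observed seismic
    return alpha * property_loss + beta * seismic_loss
```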
## Sample Results

#### Estimated EI Section
|Incident Angle (degrees)|Estimated EI|True EI|Absolute Difference|
|:--:|:--:|:--:|:--:|
|0|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_inv_0.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_0.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_diff_0.png)|
|10|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_inv_1.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_1.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_diff_1.png)|
|20|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_inv_2.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_2.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_diff_2.png)|
|30|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_inv_3.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_3.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_diff_3.png)|

#### Scatter plots
|0 degrees|10 degrees|20 degrees|30 degrees|
|:--:|:--:|:--:|:--:|
|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/Scatter_0.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/Scatter_1.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/Scatter_2.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/Scatter_3.png)|

#### Sample traces
|x=3300 meters|x=8500 meters|
|:--:|:--:|
|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_trace_3300m.png)|![](https://github.com/olivesgatech/Elastic-Impedance-Inversion-Using-Recurrent-Neural-Networks/blob/master/images/EI_trace_8500m.png)|

## Data
The data used in this code are from the elastic model of [Marmousi 2](https://library.seg.org/doi/abs/10.1190/1.1817083). The synthesis of the seismic data is described in the [paper](https://library.seg.org/doi/abs/10.1190/int-2018-0250.1).

The data file should be downloaded automatically when the code is run. Alternatively, you can download it manually from this [link](https://www.dropbox.com/s/66u2hbbrvc15lyp/data.npy?raw=1) and place it in the same folder as the `main.py` file.

Both the elastic impedance and the seismic data are saved in the same `data.npy` file.
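
As a quick sanity check, the dictionary stored in `data.npy` can be inspected as follows (a minimal sketch; the key names are the ones read by `main.py`, and the axis interpretation follows the shape checks there):

```python
import numpy as np

data_dic = np.load("data.npy", allow_pickle=True).item()
seismic = data_dic["synth_seismic_15db_noise"]  # (traces, incident angles, time)
ei = data_dic["elastic_impedance"]              # (traces, incident angles, time)
print(seismic.shape, ei.shape)
```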
## Running the code

### Requirements:
These are the Python libraries needed to run the code. Newer versions should work fine as well.
```
bruges==0.3.4
matplotlib==3.1.1
numpy==1.17.0
pyparsing==2.4.1.1
python-dateutil==2.8.0
torch==1.1.0
torchvision==0.3.0
tqdm==4.33.0
wget==3.2
```
Note: This code is built using [PyTorch](https://pytorch.org/) with GPU support. Follow the instructions on PyTorch's website to install it properly. The code can also be run without a GPU, but it will be much slower.

### Training and testing

To train the model using the default parameters (as reported in the paper) and test it on the full Marmousi 2 model, run:

```bash
python main.py
```

You can override any of these parameters by passing the corresponding arguments. For example, to change the number of training traces:

```bash
python main.py -num_train_wells 10
```

The full list of arguments can be found in `main.py`.
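
Each run saves its model to `checkpoints/<session_name>`, where the session name defaults to a timestamp. To skip training and evaluate a previously saved model, point `-test_checkpoint` at it (replace `<session_name>` with the actual file name):

```bash
python main.py -test_checkpoint ./checkpoints/<session_name>
```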
## Citation:

If you have found our code and data useful, we kindly ask you to cite our work:
```tex
@article{alfarraj2019semi,
  title={Semi-supervised Sequence Modeling for Elastic Impedance Inversion},
  author={Alfarraj, Motaz and AlRegib, Ghassan},
  journal={Interpretation},
  volume={7},
  number={3},
  pages={1--65},
  year={2019},
  publisher={Society of Exploration Geophysicists and American Association of Petroleum Geologists}
}
```

--------------------------------------------------------------------------------
/core/models.py:
--------------------------------------------------------------------------------
import torch
from torch.nn.functional import conv1d
from torch import nn, optim


class inverse_model(nn.Module):
    def __init__(self, in_channels, resolution_ratio=6, nonlinearity="tanh"):
        super(inverse_model, self).__init__()
        self.in_channels = in_channels
        self.resolution_ratio = resolution_ratio  # vertical scale mismatch between seismic and EI
        self.activation = nn.ReLU() if nonlinearity == "relu" else nn.Tanh()

        # Three parallel convolutions with increasing dilation extract
        # multi-scale temporal features from the seismic traces
        self.cnn1 = nn.Sequential(nn.Conv1d(in_channels=self.in_channels,
                                            out_channels=8,
                                            kernel_size=5,
                                            padding=2,
                                            dilation=1),
                                  nn.GroupNorm(num_groups=self.in_channels,
                                               num_channels=8))

        self.cnn2 = nn.Sequential(nn.Conv1d(in_channels=self.in_channels,
                                            out_channels=8,
                                            kernel_size=5,
                                            padding=6,
                                            dilation=3),
                                  nn.GroupNorm(num_groups=self.in_channels,
                                               num_channels=8))

        self.cnn3 = nn.Sequential(nn.Conv1d(in_channels=self.in_channels,
                                            out_channels=8,
                                            kernel_size=5,
                                            padding=12,
                                            dilation=6),
                                  nn.GroupNorm(num_groups=self.in_channels,
                                               num_channels=8))

        self.cnn = nn.Sequential(self.activation,
                                 nn.Conv1d(in_channels=24,
                                           out_channels=16,
                                           kernel_size=3,
                                           padding=1),
                                 nn.GroupNorm(num_groups=self.in_channels,
                                              num_channels=16),
                                 self.activation,

                                 nn.Conv1d(in_channels=16,
                                           out_channels=16,
                                           kernel_size=3,
                                           padding=1),
                                 nn.GroupNorm(num_groups=self.in_channels,
                                              num_channels=16),
                                 self.activation,

                                 nn.Conv1d(in_channels=16,
                                           out_channels=16,
                                           kernel_size=1),
                                 nn.GroupNorm(num_groups=self.in_channels,
                                              num_channels=16),
                                 self.activation)

        self.gru = nn.GRU(input_size=self.in_channels,
                          hidden_size=8,
                          num_layers=3,
                          batch_first=True,
                          bidirectional=True)

        # Two transposed convolutions (strides 3 and 2) upsample the time axis
        # by the resolution ratio of 6
        self.up = nn.Sequential(nn.ConvTranspose1d(in_channels=16,
                                                   out_channels=8,
                                                   stride=3,
                                                   kernel_size=5,
                                                   padding=1),
                                nn.GroupNorm(num_groups=self.in_channels,
                                             num_channels=8),
                                self.activation,

                                nn.ConvTranspose1d(in_channels=8,
                                                   out_channels=8,
                                                   stride=2,
                                                   kernel_size=4,
                                                   padding=1),
                                nn.GroupNorm(num_groups=self.in_channels,
                                             num_channels=8),
                                self.activation)

        self.gru_out = nn.GRU(input_size=8,
                              hidden_size=8,
                              num_layers=1,
                              batch_first=True,
                              bidirectional=True)
        self.out = nn.Linear(in_features=16, out_features=self.in_channels)

        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d):
                nn.init.xavier_uniform_(m.weight.data)
                m.bias.data.zero_()
            elif isinstance(m, nn.GroupNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

        self.optimizer = optim.Adam(self.parameters(), 0.005, weight_decay=1e-4)

    def forward(self, x):
        cnn_out1 = self.cnn1(x)
        cnn_out2 = self.cnn2(x)
        cnn_out3 = self.cnn3(x)
        cnn_out = self.cnn(torch.cat((cnn_out1, cnn_out2, cnn_out3), dim=1))

        tmp_x = x.transpose(-1, -2)
        rnn_out, _ = self.gru(tmp_x)
        rnn_out = rnn_out.transpose(-1, -2)

        x = rnn_out + cnn_out
        x = self.up(x)

        tmp_x = x.transpose(-1, -2)
        x, _ = self.gru_out(tmp_x)

        x = self.out(x)
        x = x.transpose(-1, -2)
        return x


class forward_model(nn.Module):
    def __init__(self, wavelet, resolution_ratio=6):
        super(forward_model, self).__init__()
        self.wavelet = wavelet.float() if torch.is_tensor(wavelet) else torch.tensor(wavelet).float()
        self.resolution_ratio = resolution_ratio

    def cuda(self):
        self.wavelet = self.wavelet.cuda()

    def forward(self, x):
        # Reflectivity from impedance: rc = (EI[t+1] - EI[t]) / mean(EI[t+1], EI[t])
        x_d = x[..., 1:] - x[..., :-1]
        x_a = (x[..., 1:] + x[..., :-1]) / 2
        rc = x_d / x_a

        # Convolve each angle's reflectivity with the wavelet
        for i in range(rc.shape[1]):
            tmp_synth = conv1d(rc[:, [i]], self.wavelet, padding=int(self.wavelet.shape[-1] / 2))

            if i == 0:
                synth = tmp_synth
            else:
                synth = torch.cat((synth, tmp_synth), dim=1)

        # Decimate back down to the seismic resolution
        synth = synth[..., ::self.resolution_ratio]

        return synth
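

# Illustrative shape check (not part of the original module): the inverse model
# upsamples the time axis 6x (3x then 2x via the transposed convolutions), and
# the forward model decimates it back to the seismic resolution.
if __name__ == "__main__":
    net = inverse_model(in_channels=4)
    x = torch.randn(2, 4, 470)        # (batch, incident angles, time)
    y = net(x)
    print(y.shape)                    # torch.Size([2, 4, 2820])

    wavelet = torch.randn(1, 1, 201)  # stand-in wavelet of odd length
    synth = forward_model(wavelet=wavelet)(y)
    print(synth.shape)                # torch.Size([2, 4, 470])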
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import argparse
import numpy as np
import torch
from bruges.filters import wavelets
from os.path import isdir
import os
from core.models import inverse_model, forward_model
from torch.utils import data
from core.functions import *
from torch import nn, optim
from datetime import datetime
import matplotlib.pyplot as plt
from tqdm import tqdm
import wget
import hashlib


# Manual seeds for reproducibility
random_seed = 30
torch.manual_seed(random_seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(random_seed)
np.random.seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


def get_data(args, test=False):
    # Loading data
    try:
        data_dic = np.load("data.npy", allow_pickle=True).item()

    except FileNotFoundError:
        print("Data file not found. Downloading the data..")
        url = "https://www.dropbox.com/s/66u2hbbrvc15lyp/data.npy?raw=1"
        wget.download(url, "./")

        assert hashlib.md5(open("./data.npy", "rb").read()).hexdigest() == "1fc229e7b7042829b8a834e6850ec9e5", "Data file checksum did not match. Redownload the data file"

        data_dic = np.load("data.npy", allow_pickle=True).item()

    seismic_data = data_dic["synth_seismic_15db_noise"]
    elastic_impedance_data = data_dic["elastic_impedance"]

    assert seismic_data.shape[1] == len(args.incident_angles), 'Data dimensions are not consistent with incident angles. Got {} incident angles and {} in data dimensions'.format(len(args.incident_angles), seismic_data.shape[1])
    assert seismic_data.shape[1] == elastic_impedance_data.shape[1], 'Data dimensions are not consistent. Got {} channels for seismic data and {} for elastic impedance'.format(seismic_data.shape[1], elastic_impedance_data.shape[1])

    seismic_mean = torch.tensor(np.mean(seismic_data, axis=(0, -1), keepdims=True)).float()
    seismic_std = torch.tensor(np.std(seismic_data, axis=(0, -1), keepdims=True)).float()

    elastic_mean = torch.tensor(np.mean(elastic_impedance_data, keepdims=True)).float()
    elastic_std = torch.tensor(np.std(elastic_impedance_data, keepdims=True)).float()

    seismic_data = torch.tensor(seismic_data).float()
    elastic_impedance_data = torch.tensor(elastic_impedance_data).float()

    if torch.cuda.is_available():
        seismic_data = seismic_data.cuda()
        elastic_impedance_data = elastic_impedance_data.cuda()
        seismic_mean = seismic_mean.cuda()
        seismic_std = seismic_std.cuda()
        elastic_mean = elastic_mean.cuda()
        elastic_std = elastic_std.cuda()

    seismic_normalization = Normalization(mean_val=seismic_mean,
                                          std_val=seismic_std)

    elastic_normalization = Normalization(mean_val=elastic_mean,
                                          std_val=elastic_std)

    seismic_data = seismic_normalization.normalize(seismic_data)
    elastic_impedance_data = elastic_normalization.normalize(elastic_impedance_data)

    if not test:
        # Select num_train_wells traces, evenly spaced across the section
        num_samples = seismic_data.shape[0]
        indices = np.arange(0, num_samples)
        train_indices = indices[(np.linspace(0, len(indices) - 1, args.num_train_wells)).astype(int)]

        train_data = data.Subset(data.TensorDataset(seismic_data, elastic_impedance_data), train_indices)
        train_loader = data.DataLoader(train_data, batch_size=args.batch_size, shuffle=False)

        unlabeled_loader = data.DataLoader(data.TensorDataset(seismic_data), batch_size=args.batch_size, shuffle=True)
        return train_loader, unlabeled_loader, seismic_normalization, elastic_normalization
    else:
        test_loader = data.DataLoader(data.TensorDataset(seismic_data, elastic_impedance_data), batch_size=args.batch_size, shuffle=False, drop_last=False)
        return test_loader, seismic_normalization, elastic_normalization

def get_models(args):

    if args.test_checkpoint is None:
        inverse_net = inverse_model(in_channels=len(args.incident_angles), nonlinearity=args.nonlinearity)
    else:
        try:
            inverse_net = torch.load(args.test_checkpoint)
        except FileNotFoundError:
            print("No checkpoint found at '{}' - Please specify the model for testing".format(args.test_checkpoint))
            exit()

    # Set up the forward model
    # For wavelet info, refer to https://github.com/agile-geoscience/bruges/blob/master/bruges/filters/wavelets.py
    # For simplicity, the same wavelet is used for all incident angles
    wavelet, wavelet_time = wavelets.ormsby(args.wavelet_duration, args.dt, args.f, return_t=True)
    wavelet = torch.tensor(wavelet).unsqueeze(dim=0).unsqueeze(dim=0).float()
    forward_net = forward_model(wavelet=wavelet)

    if torch.cuda.is_available():
        inverse_net.cuda()
        forward_net.cuda()

    return inverse_net, forward_net


def train(args):

    train_loader, unlabeled_loader, seismic_normalization, elastic_normalization = get_data(args)
    inverse_net, forward_net = get_models(args)
    inverse_net.train()
    criterion = nn.MSELoss()
    optimizer = inverse_net.optimizer

    # make a directory to save models if it doesn't exist
    if not isdir("checkpoints"):
        os.mkdir("checkpoints")

    print("Training the model")
    for epoch in tqdm(range(args.max_epoch)):
        train_loss = []
        train_property_corr = []
        train_property_r2 = []
        for x, y in train_loader:
            optimizer.zero_grad()

            # Supervised property loss at the training wells
            y_pred = inverse_net(x)
            property_loss = criterion(y_pred, y)
            corr, r2 = metrics(y_pred.detach(), y.detach())
            train_property_corr.append(corr)
            train_property_r2.append(r2)

            if args.beta != 0:
                # Unsupervised seismic loss: invert an unlabeled batch, then
                # forward-model the prediction and compare with the input seismic
                try:
                    x_u = next(unlabeled)[0]
                except (NameError, StopIteration):
                    # first pass, or the unlabeled iterator is exhausted
                    unlabeled = iter(unlabeled_loader)
                    x_u = next(unlabeled)[0]

                y_u_pred = inverse_net(x_u)
                y_u_pred = elastic_normalization.unnormalize(y_u_pred)
                x_u_rec = forward_net(y_u_pred)
                x_u_rec = seismic_normalization.normalize(x_u_rec)

                seismic_loss = criterion(x_u_rec, x_u)
            else:
                seismic_loss = 0

            loss = args.alpha * property_loss + args.beta * seismic_loss
            loss.backward()
            optimizer.step()

            train_loss.append(loss.detach().clone())

        torch.save(inverse_net, "./checkpoints/{}".format(args.session_name))


def test(args):
    # make a directory to save predicted sections
    if not isdir("output_images"):
        os.mkdir("output_images")

    test_loader, seismic_normalization, elastic_normalization = get_data(args, test=True)
    if args.test_checkpoint is None:
        args.test_checkpoint = "./checkpoints/{}".format(args.session_name)
    inverse_net, forward_net = get_models(args)
    criterion = nn.MSELoss(reduction="sum")
    predicted_impedance = []
    true_impedance = []
    test_property_corr = []
    test_property_r2 = []
    inverse_net.eval()
    print("\nTesting the model\n")

    with torch.no_grad():
        test_loss = []
        for x, y in test_loader:
            y_pred = inverse_net(x)
            property_loss = criterion(y_pred, y) / np.prod(y.shape)
            corr, r2 = metrics(y_pred.detach(), y.detach())
            test_property_corr.append(corr)
            test_property_r2.append(r2)

            x_rec = forward_net(elastic_normalization.unnormalize(y_pred))
            x_rec = seismic_normalization.normalize(x_rec)
            seismic_loss = criterion(x_rec, x) / np.prod(x.shape)
            loss = args.alpha * property_loss + args.beta * seismic_loss
            test_loss.append(loss.item())

            true_impedance.append(y)
            predicted_impedance.append(y_pred)

    display_results(test_loss, test_property_corr, test_property_r2, args, header="Test")

    predicted_impedance = torch.cat(predicted_impedance, dim=0)
    true_impedance = torch.cat(true_impedance, dim=0)

    predicted_impedance = elastic_normalization.unnormalize(predicted_impedance)
    true_impedance = elastic_normalization.unnormalize(true_impedance)

    if torch.cuda.is_available():
        predicted_impedance = predicted_impedance.cpu()
        true_impedance = true_impedance.cpu()

    predicted_impedance = predicted_impedance.numpy()
    true_impedance = true_impedance.numpy()

    # displaying the estimated sections
    cols = ['{}'.format(col) for col in ['Predicted EI', 'True EI', 'Absolute difference']]
    rows = [r'$\theta=$ {}$^\circ$'.format(row) for row in args.incident_angles]
    fig, axes = plt.subplots(nrows=len(args.incident_angles), ncols=3)

    for i, theta in enumerate(args.incident_angles):
        axes[i][0].imshow(predicted_impedance[:, i].T, cmap='rainbow', aspect=0.5, vmin=true_impedance.min(), vmax=true_impedance.max())
        axes[i][0].axis('off')
        axes[i][1].imshow(true_impedance[:, i].T, cmap='rainbow', aspect=0.5, vmin=true_impedance.min(), vmax=true_impedance.max())
        axes[i][1].axis('off')
        axes[i][2].imshow(abs(true_impedance[:, i].T - predicted_impedance[:, i].T), cmap='gray', aspect=0.5)
        axes[i][2].axis('off')

    pad = 10  # in points
    for ax, row in zip(axes[:, 0], rows):
        ax.annotate(row, xy=(0, 0.5), xytext=(-pad, 0), xycoords='axes fraction', textcoords='offset points', ha='right', va='center')

    for ax, col in zip(axes[0], cols):
        ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
                    xycoords='axes fraction', textcoords='offset points', ha='center', va='baseline')

    fig.tight_layout()
    plt.savefig("./output_images/{}.png".format(args.test_checkpoint.split("/")[-1]))

    plt.show()


if __name__ == '__main__':
    ## Arguments and parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('-num_train_wells', type=int, default=10, help="Number of EI traces from the model to be used for training")
    parser.add_argument('-max_epoch', type=int, default=500, help="Maximum number of training epochs")
    parser.add_argument('-batch_size', type=int, default=40, help="Batch size for training")
    parser.add_argument('-alpha', type=float, default=1, help="Weight of the property loss term")
    parser.add_argument('-beta', type=float, default=1, help="Weight of the seismic loss term")
    parser.add_argument('-test_checkpoint', type=str, action="store", default=None, help="Path to the model to test. When this flag is used, no training is performed")
    parser.add_argument('-session_name', type=str, action="store", default=datetime.now().strftime('%b%d_%H%M%S'), help="Name of the session to be used in saving the model")
    parser.add_argument('-nonlinearity', action="store", type=str, default="tanh", help="Type of nonlinearity for the CNN [tanh, relu]", choices=["tanh", "relu"])

    ## Do not change these values unless you use the code on different data and edit the code accordingly
    parser.add_argument('-dt', type=float, default=1e-3, help='Time resolution in seconds')
    parser.add_argument('-wavelet_duration', type=float, default=0.2, help='Wavelet duration in seconds')
    parser.add_argument('-f', default="5, 10, 60, 80", help="Corner frequencies of the Ormsby wavelet. For multiple frequencies, separate them with commas and no spaces, e.g., -f \"5,10,60,80\"", type=lambda x: np.squeeze(np.array(x.split(",")).astype(float)))
    parser.add_argument('-resolution_ratio', type=int, default=6, action="store", help="Resolution mismatch between seismic and EI")
    parser.add_argument('-incident_angles', type=float, default=np.arange(0, 30 + 1, 10), help="Incident angles of the input seismic and EI")
    args = parser.parse_args()

    if args.test_checkpoint is not None:
        test(args)
    else:
        train(args)
        test(args)
--------------------------------------------------------------------------------