├── models
│   ├── __init__.py
│   ├── autoencoder.py
│   ├── baseline.py
│   ├── skip.py
│   ├── skip_residual.py
│   ├── dual_map.py
│   ├── dual_mask.py
│   ├── dual_input.py
│   ├── skip_map.py
│   ├── skip_mask.py
│   ├── skip_input.py
│   ├── dual_mask_map.py
│   ├── dual_map_mask.py
│   ├── skip_mask_map.py
│   ├── skip_map_mask.py
│   └── autoencoders.py
├── .gitignore
├── scalers
│   └── minmax_scaler_zero_min134.joblib
├── colab_requirements.txt
├── requirements.txt
├── dataset
│   ├── communications.py
│   ├── map_sampler.py
│   ├── insite_map_generator.py
│   ├── map_generator.py
│   ├── gudmundson_map_generator.py
│   └── generate_dataset.py
├── data_utils.py
├── README.md
├── notebooks
│   ├── Evaluate_Model.ipynb
│   ├── Train_Model.ipynb
│   ├── Visualize_Inputs_Outputs.ipynb
│   └── Visualize_Results.ipynb
└── test_utils.py
/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pickle
2 | *.cpython-39.pyc
3 | **/.DS_Store
4 | .DS_Store
--------------------------------------------------------------------------------
/scalers/minmax_scaler_zero_min134.joblib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nikitalokhmachev-ai/radio-map-estimation-public/HEAD/scalers/minmax_scaler_zero_min134.joblib
--------------------------------------------------------------------------------
/colab_requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | wandb
4 | joblib
5 | plotly
6 | matplotlib
7 | scipy
8 | scikit-learn
9 | pandas
10 | opencv-python
11 | ipykernel
12 | kaleido
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | wandb==0.14.2
4 | joblib==1.2.0
5 | plotly==5.15.0
6 | matplotlib==3.7.1
7 | scipy==1.11.3
8 | scikit-learn==1.0.2
9 | pandas==1.5.3
10 | opencv-python==4.8.0.76
11 | ipykernel==5.5.6
12 | kaleido
--------------------------------------------------------------------------------
/dataset/communications.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Yves Teganya and Daniel Romero
2 | #
3 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
4 | # arXiv preprint arXiv:2005.05964, 2020.
5 | #
6 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
7 |
8 | import numpy as np
9 |
10 |
11 | def dbm_to_natural(x_variable_dbm):
12 | return db_to_natural(dbm_to_db(x_variable_dbm))
13 |
14 |
15 | def natural_to_dbm(x_variable_nat):
16 | return db_to_dbm(natural_to_db(x_variable_nat))
17 |
18 |
19 | def db_to_natural(x_variable_db):
20 | """
21 | Arguments:
22 | x_variable_db: must be an np array
23 | """
24 | return 10 ** (x_variable_db / 10)
25 |
26 |
27 | def natural_to_db(x_variable_nat):
28 | """
29 | Arguments:
30 | x_variable_nat: must be an np array
31 | """
32 | return 10 * np.log10(x_variable_nat)
33 |
34 |
35 | def db_to_dbm(x_variable_db):
36 | """
37 | Arguments:
38 | x_variable_db: must be an np array
39 | """
40 | return x_variable_db + 30
41 |
42 |
43 | def dbm_to_db(x_variable_dbm):
44 | """
45 | Arguments:
46 | x_variable_dbm: must be an np array
47 | """
48 | return x_variable_dbm - 30
49 |
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
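The helpers above chain simple unit conversions: dBm to dB subtracts 30, and dB to natural units is 10^(x/10). A minimal round-trip sketch (the sample values are illustrative, not from the repository):

```python
import numpy as np
from dataset.communications import dbm_to_natural, natural_to_dbm

p_dbm = np.array([-30.0, 0.0, 20.0])   # hypothetical powers in dBm
p_nat = dbm_to_natural(p_dbm)          # watts: 10 ** ((p_dbm - 30) / 10)
assert np.allclose(natural_to_dbm(p_nat), p_dbm)  # round trip recovers the input
```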
/models/autoencoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
6 |
7 | class Autoencoder(torch.nn.Module):
8 | def __init__(self):
9 | super().__init__()
10 |
11 |         self.encoder = None  # subclasses assign a concrete Encoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
12 |         self.decoder = None  # subclasses assign a concrete Decoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
13 |
14 | def forward(self, x):
15 | x = self.encoder(x)
16 | x = self.decoder(x)
17 | return x
18 |
19 | def fit(self, train_dl, optimizer, epochs=100, loss='mse'):
20 | for epoch in range(epochs):
21 | running_loss = 0.0
22 | for i, data in enumerate(train_dl):
23 | optimizer.zero_grad()
24 | t_x_point, t_y_point, t_y_mask, t_channel_pow, file_path, j = data
25 | t_x_point, t_y_point, t_y_mask = t_x_point.to(torch.float32).to(device), t_y_point.flatten(1).to(device), t_y_mask.flatten(1).to(device)
26 | t_channel_pow = t_channel_pow.flatten(1).to(device)
27 | t_y_point_pred = self.forward(t_x_point).to(torch.float64)
28 | loss_ = torch.nn.functional.mse_loss(t_y_point * t_y_mask, t_y_point_pred * t_y_mask).to(torch.float32)
29 | if loss == 'rmse':
30 | loss_ = torch.sqrt(loss_)
31 | loss_.backward()
32 | optimizer.step()
33 |
34 | running_loss += loss_.item()
35 | print(f'{loss_}, [{epoch + 1}, {i + 1:5d}] loss: {running_loss/(i+1)}')
36 |
37 | return running_loss / (i+1)
38 |
39 |
40 | def evaluate(self, test_dl, scaler):
41 | losses = []
42 | with torch.no_grad():
43 | for i, data in enumerate(test_dl):
44 | t_x_point, t_y_point, t_y_mask, t_channel_pow, file_path, j = data
45 | t_x_point, t_y_point, t_y_mask = t_x_point.to(torch.float32).to(device), t_y_point.flatten(1).to(device), t_y_mask.flatten(1).to(device)
46 | t_channel_pow = t_channel_pow.flatten(1).to(device).detach().cpu().numpy()
47 | t_y_point_pred = self.forward(t_x_point).detach().cpu().numpy()
48 | building_mask = (t_x_point[:,1,:,:].flatten(1) == -1).to(torch.float64).detach().cpu().numpy()
49 | loss = (np.linalg.norm((1 - building_mask) * (scaler.reverse_transform(t_channel_pow) - scaler.reverse_transform(t_y_point_pred)), axis=1) ** 2 / np.sum(building_mask == 0, axis=1)).tolist()
50 | losses += loss
51 |
52 |             print(f'{np.sqrt(np.mean(losses))}')
53 |
54 | return torch.sqrt(torch.Tensor(losses).mean())
55 |
56 | def fit_wandb(self, train_dl, test_dl, scaler, optimizer, project_name, run_name, epochs=100, loss='mse'):
57 | import wandb
58 | wandb.init(project=project_name, name=run_name)
59 | for epoch in range(epochs):
60 | train_loss = self.fit(train_dl, optimizer, epochs=1, loss=loss)
61 | test_loss = self.evaluate(test_dl, scaler)
62 | wandb.log({'train_loss': train_loss, 'test_loss': test_loss})
63 |
64 | def save_model(self, out_path):
65 | torch.save(self, out_path)
--------------------------------------------------------------------------------
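`Autoencoder` deliberately leaves `self.encoder` and `self.decoder` as `None`; per the README, the classes in models/autoencoders.py assign concrete encoder/decoder pairs. A minimal sketch of such a subclass, assuming the baseline `Encoder`/`Decoder` shown in models/baseline.py below (the channel sizes here are hypothetical):

```python
import torch
from models.autoencoder import Autoencoder, device
from models.baseline import Encoder, Decoder

class BaselineAutoencoder(Autoencoder):
    # illustrative wiring; the real subclasses live in models/autoencoders.py
    def __init__(self, enc_in=2, enc_out=27, n_dim=27, leaky_relu_alpha=0.3):
        super().__init__()
        self.encoder = Encoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
        self.decoder = Decoder(enc_out, 1, n_dim, leaky_relu_alpha=leaky_relu_alpha)

model = BaselineAutoencoder().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# model.fit(train_dl, optimizer, epochs=10, loss='mse')  # train_dl built as in data_utils.py
```

Note that `fit` computes the MSE only where `t_y_mask` is set, while `evaluate` reports error in the unscaled domain via `scaler.reverse_transform`, excluding building pixels.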
/data_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from sklearn.preprocessing import StandardScaler, MinMaxScaler
4 |
5 | class MapDataset(torch.utils.data.IterableDataset):
6 | def __init__(self, pickles, scaler=None, building_value=None, unsampled_value=None, sampled_value=None):
7 | super().__init__()
8 | self.pickles = pickles
9 | self.scaler = scaler
10 | self.building_value = building_value
11 | self.unsampled_value = unsampled_value
12 | self.sampled_value = sampled_value
13 |
14 | def __iter__(self):
15 | yield from file_path_generator(self.pickles, self.scaler, self.building_value, self.unsampled_value, self.sampled_value)
16 |
17 | def file_path_generator(pickles, scaler, building_value=None, unsampled_value=None, sampled_value=None):
18 | for file_path in pickles:
19 | t_x_points, t_y_points, t_y_masks, t_channel_pows = load_numpy_array(
20 | file_path, scaler, building_value=building_value, unsampled_value=unsampled_value, sampled_value=sampled_value)
21 | for i, (t_x_point, t_y_point, t_y_mask, t_channel_pow) in enumerate(zip(t_x_points, t_y_points, t_y_masks, t_channel_pows)):
22 | yield t_x_point, t_y_point, t_y_mask, t_channel_pow, file_path, i
23 |
24 |
25 | def load_numpy_array(file_path, scaler, building_value=None, unsampled_value=None, sampled_value=None):
26 | t_x_points, t_channel_pows, t_y_masks = np.load(file_path, allow_pickle=True)
27 | t_y_points = t_channel_pows * t_y_masks
28 |
29 | if scaler:
30 | t_x_mask = t_x_points[:,1,:,:] == 1
31 | t_x_points[:,0,:,:] = scaler.transform(t_x_points[:,0,:,:]) * t_x_mask
32 | t_channel_pows = scaler.transform(t_channel_pows)
33 | t_y_points = scaler.transform(t_y_points)
34 |
35 | if building_value:
36 | t_x_points[:,0][t_x_points[:,1] == -1] = building_value
37 |
38 | if unsampled_value:
39 | t_x_points[:,0][t_x_points[:,1] == 0] = unsampled_value
40 |
41 | if sampled_value:
42 | t_x_points[:,0][t_x_points[:,1] == 1] += sampled_value
43 |
44 | return t_x_points, t_y_points, t_y_masks, t_channel_pows
45 |
46 |
47 | class Scaler():
48 | def __init__(self, scaler='minmax', bounds=(0, 1), min_trunc=None, max_trunc=None):
49 | self.scaler = scaler
50 | self.bounds = bounds
51 | self.min_trunc = min_trunc
52 | self.max_trunc = max_trunc
53 | if scaler == 'minmax':
54 | self.sc = MinMaxScaler(feature_range=self.bounds)
55 | else:
56 | self.sc = StandardScaler()
57 |
58 | def fit(self, data):
59 | data = data.flatten().reshape(-1,1)
60 | self.sc.partial_fit(data)
61 | if self.min_trunc:
62 | if self.sc.data_min_ < self.min_trunc:
63 | self.sc.data_min_ = self.min_trunc
64 | if self.max_trunc:
65 | if self.sc.data_max_ > self.max_trunc:
66 | self.sc.data_max_ = self.max_trunc
67 |
68 | def transform(self, data):
69 | data_shape = data.shape
70 | data = data.flatten().reshape(-1,1)
71 | if self.min_trunc:
72 | data[data < self.min_trunc] = self.min_trunc
73 | if self.max_trunc:
74 | data[data > self.max_trunc] = self.max_trunc
75 | data = self.sc.transform(data)
76 | data = data.reshape(data_shape)
77 | return data
78 |
79 | def reverse_transform(self, data):
80 | data_shape = data.shape
81 | data = data.flatten().reshape(-1,1)
82 | data = self.sc.inverse_transform(data)
83 | data = data.reshape(data_shape)
84 | return data
85 |
86 | def train_scaler(scaler, pickles):
87 | gen = file_path_generator(pickles, scaler=None)
88 | for t_x_point, t_y_point, t_y_mask, t_channel_pow, file_path, i in gen:
89 | scaler.fit(t_channel_pow)
--------------------------------------------------------------------------------
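A minimal end-to-end sketch of the pieces above, assuming `pickles` is a list of paths to the downloaded training files (the glob pattern is illustrative):

```python
import glob
import torch
from data_utils import MapDataset, Scaler, train_scaler

pickles = sorted(glob.glob('train/*.npy'))     # hypothetical location of the training arrays
scaler = Scaler(scaler='minmax', bounds=(0, 1))
train_scaler(scaler, pickles)                  # one partial_fit pass over all t_channel_pow

# Alternatively, the pretrained scaler shipped in scalers/ could be loaded:
# import joblib; scaler = joblib.load('scalers/minmax_scaler_zero_min134.joblib')

dataset = MapDataset(pickles, scaler=scaler)   # IterableDataset yielding one sample at a time
train_dl = torch.utils.data.DataLoader(dataset, batch_size=64)
t_x_point, t_y_point, t_y_mask, t_channel_pow, file_path, i = next(iter(train_dl))
```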
/README.md:
--------------------------------------------------------------------------------
1 | # Radio Map Estimation
2 |
3 | This repository is the official implementation of all the models mentioned in the paper [Radio Map Estimation with Deep Dual Path Autoencoders and Skip Connection Learning](https://ieeexplore.ieee.org/document/10293748), published and presented at the 2023 IEEE PIMRC Conference.
4 |
5 | ## Project File Structure
6 |
7 | - data_utils.py contains all the data-related utilities. Specifically, it defines the PyTorch implementation of the radio map dataset and the data scaler used in the paper.
8 | - models contains all the models mentioned in the paper. The autoencoder.py file contains an abstract autoencoder class implementation. The classes in autoencoders.py inherit the hyperparameters and methods of the autoencoder class to define all the architectures of the paper. The rest of the files correspond to specific autoencoder architectures and contain implementations of encoders and decoders for each of them.
9 | - scalers contains all the pretrained scalers used in the paper. They are needed to reproduce the experiments.
10 | - notebooks contains all the notebooks required to reproduce the results of the paper.
11 | - dataset contains all the files needed to generate a new dataset.
12 |
13 | ## Reproducibility
14 |
15 | To reproduce the paper's results, follow this procedure:
16 |
17 | 1. Clone the current repository.
18 |
19 | 2. Install the required dependencies. Our original code ran on Python 3.9 or 3.10 with the libraries specified in requirements.txt. If you have Python 3.9 or 3.10, you can create a virtual environment, e.g. using [venv](https://docs.python.org/3/library/venv.html), and install the required dependencies as shown below:
20 | ```
21 | python -m venv radio_map_estimation
22 | source radio_map_estimation/bin/activate
23 | pip install -r repository/requirements.txt
24 | ```
25 | Here, `radio_map_estimation` and `repository` stand in for the full paths to where you want to save the new virtual environment and where you saved the cloned repository, respectively.
26 |
27 | If you are using a later version of Python or running the notebooks on Google Colab, you may need updated versions of these dependencies. In that case, use `colab_requirements.txt` to install the latest version of each library, or edit `requirements.txt` to pin a specific version of a given library.
28 |
29 | 3. Download the [Train](https://drive.google.com/file/d/1-z1gWOLLjD9O0K0whbCA7DsUJt64x6iq/view?usp=sharing), [Validation](https://drive.google.com/file/d/1-ONtHgLgNkI-kPAkdsta0DVkPfjS73js/view?usp=sharing), and [Test](https://drive.google.com/file/d/1KjCLM6DFGDwiIk_DIr005NsEeTbgRoXn/view?usp=sharing) datasets. The downloaded files are tarred and zipped and take about 2 GB, 208 MB, and 215 MB respectively. Their unzipped contents are about 7.36 GB, 819 MB, and 819 MB respectively.
30 |
31 | 4. To train a model, run Train_Model.ipynb in notebooks and follow the instructions there.
32 |
33 | 5. After training, run Evaluate_Model.ipynb in notebooks. Before execution, specify the path to the trained model.
34 |
35 | 6. To visualize the results, run Visualize_Results.ipynb in notebooks.
36 |
37 | 7. Optionally, to visualize model inputs and outputs, run Visualize_Inputs_Outputs.ipynb in notebooks.
38 |
39 | The paths in every notebook have to be changed according to your file structure.
40 |
41 | ## References
42 |
43 | Code used to generate the dataset, as well as the underlying data itself, was copied and adapted from "Deep Completion Autoencoders for Radio Map Estimation" by Yves Teganya and Daniel Romero. Copied Python files feature the following copyright / citation at the top of the file:
44 |
45 | ```
46 | # Copyright (c) Yves Teganya and Daniel Romero
47 | #
48 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
49 | # arXiv preprint arXiv:2005.05964, 2020.
50 | #
51 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
52 | ```
53 |
54 | ## Reproducibility Disclaimer
55 |
56 | Due to the stochastic nature of the data generation process, as well as differences in GPU architectures, your results might vary slightly.
57 |
--------------------------------------------------------------------------------
/models/baseline.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x):
33 | x = self.leaky_relu(self.conv2d(x))
34 | x = self.leaky_relu(self.conv2d_1(x))
35 | x = self.leaky_relu(self.conv2d_2(x))
36 | x = self.average_pooling2d(x)
37 | x = self.leaky_relu(self.conv2d_3(x))
38 | x = self.leaky_relu(self.conv2d_4(x))
39 | x = self.leaky_relu(self.conv2d_5(x))
40 | x = self.average_pooling2d_1(x)
41 | x = self.leaky_relu(self.conv2d_6(x))
42 | x = self.leaky_relu(self.conv2d_7(x))
43 | x = self.leaky_relu(self.conv2d_8(x))
44 | x = self.average_pooling2d_2(x)
45 | x = self.leaky_relu(self.mu(x))
46 | return x
47 |
48 |
49 | class Decoder(nn.Module):
50 | def db_to_natural(self, x):
51 | return 10 ** (x / 10)
52 |
53 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
54 | super(Decoder, self).__init__()
55 |
56 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
57 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in, n_dim, kernel_size=(3,3), stride=1, padding=1)
58 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
59 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
60 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
61 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
62 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
63 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
64 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
65 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim, dec_out, kernel_size=(3,3), stride=1, padding=1)
66 |
67 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
68 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
69 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
70 |
71 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
72 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
73 | self.log_10 = torch.log(torch.tensor([10])).to(device)
74 |
75 | self._init_weights()
76 |
77 | def _init_weights(self):
78 | for m in self.modules():
79 | if isinstance(m, nn.ConvTranspose2d):
80 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
81 |
82 | def forward(self, x):
83 | x = self.leaky_relu(self.conv2d_transpose(x))
84 | x = self.up_sampling2d(x)
85 | x = self.leaky_relu(self.conv2d_transpose_1(x))
86 | x = self.leaky_relu(self.conv2d_transpose_2(x))
87 | x = self.leaky_relu(self.conv2d_transpose_3(x))
88 | x = self.up_sampling2d_1(x)
89 | x = self.leaky_relu(self.conv2d_transpose_4(x))
90 | x = self.leaky_relu(self.conv2d_transpose_5(x))
91 | x = self.leaky_relu(self.conv2d_transpose_6(x))
92 | x = self.up_sampling2d_2(x)
93 | x = self.leaky_relu(self.conv2d_transpose_7(x))
94 | x = self.leaky_relu(self.conv2d_transpose_8(x))
95 | x = self.leaky_relu(self.conv2d_transpose_9(x))
96 | x = torch.flatten(x, start_dim=1)
97 | return x
--------------------------------------------------------------------------------
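The encoder halves the spatial resolution three times (2×2 average pooling) and the decoder restores it with three 2× bilinear upsamplings before flattening, so input maps must have sides divisible by 8. A quick shape check, assuming 2-channel 32×32 inputs (both the map size and the channel counts are assumptions):

```python
import torch
from models.baseline import Encoder, Decoder

enc = Encoder(enc_in=2, enc_out=27, n_dim=27)
dec = Decoder(dec_in=27, dec_out=1, n_dim=27)

x = torch.randn(4, 2, 32, 32)   # batch of 4 maps: sampled power + mask channels
z = enc(x)                      # -> (4, 27, 4, 4), since 32 / 2**3 = 4
y = dec(z)                      # -> (4, 1024), a flattened 32x32 map per sample
print(z.shape, y.shape)
```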
/models/skip.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
12 | self.conv2d_3 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
13 | self.conv2d_4 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_5 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_6 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
16 | self.conv2d_7 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
17 | self.conv2d_8 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
18 | self.mu = nn.Conv2d(n_dim, enc_out, kernel_size=(3, 3), padding='same')
19 |
20 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
22 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
23 |
24 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
25 |
26 | self._init_weights()
27 |
28 | def _init_weights(self):
29 | for m in self.modules():
30 | if isinstance(m, nn.Conv2d):
31 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
32 |
33 | def forward(self, x):
34 | x = self.leaky_relu(self.conv2d(x))
35 | x = self.leaky_relu(self.conv2d_1(x))
36 | x = self.leaky_relu(self.conv2d_2(x))
37 | skip1 = x
38 | x = self.average_pooling2d(x)
39 | x = self.leaky_relu(self.conv2d_3(x))
40 | x = self.leaky_relu(self.conv2d_4(x))
41 | x = self.leaky_relu(self.conv2d_5(x))
42 | skip2 = x
43 | x = self.average_pooling2d_1(x)
44 | x = self.leaky_relu(self.conv2d_6(x))
45 | x = self.leaky_relu(self.conv2d_7(x))
46 | x = self.leaky_relu(self.conv2d_8(x))
47 | skip3 = x
48 | x = self.average_pooling2d_2(x)
49 | x = self.leaky_relu(self.mu(x))
50 | return x, skip1, skip2, skip3
51 |
52 |
53 | class Decoder(nn.Module):
54 | def db_to_natural(self, x):
55 | return 10 ** (x / 10)
56 |
57 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
58 | super(Decoder, self).__init__()
59 |
60 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
61 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
62 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
63 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
64 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
65 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
66 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
67 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
68 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
69 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim, dec_out, kernel_size=(3,3), stride=1, padding=1)
70 |
71 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
72 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
73 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
74 |
75 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
76 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
77 | self.log_10 = torch.log(torch.tensor([10])).to(device)
78 |
79 | self._init_weights()
80 |
81 | def _init_weights(self):
82 | for m in self.modules():
83 | if isinstance(m, nn.ConvTranspose2d):
84 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
85 |
86 | def forward(self, x, skip1, skip2, skip3):
87 | x = self.leaky_relu(self.conv2d_transpose(x))
88 | x = self.up_sampling2d(x)
89 | x = torch.cat((x, skip3), dim=1)
90 | x = self.leaky_relu(self.conv2d_transpose_1(x))
91 | x = self.leaky_relu(self.conv2d_transpose_2(x))
92 | x = self.leaky_relu(self.conv2d_transpose_3(x))
93 | x = self.up_sampling2d_1(x)
94 | x = torch.cat((x, skip2), dim=1)
95 | x = self.leaky_relu(self.conv2d_transpose_4(x))
96 | x = self.leaky_relu(self.conv2d_transpose_5(x))
97 | x = self.leaky_relu(self.conv2d_transpose_6(x))
98 | x = self.up_sampling2d_2(x)
99 | x = torch.cat((x, skip1), dim=1)
100 | x = self.leaky_relu(self.conv2d_transpose_7(x))
101 | x = self.leaky_relu(self.conv2d_transpose_8(x))
102 | x = self.leaky_relu(self.conv2d_transpose_9(x))
103 | x = torch.flatten(x, start_dim=1)
104 | return x
--------------------------------------------------------------------------------
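Unlike the baseline, this encoder also returns the three pre-pooling activations, and the decoder concatenates each one back in at the matching resolution (U-Net-style skip connections), which is why `conv2d_transpose_1`, `_4`, and `_7` take widened inputs. A sketch with the same hypothetical sizes as above:

```python
import torch
from models.skip import Encoder, Decoder

enc = Encoder(enc_in=2, enc_out=27, n_dim=27)
dec = Decoder(dec_in=27, dec_out=1, n_dim=27)

x = torch.randn(4, 2, 32, 32)
z, skip1, skip2, skip3 = enc(x)   # skips cached at 32x32, 16x16, and 8x8
y = dec(z, skip1, skip2, skip3)   # -> (4, 1024)
```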
/models/skip_residual.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(2 * n_dim, n_dim, kernel_size=(3, 3), padding='same')
12 | self.conv2d_3 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
13 | self.conv2d_4 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_5 = nn.Conv2d(2 * n_dim, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_6 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
16 | self.conv2d_7 = nn.Conv2d(n_dim, n_dim, kernel_size=(3, 3), padding='same')
17 | self.conv2d_8 = nn.Conv2d(2 * n_dim, n_dim, kernel_size=(3, 3), padding='same')
18 | self.mu = nn.Conv2d(n_dim, enc_out, kernel_size=(3, 3), padding='same')
19 |
20 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
22 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
23 |
24 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
25 |
26 | self._init_weights()
27 |
28 | def _init_weights(self):
29 | for m in self.modules():
30 | if isinstance(m, nn.Conv2d):
31 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
32 |
33 | def forward(self, x):
34 | x = self.leaky_relu(self.conv2d(x))
35 | res_skip = x
36 | x = self.leaky_relu(self.conv2d_1(x))
37 | x = self.leaky_relu(self.conv2d_2(torch.cat([x, res_skip], 1)))
38 | u_skip1 = x
39 | x = self.average_pooling2d(x)
40 | res_skip = x
41 | x = self.leaky_relu(self.conv2d_3(x))
42 | x = self.leaky_relu(self.conv2d_4(x))
43 | x = self.leaky_relu(self.conv2d_5(torch.cat([x, res_skip], 1)))
44 | u_skip2 = x
45 | x = self.average_pooling2d_1(x)
46 | res_skip = x
47 | x = self.leaky_relu(self.conv2d_6(x))
48 | x = self.leaky_relu(self.conv2d_7(x))
49 | x = self.leaky_relu(self.conv2d_8(torch.cat([x, res_skip], 1)))
50 | u_skip3 = x
51 | x = self.average_pooling2d_2(x)
52 | x = self.leaky_relu(self.mu(x))
53 | return x, u_skip1, u_skip2, u_skip3
54 |
55 |
56 | class Decoder(nn.Module):
57 | def db_to_natural(self, x):
58 | return 10 ** (x / 10)
59 |
60 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
61 | super(Decoder, self).__init__()
62 |
63 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
64 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
65 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
66 | self.conv2d_transpose_3 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
67 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
68 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
69 | self.conv2d_transpose_6 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
70 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
71 | self.conv2d_transpose_8 = nn.ConvTranspose2d(2 * n_dim, n_dim, kernel_size=(3,3), stride=1, padding=1)
72 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim, dec_out, kernel_size=(3,3), stride=1, padding=1)
73 |
74 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
75 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
76 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
77 |
78 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
79 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
80 | self.log_10 = torch.log(torch.tensor([10])).to(device)
81 |
82 | self._init_weights()
83 |
84 | def _init_weights(self):
85 | for m in self.modules():
86 | if isinstance(m, nn.ConvTranspose2d):
87 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
88 |
89 | def forward(self, x, u_skip1, u_skip2, u_skip3):
90 | x = self.leaky_relu(self.conv2d_transpose(x))
91 | x = self.up_sampling2d(x)
92 | x = torch.cat((x, u_skip3), dim=1)
93 | x = self.leaky_relu(self.conv2d_transpose_1(x))
94 | res_skip = x
95 | x = self.leaky_relu(self.conv2d_transpose_2(x))
96 | x = self.leaky_relu(self.conv2d_transpose_3(torch.cat([x, res_skip], 1)))
97 | x = self.up_sampling2d_1(x)
98 | res_skip = x
99 | x = torch.cat((x, u_skip2), dim=1)
100 | x = self.leaky_relu(self.conv2d_transpose_4(x))
101 | x = self.leaky_relu(self.conv2d_transpose_5(x))
102 | x = self.leaky_relu(self.conv2d_transpose_6(torch.cat([x, res_skip], 1)))
103 | x = self.up_sampling2d_2(x)
104 | res_skip = x
105 | x = torch.cat((x, u_skip1), dim=1)
106 | x = self.leaky_relu(self.conv2d_transpose_7(x))
107 | x = self.leaky_relu(self.conv2d_transpose_8(torch.cat([x, res_skip], 1)))
108 | x = self.leaky_relu(self.conv2d_transpose_9(x))
109 | x = torch.flatten(x, start_dim=1)
110 | return x
--------------------------------------------------------------------------------
/models/dual_map.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, map_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, map_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | x = self.average_pooling2d(x)
41 | map1 = map_
42 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
43 |
44 | x = torch.cat([x, map_], 1)
45 | x = self.leaky_relu(self.conv2d_3(x))
46 | x = torch.cat([x, map_], 1)
47 | x = self.leaky_relu(self.conv2d_4(x))
48 | x = torch.cat([x, map_], 1)
49 | x = self.leaky_relu(self.conv2d_5(x))
50 | x = self.average_pooling2d_1(x)
51 | map2 = map_
52 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
53 |
54 | x = torch.cat([x, map_], 1)
55 | x = self.leaky_relu(self.conv2d_6(x))
56 | x = torch.cat([x, map_], 1)
57 | x = self.leaky_relu(self.conv2d_7(x))
58 | x = torch.cat([x, map_], 1)
59 | x = self.leaky_relu(self.conv2d_8(x))
60 | x = self.average_pooling2d_2(x)
61 | map3 = map_
62 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
63 |
64 | x = torch.cat([x, map_], 1)
65 | x = self.leaky_relu(self.mu(x))
66 | return x, map1, map2, map3
67 |
68 |
69 | class Decoder(nn.Module):
70 | def db_to_natural(self, x):
71 | return 10 ** (x / 10)
72 |
73 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
74 | super(Decoder, self).__init__()
75 |
76 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
77 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
78 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
79 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
80 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
81 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
82 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
86 |
87 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
88 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
89 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
90 |
91 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
92 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
93 | self.log_10 = torch.log(torch.tensor([10])).to(device)
94 |
95 | self._init_weights()
96 |
97 | def _init_weights(self):
98 | for m in self.modules():
99 | if isinstance(m, nn.ConvTranspose2d):
100 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
101 |
102 | def forward(self, x, map1, map2, map3):
103 | x = self.leaky_relu(self.conv2d_transpose(x))
104 | x = self.up_sampling2d(x)
105 | x = torch.cat((x, map3), dim=1)
106 | x = self.leaky_relu(self.conv2d_transpose_1(x))
107 | x = torch.cat((x, map3), dim=1)
108 | x = self.leaky_relu(self.conv2d_transpose_2(x))
109 | x = torch.cat((x, map3), dim=1)
110 | x = self.leaky_relu(self.conv2d_transpose_3(x))
111 | x = self.up_sampling2d_1(x)
112 | x = torch.cat((x, map2), dim=1)
113 | x = self.leaky_relu(self.conv2d_transpose_4(x))
114 | x = torch.cat((x, map2), dim=1)
115 | x = self.leaky_relu(self.conv2d_transpose_5(x))
116 | x = torch.cat((x, map2), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_6(x))
118 | x = self.up_sampling2d_2(x)
119 | x = torch.cat((x, map1), dim=1)
120 | x = self.leaky_relu(self.conv2d_transpose_7(x))
121 | x = torch.cat((x, map1), dim=1)
122 | x = self.leaky_relu(self.conv2d_transpose_8(x))
123 | x = torch.cat((x, map1), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_9(x))
125 | x = torch.flatten(x, start_dim=1)
126 | return x
--------------------------------------------------------------------------------
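In the dual-path variants the conditioning signal is not a learned feature map but a raw input channel, here the sampled power map `x_[:,0]`, concatenated before every convolution and downsampled via `interpolate(..., scale_factor=(0.5, 0.5))` so that it tracks the pooled resolution; the decoder reuses the three cached copies. The same hypothetical shape check:

```python
import torch
from models.dual_map import Encoder, Decoder

enc = Encoder(enc_in=2, enc_out=27, n_dim=27)
dec = Decoder(dec_in=27, dec_out=1, n_dim=27)

x = torch.randn(4, 2, 32, 32)
z, map1, map2, map3 = enc(x)   # raw map cached at 32x32, 16x16, and 8x8
y = dec(z, map1, map2, map3)   # -> (4, 1024)
```

models/dual_mask.py and models/dual_input.py below follow the identical pattern, conditioning on the mask channel `x_[:,1]` and on the full two-channel input, respectively.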
/models/dual_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | mask_ = x_[:,1].unsqueeze(1)
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, mask_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, mask_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | x = self.average_pooling2d(x)
41 | mask1 = mask_
42 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
43 |
44 | x = torch.cat([x, mask_], 1)
45 | x = self.leaky_relu(self.conv2d_3(x))
46 | x = torch.cat([x, mask_], 1)
47 | x = self.leaky_relu(self.conv2d_4(x))
48 | x = torch.cat([x, mask_], 1)
49 | x = self.leaky_relu(self.conv2d_5(x))
50 | x = self.average_pooling2d_1(x)
51 | mask2 = mask_
52 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
53 |
54 | x = torch.cat([x, mask_], 1)
55 | x = self.leaky_relu(self.conv2d_6(x))
56 | x = torch.cat([x, mask_], 1)
57 | x = self.leaky_relu(self.conv2d_7(x))
58 | x = torch.cat([x, mask_], 1)
59 | x = self.leaky_relu(self.conv2d_8(x))
60 | x = self.average_pooling2d_2(x)
61 | mask3 = mask_
62 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
63 |
64 | x = torch.cat([x, mask_], 1)
65 | x = self.leaky_relu(self.mu(x))
66 | return x, mask1, mask2, mask3
67 |
68 |
69 | class Decoder(nn.Module):
70 | def db_to_natural(self, x):
71 | return 10 ** (x / 10)
72 |
73 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
74 | super(Decoder, self).__init__()
75 |
76 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
77 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
78 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
79 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
80 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
81 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
82 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
86 |
87 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
88 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
89 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
90 |
91 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
92 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
93 | self.log_10 = torch.log(torch.tensor([10])).to(device)
94 |
95 | self._init_weights()
96 |
97 | def _init_weights(self):
98 | for m in self.modules():
99 | if isinstance(m, nn.ConvTranspose2d):
100 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
101 |
102 | def forward(self, x, mask1, mask2, mask3):
103 | x = self.leaky_relu(self.conv2d_transpose(x))
104 | x = self.up_sampling2d(x)
105 | x = torch.cat((x, mask3), dim=1)
106 | x = self.leaky_relu(self.conv2d_transpose_1(x))
107 | x = torch.cat((x, mask3), dim=1)
108 | x = self.leaky_relu(self.conv2d_transpose_2(x))
109 | x = torch.cat((x, mask3), dim=1)
110 | x = self.leaky_relu(self.conv2d_transpose_3(x))
111 | x = self.up_sampling2d_1(x)
112 | x = torch.cat((x, mask2), dim=1)
113 | x = self.leaky_relu(self.conv2d_transpose_4(x))
114 | x = torch.cat((x, mask2), dim=1)
115 | x = self.leaky_relu(self.conv2d_transpose_5(x))
116 | x = torch.cat((x, mask2), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_6(x))
118 | x = self.up_sampling2d_2(x)
119 | x = torch.cat((x, mask1), dim=1)
120 | x = self.leaky_relu(self.conv2d_transpose_7(x))
121 | x = torch.cat((x, mask1), dim=1)
122 | x = self.leaky_relu(self.conv2d_transpose_8(x))
123 | x = torch.cat((x, mask1), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_9(x))
125 | x = torch.flatten(x, start_dim=1)
126 | return x
--------------------------------------------------------------------------------
/models/dual_input.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+2, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | input_ = x_
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, input_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, input_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | x = self.average_pooling2d(x)
41 | input1 = input_
42 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
43 |
44 | x = torch.cat([x, input_], 1)
45 | x = self.leaky_relu(self.conv2d_3(x))
46 | x = torch.cat([x, input_], 1)
47 | x = self.leaky_relu(self.conv2d_4(x))
48 | x = torch.cat([x, input_], 1)
49 | x = self.leaky_relu(self.conv2d_5(x))
50 | x = self.average_pooling2d_1(x)
51 | input2 = input_
52 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
53 |
54 | x = torch.cat([x, input_], 1)
55 | x = self.leaky_relu(self.conv2d_6(x))
56 | x = torch.cat([x, input_], 1)
57 | x = self.leaky_relu(self.conv2d_7(x))
58 | x = torch.cat([x, input_], 1)
59 | x = self.leaky_relu(self.conv2d_8(x))
60 | x = self.average_pooling2d_2(x)
61 | input3 = input_
62 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
63 |
64 | x = torch.cat([x, input_], 1)
65 | x = self.leaky_relu(self.mu(x))
66 | return x, input1, input2, input3
67 |
68 |
69 | class Decoder(nn.Module):
70 | def db_to_natural(self, x):
71 | return 10 ** (x / 10)
72 |
73 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
74 | super(Decoder, self).__init__()
75 |
76 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
77 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
78 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
79 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
80 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
81 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
82 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+2, dec_out, kernel_size=(3,3), stride=1, padding=1)
86 |
87 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
88 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
89 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
90 |
91 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
92 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
93 | self.log_10 = torch.log(torch.tensor([10])).to(device)
94 |
95 | self._init_weights()
96 |
97 | def _init_weights(self):
98 | for m in self.modules():
99 | if isinstance(m, nn.ConvTranspose2d):
100 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
101 |
102 | def forward(self, x, input1, input2, input3):
103 | x = self.leaky_relu(self.conv2d_transpose(x))
104 | x = self.up_sampling2d(x)
105 | x = torch.cat((x, input3), dim=1)
106 | x = self.leaky_relu(self.conv2d_transpose_1(x))
107 | x = torch.cat((x, input3), dim=1)
108 | x = self.leaky_relu(self.conv2d_transpose_2(x))
109 | x = torch.cat((x, input3), dim=1)
110 | x = self.leaky_relu(self.conv2d_transpose_3(x))
111 | x = self.up_sampling2d_1(x)
112 | x = torch.cat((x, input2), dim=1)
113 | x = self.leaky_relu(self.conv2d_transpose_4(x))
114 | x = torch.cat((x, input2), dim=1)
115 | x = self.leaky_relu(self.conv2d_transpose_5(x))
116 | x = torch.cat((x, input2), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_6(x))
118 | x = self.up_sampling2d_2(x)
119 | x = torch.cat((x, input1), dim=1)
120 | x = self.leaky_relu(self.conv2d_transpose_7(x))
121 | x = torch.cat((x, input1), dim=1)
122 | x = self.leaky_relu(self.conv2d_transpose_8(x))
123 | x = torch.cat((x, input1), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_9(x))
125 | x = torch.flatten(x, start_dim=1)
126 | return x
--------------------------------------------------------------------------------
/models/skip_map.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, map_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, map_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | skip1 = x
41 | map1 = map_
42 |
43 | x = self.average_pooling2d(x)
44 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
45 |
46 | x = torch.cat([x, map_], 1)
47 | x = self.leaky_relu(self.conv2d_3(x))
48 | x = torch.cat([x, map_], 1)
49 | x = self.leaky_relu(self.conv2d_4(x))
50 | x = torch.cat([x, map_], 1)
51 | x = self.leaky_relu(self.conv2d_5(x))
52 | skip2 = x
53 | map2 = map_
54 |
55 | x = self.average_pooling2d_1(x)
56 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
57 |
58 | x = torch.cat([x, map_], 1)
59 | x = self.leaky_relu(self.conv2d_6(x))
60 | x = torch.cat([x, map_], 1)
61 | x = self.leaky_relu(self.conv2d_7(x))
62 | x = torch.cat([x, map_], 1)
63 | x = self.leaky_relu(self.conv2d_8(x))
64 | skip3 = x
65 | map3 = map_
66 |
67 | x = self.average_pooling2d_2(x)
68 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
69 |
70 | x = torch.cat([x, map_], 1)
71 | x = self.leaky_relu(self.mu(x))
72 | return x, skip1, skip2, skip3, map1, map2, map3
73 |
74 |
75 | class Decoder(nn.Module):
76 | def db_to_natural(self, x):
77 | return 10 ** (x / 10)
78 |
79 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
80 | super(Decoder, self).__init__()
81 |
82 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
90 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
91 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
92 |
93 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
94 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
95 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
96 |
97 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
98 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
99 | self.log_10 = torch.log(torch.tensor([10])).to(device)
100 |
101 | self._init_weights()
102 |
103 | def _init_weights(self):
104 | for m in self.modules():
105 | if isinstance(m, nn.ConvTranspose2d):
106 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
107 |
108 | def forward(self, x, skip1, skip2, skip3, map1, map2, map3):
109 | x = self.leaky_relu(self.conv2d_transpose(x))
110 | x = self.up_sampling2d(x)
111 | x = torch.cat((x, skip3, map3), dim=1)
112 | x = self.leaky_relu(self.conv2d_transpose_1(x))
113 | x = torch.cat((x, map3), dim=1)
114 | x = self.leaky_relu(self.conv2d_transpose_2(x))
115 | x = torch.cat((x, map3), dim=1)
116 | x = self.leaky_relu(self.conv2d_transpose_3(x))
117 | x = self.up_sampling2d_1(x)
118 | x = torch.cat((x, skip2, map2), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_4(x))
120 | x = torch.cat((x, map2), dim=1)
121 | x = self.leaky_relu(self.conv2d_transpose_5(x))
122 | x = torch.cat((x, map2), dim=1)
123 | x = self.leaky_relu(self.conv2d_transpose_6(x))
124 | x = self.up_sampling2d_2(x)
125 | x = torch.cat((x, skip1, map1), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_7(x))
127 | x = torch.cat((x, map1), dim=1)
128 | x = self.leaky_relu(self.conv2d_transpose_8(x))
129 | x = torch.cat((x, map1), dim=1)
130 | x = self.leaky_relu(self.conv2d_transpose_9(x))
131 | x = torch.flatten(x, start_dim=1)
132 | return x
--------------------------------------------------------------------------------
/models/skip_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | mask_ = x_[:,1].unsqueeze(1)
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, mask_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, mask_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | skip1 = x
41 | mask1 = mask_
42 |
43 | x = self.average_pooling2d(x)
44 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
45 |
46 | x = torch.cat([x, mask_], 1)
47 | x = self.leaky_relu(self.conv2d_3(x))
48 | x = torch.cat([x, mask_], 1)
49 | x = self.leaky_relu(self.conv2d_4(x))
50 | x = torch.cat([x, mask_], 1)
51 | x = self.leaky_relu(self.conv2d_5(x))
52 | skip2 = x
53 | mask2 = mask_
54 |
55 | x = self.average_pooling2d_1(x)
56 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
57 |
58 | x = torch.cat([x, mask_], 1)
59 | x = self.leaky_relu(self.conv2d_6(x))
60 | x = torch.cat([x, mask_], 1)
61 | x = self.leaky_relu(self.conv2d_7(x))
62 | x = torch.cat([x, mask_], 1)
63 | x = self.leaky_relu(self.conv2d_8(x))
64 | skip3 = x
65 | mask3 = mask_
66 |
67 | x = self.average_pooling2d_2(x)
68 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
69 |
70 | x = torch.cat([x, mask_], 1)
71 | x = self.leaky_relu(self.mu(x))
72 | return x, skip1, skip2, skip3, mask1, mask2, mask3
73 |
74 |
75 | class Decoder(nn.Module):
76 | def db_to_natural(self, x):
77 | return 10 ** (x / 10)
78 |
79 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
80 | super(Decoder, self).__init__()
81 |
82 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
90 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
91 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
92 |
93 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
94 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
95 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
96 |
97 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
98 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
99 | self.log_10 = torch.log(torch.tensor([10])).to(device)
100 |
101 | self._init_weights()
102 |
103 | def _init_weights(self):
104 | for m in self.modules():
105 | if isinstance(m, nn.ConvTranspose2d):
106 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
107 |
108 | def forward(self, x, skip1, skip2, skip3, mask1, mask2, mask3):
109 | x = self.leaky_relu(self.conv2d_transpose(x))
110 | x = self.up_sampling2d(x)
111 | x = torch.cat((x, skip3, mask3), dim=1)
112 | x = self.leaky_relu(self.conv2d_transpose_1(x))
113 | x = torch.cat((x, mask3), dim=1)
114 | x = self.leaky_relu(self.conv2d_transpose_2(x))
115 | x = torch.cat((x, mask3), dim=1)
116 | x = self.leaky_relu(self.conv2d_transpose_3(x))
117 | x = self.up_sampling2d_1(x)
118 | x = torch.cat((x, skip2, mask2), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_4(x))
120 | x = torch.cat((x, mask2), dim=1)
121 | x = self.leaky_relu(self.conv2d_transpose_5(x))
122 | x = torch.cat((x, mask2), dim=1)
123 | x = self.leaky_relu(self.conv2d_transpose_6(x))
124 | x = self.up_sampling2d_2(x)
125 | x = torch.cat((x, skip1, mask1), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_7(x))
127 | x = torch.cat((x, mask1), dim=1)
128 | x = self.leaky_relu(self.conv2d_transpose_8(x))
129 | x = torch.cat((x, mask1), dim=1)
130 | x = self.leaky_relu(self.conv2d_transpose_9(x))
131 | x = torch.flatten(x, start_dim=1)
132 | return x
--------------------------------------------------------------------------------
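For orientation, a minimal shape check of the skip_mask encoder/decoder pair. This is a sketch, not repository code: the batch size, the 32x32 grid, and the random inputs are illustrative, and the repository root is assumed to be on sys.path.

import torch
from models.skip_mask import Encoder, Decoder

enc = Encoder(enc_in=2, enc_out=4, n_dim=27)
dec = Decoder(dec_in=4, dec_out=1, n_dim=27)

# Channel 0: sampled power map; channel 1: binary sampling mask (toy values).
x = torch.randn(8, 2, 32, 32)
x[:, 1] = (torch.rand(8, 32, 32) > 0.7).float()

# The encoder returns the bottleneck plus the skip features and the mask
# at 32x32, 16x16, and 8x8 resolution for re-injection in the decoder.
z, s1, s2, s3, m1, m2, m3 = enc(x)   # z: (8, 4, 4, 4)
y = dec(z, s1, s2, s3, m1, m2, m3)   # y: (8, 1024), a flattened 32x32 estimate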
/models/skip_input.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+2, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+2, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | input_ = x_
34 |
35 | x = self.leaky_relu(self.conv2d(x_))
36 | x = torch.cat([x, input_], 1)
37 | x = self.leaky_relu(self.conv2d_1(x))
38 | x = torch.cat([x, input_], 1)
39 | x = self.leaky_relu(self.conv2d_2(x))
40 | skip1 = x
41 | input1 = input_
42 |
43 | x = self.average_pooling2d(x)
44 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
45 |
46 | x = torch.cat([x, input_], 1)
47 | x = self.leaky_relu(self.conv2d_3(x))
48 | x = torch.cat([x, input_], 1)
49 | x = self.leaky_relu(self.conv2d_4(x))
50 | x = torch.cat([x, input_], 1)
51 | x = self.leaky_relu(self.conv2d_5(x))
52 | skip2 = x
53 | input2 = input_
54 |
55 | x = self.average_pooling2d_1(x)
56 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
57 |
58 | x = torch.cat([x, input_], 1)
59 | x = self.leaky_relu(self.conv2d_6(x))
60 | x = torch.cat([x, input_], 1)
61 | x = self.leaky_relu(self.conv2d_7(x))
62 | x = torch.cat([x, input_], 1)
63 | x = self.leaky_relu(self.conv2d_8(x))
64 | skip3 = x
65 | input3 = input_
66 |
67 | x = self.average_pooling2d_2(x)
68 | input_ = torch.nn.functional.interpolate(input_, scale_factor = (0.5, 0.5))
69 |
70 | x = torch.cat([x, input_], 1)
71 | x = self.leaky_relu(self.mu(x))
72 | return x, skip1, skip2, skip3, input1, input2, input3
73 |
74 |
75 | class Decoder(nn.Module):
76 | def db_to_natural(self, x):
77 | return 10 ** (x / 10)
78 |
79 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
80 | super(Decoder, self).__init__()
81 |
82 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim + 2, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim + 2, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim + 2, n_dim, kernel_size=(3,3), stride=1, padding=1)
90 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+2, n_dim, kernel_size=(3,3), stride=1, padding=1)
91 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+2, dec_out, kernel_size=(3,3), stride=1, padding=1)
92 |
93 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
94 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
95 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
96 |
97 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
98 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
99 | self.log_10 = torch.log(torch.tensor([10])).to(device)
100 |
101 | self._init_weights()
102 |
103 | def _init_weights(self):
104 | for m in self.modules():
105 | if isinstance(m, nn.ConvTranspose2d):
106 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
107 |
108 | def forward(self, x, skip1, skip2, skip3, input1, input2, input3):
109 | x = self.leaky_relu(self.conv2d_transpose(x))
110 | x = self.up_sampling2d(x)
111 | x = torch.cat((x, skip3, input3), dim=1)
112 | x = self.leaky_relu(self.conv2d_transpose_1(x))
113 | x = torch.cat((x, input3), dim=1)
114 | x = self.leaky_relu(self.conv2d_transpose_2(x))
115 | x = torch.cat((x, input3), dim=1)
116 | x = self.leaky_relu(self.conv2d_transpose_3(x))
117 | x = self.up_sampling2d_1(x)
118 | x = torch.cat((x, skip2, input2), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_4(x))
120 | x = torch.cat((x, input2), dim=1)
121 | x = self.leaky_relu(self.conv2d_transpose_5(x))
122 | x = torch.cat((x, input2), dim=1)
123 | x = self.leaky_relu(self.conv2d_transpose_6(x))
124 | x = self.up_sampling2d_2(x)
125 | x = torch.cat((x, skip1, input1), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_7(x))
127 | x = torch.cat((x, input1), dim=1)
128 | x = self.leaky_relu(self.conv2d_transpose_8(x))
129 | x = torch.cat((x, input1), dim=1)
130 | x = self.leaky_relu(self.conv2d_transpose_9(x))
131 | x = torch.flatten(x, start_dim=1)
132 | return x
--------------------------------------------------------------------------------
/models/dual_mask_map.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 | mask_ = x_[:,1].unsqueeze(1)
35 |
36 | x = self.leaky_relu(self.conv2d(x_))
37 | x = torch.cat([x, mask_], 1)
38 | x = self.leaky_relu(self.conv2d_1(x))
39 | x = torch.cat([x, mask_], 1)
40 | x = self.leaky_relu(self.conv2d_2(x))
41 | x = self.average_pooling2d(x)
42 | map1 = map_
43 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
44 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
45 |
46 | x = torch.cat([x, mask_], 1)
47 | x = self.leaky_relu(self.conv2d_3(x))
48 | x = torch.cat([x, mask_], 1)
49 | x = self.leaky_relu(self.conv2d_4(x))
50 | x = torch.cat([x, mask_], 1)
51 | x = self.leaky_relu(self.conv2d_5(x))
52 | x = self.average_pooling2d_1(x)
53 | map2 = map_
54 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
55 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
56 |
57 | x = torch.cat([x, mask_], 1)
58 | x = self.leaky_relu(self.conv2d_6(x))
59 | x = torch.cat([x, mask_], 1)
60 | x = self.leaky_relu(self.conv2d_7(x))
61 | x = torch.cat([x, mask_], 1)
62 | x = self.leaky_relu(self.conv2d_8(x))
63 | x = self.average_pooling2d_2(x)
64 | map3 = map_
65 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
66 |
67 | x = torch.cat([x, mask_], 1)
68 | x = self.leaky_relu(self.mu(x))
69 | return x, map1, map2, map3
70 |
71 |
72 | class Decoder(nn.Module):
73 | def db_to_natural(self, x):
74 | return 10 ** (x / 10)
75 |
76 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
77 | super(Decoder, self).__init__()
78 |
79 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
80 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
81 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
82 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
89 |
90 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
91 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
92 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
93 |
94 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
95 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
96 | self.log_10 = torch.log(torch.tensor([10])).to(device)
97 |
98 | self._init_weights()
99 |
100 | def _init_weights(self):
101 | for m in self.modules():
102 | if isinstance(m, nn.ConvTranspose2d):
103 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
104 |
105 | def forward(self, x, map1, map2, map3):
106 | x = self.leaky_relu(self.conv2d_transpose(x))
107 | x = self.up_sampling2d(x)
108 | x = torch.cat((x, map3), dim=1)
109 | x = self.leaky_relu(self.conv2d_transpose_1(x))
110 | x = torch.cat((x, map3), dim=1)
111 | x = self.leaky_relu(self.conv2d_transpose_2(x))
112 | x = torch.cat((x, map3), dim=1)
113 | x = self.leaky_relu(self.conv2d_transpose_3(x))
114 | x = self.up_sampling2d_1(x)
115 | x = torch.cat((x, map2), dim=1)
116 | x = self.leaky_relu(self.conv2d_transpose_4(x))
117 | x = torch.cat((x, map2), dim=1)
118 | x = self.leaky_relu(self.conv2d_transpose_5(x))
119 | x = torch.cat((x, map2), dim=1)
120 | x = self.leaky_relu(self.conv2d_transpose_6(x))
121 | x = self.up_sampling2d_2(x)
122 | x = torch.cat((x, map1), dim=1)
123 | x = self.leaky_relu(self.conv2d_transpose_7(x))
124 | x = torch.cat((x, map1), dim=1)
125 | x = self.leaky_relu(self.conv2d_transpose_8(x))
126 | x = torch.cat((x, map1), dim=1)
127 | x = self.leaky_relu(self.conv2d_transpose_9(x))
128 | x = torch.flatten(x, start_dim=1)
129 | return x
--------------------------------------------------------------------------------
/models/dual_map_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 | mask_ = x_[:,1].unsqueeze(1)
35 |
36 | x = self.leaky_relu(self.conv2d(x_))
37 | x = torch.cat([x, map_], 1)
38 | x = self.leaky_relu(self.conv2d_1(x))
39 | x = torch.cat([x, map_], 1)
40 | x = self.leaky_relu(self.conv2d_2(x))
41 | x = self.average_pooling2d(x)
42 | mask1 = mask_
43 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
44 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
45 |
46 | x = torch.cat([x, map_], 1)
47 | x = self.leaky_relu(self.conv2d_3(x))
48 | x = torch.cat([x, map_], 1)
49 | x = self.leaky_relu(self.conv2d_4(x))
50 | x = torch.cat([x, map_], 1)
51 | x = self.leaky_relu(self.conv2d_5(x))
52 | x = self.average_pooling2d_1(x)
53 | mask2 = mask_
54 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
55 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
56 |
57 | x = torch.cat([x, map_], 1)
58 | x = self.leaky_relu(self.conv2d_6(x))
59 | x = torch.cat([x, map_], 1)
60 | x = self.leaky_relu(self.conv2d_7(x))
61 | x = torch.cat([x, map_], 1)
62 | x = self.leaky_relu(self.conv2d_8(x))
63 | x = self.average_pooling2d_2(x)
64 | mask3 = mask_
65 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
66 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
67 |
68 | x = torch.cat([x, map_], 1)
69 | x = self.leaky_relu(self.mu(x))
70 | return x, mask1, mask2, mask3
71 |
72 |
73 | class Decoder(nn.Module):
74 | def db_to_natural(self, x):
75 | return 10 ** (x / 10)
76 |
77 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
78 | super(Decoder, self).__init__()
79 |
80 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
81 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
82 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
83 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
84 | self.conv2d_transpose_4 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
85 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_7 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
90 |
91 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
92 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
93 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
94 |
95 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
96 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
97 | self.log_10 = torch.log(torch.tensor([10])).to(device)
98 |
99 | self._init_weights()
100 |
101 | def _init_weights(self):
102 | for m in self.modules():
103 | if isinstance(m, nn.ConvTranspose2d):
104 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
105 |
106 | def forward(self, x, mask1, mask2, mask3):
107 | x = self.leaky_relu(self.conv2d_transpose(x))
108 | x = self.up_sampling2d(x)
109 | x = torch.cat((x, mask3), dim=1)
110 | x = self.leaky_relu(self.conv2d_transpose_1(x))
111 | x = torch.cat((x, mask3), dim=1)
112 | x = self.leaky_relu(self.conv2d_transpose_2(x))
113 | x = torch.cat((x, mask3), dim=1)
114 | x = self.leaky_relu(self.conv2d_transpose_3(x))
115 | x = self.up_sampling2d_1(x)
116 | x = torch.cat((x, mask2), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_4(x))
118 | x = torch.cat((x, mask2), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_5(x))
120 | x = torch.cat((x, mask2), dim=1)
121 | x = self.leaky_relu(self.conv2d_transpose_6(x))
122 | x = self.up_sampling2d_2(x)
123 | x = torch.cat((x, mask1), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_7(x))
125 | x = torch.cat((x, mask1), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_8(x))
127 | x = torch.cat((x, mask1), dim=1)
128 | x = self.leaky_relu(self.conv2d_transpose_9(x))
129 | x = torch.flatten(x, start_dim=1)
130 | return x
--------------------------------------------------------------------------------
/models/skip_mask_map.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 | mask_ = x_[:,1].unsqueeze(1)
35 |
36 | x = self.leaky_relu(self.conv2d(x_))
37 | x = torch.cat([x, mask_], 1)
38 | x = self.leaky_relu(self.conv2d_1(x))
39 | x = torch.cat([x, mask_], 1)
40 | x = self.leaky_relu(self.conv2d_2(x))
41 | skip1 = x
42 | map1 = map_
43 |
44 | x = self.average_pooling2d(x)
45 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
46 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
47 |
48 | x = torch.cat([x, mask_], 1)
49 | x = self.leaky_relu(self.conv2d_3(x))
50 | x = torch.cat([x, mask_], 1)
51 | x = self.leaky_relu(self.conv2d_4(x))
52 | x = torch.cat([x, mask_], 1)
53 | x = self.leaky_relu(self.conv2d_5(x))
54 | skip2 = x
55 | map2 = map_
56 |
57 | x = self.average_pooling2d_1(x)
58 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
59 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
60 |
61 | x = torch.cat([x, mask_], 1)
62 | x = self.leaky_relu(self.conv2d_6(x))
63 | x = torch.cat([x, mask_], 1)
64 | x = self.leaky_relu(self.conv2d_7(x))
65 | x = torch.cat([x, mask_], 1)
66 | x = self.leaky_relu(self.conv2d_8(x))
67 | skip3 = x
68 | map3 = map_
69 |
70 | x = self.average_pooling2d_2(x)
71 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
72 |
73 | x = torch.cat([x, mask_], 1)
74 | x = self.leaky_relu(self.mu(x))
75 | return x, skip1, skip2, skip3, map1, map2, map3
76 |
77 |
78 | class Decoder(nn.Module):
79 | def db_to_natural(self, x):
80 | return 10 ** (x / 10)
81 |
82 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
83 | super(Decoder, self).__init__()
84 |
85 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
90 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
91 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
92 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
93 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
94 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
95 |
96 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
97 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
98 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
99 |
100 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
101 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
102 | self.log_10 = torch.log(torch.tensor([10])).to(device)
103 |
104 | self._init_weights()
105 |
106 | def _init_weights(self):
107 | for m in self.modules():
108 | if isinstance(m, nn.ConvTranspose2d):
109 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
110 |
111 | def forward(self, x, skip1, skip2, skip3, map1, map2, map3):
112 | x = self.leaky_relu(self.conv2d_transpose(x))
113 | x = self.up_sampling2d(x)
114 | x = torch.cat((x, skip3, map3), dim=1)
115 | x = self.leaky_relu(self.conv2d_transpose_1(x))
116 | x = torch.cat((x, map3), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_2(x))
118 | x = torch.cat((x, map3), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_3(x))
120 | x = self.up_sampling2d_1(x)
121 | x = torch.cat((x, skip2, map2), dim=1)
122 | x = self.leaky_relu(self.conv2d_transpose_4(x))
123 | x = torch.cat((x, map2), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_5(x))
125 | x = torch.cat((x, map2), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_6(x))
127 | x = self.up_sampling2d_2(x)
128 | x = torch.cat((x, skip1, map1), dim=1)
129 | x = self.leaky_relu(self.conv2d_transpose_7(x))
130 | x = torch.cat((x, map1), dim=1)
131 | x = self.leaky_relu(self.conv2d_transpose_8(x))
132 | x = torch.cat((x, map1), dim=1)
133 | x = self.leaky_relu(self.conv2d_transpose_9(x))
134 | x = torch.flatten(x, start_dim=1)
135 | return x
--------------------------------------------------------------------------------
/models/skip_map_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
5 |
6 | class Encoder(nn.Module):
7 | def __init__(self, enc_in, enc_out, n_dim, leaky_relu_alpha=0.3):
8 | super(Encoder, self).__init__()
9 | self.conv2d = nn.Conv2d(enc_in, n_dim, kernel_size=(3, 3), padding='same')
10 | self.conv2d_1 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
11 | self.conv2d_2 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
12 | self.average_pooling2d = nn.AvgPool2d(kernel_size=(2, 2))
13 | self.conv2d_3 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
14 | self.conv2d_4 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
15 | self.conv2d_5 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
16 | self.average_pooling2d_1 = nn.AvgPool2d(kernel_size=(2, 2))
17 | self.conv2d_6 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
18 | self.conv2d_7 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
19 | self.conv2d_8 = nn.Conv2d(n_dim+1, n_dim, kernel_size=(3, 3), padding='same')
20 | self.average_pooling2d_2 = nn.AvgPool2d(kernel_size=(2, 2))
21 | self.mu = nn.Conv2d(n_dim+1, enc_out, kernel_size=(3, 3), padding='same')
22 |
23 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
24 |
25 | self._init_weights()
26 |
27 | def _init_weights(self):
28 | for m in self.modules():
29 | if isinstance(m, nn.Conv2d):
30 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
31 |
32 | def forward(self, x_):
33 | map_ = x_[:,0].unsqueeze(1)
34 | mask_ = x_[:,1].unsqueeze(1)
35 |
36 | x = self.leaky_relu(self.conv2d(x_))
37 | x = torch.cat([x, map_], 1)
38 | x = self.leaky_relu(self.conv2d_1(x))
39 | x = torch.cat([x, map_], 1)
40 | x = self.leaky_relu(self.conv2d_2(x))
41 | skip1 = x
42 | mask1 = mask_
43 |
44 | x = self.average_pooling2d(x)
45 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
46 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
47 |
48 | x = torch.cat([x, map_], 1)
49 | x = self.leaky_relu(self.conv2d_3(x))
50 | x = torch.cat([x, map_], 1)
51 | x = self.leaky_relu(self.conv2d_4(x))
52 | x = torch.cat([x, map_], 1)
53 | x = self.leaky_relu(self.conv2d_5(x))
54 | skip2 = x
55 | mask2 = mask_
56 |
57 | x = self.average_pooling2d_1(x)
58 | mask_ = torch.nn.functional.interpolate(mask_, scale_factor = (0.5, 0.5))
59 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
60 |
61 | x = torch.cat([x, map_], 1)
62 | x = self.leaky_relu(self.conv2d_6(x))
63 | x = torch.cat([x, map_], 1)
64 | x = self.leaky_relu(self.conv2d_7(x))
65 | x = torch.cat([x, map_], 1)
66 | x = self.leaky_relu(self.conv2d_8(x))
67 | skip3 = x
68 | mask3 = mask_
69 |
70 | x = self.average_pooling2d_2(x)
71 | map_ = torch.nn.functional.interpolate(map_, scale_factor = (0.5, 0.5))
72 |
73 | x = torch.cat([x, map_], 1)
74 | x = self.leaky_relu(self.mu(x))
75 | return x, skip1, skip2, skip3, mask1, mask2, mask3
76 |
77 |
78 | class Decoder(nn.Module):
79 | def db_to_natural(self, x):
80 | return 10 ** (x / 10)
81 |
82 | def __init__(self, dec_in, dec_out, n_dim, leaky_relu_alpha=0.3):
83 | super(Decoder, self).__init__()
84 |
85 | self.conv2d_transpose = nn.ConvTranspose2d(dec_in, dec_in, kernel_size=(3,3), stride=1, padding=1)
86 | self.conv2d_transpose_1 = nn.ConvTranspose2d(dec_in + n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
87 | self.conv2d_transpose_2 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
88 | self.conv2d_transpose_3 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
89 | self.conv2d_transpose_4 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
90 | self.conv2d_transpose_5 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
91 | self.conv2d_transpose_6 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
92 | self.conv2d_transpose_7 = nn.ConvTranspose2d(2 * n_dim + 1, n_dim, kernel_size=(3,3), stride=1, padding=1)
93 | self.conv2d_transpose_8 = nn.ConvTranspose2d(n_dim+1, n_dim, kernel_size=(3,3), stride=1, padding=1)
94 | self.conv2d_transpose_9 = nn.ConvTranspose2d(n_dim+1, dec_out, kernel_size=(3,3), stride=1, padding=1)
95 |
96 | self.up_sampling2d = nn.Upsample(scale_factor=2, mode='bilinear')
97 | self.up_sampling2d_1 = nn.Upsample(scale_factor=2, mode='bilinear')
98 | self.up_sampling2d_2 = nn.Upsample(scale_factor=2, mode='bilinear')
99 |
100 | self.leaky_relu = torch.nn.LeakyReLU(negative_slope=leaky_relu_alpha)
101 | self.bases = torch.tensor([[1]], dtype=torch.float32).to(device)
102 | self.log_10 = torch.log(torch.tensor([10])).to(device)
103 |
104 | self._init_weights()
105 |
106 | def _init_weights(self):
107 | for m in self.modules():
108 | if isinstance(m, nn.ConvTranspose2d):
109 | torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
110 |
111 | def forward(self, x, skip1, skip2, skip3, mask1, mask2, mask3):
112 | x = self.leaky_relu(self.conv2d_transpose(x))
113 | x = self.up_sampling2d(x)
114 | x = torch.cat((x, skip3, mask3), dim=1)
115 | x = self.leaky_relu(self.conv2d_transpose_1(x))
116 | x = torch.cat((x, mask3), dim=1)
117 | x = self.leaky_relu(self.conv2d_transpose_2(x))
118 | x = torch.cat((x, mask3), dim=1)
119 | x = self.leaky_relu(self.conv2d_transpose_3(x))
120 | x = self.up_sampling2d_1(x)
121 | x = torch.cat((x, skip2, mask2), dim=1)
122 | x = self.leaky_relu(self.conv2d_transpose_4(x))
123 | x = torch.cat((x, mask2), dim=1)
124 | x = self.leaky_relu(self.conv2d_transpose_5(x))
125 | x = torch.cat((x, mask2), dim=1)
126 | x = self.leaky_relu(self.conv2d_transpose_6(x))
127 | x = self.up_sampling2d_2(x)
128 | x = torch.cat((x, skip1, mask1), dim=1)
129 | x = self.leaky_relu(self.conv2d_transpose_7(x))
130 | x = torch.cat((x, mask1), dim=1)
131 | x = self.leaky_relu(self.conv2d_transpose_8(x))
132 | x = torch.cat((x, mask1), dim=1)
133 | x = self.leaky_relu(self.conv2d_transpose_9(x))
134 | x = torch.flatten(x, start_dim=1)
135 | return x
--------------------------------------------------------------------------------
/dataset/map_sampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Yves Teganya and Daniel Romero
2 | #
3 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
4 | # arXiv preprint arXiv:2005. 05964, 2020.
5 | #
6 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
7 |
8 | import numpy as np
9 |
10 |
11 | class MapSampler:
12 | """
13 |
14 | ARGUMENTS:
15 |
16 |     `v_sampling_factor`: can be:
17 | 
18 |     - a fraction between 0 and 1 giving the average percentage of entries to select
19 |     as samples, e.g. v_sampling_factor=0.3 selects on average 30 % of the entries of
20 |     the map that are different from np.nan.
21 | 
22 |     - an interval (list or tuple of length 2). The aforementioned fraction is drawn uniformly at random
23 |     within the interval [v_sampling_factor[0], v_sampling_factor[1]] each time a map is sampled.
24 | 
25 |     `std_noise`: standard deviation of the Gaussian noise added to the sampled map
26 |     `sample_threshold`: set to -200 dBW (the rx value returned by Wireless InSite when receivers are out
27 |     of the coverage area)
28 | 
29 |     `set_unobserved_val_to_minus1`: flag that, when True, sets the unsampled entries to -1 instead of 0
30 | """
31 |
32 | def __init__(self,
33 | v_sampling_factor=[],
34 | std_noise=0,
35 | ):
36 | self.v_sampling_factor = v_sampling_factor
37 | self.std_noise = std_noise
38 | self.set_unobserved_val_to_minus1 = False
39 |
40 | def sample_map(self, t_map_to_sample, m_meta_map):
41 |
42 | """
43 | Returns:
44 |             `t_sampled_map`: Nx x Ny x Nf tensor
45 |             `m_mask`: Nx x Ny, each entry is 1 if that grid point is sampled and 0 otherwise; the same mask is applied along the Nf dimension.
46 | """
47 | if np.size(self.v_sampling_factor) == 1:
48 | sampling_factor = self.v_sampling_factor
49 | elif np.size(self.v_sampling_factor) == 2:
50 | sampling_factor = np.round((self.v_sampling_factor[1] - self.v_sampling_factor[0]) * np.random.rand() +
51 | self.v_sampling_factor[0], decimals=2)
52 | else:
53 |             raise ValueError("invalid value of v_sampling_factor")
54 | shape_in = t_map_to_sample.shape
55 | m_mask = np.ones((shape_in[0], shape_in[1]))
56 | if sampling_factor == 1:
57 | sampled_map = t_map_to_sample
58 | m_mask_ret = m_mask
59 | else:
60 | m_map_to_sample = np.reshape(t_map_to_sample, (shape_in[0] * shape_in[1], shape_in[2]),
61 | order='F')
62 | v_mask = m_mask.flatten('F')
63 | v_meta_data = m_meta_map.flatten('F')
64 | unrelevant_ind = np.where(v_meta_data == 1)[0]
65 | indices_to_sampled_from = np.where(v_meta_data == 0)[0]
66 | unobs_val_ind = np.random.choice(indices_to_sampled_from,
67 | size=int((1 - sampling_factor) * len(indices_to_sampled_from)),
68 | replace=False)
69 | all_unobs_ind_in = list(map(int, np.concatenate((unrelevant_ind, unobs_val_ind),
70 | axis=0)))
71 | if self.set_unobserved_val_to_minus1:
72 | m_map_to_sample[all_unobs_ind_in, :] = -1
73 | else:
74 | m_map_to_sample[all_unobs_ind_in, :] = 0
75 | v_mask[list(map(int, all_unobs_ind_in))] = 0
76 | m_mask_ret = np.reshape(v_mask, (shape_in[0], shape_in[1]), order='F')
77 | sampled_map = np.reshape(m_map_to_sample, t_map_to_sample.shape, order='F')
78 | t_sampled_map_ret = sampled_map + np.multiply(
79 | np.random.normal(loc=0, scale=self.std_noise, size=t_map_to_sample.shape),
80 | np.expand_dims(m_mask_ret, axis=2))
81 | return t_sampled_map_ret, m_mask_ret
82 |
83 | def resample_map(self, t_sampled_map, m_mask, v_split_frac):
84 | """
85 | Returns:
86 |             `t_sampled_map_in`: Nx x Ny x Nf tensor
87 |             `m_mask_in`: Nx x Ny, indicates which entries of t_sampled_map_in are sampled; each entry is 1 if that grid
88 |             point is sampled and 0 otherwise, and the same mask is applied along the Nf dimension.
89 |             `t_sampled_map_out`: Nx x Ny x Nf tensor
90 |             `m_mask_out`: Nx x Ny, indicates which entries of t_sampled_map_out are sampled; each entry is 1 if that
91 |             grid point is sampled and 0 otherwise, and the same mask is applied along the Nf dimension.
92 | """
93 | shape_in = t_sampled_map.shape
94 | v_mask_in = m_mask.flatten('F')
95 | v_mask_target = m_mask.flatten('F')
96 | m_map_to_resample_in = np.reshape(t_sampled_map, (shape_in[0] * shape_in[1], shape_in[2]),
97 | order='F')
98 | m_map_to_resample_out = np.reshape(t_sampled_map, (shape_in[0] * shape_in[1], shape_in[2]),
99 | order='F')
100 | indices_to_sampled_from = np.where(v_mask_in == 1)[0]
101 | unobs_val_ind_in = np.random.choice(indices_to_sampled_from,
102 | size=int((1 - v_split_frac[0]) * len(indices_to_sampled_from)),
103 | replace=False)
104 | unobs_val_ind_target = np.random.choice(indices_to_sampled_from,
105 | size=int((1 - v_split_frac[1]) * len(indices_to_sampled_from)),
106 | replace=False)
107 |
108 | if self.set_unobserved_val_to_minus1:
109 | m_map_to_resample_in[unobs_val_ind_in, :] = -1
110 | m_map_to_resample_out[unobs_val_ind_target, :] = -1
111 | else:
112 | m_map_to_resample_in[unobs_val_ind_in, :] = 0
113 | m_map_to_resample_out[unobs_val_ind_target, :] = 0
114 | v_mask_in[list(map(int, unobs_val_ind_in))] = 0
115 | v_mask_target[list(map(int, unobs_val_ind_target))] = 0
116 | t_resampled_map_in = np.reshape(m_map_to_resample_in, t_sampled_map.shape, order='F')
117 | t_resampled_map_out = np.reshape(m_map_to_resample_out, t_sampled_map.shape, order='F')
118 | m_mask_in_ret = np.reshape(v_mask_in, (shape_in[0], shape_in[1]), order='F')
119 | m_mask_target_ret = np.reshape(v_mask_target, (shape_in[0], shape_in[1]), order='F')
120 | t_resampled_map_in_ret = t_resampled_map_in + np.multiply(
121 | np.random.normal(loc=0, scale=self.std_noise, size=t_sampled_map.shape),
122 | np.expand_dims(m_mask_in_ret, axis=2))
123 | t_resampled_map_out_ret = t_resampled_map_out + np.multiply(
124 | np.random.normal(loc=0, scale=self.std_noise, size=t_sampled_map.shape),
125 | np.expand_dims(m_mask_target_ret, axis=2))
126 | return t_resampled_map_in_ret, m_mask_in_ret, t_resampled_map_out_ret, m_mask_target_ret
127 |
128 |
129 | def list_complement_elements(list_1, list_2):
130 | complement_list = []
131 | for num in list_1:
132 | if num not in list_2:
133 | complement_list.append(num)
134 | return np.array(complement_list)
135 |
--------------------------------------------------------------------------------
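A minimal usage sketch for MapSampler. The 32x32 single-frequency map and the all-zero meta map (no in-building grid points) are illustrative values, and the dataset directory is assumed to be on sys.path, matching the flat imports used in this package.

import numpy as np
from map_sampler import MapSampler

t_map = np.random.rand(32, 32, 1)   # Nx x Ny x Nf power map (toy values)
m_meta = np.zeros((32, 32))         # 1 marks in-building points; none here

# Keep between 5% and 30% of the non-building entries, without noise.
sampler = MapSampler(v_sampling_factor=[0.05, 0.3], std_noise=0)
t_sampled, m_mask = sampler.sample_map(t_map, m_meta)

# m_mask is 1 where the map was kept; unobserved entries of t_sampled are
# zeroed (or set to -1 when set_unobserved_val_to_minus1 is True).
assert t_sampled.shape == (32, 32, 1) and m_mask.shape == (32, 32)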
/dataset/insite_map_generator.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Yves Teganya and Daniel Romero
2 | #
3 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
4 | # arXiv preprint arXiv:2005. 05964, 2020.
5 | #
6 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
7 | #
8 | # Adapted by William Locke and Nikita Lokhmachev
9 | # Import paths and file paths were changed according to the current repository structure,
10 | # but all other code and comments are unchanged.
11 |
12 | from map_generator import MapGenerator
13 | from communications import dbm_to_natural, natural_to_dbm, dbm_to_db, db_to_natural
14 | import pandas as pd
15 | import numpy as np
16 | import cv2
17 | import os
18 |
19 | building_threshold = -200 # Threshold in dBm to determine building locations
20 |
21 |
22 | class InsiteMapGenerator(MapGenerator):
23 | def __init__(
24 | self,
25 | num_tx_per_channel=2,
26 | l_file_num=np.arange(1, 40),
27 | large_map_size=(244, 246),
28 |             # The Wireless InSite software provides a map of size 244 x 246
29 | filter_map=True,
30 | filter_size=3,
31 | inter_grid_points_dist_factor=1, # set to an integer greater than 1
32 | input_dir='remcom_maps',
33 | *args,
34 | **kwargs):
35 |
36 | super(InsiteMapGenerator, self).__init__(*args, **kwargs)
37 | self.num_tx_per_channel = num_tx_per_channel
38 | self.l_file_num = l_file_num
39 | self.large_map_size = large_map_size
40 | self.filter_map = filter_map
41 | self.filter_size = filter_size
42 | self.inter_grid_points_dist_factor = inter_grid_points_dist_factor
43 | self.input_dir = input_dir
44 |
45 | def generate_power_map_per_freq(self, num_bases):
46 |
47 | l_maps = []
48 |
49 | if self.l_file_num[0] == 50 and self.l_file_num[-1] == 51 and self.num_tx_per_channel == 2:
50 | # reconstructing a Wireless Insite map taken with a higher resolution (used in the conference paper)
51 | rx_power_tx1 = np.array(
52 | pd.read_csv(os.path.join(self.input_dir, "power_tx50.txt"),
53 | delim_whitespace=True,
54 | skipinitialspace=True))
55 | rx_power_tx2 = np.array(
56 | pd.read_csv(os.path.join(self.input_dir, "power_tx51.txt"),
57 | delim_whitespace=True,
58 | skipinitialspace=True))
59 | rx_power_tx1_dBW = dbm_to_db(np.reshape(rx_power_tx1,
60 | (self.n_grid_points_x, self.n_grid_points_y), order='C'))
61 | rx_power_tx2_dBW = dbm_to_db(np.reshape(rx_power_tx2,
62 | (self.n_grid_points_x, self.n_grid_points_y), order='C'))
63 | rx_pow_tot_dBw = db_to_natural(rx_power_tx1_dBW) + db_to_natural(rx_power_tx2_dBW)
64 |
65 | l_maps.append(rx_pow_tot_dBw)
66 |
67 | else:
68 |
69 | # Generate coordinates of random patch
70 | patch_indices = np.random.choice(self.large_map_size[0] -
71 | self.n_grid_points_x * self.inter_grid_points_dist_factor,
72 | size=2)
73 |
74 | for basis_ind in range(num_bases):
75 | map_this_frequency = np.zeros(
76 | (self.n_grid_points_x, self.n_grid_points_y))
77 | assert len(self.l_file_num) >= self.num_tx_per_channel, 'The number of map extraction files should be ' \
78 | 'greater or equal to the number of transmitters per channel'
79 | files_ind = np.random.choice(self.l_file_num,
80 | size=self.num_tx_per_channel,
81 | replace=False)
82 | for ind_tx in range(self.num_tx_per_channel):
83 | # Choose a file and get the large map
84 | file_name = 'power_tx%s.p2m' % files_ind[ind_tx]
85 | large_map_tx = np.array(
86 | pd.read_csv(
87 | os.path.join(self.input_dir, file_name),
88 | delim_whitespace=True,
89 | skiprows=[0],
90 | usecols=['Power(dBm)']))
91 | large_map_tx_resh = dbm_to_natural(np.reshape(large_map_tx,
92 | newshape=self.large_map_size,
93 | order='C'))
94 | # Extract patch from the file
95 | maps_as_patch = self.get_patch(large_map_tx_resh,
96 | patch_indices)
97 |
98 | map_this_frequency += maps_as_patch
99 |
100 | # Filter the map
101 | if self.filter_map:
102 | filter_to_use = np.ones(
103 | (self.filter_size, self.filter_size),
104 | np.float32) / (self.filter_size * self.filter_size)
105 | map_this_frequency_filter = cv2.filter2D(map_this_frequency, -1,
106 | filter_to_use)
107 | else:
108 | map_this_frequency_filter = map_this_frequency
109 |
110 | l_maps.append(map_this_frequency_filter) # list of Nx x Ny matrices
111 |
112 | return l_maps, obtain_meta_map(l_maps[0])
113 |
114 | def get_patch(self, large_image, startRow_and_Col):
115 | if self.inter_grid_points_dist_factor > 1:
116 | v_patch_indices_y = np.array(range(startRow_and_Col[0], startRow_and_Col[0] +
117 | self.inter_grid_points_dist_factor * self.n_grid_points_y))
118 | v_patch_indices_x = np.array(range(startRow_and_Col[1], startRow_and_Col[1] +
119 | self.inter_grid_points_dist_factor * self.n_grid_points_x))
120 | v_coarse_patch_indices_y = v_patch_indices_y[0::self.inter_grid_points_dist_factor]
121 | v_coarse_patch_indices_x = v_patch_indices_x[0::self.inter_grid_points_dist_factor]
122 | return large_image[v_coarse_patch_indices_y.reshape(-1, 1), v_coarse_patch_indices_x.reshape(1, -1)]
123 | else:
124 | return large_image[startRow_and_Col[0]:startRow_and_Col[0] +
125 | self.n_grid_points_y,
126 | startRow_and_Col[1]:startRow_and_Col[1] +
127 | self.n_grid_points_x]
128 |
129 |
130 | def obtain_meta_map(m_map):
131 | """
132 | Returns:
133 | `m_meta_map_ret`: Nx x Ny matrix where each entry is 1 if that grid point is inside the building,
134 | 0 otherwise.
135 | """
136 | m_meta_map = np.zeros((m_map.shape[0], m_map.shape[1]))
137 | v_meta_map = m_meta_map.flatten('F')
138 | v_map = m_map.flatten('F')
139 | ind_pts_in_building = np.where(
140 | v_map < dbm_to_natural(building_threshold))[0]
141 | v_meta_map[list(map(int, ind_pts_in_building))] = 1
142 | m_meta_map_ret = np.reshape(v_meta_map, m_meta_map.shape, order='F')
143 | return m_meta_map_ret
144 |
--------------------------------------------------------------------------------
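A small sanity check of obtain_meta_map; the 4x4 grid and the power levels are illustrative, and the dataset directory is assumed to be on sys.path with the requirements installed.

import numpy as np
from insite_map_generator import obtain_meta_map
from communications import dbm_to_natural

m_map = np.full((4, 4), dbm_to_natural(-60.0))  # natural-unit power map
m_map[0, 0] = dbm_to_natural(-250.0)            # below the -200 building threshold

m_meta = obtain_meta_map(m_map)
assert m_meta[0, 0] == 1 and m_meta.sum() == 1  # only that point is inside a building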
/models/autoencoders.py:
--------------------------------------------------------------------------------
1 | from .autoencoder import Autoencoder
2 | from .baseline import Encoder as BaselineEncoder, Decoder as BaselineDecoder
3 |
4 | from .skip import Encoder as SkipEncoder, Decoder as SkipDecoder
5 | from .skip_residual import Encoder as SkipResidualEncoder, Decoder as SkipResidualDecoder
6 | from .skip_mask import Encoder as SkipMaskEncoder, Decoder as SkipMaskDecoder
7 | from .skip_mask_map import Encoder as SkipMaskMapEncoder, Decoder as SkipMaskMapDecoder
8 | from .skip_map import Encoder as SkipMapEncoder, Decoder as SkipMapDecoder
9 | from .skip_map_mask import Encoder as SkipMapMaskEncoder, Decoder as SkipMapMaskDecoder
10 | from .skip_input import Encoder as SkipInputEncoder, Decoder as SkipInputDecoder
11 |
12 | from .dual_mask import Encoder as DualMaskEncoder, Decoder as DualMaskDecoder
13 | from .dual_mask_map import Encoder as DualMaskMapEncoder, Decoder as DualMaskMapDecoder
14 | from .dual_map import Encoder as DualMapEncoder, Decoder as DualMapDecoder
15 | from .dual_map_mask import Encoder as DualMapMaskEncoder, Decoder as DualMapMaskDecoder
16 | from .dual_input import Encoder as DualInputEncoder, Decoder as DualInputDecoder
17 |
18 | import torch
19 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
20 |
21 |
22 | # Baseline Autoencoder
23 |
24 | class BaselineAutoencoder(Autoencoder):
25 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
26 | super().__init__()
27 |
28 | self.encoder = BaselineEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
29 | self.decoder = BaselineDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
30 |
31 |
32 | # Skip Connection Autoencoders
33 |
34 | class SkipAutoencoder(Autoencoder):
35 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
36 | super().__init__()
37 |
38 | self.encoder = SkipEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
39 | self.decoder = SkipDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
40 |
41 | def forward(self, x):
42 | x, skip1, skip2, skip3 = self.encoder(x)
43 | x = self.decoder(x, skip1, skip2, skip3)
44 | return x
45 |
46 |
47 | class SkipResidualAutoencoder(SkipAutoencoder):
48 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
49 | super().__init__()
50 |
51 | self.encoder = SkipResidualEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
52 | self.decoder = SkipResidualDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
53 |
54 |
55 | class SkipMaskAutoencoder(SkipAutoencoder):
56 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
57 | super().__init__()
58 |
59 | self.encoder = SkipMaskEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
60 | self.decoder = SkipMaskDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
61 |
62 | def forward(self, x):
63 | x, skip1, skip2, skip3, mask1, mask2, mask3 = self.encoder(x)
64 | x = self.decoder(x, skip1, skip2, skip3, mask1, mask2, mask3)
65 | return x
66 |
67 |
68 | class SkipMaskMapAutoencoder(SkipAutoencoder):
69 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
70 | super().__init__()
71 |
72 | self.encoder = SkipMaskMapEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
73 | self.decoder = SkipMaskMapDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
74 |
75 | def forward(self, x):
76 | x, skip1, skip2, skip3, map1, map2, map3 = self.encoder(x)
77 | x = self.decoder(x, skip1, skip2, skip3, map1, map2, map3)
78 | return x
79 |
80 |
81 | class SkipMapAutoencoder(SkipAutoencoder):
82 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
83 | super().__init__()
84 |
85 | self.encoder = SkipMapEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
86 | self.decoder = SkipMapDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
87 |
88 | def forward(self, x):
89 | x, skip1, skip2, skip3, map1, map2, map3 = self.encoder(x)
90 | x = self.decoder(x, skip1, skip2, skip3, map1, map2, map3)
91 | return x
92 |
93 |
94 | class SkipMapMaskAutoencoder(SkipAutoencoder):
95 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
96 | super().__init__()
97 |
98 | self.encoder = SkipMapMaskEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
99 | self.decoder = SkipMapMaskDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
100 |
101 | def forward(self, x):
102 | x, skip1, skip2, skip3, mask1, mask2, mask3 = self.encoder(x)
103 | x = self.decoder(x, skip1, skip2, skip3, mask1, mask2, mask3)
104 | return x
105 |
106 |
107 | class SkipInputAutoencoder(SkipAutoencoder):
108 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
109 | super().__init__()
110 |
111 | self.encoder = SkipInputEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
112 | self.decoder = SkipInputDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
113 |
114 | def forward(self, x):
115 | x, skip1, skip2, skip3, input1, input2, input3 = self.encoder(x)
116 | x = self.decoder(x, skip1, skip2, skip3, input1, input2, input3)
117 | return x
118 |
119 |
120 | # Dual Path Autoencoders
121 |
122 | class DualMaskAutoencoder(SkipAutoencoder):
123 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
124 | super().__init__()
125 |
126 | self.encoder = DualMaskEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
127 | self.decoder = DualMaskDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
128 |
129 |
130 | class DualMaskMapAutoencoder(SkipAutoencoder):
131 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
132 | super().__init__()
133 |
134 | self.encoder = DualMaskMapEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
135 | self.decoder = DualMaskMapDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
136 |
137 |
138 | class DualMapAutoencoder(SkipAutoencoder):
139 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
140 | super().__init__()
141 |
142 | self.encoder = DualMapEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
143 | self.decoder = DualMapDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
144 |
145 |
146 | class DualMapMaskAutoencoder(SkipAutoencoder):
147 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
148 | super().__init__()
149 |
150 | self.encoder = DualMapMaskEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
151 | self.decoder = DualMapMaskDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
152 |
153 |
154 | class DualInputAutoencoder(SkipAutoencoder):
155 | def __init__(self, enc_in=2, enc_out=4, dec_out=1, n_dim=27, leaky_relu_alpha=0.3):
156 | super().__init__()
157 |
158 | self.encoder = DualInputEncoder(enc_in, enc_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
159 | self.decoder = DualInputDecoder(enc_out, dec_out, n_dim, leaky_relu_alpha=leaky_relu_alpha)
--------------------------------------------------------------------------------
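End to end, any of these classes can be instantiated and run on a dummy batch. A sketch under illustrative assumptions (batch size, 32x32 grid, repository root on sys.path); the constructor defaults are the ones defined above.

import torch
from models.autoencoders import SkipMaskAutoencoder

model = SkipMaskAutoencoder()   # enc_in=2, enc_out=4, dec_out=1, n_dim=27
x = torch.randn(4, 2, 32, 32)   # channel 0: sampled map, channel 1: mask
out = model(x)                  # (4, 1024): flattened 32x32 map estimate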
/notebooks/Evaluate_Model.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Google Colab"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 |     "You can use the button below to open this notebook in Google Colab. Note that changes made to the notebook in Colab will not be reflected on GitHub, nor can the notebook be saved on Colab without first making a copy. \n",
15 | "\n",
16 | "[](https://colab.research.google.com/github/nikitalokhmachev-ai/radio-map-estimation-public/blob/main/notebooks/Evaluate_Model.ipynb)"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 |     "If opened in Colab, set `using_colab` to `True` in the code block below, then run the second and (optionally) third blocks. The second block will clone the GitHub repository into Colab's local storage in order to load the models and other functions. The third block will connect to Google Drive (user login required), which allows the Colab notebook to read and write data to the drive (e.g. training data or evaluation results)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "using_colab = False"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "if using_colab:\n",
42 | " %cd /content/\n",
43 | " !rm -rf /content/radio-map-estimation-public\n",
44 | " !git clone https://github.com/nikitalokhmachev-ai/radio-map-estimation-public.git\n",
45 | " !pip install -q -r /content/radio-map-estimation-public/colab_requirements.txt"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "if using_colab:\n",
55 | " from google.colab import drive\n",
56 | " drive.mount('/content/drive')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "# Check GPU"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "It is recommended to run this notebook with GPU support. If you have an Nvidea graphics card and drivers installed, the following block of code should show the details of the installed GPU."
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "!nvidia-smi"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "# Untar Testing Data"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "metadata": {},
92 | "source": [
93 | "In the code block below, specify the path to the saved testing data in tar format. This will untar the data into a folder of the same name in the parent directory of this notebook."
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {
100 | "id": "GUF595UIxGlm"
101 | },
102 | "outputs": [],
103 | "source": [
104 | "!tar -xkf '/path/to/saved/tar/file' -C '/path/to/save/untarred/files'"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "metadata": {},
110 | "source": [
111 | "# Import Packages"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "metadata": {
118 | "id": "zWfrHtpz0pbf"
119 | },
120 | "outputs": [],
121 | "source": [
122 | "import torch\n",
123 | "import numpy as np\n",
124 | "\n",
125 | "import os\n",
126 | "import glob\n",
127 | "import random\n",
128 | "import pickle"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "# Import model architectures and data structures\n",
138 | "\n",
139 | "os.chdir('path/to/repository')\n",
140 | "from test_utils import get_model_error"
141 | ]
142 | },
143 | {
144 | "cell_type": "markdown",
145 | "metadata": {},
146 | "source": [
147 | "# Set Hyperparameters"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "# Set random seed, define device\n",
157 | "\n",
158 | "seed = 3\n",
159 | "torch.manual_seed(seed)\n",
160 | "torch.use_deterministic_algorithms(True)\n",
161 | "np.random.seed(seed)\n",
162 | "random.seed(seed)\n",
163 | "\n",
164 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "metadata": {},
171 | "outputs": [],
172 | "source": [
173 | "# Set batch size\n",
174 | "test_batch_size = 1024\n",
175 | "\n",
176 | "# Manually set values for buildings, unsampled locations, and sampled locations in the environment mask. \n",
177 | "# For the models in the PIMRC paper, these are set to \"None\", meaning they keep the default values of -1, 0, and 1 respectively.\n",
178 | "building_value = None\n",
179 | "unsampled_value = None\n",
180 | "sampled_value = None"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "Set the folder to load trained models from. All models in the selected model_folder will be tested. Additionally create and specify a folder to save the results to, and specify the paths to the saved data and data scaler (located within this repository)."
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "metadata": {
194 | "id": "w1ax78Ucwaa8"
195 | },
196 | "outputs": [],
197 | "source": [
198 | "# Specify folder containing trained models\n",
199 | "model_folder = '/folder/with/trained/models'\n",
200 | "\n",
201 | "# Specify path to untarred test data\n",
202 | "test_data_folder = '/path/to/untarred/testing/data'\n",
203 | "\n",
204 | "# Specify path to data scaler\n",
205 | "scaler_path = 'scalers/minmax_scaler_zero_min134.joblib'\n",
206 | "\n",
207 | "# Set folder to save current results\n",
208 | "results_folder = '/folder/to/save/results'\n",
209 | "if not os.path.exists(results_folder):\n",
210 | " os.makedirs(results_folder)"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": null,
216 | "metadata": {
217 | "id": "w21gqRPMvVkw"
218 | },
219 | "outputs": [],
220 | "source": [
221 | "model_names = glob.glob(os.path.join(model_folder, '*.pth'))\n",
222 | "for model_path in model_names:\n",
223 | " error = get_model_error(test_data_folder, test_batch_size, model_path, scaler_path, building_value=building_value, sampled_value=sampled_value)\n",
224 | " filename = os.path.basename(model_path).split('.')[0] + '.pickle'\n",
225 | " with open(os.path.join(results_folder, filename), 'wb') as f:\n",
226 | " pickle.dump(error, f)"
227 | ]
228 | }
229 | ],
230 | "metadata": {
231 | "accelerator": "GPU",
232 | "colab": {
233 | "gpuType": "T4",
234 | "machine_shape": "hm",
235 | "provenance": []
236 | },
237 | "kernelspec": {
238 | "display_name": "Python 3",
239 | "name": "python3"
240 | },
241 | "language_info": {
242 | "name": "python"
243 | }
244 | },
245 | "nbformat": 4,
246 | "nbformat_minor": 0
247 | }
248 |
--------------------------------------------------------------------------------
/dataset/map_generator.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Yves Teganya and Daniel Romero
2 | #
3 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
4 | # arXiv preprint arXiv:2005. 05964, 2020.
5 | #
6 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
7 | #
8 | # Adapted by William Locke and Nikita Lokhmachev
9 | # Import paths were changed according to the current repository structure,
10 | # but all other code and comments are unchanged.
11 |
12 | from abc import abstractmethod
13 | from communications import dbm_to_natural, natural_to_db, db_to_natural
14 | import numpy as np
15 | import matplotlib.pyplot as plt
16 |
17 |
18 | class MapGenerator:
19 | """ARGUMENTS:
20 |
21 | n_grid_points_x : number of grid points along x-axis
22 | n_grid_points_y : number of grid points along y-axis
23 |
24 |
25 |
26 | """
27 |
28 | def __init__(self,
29 | x_length=100,
30 | y_length=100,
31 | n_grid_points_x=32,
32 | n_grid_points_y=32,
33 | m_basis_functions=np.array([[1]]),
34 | noise_power_interval=None
35 | ):
36 | self.x_length = x_length
37 | self.y_length = y_length
38 | self.n_grid_points_x = n_grid_points_x
39 | self.n_grid_points_y = n_grid_points_y
40 | self.m_basis_functions = m_basis_functions # num_bases x len(v_sampled_frequencies) matrix
41 | self.noise_power_interval = noise_power_interval
42 |
43 | def generate(self):
44 | """
45 | Returns:
46 |         `map`: Nx x Ny x Nf tensor with the map at each sampled frequency.
47 | `meta_map`: Nx x Ny, each entry is 1 if that grid point is inside a building; 0 otherwise.
48 | """
49 |
50 | num_freqs = self.m_basis_functions.shape[1]
51 | num_bases = self.m_basis_functions.shape[0]
52 |
53 | num_signal_bases = num_bases - 1 if self.noise_power_interval is not None else num_bases
54 |
55 | # Obtain one power map per basis function
56 | l_signal_maps, m_meta_map = self.generate_power_map_per_freq(num_signal_bases)
57 |
58 | # Obtain power at each sampled frequency
59 | t_freq_map = np.zeros(shape=(l_signal_maps[0].shape[0], l_signal_maps[0].shape[1], num_freqs))
60 | for ind_sampled_freq in range(num_freqs):
61 | t_freq_map_all_bs = np.zeros(shape=(l_signal_maps[0].shape[0], l_signal_maps[0].shape[1], num_signal_bases))
62 | for ind_central_freq in range(num_signal_bases):
63 | t_freq_map_all_bs[:, :, ind_central_freq] = l_signal_maps[ind_central_freq] * self.m_basis_functions[
64 | ind_central_freq,
65 | ind_sampled_freq]
66 | t_freq_map[:, :, ind_sampled_freq] = np.sum(t_freq_map_all_bs, axis=2)
67 |
68 | if self.noise_power_interval is not None:
69 | noise_power_interval_nat = dbm_to_natural(np.array(self.noise_power_interval))
70 |
71 | # add noise to the map
72 | noise_power = (noise_power_interval_nat[1] - noise_power_interval_nat[0]) * np.random.rand() + \
73 | noise_power_interval_nat[0]
74 | t_freq_map += noise_power
75 |
76 | # add noise map for coefficient visualization
77 | l_signal_maps.append(noise_power * np.ones((l_signal_maps[0].shape[0], l_signal_maps[0].shape[1])))
78 |
79 | # Output channel power maps
80 | t_channel_pow = natural_to_db(np.transpose(np.array(l_signal_maps), (1, 2, 0)))
81 |
82 | return natural_to_db(t_freq_map), m_meta_map, t_channel_pow
83 |
84 | # TO BE IMPLEMENTED BY ALL DESCENDANTS
85 | @abstractmethod
86 | def generate_power_map_per_freq(self, num_bases):
87 | """Returns:
88 |
89 | - a list of length num_bases, each one
90 | with the power map of the corresponding basis function.
91 |
92 |     - a meta mask: an Nx x Ny array whose entries are 1 inside buildings and 0 otherwise
93 |
94 | """
95 |
96 | pass
97 |
98 | @staticmethod
99 | def generate_bases(
100 | v_central_frequencies=np.array([0]),
101 | v_sampled_frequencies=np.array([0]),
102 | fun_base_function=lambda freq: 1,
103 | b_noise_function=True,
104 | plot_bases=False):
105 |
106 | """`fun_base_function` is a function of 1 frequency argument.
107 |
108 |     If `b_noise_function==False`, then this function returns a
109 | len(v_central_frequencies) x len(v_sampled_frequencies) matrix
110 | whose (i,j)-th entry is `fun_base_function(
111 |     v_sampled_frequencies[j] - v_central_frequencies[i] )`.
112 |
113 | Else, the returned matrix equals the aforementioned matrix
114 | with an extra row of all 1s.
115 |
116 | """
117 |
118 | num_bases = np.size(v_central_frequencies)
119 | num_freq = len(v_sampled_frequencies)
120 |
121 | # Generate the basis functions
122 | bases_vals = np.zeros((num_bases, num_freq))
123 | for ind_base in range(num_bases):
124 | for ind_sampled_freq in range(num_freq):
125 | bases_vals[ind_base, ind_sampled_freq] = fun_base_function(
126 | v_sampled_frequencies[ind_sampled_freq] - v_central_frequencies[ind_base])
127 |
128 | # Add the noise function
129 | if b_noise_function:
130 | bases_vals = np.vstack((bases_vals, np.ones((1, num_freq))))
131 |
132 | # Normalize bases
133 | for ind_base in range(bases_vals.shape[0]):
134 | bases_vals[ind_base, :] = bases_vals[ind_base, :] / sum(bases_vals[ind_base, :])
135 |
136 | # Visualize bases
137 | if plot_bases:
138 | num_plot_points = 400
139 | f_range = np.linspace(v_sampled_frequencies[0], v_sampled_frequencies[-1], num_plot_points)
140 |
141 | v_amplitudes = [0.2, 1] # np.zeros((num_bases + 1, 1)) # db_to_natural(np.array([-97, -90]))
142 |
143 | ammpl_noise_inter = [0.0, 0.02] # db_to_natural(np.array([-0, -0]))
144 | noise = 0.05 * np.ones((1, len(f_range))) # (ammpl_noise_inter[1] - ammpl_noise_inter[0]) * np.random.rand(len(f_range)) + \
145 | # ammpl_noise_inter[0]
146 |
147 | bases_vals_plot = np.zeros((num_bases, len(f_range)))
148 | for ind_base in range(num_bases):
149 | ampl_ind_base = (v_amplitudes[1] - v_amplitudes[
150 | 0]) * np.random.rand() + v_amplitudes[0]
151 | for ind_freq in range(len(f_range)):
152 | bases_vals_plot[ind_base, ind_freq] = ampl_ind_base * fun_base_function(
153 | f_range[ind_freq] - v_central_frequencies[ind_base])
154 | if b_noise_function:
155 | bases_vals_plot = np.vstack((bases_vals_plot, noise))
156 |
157 | # Normalize bases for plotting
158 | # for ind_base in range(bases_vals_plot.shape[0]):
159 | # bases_vals_plot[ind_base, :] = bases_vals_plot[ind_base, :] / sum(bases_vals_plot[ind_base, :])
160 |
161 | fig = plt.figure()
162 | n_curves = bases_vals.shape[0]
163 | for ind_curv in range(n_curves):
164 | bases_vals_ind_plot = bases_vals_plot[ind_curv, :]
165 | label = r'$ \pi_%d(\mathbf{x}) \beta_%d (f)$' % (ind_curv + 1, ind_curv + 1)
166 | # label = r'$ \pi_{s%d} \beta_%d (f)$' % (ind_curv + 1, ind_curv + 1)
167 | plt.plot(f_range / 1e6, bases_vals_ind_plot,
168 | label=label)
169 | sum_base = np.sum(bases_vals_plot, axis=0)
170 | plt.plot(f_range / 1e6, sum_base, linestyle='-', color='m',
171 | label=r'$\sum_b \pi_b(\mathbf{x}) \beta_b(f)$'
172 | # label = r'$\sum_b \pi_{sb} \beta_b(f)$'
173 | )
174 | plt.legend()
175 | plt.xlabel('f [MHz]')
176 | plt.ylabel(r'$ \Psi(\mathbf{x}, f)$')
177 | # plt.ylabel(r'$ \Upsilon_s(f)$')
178 | plt.grid()
179 | plt.show()
180 | quit()
181 | return bases_vals
182 |
183 | @staticmethod
184 | def gaussian_base(x, scale):
185 | return np.exp(-np.power(x, 2.) / (2 * np.power(scale, 2.)))
186 |
187 | @staticmethod
188 | def raised_cosine_base(freq, roll_off, bandwidth):
189 | gamma_l = (1 - roll_off) * bandwidth / 2
190 | gamma_u = (1 + roll_off) * bandwidth / 2
191 | if abs(freq) <= gamma_l:
192 | return 1
193 | elif gamma_l < abs(freq) <= gamma_u:
194 | return 1 / 2 * (1 + np.cos(np.pi / (bandwidth * roll_off) * (abs(freq) - gamma_l)))
195 | else:
196 | return 1e-13
197 |
198 | @staticmethod
199 | def ofdm_base(freq, num_carriers, bandwidth):
200 | pass
201 |
--------------------------------------------------------------------------------
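A minimal sketch of building basis functions with MapGenerator.generate_bases, assuming it is run from the dataset directory (the frequency values below are hypothetical; gaussian_base is one of the static helpers defined above):

    import numpy as np
    from map_generator import MapGenerator

    v_central = np.array([1.40e9, 1.45e9])        # hypothetical central frequencies
    v_sampled = np.linspace(1.39e9, 1.46e9, 32)   # hypothetical sampled frequencies
    bases = MapGenerator.generate_bases(
        v_central_frequencies=v_central,
        v_sampled_frequencies=v_sampled,
        fun_base_function=lambda f: MapGenerator.gaussian_base(f, scale=1e7),
        b_noise_function=True)
    # bases has shape (len(v_central) + 1, len(v_sampled)): one row per basis
    # plus a noise row; each row is normalized to sum to 1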
/dataset/gudmundson_map_generator.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Yves Teganya and Daniel Romero
2 | #
3 | # Y. Teganya and D. Romero, ‘Deep Completion Autoencoders for Radio Map Estimation’,
4 | # arXiv preprint arXiv:2005. 05964, 2020.
5 | #
6 | # Source code: https://github.com/fachu000/deep-autoencoders-cartography
7 | #
8 | # Adapted by William Locke and Nikita Lokhmachev
9 | # Import paths were changed according to the current repository structure.
10 | # We also made changes at lines 77-78, 90, 95, and 175-177 to ensure the code ran without errors.
11 | # We have noted these changes in the comments immediately above those lines.
12 |
13 | import numpy as np
14 | from numpy import linalg as npla
15 | from scipy.spatial import distance
16 | from scipy.stats import multivariate_normal
17 | from map_generator import MapGenerator
18 | from communications import dbm_to_natural, db_to_natural, natural_to_db
19 |
20 |
21 | class GudmundsonMapGenerator(MapGenerator):
22 | """
23 | Arguments:
24 |
25 | Only one of the following can be set:
26 |
27 | # EDIT: changed to reflect order of array axes
28 | tx_power: num_bases x num_sources matrix with the transmitter power.
29 |
30 | tx_power_interval: length-2 vector. Tx. power of all sources at all bases
31 | is chosen uniformly at random in the interval
32 | [tx_power_interval[0], tx_power_interval[1]].
33 |
34 | path_loss_exp: path loss exponent of the propagation environment
35 | eps: small constant avoiding large values of power for small distances
36 | corr_shad_sigma2 : the variance of the shadowing in dB in correlated shadowing
37 |     corr_base: correlation coefficient; see the Gudmundson model.
38 |
39 | Output shape:
40 | 2D tensor with shape:
41 | (n_grid_points_x, n_grid_points_y)
42 |
43 | """
44 |
45 | def __init__(self,
46 | *args,
47 | v_central_frequencies=None,
48 | tx_power=None,
49 | n_tx=2,
50 | tx_power_interval=None,
51 | path_loss_exp=3,
52 | corr_shad_sigma2=10,
53 | corr_base=0.95,
54 | b_shadowing=False,
55 | num_precomputed_shadowing_mats=1,
56 | **kwargs
57 | ):
58 |
59 | super(GudmundsonMapGenerator, self).__init__(*args, **kwargs)
60 |
61 | assert not(tx_power is not None and tx_power_interval is not None), "tx_power and tx_power_interval cannot be simultaneously set."
62 | assert v_central_frequencies is not None, "Argument `v_central_frequencies` must be provided."
63 | self.v_central_frequencies = v_central_frequencies
64 | self.tx_power = tx_power
65 | self.n_tx = n_tx
66 | self.tx_power_interval = tx_power_interval
67 | self.path_loss_exp = path_loss_exp
68 | self.corr_shad_sigma2 = corr_shad_sigma2
69 | self.corr_base = corr_base
70 | self.b_shadowing = b_shadowing
71 | self.num_precomputed_shadowing_mats = num_precomputed_shadowing_mats
72 | if self.b_shadowing:
73 | self.shadowing_dB = self.generate_shadowing(size=(self.num_precomputed_shadowing_mats,
74 | self.n_tx)) # buffer to store precomputed maps
75 | self.ind_shadowing_mat = 0 # next returned map will correspond to self.shadowing_dB[self.ind_shadowing_mat,:,:]
76 | self.eps = min(self.x_length / self.n_grid_points_x, self.y_length / self.n_grid_points_y)
77 |
78 | # EDIT: Added if statement to assert that n_tx matches second axis (num_sources) in tx_power
79 | if self.tx_power is not None:
80 | assert self.n_tx == self.tx_power.shape[1]
81 |
82 |
83 | def generate_power_map_per_freq(self, num_bases):
84 |
85 | assert len(self.v_central_frequencies) == num_bases
86 |
87 |         l_maps = [] # list of Nx x Ny matrices, one per central frequency
88 |
89 | # Convert to natural units
90 |
91 | # EDIT: Original code, ``if self.tx_power_interval:``, raised errors
92 | if self.tx_power_interval is not None:
93 | tx_power_interval_nat = dbm_to_natural(np.array(self.tx_power_interval))
94 | n_sources = self.n_tx
95 |
96 | # EDIT: Original code, ``if self.tx_power.all():``, raised errors
97 | elif self.tx_power is not None:
98 | tx_power_nat = dbm_to_natural(self.tx_power)
99 | n_sources = self.tx_power.shape[1]
100 | else:
101 | tx_power_interval_nat = None
102 | tx_power_nat = None
103 |             raise Exception("at least one of tx_power or tx_power_interval must be set.")
104 |
105 | n_bases = self.m_basis_functions.shape[0]
106 |
107 | x_grid, y_grid = self.generate_grid(self.x_length, self.y_length, self.n_grid_points_x,
108 | self.n_grid_points_y)
109 |
110 | source_pos_x = np.min(np.min(x_grid)) + (np.max(np.max(x_grid)) - np.min(np.min(x_grid))) * \
111 | np.random.rand(n_sources, 1)
112 | source_pos_y = np.min(np.min(y_grid)) + (np.max(np.max(y_grid)) - np.min(np.min(y_grid))) * \
113 | np.random.rand(n_sources, 1)
114 | source_pos = np.concatenate((source_pos_x, source_pos_y), axis=1)
115 | c_light = 3e8
116 |
117 | for freq in self.v_central_frequencies:
118 | k_val = (c_light / (4 * np.pi * freq)) ** 2
119 |             # generate the pathloss component
120 | path_loss_comp = np.zeros((n_sources, self.n_grid_points_x, self.n_grid_points_y))
121 | for ind_source in range(n_sources):
122 | for ind_y in range(self.n_grid_points_y):
123 | one_y_rep = y_grid[ind_y]
124 | all_p_oney = np.array([x_grid[ind_y], one_y_rep])
125 | dist_all_p_oney = npla.norm(np.subtract(source_pos[ind_source, :].reshape(-1, 1), all_p_oney),
126 | axis=0)
127 | all_power_oney_all_bases = np.zeros((1, self.n_grid_points_x, n_bases))
128 | for ind_base in range(n_bases):
129 |                     if self.tx_power_interval is not None:
130 | tx_power_to_use = (tx_power_interval_nat[1] - tx_power_interval_nat[
131 | 0]) * np.random.rand() + tx_power_interval_nat[0]
132 | else:
133 | tx_power_to_use = tx_power_nat[ind_base, ind_source]
134 | all_power_oney = tx_power_to_use * k_val / (
135 | (dist_all_p_oney + self.eps) ** self.path_loss_exp)
136 | all_power_oney_all_bases[:, :, ind_base] = all_power_oney
137 | path_loss_comp[ind_source, ind_y, :] = np.sum(all_power_oney_all_bases, axis=2)[0]
138 |
139 |             # add the shadowing component
140 | if self.b_shadowing:
141 | # pathloss combined with shadowing
142 | shadow_map_ind = self.next_shadowing_dB()
143 | shadowing_reshaped = np.reshape(shadow_map_ind, (n_sources, self.n_grid_points_x, self.n_grid_points_y),
144 | order='F')
145 | # shadowing_reshaped_rep = np.repeat(shadowing_reshaped[:, :, :, np.newaxis], self.n_freqs, axis=3)
146 | map_with_shadowing = natural_to_db(path_loss_comp) + shadowing_reshaped
147 | generated_map = sum(db_to_natural(map_with_shadowing))
148 | else:
149 | generated_map = sum(path_loss_comp)
150 |
151 | l_maps.append(generated_map) # Nx x Ny matrices
152 |
153 | return l_maps, np.zeros((self.n_grid_points_x, self.n_grid_points_y))
154 |
155 |
156 | def next_shadowing_dB(self):
157 | if not self.ind_shadowing_mat < self.shadowing_dB.shape[0]:
158 | # No maps left
159 | self.shadowing_dB = self.generate_shadowing(size=(self.num_precomputed_shadowing_mats,
160 | self.n_tx))
161 | self.ind_shadowing_mat = 0
162 | # There are maps left
163 | shadowing_map = self.shadowing_dB[self.ind_shadowing_mat, :, :]
164 | self.ind_shadowing_mat += 1
165 | return shadowing_map
166 |
167 | def generate_shadowing(self, size=(1, 1)):
168 | x_grid, y_grid = self.generate_grid(self.x_length, self.y_length, self.n_grid_points_x, self.n_grid_points_y)
169 | vec_x_grid = x_grid.flatten('F')
170 | vec_y_grid = y_grid.flatten('F')
171 | all_points = list(zip(vec_x_grid, vec_y_grid))
172 | dist_pairs = distance.cdist(np.asarray(all_points), np.asarray(all_points), 'euclidean')
173 | cov_mat = self.corr_shad_sigma2 * self.corr_base ** dist_pairs
174 | shadowing_dB = multivariate_normal.rvs(mean=np.zeros((len(vec_x_grid))), cov=cov_mat, size=size)
175 |
176 | # EDIT: Added for loop expanding dimension of shadowing_dB for any axis with size of 1.
177 | for i in range(len(size)):
178 | if size[i] == 1:
179 | shadowing_dB = np.expand_dims(shadowing_dB, axis=i)
180 |
181 | return shadowing_dB
182 |
183 | def generate_grid(self, x_len, y_len, n_points_x, n_points_y):
184 | x_grid, y_grid = np.meshgrid(np.linspace(0, x_len, n_points_x),
185 | np.linspace(0, y_len, n_points_y))
186 | return x_grid, y_grid
187 |
--------------------------------------------------------------------------------
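A minimal sketch of generating one map with GudmundsonMapGenerator, assuming it is run from the dataset directory (the frequency and power values are hypothetical; with the default m_basis_functions of np.array([[1]]) there is a single basis, so exactly one central frequency is required):

    import numpy as np
    from gudmundson_map_generator import GudmundsonMapGenerator

    gen = GudmundsonMapGenerator(
        v_central_frequencies=np.array([1.4e9]),  # one basis -> one central frequency
        tx_power=np.array([[10.0, 12.0]]),        # num_bases x num_sources, in dBm
        n_tx=2)
    t_map_db, meta_map, channel_pow = gen.generate()
    # t_map_db: 32 x 32 x 1 power map in dB; meta_map: 32 x 32 zeros (no buildings)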
/test_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import torch
4 | import pickle
5 | import joblib
6 | import numpy as np
7 | import pandas as pd
8 | import plotly.express as px
9 | import plotly.graph_objects as go
10 |
11 | from data_utils import MapDataset
12 |
13 | def get_test_dl(test_data_folder, test_batch_size, scaler, building_value, unsampled_value, sampled_value, percentage=0.01):
14 | test_pickle_path = os.path.join(test_data_folder, f'test_{percentage:.2f}%_*.pickle')
15 | test_pickles = glob.glob(test_pickle_path)
16 | test_ds = MapDataset(test_pickles, scaler, building_value=building_value, unsampled_value=unsampled_value, sampled_value=sampled_value)
17 | test_dl = torch.utils.data.DataLoader(test_ds, batch_size=test_batch_size, shuffle=False, num_workers=1)
18 | return test_dl
19 |
20 | def get_model_error(test_data_folder, test_batch_size, model_path, scaler_path, building_value=None, unsampled_value=None, sampled_value=None):
21 | with open(scaler_path, 'rb') as f:
22 | scaler = joblib.load(f)
23 |
24 | test_dls = []
25 | percentages = np.arange(0.02, 0.42, 0.02)
26 | for percentage in percentages:
27 | dl = get_test_dl(
28 | test_data_folder, test_batch_size, scaler, building_value=building_value, unsampled_value=unsampled_value, sampled_value=sampled_value, percentage=percentage)
29 | test_dls.append(dl)
30 | model = torch.load(model_path, weights_only=False, map_location=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
31 | model.eval()
32 |
33 | error = [model.evaluate(test_dl, scaler) for test_dl in test_dls]
34 | return error
35 |
36 | def get_error_df(results_folder, filename, model_name = None):
37 | percentages = np.arange(0.02, 0.42, 0.02)
38 | with open(os.path.join(results_folder, filename), 'rb') as f:
39 | error = pickle.load(f)
40 |
41 | if model_name:
42 | df = pd.DataFrame.from_dict({'error':error, 'percentages':percentages, 'model': model_name})
43 | else:
44 | df = pd.DataFrame.from_dict({'error':error, 'percentages':percentages, 'model': os.path.basename(filename).split('.')[0]})
45 |
46 | return df
47 |
48 |
49 | def get_sample_error(results_folder, filename, model_name = None):
50 | percentages = np.arange(0.02, 0.42, 0.02)
51 | with open(os.path.join(results_folder, filename), 'rb') as f:
52 | error = pickle.load(f)
53 | if model_name:
54 | df = pd.DataFrame.from_dict({'Error':error, 'Percentages':percentages, 'Model': model_name})
55 | else:
56 | df = pd.DataFrame.from_dict({'Error':error, 'Percentages':percentages, 'Model': os.path.basename(filename).split('.')[0]})
57 | return df
58 |
59 |
60 | def visualize_sample_error(results_folder, width=800, height=700, text_size=18, display_names=None, line_styles=None, marker_size=10, consistent_colors=None, y_range=None, x_range=None):
61 | filenames = glob.glob(os.path.join(results_folder, '*.pickle'))
62 | dfs = [get_sample_error(results_folder, filename) for filename in filenames]
63 | dfs = pd.concat(dfs)
64 | dfs = dfs.drop_duplicates(['Model', 'Percentages'])
65 | if display_names:
66 | dfs = dfs.replace({'Model': display_names})
67 | if consistent_colors:
68 | dfs['Colors'] = dfs['Model'].copy()
69 |         dfs = dfs.replace({'Colors': consistent_colors})
70 | fig = px.line(dfs, x="Percentages", y="Error", color="Model", line_group="Model", line_shape="spline", render_mode="svg", markers=True, width=width, height=height)
71 | else:
72 | fig = px.line(dfs, x="Percentages", y="Error", line_group="Model", line_shape="spline", render_mode="svg", markers=True, width=width, height=height)
73 | fig.update_layout(shapes=[go.layout.Shape(type='rect', xref='paper', yref='paper', x0=0, y0=0, x1=1, y1=1, line={'width': 1, 'color': 'black', 'dash':'solid'})])
74 | fig.update_xaxes(
75 | ticks="outside",
76 | tickson="labels")
77 | fig.update_yaxes(
78 | ticks="outside",
79 | tickson="labels")
80 | fig.update_layout(plot_bgcolor='rgba(0, 0, 0, 0)', yaxis_title='RMSE (dB)')
81 | fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
82 | fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
83 | fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="right", x=0.99))
84 | fig.update_layout(legend_font=dict(size=20))
85 | fig.update_layout(legend_title=None)
86 | fig.update_layout(legend_borderwidth=1)
87 | fig.update_layout(legend_bgcolor='hsla(1,1,1,0.5)')
88 | fig.update_layout(font=dict(size=text_size))
89 | fig.update_xaxes(title=dict(text='Sampling Rate'))
90 | fig.update_traces(mode='lines+markers', marker=dict(size=marker_size))
91 | if y_range:
92 | fig.update_yaxes(range=y_range)
93 | if x_range:
94 | fig.update_xaxes(range=x_range)
95 | if line_styles:
96 | for i, model in enumerate(fig.data):
97 | model.line['dash'] = line_styles[i][0]
98 | model.marker['symbol'] = line_styles[i][1]
99 | fig.show()
100 | return fig
101 |
102 |
103 | def get_average_error(results_folder):
104 | filenames = glob.glob(os.path.join(results_folder, '*.pickle'))
105 | dfs = [get_sample_error(results_folder, filename) for filename in filenames]
106 | avgs = [(np.sqrt(df['Error'].pow(2).mean()), df.loc[0,'Model']) for df in dfs]
107 | avg_dfs = pd.DataFrame(avgs, columns=['Avg Error', 'Model'])
108 | avg_dfs = avg_dfs.drop_duplicates()
109 | return avg_dfs
110 |
111 |
112 | def visualize_average_error(avg_dfs, display_names=None, baseline_name='Baseline', category_orders={},
113 | width=550, height=500, text_size=19, y_range=None):
114 | show_dfs = avg_dfs[avg_dfs['Model']!=baseline_name]
115 | if display_names:
116 | show_dfs = show_dfs.replace({'Model': display_names})
117 | baseline = avg_dfs.loc[avg_dfs['Model']==baseline_name, 'Avg Error'].item()
118 | fig = px.bar(show_dfs, x='Model', y='Avg Error', width=width, height=height, category_orders=category_orders)
119 | fig.update_layout(shapes=[go.layout.Shape(type='rect', xref='paper', yref='paper', x0=0, y0=0, x1=1, y1=1, line={'width': 1, 'color': 'black', 'dash':'solid'})])
120 | fig.update_xaxes(
121 | ticks="outside",
122 | tickson="labels")
123 | fig.update_yaxes(
124 | ticks="outside",
125 | tickson="labels")
126 | fig.add_hline(y=baseline, line_dash='dash', annotation_text='Baseline', annotation_position='bottom right')
127 | fig.update_layout(plot_bgcolor='rgba(0, 0, 0, 0)', yaxis_title='RMSE (dB)')
128 | fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
129 | fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
130 | fig.update_layout(font=dict(size=text_size))
131 | if y_range:
132 | fig.update_yaxes(range=y_range)
133 | fig.update_xaxes(title=None)
134 |
135 | fig.show()
136 | return fig
137 |
138 |
139 | def visualize_hist(results_folder, display_names=None, baseline_name='Baseline',
140 | text_size=19, width=800, height=700, y_range=None):
141 | filenames = glob.glob(os.path.join(results_folder, '*.pickle'))
142 | df_percentage = []
143 | for filename in filenames:
144 | df = get_sample_error(results_folder, filename, model_name = None)
145 | model_name = df['Model'].iloc[0]
146 | if model_name == baseline_name:
147 | continue
148 | bin_splits = np.arange(0.1, 0.5, 0.1)
149 | less_ten, ten_twenty = df.query('Percentages <= 0.1')['Error'].pow(2).mean()**0.5, df.query('0.1 < Percentages <= 0.2')['Error'].pow(2).mean()**0.5
150 | twenty_thirty, greater_thirty = df.query('0.2 < Percentages <= 0.3')['Error'].pow(2).mean()**0.5, df.query('0.3 < Percentages <= 0.4')['Error'].pow(2).mean()**0.5
151 | df_model_p = pd.DataFrame.from_dict({'Error':[less_ten, ten_twenty, twenty_thirty, greater_thirty],
152 | 'Category':['1%-10%', '11%-20%', '21%-30%', '31%-40%'],
153 | 'Model Name':[model_name, model_name, model_name, model_name]})
154 | df_percentage.append(df_model_p)
155 |
156 | df_percentage = pd.concat(df_percentage)
157 | df_percentage = df_percentage.drop_duplicates()
158 | if display_names:
159 | df_percentage = df_percentage.replace({'Model Name': display_names})
160 |
161 | order = {'Category':['1%-10%', '11%-20%', '21%-30%', '31%-40%']}
162 | df_top_percent = df_percentage[df_percentage['Category'] == '31%-40%']
163 | top_percent_order = df_top_percent.sort_values('Error', ascending=False)['Model Name'].tolist()
164 | order['Model Name'] = top_percent_order
165 |
166 | fig = px.bar(df_percentage, x="Category", y="Error", color="Model Name", category_orders=order, barmode="group")
167 | fig.update_layout(shapes=[go.layout.Shape(type='rect', xref='paper', yref='paper', x0=0, y0=0, x1=1, y1=1, line={'width': 1, 'color': 'black', 'dash':'solid'})])
168 | fig.update_xaxes(
169 | ticks="outside",
170 | tickson="labels")
171 | fig.update_yaxes(
172 | ticks="outside",
173 | tickson="labels")
174 | fig.update_layout(plot_bgcolor='rgba(0, 0, 0, 0)', yaxis_title='RMSE (dB)')
175 | fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
176 | fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')
177 | fig.update_layout(width=width, height=height)
178 | fig.update_layout(font=dict(size=text_size))
179 | fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="right", x=0.99))
180 | fig.update_layout(legend_font=dict(size=20))
181 | fig.update_layout(legend_title=None)
182 | fig.update_layout(legend_borderwidth=1)
183 | fig.update_layout(legend_bgcolor='hsla(1,1,1,0.5)')
184 | fig.update_xaxes(title=dict(text='Sampling Rate'))
185 | if y_range:
186 | fig.update_yaxes(range=y_range)
187 | fig.show()
188 | return fig
--------------------------------------------------------------------------------
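A minimal sketch of the evaluation flow these helpers support, mirroring the Evaluate_Model notebook (the model, data, and results paths below are hypothetical):

    import os, pickle
    from test_utils import get_model_error, visualize_sample_error, get_average_error

    # Evaluate one trained model across sampling rates 2%-40% and pickle the errors
    error = get_model_error('data/test_untarred', 1024, 'trained_models/Skip_Residual.pth',
                            'scalers/minmax_scaler_zero_min134.joblib')
    os.makedirs('results', exist_ok=True)
    with open(os.path.join('results', 'Skip_Residual.pickle'), 'wb') as f:
        pickle.dump(error, f)

    # Plot RMSE vs. sampling rate for every pickled result in the folder
    fig = visualize_sample_error('results')
    avg_df = get_average_error('results')  # one RMSE per model, averaged across rates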
/notebooks/Train_Model.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Google Colab"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "You can use the button below to open this notebook in Google Colab. Note that changes made to the notebook in Colab will not be reflected in Github, nor can the notebook be saved on Colab without first making a copy. \n",
15 | "\n",
16 | "[](https://colab.research.google.com/github/nikitalokhmachev-ai/radio-map-estimation-public/blob/main/notebooks/Train_Model.ipynb)"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "If opened in Colab, set `using_colab` to `True` in the code block below, then run the second and (optionally) third blocks. The second block will clone the github repository into Colab's local storage in order to load the models and other functions. The third block will connect to Google Drive (user login required), which allows the Colab notebook to read and write data to the drive (e.g. training data or evaluation results)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "using_colab = False"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "if using_colab:\n",
42 | " %cd /content/\n",
43 | " !rm -rf /content/radio-map-estimation-public\n",
44 | " !git clone https://github.com/nikitalokhmachev-ai/radio-map-estimation-public.git\n",
45 | " !pip install -q -r /content/radio-map-estimation-public/colab_requirements.txt"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "if using_colab:\n",
55 | " from google.colab import drive\n",
56 | " drive.mount('/content/drive')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {
62 | "id": "4pv6cjhpNqyz"
63 | },
64 | "source": [
65 | "# Check GPU"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "It is recommended to run this notebook with GPU support. If you have an Nvidea graphics card and drivers installed, the following block of code should show the details of the installed GPU."
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": null,
78 | "metadata": {
79 | "colab": {
80 | "base_uri": "https://localhost:8080/"
81 | },
82 | "id": "u_MZ4vNCW5DV",
83 | "outputId": "1c1eb835-7289-4cb1-f3e3-5ae03e7207d8"
84 | },
85 | "outputs": [],
86 | "source": [
87 | "!nvidia-smi"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "# Untar Training Data"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "metadata": {},
100 | "source": [
101 | "In the code block below, specify the path to the saved training data in tar format. This will untar the data into a folder of the same name in the parent directory of this notebook."
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": 2,
107 | "metadata": {
108 | "colab": {
109 | "base_uri": "https://localhost:8080/"
110 | },
111 | "id": "GUF595UIxGlm",
112 | "outputId": "855a7c95-f099-4d07-f389-351c0fa7858b"
113 | },
114 | "outputs": [],
115 | "source": [
116 | "# Train set\n",
117 | "!tar -xkf '/path/to/saved/tar/file' -C '/path/to/save/untarred/files'"
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "metadata": {
123 | "id": "80MkxCfJF9_T"
124 | },
125 | "source": [
126 | "# Import Packages"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": 3,
132 | "metadata": {
133 | "id": "zWfrHtpz0pbf"
134 | },
135 | "outputs": [],
136 | "source": [
137 | "# Import packages\n",
138 | "\n",
139 | "import torch\n",
140 | "import numpy as np\n",
141 | "\n",
142 | "import os\n",
143 | "import glob\n",
144 | "import joblib\n",
145 | "import random"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 4,
151 | "metadata": {
152 | "colab": {
153 | "base_uri": "https://localhost:8080/"
154 | },
155 | "id": "8xFgICgWbu8A",
156 | "outputId": "c53258f1-644b-4122-f207-47e6167ec636"
157 | },
158 | "outputs": [],
159 | "source": [
160 | "# Import model architectures and data structures\n",
161 | "\n",
162 | "os.chdir('path/to/repository')\n",
163 | "from data_utils import MapDataset\n",
164 | "\n",
165 | "from models.autoencoders import BaselineAutoencoder\n",
166 | "from models.autoencoders import SkipAutoencoder, SkipResidualAutoencoder, SkipMaskAutoencoder, SkipMaskMapAutoencoder\n",
167 | "from models.autoencoders import SkipMapAutoencoder, SkipMapMaskAutoencoder, SkipInputAutoencoder\n",
168 | "from models.autoencoders import DualMaskAutoencoder, DualMaskMapAutoencoder, DualMapAutoencoder, DualMaskMapAutoencoder, DualInputAutoencoder"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {
174 | "id": "I4MY7viXdRn9"
175 | },
176 | "source": [
177 | "# Set Hyperparameters"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": 5,
183 | "metadata": {
184 | "id": "wgsCKcuZdGdB"
185 | },
186 | "outputs": [],
187 | "source": [
188 | "# Set random seed, define device\n",
189 | "\n",
190 | "seed = 3\n",
191 | "torch.manual_seed(seed)\n",
192 | "torch.use_deterministic_algorithms(True)\n",
193 | "np.random.seed(seed)\n",
194 | "random.seed(seed)\n",
195 | "\n",
196 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 6,
202 | "metadata": {
203 | "id": "kQYvSUybdeAh"
204 | },
205 | "outputs": [],
206 | "source": [
207 | "# Set batch size, learning rate, and number of epochs\n",
208 | "train_batch_size = 256\n",
209 | "num_epochs = 1\n",
210 | "lr = 5e-4\n",
211 | "\n",
212 | "# Manually set values for buildings, unsampled locations, and sampled locations in the environment mask. \n",
213 | "# For the models in the PIMRC paper, these are set to \"None\", meaning they keep the default values of -1, 0, and 1 respectively.\n",
214 | "building_value = None\n",
215 | "unsampled_value = None\n",
216 | "sampled_value = None"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "metadata": {},
222 | "source": [
223 | "Specify the model architecture by selecting one of the classes imported above from models.autoencoders. Different hyperparameters can be set for each model, but the default values match the ones used in our experiments."
224 | ]
225 | },
226 | {
227 | "cell_type": "code",
228 | "execution_count": 7,
229 | "metadata": {},
230 | "outputs": [],
231 | "source": [
232 | "# Specify model type. Below we give an example for one of the models from the paper.\n",
233 | "model = SkipResidualAutoencoder().to(device)\n",
234 | "model_name = 'Skip_Residual.pth'"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "Before running the following code block, create a folder to save the trained models, then enter the path to that folder in the variable `model_folder`."
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": 8,
247 | "metadata": {},
248 | "outputs": [],
249 | "source": [
250 | "# Set where to save the trained model weights\n",
251 | "model_folder = 'path/to/save/trained/model'\n",
252 | "\n",
253 | "if not os.path.exists(model_folder):\n",
254 | " os.makedirs(model_folder)"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": 9,
260 | "metadata": {},
261 | "outputs": [],
262 | "source": [
263 | "# Identify paths to untarred training data and data scaler\n",
264 | "train_data_folder = 'path/to/untarred/training/data'\n",
265 | "scaler_path = 'scalers/minmax_scaler_zero_min134.joblib'\n",
266 | "\n",
267 | "assert os.path.isdir(train_data_folder)\n",
268 | "assert os.path.exists(scaler_path)"
269 | ]
270 | },
271 | {
272 | "cell_type": "markdown",
273 | "metadata": {},
274 | "source": [
275 | "# Load Training data into DataLoader"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 35,
281 | "metadata": {
282 | "colab": {
283 | "base_uri": "https://localhost:8080/"
284 | },
285 | "id": "QEpL79nFeQ-K",
286 | "outputId": "14aebd2b-921b-4b52-c4d7-1d3e84edc2ef"
287 | },
288 | "outputs": [
289 | {
290 | "name": "stderr",
291 | "output_type": "stream",
292 | "text": [
293 | "/Users/william/RadioMap/pimrc_test_env/lib/python3.9/site-packages/sklearn/base.py:318: UserWarning: Trying to unpickle estimator MinMaxScaler from version 1.0.2 when using version 1.2.2. This might lead to breaking code or invalid results. Use at your own risk. For more info please refer to:\n",
294 | "https://scikit-learn.org/stable/model_persistence.html#security-maintainability-limitations\n",
295 | " warnings.warn(\n"
296 | ]
297 | }
298 | ],
299 | "source": [
300 | "train_pickle_path = os.path.join(train_data_folder, '*.pickle')\n",
301 | "train_pickles = glob.glob(train_pickle_path)\n",
302 | "\n",
303 | "with open(scaler_path, 'rb') as f:\n",
304 | " scaler = joblib.load(f)\n",
305 | "\n",
306 | "train_ds = MapDataset(train_pickles, scaler=scaler, building_value=building_value, sampled_value=sampled_value)\n",
307 | "train_dl = torch.utils.data.DataLoader(train_ds, batch_size=train_batch_size, shuffle=False)\n",
308 | "\n",
309 | "optimizer = torch.optim.AdamW(model.parameters(), lr=lr)"
310 | ]
311 | },
312 | {
313 | "cell_type": "markdown",
314 | "metadata": {
315 | "id": "4uE-E_SKfDPn"
316 | },
317 | "source": [
318 | "# Train Model"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": null,
324 | "metadata": {
325 | "colab": {
326 | "base_uri": "https://localhost:8080/",
327 | "height": 1000
328 | },
329 | "id": "Y6vmIID1fEZZ",
330 | "outputId": "f20ef565-454c-4988-83a4-18fcbdb85d8a"
331 | },
332 | "outputs": [],
333 | "source": [
334 | "model.fit(train_dl, optimizer, epochs=num_epochs, loss='mse')"
335 | ]
336 | },
337 | {
338 | "cell_type": "markdown",
339 | "metadata": {
340 | "id": "ZKoQOYELfL04"
341 | },
342 | "source": [
343 | "# Save Model"
344 | ]
345 | },
346 | {
347 | "cell_type": "code",
348 | "execution_count": 39,
349 | "metadata": {
350 | "id": "ln6If5_PfNC4"
351 | },
352 | "outputs": [],
353 | "source": [
354 | "model.save_model(os.path.join(model_folder, model_name))"
355 | ]
356 | }
357 | ],
358 | "metadata": {
359 | "colab": {
360 | "gpuType": "T4",
361 | "provenance": []
362 | },
363 | "kernelspec": {
364 | "display_name": "Python 3",
365 | "name": "python3"
366 | },
367 | "language_info": {
368 | "codemirror_mode": {
369 | "name": "ipython",
370 | "version": 3
371 | },
372 | "file_extension": ".py",
373 | "mimetype": "text/x-python",
374 | "name": "python",
375 | "nbconvert_exporter": "python",
376 | "pygments_lexer": "ipython3",
377 | "version": "3.9.12"
378 | }
379 | },
380 | "nbformat": 4,
381 | "nbformat_minor": 0
382 | }
383 |
--------------------------------------------------------------------------------
/notebooks/Visualize_Inputs_Outputs.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Google Colab"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "You can use the button below to open this notebook in Google Colab. Note that changes made to the notebook in Colab will not be reflected in Github, nor can the notebook be saved on Colab without first making a copy. \n",
15 | "\n",
16 | "[](https://colab.research.google.com/github/nikitalokhmachev-ai/radio-map-estimation-public/blob/main/notebooks/Visualize_Inputs_Outputs.ipynb)"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "If opened in Colab, set `using_colab` to `True` in the code block below, then run the second and (optionally) third blocks. The second block will clone the github repository into Colab's local storage in order to load the models and other functions. The third block will connect to Google Drive (user login required), which allows the Colab notebook to read and write data to the drive (e.g. training data or evaluation results)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "using_colab = False"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "if using_colab:\n",
42 | " %cd /content/\n",
43 | " !rm -rf /content/radio-map-estimation-public\n",
44 | " !git clone https://github.com/nikitalokhmachev-ai/radio-map-estimation-public.git\n",
45 | " !pip install -q -r /content/radio-map-estimation-public/colab_requirements.txt"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "if using_colab:\n",
55 | " from google.colab import drive\n",
56 | " drive.mount('/content/drive')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "# Untar Validation Data"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "We visualize the inputs and outputs of the validation data, but you can use any data you choose."
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {
77 | "id": "5Jdtnv8-45Kd"
78 | },
79 | "outputs": [],
80 | "source": [
81 | "!tar -xkf '/Path/to/saved/tar/file' -C '/path/to/save/untarred/files'"
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "metadata": {},
87 | "source": [
88 | "# Import Packages"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {
95 | "id": "zWfrHtpz0pbf"
96 | },
97 | "outputs": [],
98 | "source": [
99 | "import torch\n",
100 | "import numpy as np\n",
101 | "import matplotlib.pyplot as plt\n",
102 | "\n",
103 | "import os\n",
104 | "import glob\n",
105 | "import joblib\n",
106 | "import random"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "# Import model architectures and data structures\n",
116 | "\n",
117 | "os.chdir('path/to/repository')\n",
118 | "from data_utils import MapDataset"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "# Set Hyperparameters"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "metadata": {},
132 | "outputs": [],
133 | "source": [
134 | "# Set random seed, define device\n",
135 | "\n",
136 | "seed = 3\n",
137 | "torch.manual_seed(seed)\n",
138 | "torch.use_deterministic_algorithms(True)\n",
139 | "np.random.seed(seed)\n",
140 | "random.seed(seed)\n",
141 | "\n",
142 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "metadata": {},
149 | "outputs": [],
150 | "source": [
151 | "# Specify folder containing trained models\n",
152 | "model_folder = '/folder/with/trained/models'\n",
153 | "\n",
154 | "# Specify path to untarred validation data\n",
155 | "val_data_folder = '/path/to/untarred/validation/data'\n",
156 | "\n",
157 | "# Specify path to data scaler and load\n",
158 | "scaler_path = 'scalers/minmax_scaler_zero_min134.joblib'\n",
159 | "with open(scaler_path, 'rb') as f:\n",
160 | " scaler = joblib.load(f)\n",
161 | "\n",
162 | "# Set folder to save visualizations\n",
163 | "viz_folder = '/Path/to/save/visualizations'\n",
164 | "if not os.path.exists(viz_folder):\n",
165 | " os.makedirs(viz_folder)"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "metadata": {},
171 | "source": [
172 | "# Visualize Input and Ground Truth"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "metadata": {},
178 | "source": [
179 | "The Sample Map and Environment Mask are used as inputs to the model. The complete Radio Map is the ground truth that the model seeks to recreate. We use the example map shown in the paper below, but this can be replaced with any image from the validation set or other dataset."
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {
186 | "id": "RAtgjS37EtE-"
187 | },
188 | "outputs": [],
189 | "source": [
190 | "# Example batch\n",
191 | "example_batch = os.path.join(val_data_folder, 'test_0.01%_batch_0.pickle')\n",
192 | "# Index of map within batch\n",
193 | "i=37\n",
194 | "\n",
195 | "# Load batch\n",
196 | "t_x_points, t_channel_pows, t_y_masks = np.load(example_batch, allow_pickle=True)\n",
197 | "# Select map within in batch\n",
198 | "t_x_points = t_x_points[i:i+1]\n",
199 | "t_channel_pows = t_channel_pows[i:i+1]\n",
200 | "t_y_masks = t_y_masks[i:i+1]\n",
201 | "\n",
202 | "# Manually preprocess map (this would normally be done by the MapDataset class)\n",
203 | "t_y_points = t_channel_pows * t_y_masks\n",
204 | "t_x_masks = t_x_points[:,1,:,:] == 1\n",
205 | "t_x_points[:,0,:,:] = scaler.transform(t_x_points[:,0,:,:]) * t_x_masks\n",
206 | "t_channel_pows = scaler.transform(t_channel_pows)\n",
207 | "t_y_points = scaler.transform(t_y_points)\n",
208 | "\n",
209 | "sample_map = t_x_points[0,0,:,:]\n",
210 | "env_mask = t_x_points[0,1,:,:]\n",
211 | "target = t_y_points[0,0,:,:]\n",
212 | "target[env_mask==-1] = 1\n",
213 | "\n",
214 | "# Visualize\n",
215 | "fig, axs = plt.subplots(1,3, figsize=(6,5))\n",
216 | "axs[0].imshow(sample_map, cmap='hot', vmin=0, vmax=1)\n",
217 | "axs[0].set_title('Sampled Map')\n",
218 | "axs[1].imshow(env_mask, cmap='binary')\n",
219 | "axs[1].set_title('Environment Mask')\n",
220 | "axs[2].imshow(target, cmap='hot', vmin=0, vmax=1)\n",
221 | "axs[2].set_title('Complete Radio Map')\n",
222 | "[ax.set_xticks([]) for ax in axs]\n",
223 | "[ax.set_yticks([]) for ax in axs]\n",
224 | "fig.tight_layout()\n",
225 | "fig.show()"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {},
231 | "source": [
232 | "# Visualize Output and Intermediate Layers"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "execution_count": null,
238 | "metadata": {},
239 | "outputs": [],
240 | "source": [
241 | "def get_model_output(x, channel_id, model, model_layer):\n",
242 | " #x: bs, c, h, w\n",
243 | " x = x.to(device)\n",
244 | " activation = {}\n",
245 | " def get_activation(name):\n",
246 | " def hook(model, input, output):\n",
247 | " activation[name] = output.detach()\n",
248 | " return hook\n",
249 | "\n",
250 | " model_layer.register_forward_hook(get_activation('out'))\n",
251 | " output = model(x)\n",
252 | "\n",
253 | " return activation['out'][0].permute(1,2,0).detach().cpu()[:,:,channel_id].unsqueeze(-1).numpy()\n",
254 | "\n",
255 | "def visualize_layer(x, model, model_layer, nrows, ncols, figsize=(15, 15), out_folder=None, filename=None):\n",
256 | " n_channels = model_layer.out_channels\n",
257 | " fig, axs = plt.subplots(nrows, ncols, figsize=figsize)\n",
258 | " for i in range(nrows):\n",
259 | " for j in range(ncols):\n",
260 | " channel_id = i * ncols + j\n",
261 | " if channel_id < n_channels:\n",
262 | " axs[i, j].imshow(get_model_output(x, channel_id, model, model_layer))\n",
263 | " axs[i, j].set_title(str(channel_id))\n",
264 | " axs[i, j].axis('off')\n",
265 | " else:\n",
266 | " axs[i, j].axis('off')\n",
267 | " plt.tight_layout()\n",
268 | " if out_folder and filename:\n",
269 | " plt.savefig(os.path.join(out_folder, filename))\n",
270 | " plt.show()\n",
271 | "\n",
272 | "def visualize_output(x, model, figsize=(5, 5), out_folder=None, filename=None):\n",
273 | " x_mask = x[:,1,:,:]\n",
274 | " plt.figure(figsize=figsize)\n",
275 | " plt.axis('off')\n",
276 | " plt.title('Model Output')\n",
277 | " prediction = model(x).reshape(1,32,32)\n",
278 | " prediction[x_mask==-1] = 1\n",
279 | " prediction = prediction.detach().cpu().numpy().transpose(1,2,0)\n",
280 | " plt.imshow(prediction, cmap='hot', vmin=0, vmax=1)\n",
281 | " plt.tight_layout()\n",
282 | " if out_folder and filename:\n",
283 | " plt.savefig(os.path.join(out_folder, filename))\n",
284 | " plt.show()"
285 | ]
286 | },
287 | {
288 | "cell_type": "markdown",
289 | "metadata": {},
290 | "source": [
291 | "The code below visualizes either the output of the model (i.e. the predicted map) or the model representation at an intermediate layer. The user first specifies the model. If visualizing an intermediate layer, the user also specifies the layer from either the encoder or decoder. Layer names and attributes are printed out in the list below. Note that only Conv2d or ConvTranspose2d layers can be visualized."
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {
298 | "id": "klmOrKSVwN58"
299 | },
300 | "outputs": [],
301 | "source": [
302 | "model_name = 'Baseline'\n",
303 | "model = torch.load(os.path.join(model_folder, f'{model_name}.pth'), weights_only=False, map_location=device)\n",
304 | "model.eval()\n",
305 | "\n",
306 | "encoder = model.encoder\n",
307 | "decoder = model.decoder\n",
308 | "print(encoder)\n",
309 | "print()\n",
310 | "print(decoder)"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "metadata": {},
317 | "outputs": [],
318 | "source": [
319 | "# Convert input numpy array to tensor\n",
320 | "x = torch.from_numpy(t_x_points).to(torch.float32).to(device)"
321 | ]
322 | },
323 | {
324 | "cell_type": "code",
325 | "execution_count": null,
326 | "metadata": {},
327 | "outputs": [],
328 | "source": [
329 | "# Select layer and visualize channel outputs\n",
330 | "model_layer = encoder.conv2d_1\n",
331 | "visualize_layer(x, model, model_layer, nrows=5, ncols=6)"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": null,
337 | "metadata": {},
338 | "outputs": [],
339 | "source": [
340 | "# Visualize model output\n",
341 | "visualize_output(x, model)"
342 | ]
343 | }
344 | ],
345 | "metadata": {
346 | "accelerator": "GPU",
347 | "colab": {
348 | "gpuType": "T4",
349 | "machine_shape": "hm",
350 | "provenance": []
351 | },
352 | "kernelspec": {
353 | "display_name": "Python 3",
354 | "name": "python3"
355 | },
356 | "language_info": {
357 | "name": "python"
358 | }
359 | },
360 | "nbformat": 4,
361 | "nbformat_minor": 0
362 | }
363 |
--------------------------------------------------------------------------------
/dataset/generate_dataset.py:
--------------------------------------------------------------------------------
1 | from insite_map_generator import InsiteMapGenerator
2 | from gudmundson_map_generator import GudmundsonMapGenerator
3 | from map_sampler import MapSampler
4 | import numpy as np
5 | from joblib import Parallel, delayed
6 | import time
7 | import os
8 | import sys
9 | import argparse
10 | import pickle
11 |
12 | def get_parser():
13 | parser = argparse.ArgumentParser(description='Generates train and test set maps. Saves maps as pickled list of three arrays: sampled map, target map, and target mask.')
14 | parser.add_argument('--num_maps', dest='num_maps', help='Total number of maps to generate.', type=int, default=250000)
15 | parser.add_argument('--buildings', dest='buildings', help='Include buildings in generated map (opposite command is "--no-buildings")', action='store_true')
16 | parser.add_argument('--no-buildings', dest='buildings', help='Do not include buildings in generated map (opposite command is "--buildings")', action='store_false')
17 | parser.add_argument('--batch_size', dest='batch_size', help='Number of maps to save together in a single file / array. \
18 | This is not necessarily the same as the train or test batch size, which can be set during the loading of the data.', type=int, default=512)
19 | parser.add_argument('--test_split', dest='test_split', help='Percent of maps to use for testing.', type=float, default=0.1)
20 | parser.add_argument('--num_cpus', dest='num_cpus', help='Number of CPUs to process maps with.', type=int, default=16)
21 |     parser.add_argument('--input_dir', dest='input_dir', help='Directory where remcom_maps are saved if the --buildings argument is used. If --no-buildings, this is ignored.', type=str, default='remcom_maps')
22 | parser.add_argument('--output_dir', dest='output_dir', help='Directory where generated maps will be saved.', type=str, default='generated_maps')
23 | return parser
24 |
25 | class DatasetGenerator():
26 |
27 | # Default values for all of these are stored in parser, but I repeat them here for clarity
28 | def __init__(self,
29 | num_maps=250000,
30 | buildings=True,
31 | batch_size=512,
32 | test_split=0.1,
33 | num_cpus=16,
34 | input_dir='remcom_maps',
35 | output_dir='generated_maps'):
36 | self.num_maps = num_maps
37 | self.buildings = buildings
38 | self.batch_size = batch_size
39 | self.test_split = test_split
40 | self.num_cpus = num_cpus
41 | self.input_dir = input_dir
42 | self.output_dir = output_dir
43 | self.time = time.strftime("%m_%d__%Hh_%Mm", time.gmtime())
44 | # generator and sampler are set in the "generate_train_maps" and "generate_test_maps" methods
45 | self.generator = None
46 | self.sampler = None
47 |
48 | def generate_one_map(self, ind_map):
49 | '''
50 | This method is called multiple times in parallel from "generate_n_maps".
51 |
52 | Parameters:
53 | ind_map (int): Dummy variable, used for Parallel processing
54 |
55 | Returns:
56 | data_point (dict):
57 | 'sampled_map' (ndarray): 2 x 32 x 32, first channel is sampled power,
58 | second channel is mask with -1 for buildings, 0 for unsampled locations, 1 for sampled locations
59 | 'target_map' (ndarray): 1 x 32 x 32, power at all locations on map
60 | 'target_mask' (ndarray): 1 x 32 x 32, mask with 0 for buildings, 1 elsewhere
61 | '''
62 | if self.generator is None or self.sampler is None:
63 |             raise Exception('You must first call "self.generate_train_maps" or "self.generate_test_maps" to set up the appropriate generator and sampler.')
64 |
65 | else:
66 | # generate full map, building map, and channel power (not used)
67 | target_map, building_map, channel_power = self.generator.generate()
68 | sampled_map, sample_mask = self.sampler.sample_map(target_map, building_map)
69 |
70 | # target mask is inverse of building map
71 | target_mask = 1 - building_map
72 |
73 | # reshaping and adding masks
74 | sampled_map, target_map, target_mask = self.format_preparation(sampled_map, building_map, sample_mask, target_map, target_mask)
75 |
76 | data_point = {"sampled_map": sampled_map, # Nf(+1) x Nx x Ny
77 | "target_map": target_map, # Nf x Nx x Ny
78 | "target_mask": target_mask} # Nf x Nx x Ny
79 |
80 | return data_point
81 |
82 |
83 | def generate_n_maps(self, n_maps, output_dir, file_name):
84 | '''
85 | Generate n maps (sampled, target, and mask) and save them to the output directory.
86 |
87 | Parameters:
88 | n_maps (int): Number of maps to generate in a given method call
89 | output_dir (str): Output directory to save maps
90 | file_name (str): File name format for maps
91 | '''
92 | remainder = n_maps
93 | batch = 0
94 | while remainder >= self.batch_size:
95 | start_time = time.time()
96 |
97 | data = Parallel(n_jobs=self.num_cpus, backend='threading')(delayed(self.generate_one_map)(ind_map)
98 | for ind_map in range(int(self.batch_size)))
99 |
100 | end_time = time.time()
101 | elapsed_time = end_time - start_time
102 | print(f'Elapsed time for {self.num_cpus} CPUs to generate and sample {self.batch_size} maps is',
103 | time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
104 |
105 | sampled_maps = np.array([data_point['sampled_map'] for data_point in data])
106 | target_maps = np.array([data_point['target_map'] for data_point in data])
107 | target_masks = np.array([data_point['target_mask'] for data_point in data])
108 |
109 | with open(os.path.join(output_dir, f'{file_name}{batch}.pickle'), 'wb') as f:
110 | pickle.dump([sampled_maps, target_maps, target_masks], f)
111 |
112 | remainder -= self.batch_size
113 | batch += 1
114 |
115 | if remainder > 0:
116 | start_time = time.time()
117 |
118 | data = Parallel(n_jobs=self.num_cpus, backend='threading')(delayed(self.generate_one_map)(ind_map)
119 | for ind_map in range(int(remainder)))
120 | end_time = time.time()
121 | elapsed_time = end_time - start_time
122 | print(f'Elapsed time for {self.num_cpus} CPUs to generate and sample {remainder} maps is',
123 | time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
124 |
125 | sampled_maps = np.array([data_point['sampled_map'] for data_point in data])
126 | target_maps = np.array([data_point['target_map'] for data_point in data])
127 | target_masks = np.array([data_point['target_mask'] for data_point in data])
128 |
129 | with open(os.path.join(output_dir, f'{file_name}{batch}.pickle'), 'wb') as f:
130 | pickle.dump([sampled_maps, target_maps, target_masks], f)
131 |
132 |
133 | def generate_train_maps(self):
134 | '''
135 | This function sets self.generator and self.sampler to train modes, then calls generate_n_maps.
136 | If self.buildings is True, self.generator is an InsiteMapGenerator; if False, it is a GudmundsonMapGenerator.
137 | '''
138 | # Create output directory with timestamp
139 | output_dir = os.path.join(self.output_dir, self.time, 'train')
140 | file_name = 'train_batch_'
141 | os.makedirs(output_dir)
142 |
143 | # Initialize Training Generator
144 | if self.buildings:
145 | self.generator = InsiteMapGenerator(
146 | # parameters for InsiteMapGenerator class
147 | num_tx_per_channel=2,
148 | l_file_num=np.arange(1,40),
149 | large_map_size=(244,246),
150 | filter_map=True,
151 | filter_size=3,
152 | inter_grid_points_dist_factor=1,
153 | input_dir=self.input_dir,
154 | # args and kwargs for MapGenerator class
155 | x_length=100,
156 | y_length=100,
157 | n_grid_points_x=32,
158 | n_grid_points_y=32,
159 | m_basis_functions=np.array([[1]]),
160 | noise_power_interval=None)
161 |
162 | else:
163 | self.generator = GudmundsonMapGenerator(
164 | # parameters for GudmundsonMapGenerator class
165 | v_central_frequencies=[1.4e9],
166 | tx_power=None,
167 | n_tx=2,
168 | tx_power_interval=[5, 11], #dBm
169 | path_loss_exp=3,
170 | corr_shad_sigma2=10,
171 | corr_base=0.95,
172 | b_shadowing=True,
173 | num_precomputed_shadowing_mats=500000,
174 | # args and kwargs for MapGenerator class
175 | x_length=100,
176 | y_length=100,
177 | n_grid_points_x=32,
178 | n_grid_points_y=32,
179 | m_basis_functions=np.array([[1]]),
180 | noise_power_interval=None)
181 |
182 | # Initialize Training Sampler
183 | self.sampler = MapSampler(v_sampling_factor=[0.01, 0.4])
184 |
185 | n_maps = self.num_maps - int(self.num_maps * self.test_split)
186 | self.generate_n_maps(n_maps, output_dir, file_name)
187 |
188 |
189 | def generate_test_maps(self):
190 | '''
191 | This function sets self.generator and self.sampler to test modes, then calls generate_n_maps.
192 | '''
193 | # Create output directory with timestamp
194 | output_dir = os.path.join(self.output_dir, self.time, 'test')
195 | print(f'Saving test maps to {output_dir}')
196 | os.makedirs(output_dir)
197 |
198 | # Initialize Testing Generator
199 | if self.buildings:
200 | self.generator = InsiteMapGenerator(
201 | # parameters for InsiteMapGenerator class
202 | num_tx_per_channel=2,
203 | l_file_num=np.arange(41, 43),
204 | large_map_size=(244,246),
205 | filter_map=True,
206 | filter_size=3,
207 | inter_grid_points_dist_factor=1,
208 | input_dir=self.input_dir,
209 | # args and kwargs for MapGenerator class
210 | x_length=100,
211 | y_length=100,
212 | n_grid_points_x=32,
213 | n_grid_points_y=32,
214 | m_basis_functions=np.array([[1]]),
215 | noise_power_interval=None)
216 |
217 | else:
218 | self.generator = GudmundsonMapGenerator(
219 | # parameters for GudmundsonMapGenerator class
220 | v_central_frequencies=[1.4e9],
221 | tx_power=None,
222 | n_tx=2,
223 | tx_power_interval=[5, 11], #dBm
224 | path_loss_exp=3,
225 | corr_shad_sigma2=10,
226 | corr_base=0.95,
227 | b_shadowing=True,
228 | num_precomputed_shadowing_mats=500000,
229 | # args and kwargs for MapGenerator class
230 | x_length=100,
231 | y_length=100,
232 | n_grid_points_x=32,
233 | n_grid_points_y=32,
234 | m_basis_functions=np.array([[1]]),
235 | noise_power_interval=None)
236 |
237 | # Testing maps are sampled at fixed rates, with an equal number of maps per sampling rate
238 | #sampling_rate = np.concatenate((np.linspace(0.01, 0.2, 10, endpoint=False), np.linspace(0.2, 0.4, 7)), axis=0)
239 | sampling_rate = np.arange(0.02, 0.42, 0.02)
240 | n_maps = int(self.num_maps * self.test_split / len(sampling_rate))
241 | for rate in sampling_rate:
242 | self.sampler = MapSampler(v_sampling_factor=rate) # set self.sampler to the current sampling rate
243 | file_name = f'test_{rate*100:.0f}%_batch_'
244 | self.generate_n_maps(n_maps, output_dir, file_name)
245 |
246 |
247 | def format_preparation(self, sampled_map, building_map, sample_mask, target_map, target_mask):
248 | """
249 | This method combines the building_map and sample_mask into a single mask that is concatenated onto sampled_map,
250 | then changes the dimensions of sampled_map, target_map, and target_mask so that the channels (Nf) are the first dimension.
251 |
252 | Returns:
253 | `sampled_map`: Nf(+1) x Nx x Ny array
254 | `target_map`: Nf x Nx x Ny array
255 | `target_mask`: Nf x Nx x Ny array
256 |
257 | """
258 | # put sampled_map and target_map into Nf x Nx x Ny order
259 | sampled_map = np.transpose(sampled_map, (2,0,1))
260 | target_map = np.transpose(target_map, (2,0,1))
261 |
262 | # target_mask is expanded to include Nf at the front, and repeated to have the same number of channels Nf as target_map
263 | target_mask = np.repeat(target_mask[np.newaxis, :, :], target_map.shape[0], axis=0)
264 |
265 | # expand sample_mask and building_map to 1 x Nx x Ny
266 | sample_mask = np.expand_dims(sample_mask, axis=0)
267 | building_map = np.expand_dims(building_map, axis=0)
268 |
269 | # combine sample_mask and building_map into a single mask (1 for samples, -1 for buildings, 0 for all else)
270 | sampled_map = np.concatenate((sampled_map, sample_mask - building_map), axis=0)
271 |
272 | return sampled_map, target_map, target_mask
273 |
274 |
275 | if __name__ == '__main__':
276 | parser = get_parser()
277 | try:
278 | args = parser.parse_args()
279 | except SystemExit:
280 | parser.print_help()
281 | sys.exit(0)
282 |
283 | dataset_generator = DatasetGenerator(
284 | num_maps=args.num_maps,
285 | buildings=args.buildings,
286 | batch_size=args.batch_size,
287 | test_split=args.test_split,
288 | num_cpus=args.num_cpus,
289 | input_dir=args.input_dir,
290 | output_dir=args.output_dir)
291 |
292 | print(f'dataset_{dataset_generator.time}\n')
293 | print(args, '\n')
294 |
295 | print('Generating test set: \n')
296 | test_start = time.time()
297 | dataset_generator.generate_test_maps()
298 | test_end = time.time()
299 | duration = test_end - test_start
300 | print('\nTest set generated in', time.strftime("%H:%M:%S", time.gmtime(duration)), '\n')
301 |
302 | print('Generating train set: \n')
303 | train_start = time.time()
304 | dataset_generator.generate_train_maps()
305 | train_end = time.time()
306 | duration = train_end - train_start
307 | print('\nTrain set generated in', time.strftime("%H:%M:%S", time.gmtime(duration)))
308 |
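309 | # ----------------------------------------------------------------------
310 | # Illustrative sketch (editor's addition, commented out so the script's
311 | # behavior is unchanged; file names and paths below are placeholders).
312 | #
313 | # Generate a small dataset without buildings:
314 | #
315 | #   python generate_dataset.py --num_maps 1000 --no-buildings --batch_size 256
316 | #
317 | # Each saved pickle holds [sampled_maps, target_maps, target_masks]. With
318 | # the default 32 x 32 grid and a single basis function (Nf = 1),
319 | # sampled_maps has shape (batch, 2, 32, 32) and the other two arrays have
320 | # shape (batch, 1, 32, 32):
321 | #
322 | #   with open('generated_maps/<timestamp>/train/train_batch_0.pickle', 'rb') as f:
323 | #       sampled_maps, target_maps, target_masks = pickle.load(f)
324 | #   print(sampled_maps.shape, target_maps.shape, target_masks.shape)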
--------------------------------------------------------------------------------
/notebooks/Visualize_Results.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Google Colab"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "You can use the button below to open this notebook in Google Colab. Note that changes made to the notebook in Colab will not be reflected in Github, nor can the notebook be saved on Colab without first making a copy. \n",
15 | "\n",
16 | "[](https://colab.research.google.com/github/nikitalokhmachev-ai/radio-map-estimation-public/blob/main/notebooks/Visualize_Results.ipynb)"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "If opened in Colab, set `using_colab` to `True` in the code block below, then run the second and (optionally) third blocks. The second block will install kaleido to visualize some of the results, then clone the github repository into Colab's local storage in order to load the models and other functions. The third block will connect to Google Drive (user login required), which allows the Colab notebook to read and write data to the drive (e.g. training data or evaluation results)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "using_colab = False"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "if using_colab:\n",
42 | " %cd /content/\n",
43 | " !rm -rf /content/radio-map-estimation-public\n",
44 | " !git clone https://github.com/nikitalokhmachev-ai/radio-map-estimation-public.git\n",
45 | " !pip install -q -r /content/radio-map-estimation-public/colab_requirements.txt"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "if using_colab:\n",
55 | " from google.colab import drive\n",
56 | " drive.mount('/content/drive')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {
62 | "id": "2yttrJ663TPK"
63 | },
64 | "source": [
65 | "# Import Packages"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {
72 | "id": "zWfrHtpz0pbf"
73 | },
74 | "outputs": [],
75 | "source": [
76 | "import torch\n",
77 | "import numpy as np\n",
78 | "import pandas as pd\n",
79 | "import plotly.express as px\n",
80 | "import plotly.graph_objects as go\n",
81 | "\n",
82 | "import os\n",
83 | "import glob\n",
84 | "import pickle"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "metadata": {},
91 | "outputs": [],
92 | "source": [
93 | "os.chdir('path/to/repository')\n",
94 | "from test_utils import get_sample_error, visualize_sample_error, get_average_error, visualize_average_error, visualize_hist"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "metadata": {},
100 | "source": [
101 | "# Model and Result Paths"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "Specify paths to the folders where the trained models are saved, where the results are saved, and where the visualizations (graphs) should be saved. For the PIMRC paper, we included some graphs showing all models' performance, and some graphs showing just the performance of Dual Path or UNet models. Below, we specify a single folder for `all_results`, and then two folders for `dual_results` and `unet_results`. One way to accomplish this is to have the Dual Path and UNet result folders saved under an overarching folder for All results, then conclude the path to the All results folder with `\\**`, which indicates a recursive search within that folder when using the `glob` library."
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {
115 | "id": "BfeTolRbuONY"
116 | },
117 | "outputs": [],
118 | "source": [
119 | "# Specify folder containing trained models\n",
120 | "model_folder = '/Path/to/saved/models'\n",
121 | "\n",
122 | "# Specify folder containing all saved results\n",
123 | "all_results = '/Path/to/saved/results'\n",
124 | "\n",
125 | "# Specify folder containing Dual Path saved results\n",
126 | "dual_results = '/Path/to/dual_path/results'\n",
127 | "\n",
128 | "# Specify folder containing Skip Connection saved results\n",
129 | "unet_results = '/Path/to/UNet/results'\n",
130 | "\n",
131 | "# Set folder to save visualizations\n",
132 | "viz_folder = '/Path/to/save/visualizations'\n",
133 | "if not os.path.exists(viz_folder):\n",
134 | " os.makedirs(viz_folder)\n",
135 | "\n",
136 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "metadata": {},
142 | "source": [
143 | "# Display Names"
144 | ]
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "metadata": {},
149 | "source": [
150 | "This is optional code to specify how model names will appear in visualizations. \n",
151 | "\n",
152 | "`display_names` is a dictionary, the keys of which are the names that the individual model results are saved under (minus the \".pickle\" ending), and the values of which are how they will appear in visualizations. Below are the names of the models as they were saved and appear in the PIMRC paper. `display_names` is provided as an optional parameter to the `visualize_sample_error` function; if `None` (the default), the models will be named according to their filenames in the results folder.\n",
153 | "\n",
154 | "`consistent_colors` is a dictionary that attaches an index to each model name in order to ensure the same model is depicted using the same color in any line graphs. `consistent_colors` is provided as an optional parameter to the `visualize_sample_error` function; if `None` (the default), models will be assigned arbitrary colors that may vary between graphs."
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": null,
160 | "metadata": {
161 | "id": "eMTR-zzhtiF2"
162 | },
163 | "outputs": [],
164 | "source": [
165 | "display_names = {'Baseline':'Baseline', 'Dual Concat Mask Only':'Dualmask', 'Dual Concat Map Only':'Dualmap',\n",
166 | " 'Dual Concat Mask then Map':'Dualmask-map', 'Dual Concat Map then Mask':'Dualmap-mask',\n",
167 | " 'Dual Concat Input':'Dualinput', 'UNet Baseline':'Skip', 'UNet Concat Input':'Skipinput',\n",
168 | " 'UNet Concat Map Only':'Skipmap','UNet Concat Mask Only':'Skipmask',\n",
169 | " 'UNet Concat Map then Mask':'Skipmap-mask', 'UNet Concat Mask then Map':'Skipmask-map',\n",
170 | " 'UNet Concat Input': 'Skipinput', 'ResUNet':'Skipresidual'}\n",
171 | "consistent_colors = {v[1]:v[0] for v in enumerate(display_names.values())}"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {
177 | "id": "HTNtiFzt3cUT"
178 | },
179 | "source": [
180 | "# Results Analysis"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "## Average Error for All Models (Table)"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "metadata": {
194 | "id": "XH7fk2AWTilx"
195 | },
196 | "outputs": [],
197 | "source": [
198 | "avg_dfs = get_average_error(all_results)\n",
199 | "avg_dfs = avg_dfs.set_index('Model')\n",
200 | "avg_dfs = avg_dfs.sort_values(['Avg Error'])\n",
201 | "avg_dfs.index.name = None\n",
202 | "avg_dfs"
203 | ]
204 | },
205 | {
206 | "cell_type": "markdown",
207 | "metadata": {},
208 | "source": [
209 | "## Model Size Comparison (Table)"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": null,
215 | "metadata": {
216 | "id": "70tI0WIZNfNf"
217 | },
218 | "outputs": [],
219 | "source": [
220 | "mdl_names = avg_dfs.index\n",
221 | "\n",
222 | "def count_parameters(model):\n",
223 | " return sum(p.numel() for p in model.parameters())\n",
224 | "\n",
225 | "models = dict()\n",
226 | "for name in mdl_names:\n",
227 | " model = torch.load(os.path.join(model_folder, name + '.pth'), weights_only=False, map_location=device)\n",
228 | " models[name] = model\n",
229 | "\n",
230 | "params = {'Encoder':[], 'Decoder':[], 'Total':[]}\n",
231 | "for model in models.values():\n",
232 | " params['Encoder'].append(count_parameters(model.encoder))\n",
233 | " params['Decoder'].append(count_parameters(model.decoder))\n",
234 | " params['Total'].append(count_parameters(model))\n",
235 | " assert(params['Encoder'][-1] + params['Decoder'][-1] == params['Total'][-1])\n",
236 | "\n",
237 | "params_df = pd.DataFrame.from_dict(params, orient='columns')\n",
238 | "params_df.index = models.keys()\n",
239 | "params_df = params_df.sort_values(['Total'])\n",
240 | "params_df"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "## Model Size vs Performance"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "metadata": {
254 | "id": "dyQX6OkkylHG"
255 | },
256 | "outputs": [],
257 | "source": [
258 | "df_vis = params_df.join(avg_dfs)\n",
259 | "df_vis['TotalVis'] = df_vis['Total'] - df_vis['Total'].min() + 2000\n",
260 | "df_vis['Text'] = (df_vis['Total'] // 1000).astype(str) + 'K'\n",
261 | "df_vis['Model'] = df_vis.index\n",
262 | "df_vis['Neg Error'] = df_vis['Avg Error'] * -1\n",
263 | "df_vis=df_vis.sort_values(['Total', 'Neg Error'])\n",
264 | "df_vis.reindex([])\n",
265 | "df_vis = df_vis.replace({'Model': display_names})\n",
266 | "df_vis['Colors'] = df_vis['Model'].copy()\n",
267 | "df_vis.replace({'Colors': consistent_colors})\n",
268 | "\n",
269 | "fig = px.scatter(df_vis, x=\"Model\", y=\"Avg Error\", size=\"TotalVis\", color=\"Model\", size_max=90, text='Text', labels=display_names)\n",
270 | "fig.update_traces(textposition='top center')\n",
271 | "fig.update_layout(plot_bgcolor='rgba(0, 0, 0, 0)')\n",
272 | "fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')\n",
273 | "fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='LightGray')\n",
274 | "fig.update_yaxes(range=[1.64, 2.25])\n",
275 | "fig.update_layout(showlegend=False, yaxis_title='RMSE(dB)')\n",
276 | "fig.update_layout(width=1500, height=800)\n",
277 | "fig.update_layout(font=dict(size=34))\n",
278 | "\n",
279 | "fig.update_layout(shapes=[go.layout.Shape(type='rect', xref='paper', yref='paper', x0=0, y0=0, x1=1, y1=1, line={'width': 1, 'color': 'black', 'dash':'solid'})])\n",
280 | "fig.update_xaxes(\n",
281 | " ticks=\"outside\",\n",
282 | " tickson=\"labels\",\n",
283 | " ticklen=15,\n",
284 | " title=None)\n",
285 | "fig.update_yaxes(\n",
286 | " ticks=\"outside\",\n",
287 | " tickson=\"labels\",\n",
288 | " ticklen=15)\n",
289 | "\n",
290 | "fig.show()\n",
291 | "fig.write_image(os.path.join(viz_folder, 'All Models Size.pdf'))"
292 | ]
293 | },
294 | {
295 | "cell_type": "markdown",
296 | "metadata": {},
297 | "source": [
298 | "## Dual Path Models Average Error"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": null,
304 | "metadata": {
305 | "id": "m7oBnQEG-GR2"
306 | },
307 | "outputs": [],
308 | "source": [
309 | "dual_avg_df = get_average_error(dual_results)\n",
310 | "dual_avg_df = dual_avg_df.sort_values('Avg Error', ascending=False)\n",
311 | "\n",
312 | "# Full figure\n",
313 | "fig = visualize_average_error(dual_avg_df, display_names=display_names, baseline_name='Baseline', \n",
314 | " width=700, height=450, text_size=24)\n",
315 | "fig.write_image(os.path.join(viz_folder,'Dual Path Avg.pdf'))\n",
316 | "\n",
317 | "# Zoomed in figure (used in paper)\n",
318 | "fig = visualize_average_error(dual_avg_df, display_names=display_names, baseline_name='Baseline', \n",
319 | " width=700, height=450, text_size=24, y_range=[1.5, 2.4])\n",
320 | "fig.write_image(os.path.join(viz_folder,'Dual Path Avg Zoom.pdf'))"
321 | ]
322 | },
323 | {
324 | "cell_type": "markdown",
325 | "metadata": {},
326 | "source": [
327 | "## Dual Path Models per-Sampling Rate Error"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": null,
333 | "metadata": {
334 | "id": "xf7X6dEPGXK6"
335 | },
336 | "outputs": [],
337 | "source": [
338 | "dashes = ['dash','solid']\n",
339 | "markers = ['star', 'diamond', 'square']\n",
340 | "line_styles = [(d, m) for m in markers for d in dashes]\n",
341 | "\n",
342 | "# Full figure (used in paper)\n",
343 | "fig = visualize_sample_error(dual_results, display_names=display_names, consistent_colors=consistent_colors, line_styles=line_styles, width=700, height=450, text_size=23, marker_size=10)\n",
344 | "fig.write_image('Dual Path All.pdf')\n",
345 | "\n",
346 | "# Zoomed in figure\n",
347 | "fig = visualize_sample_error(dual_results, display_names=display_names, consistent_colors=consistent_colors, line_styles=line_styles, width=700, height=450, text_size=23, marker_size=10, y_range=[0.9, 3.3], x_range=[0,0.4])\n",
348 | "fig.write_image(os.path.join(viz_folder, 'Dual Path All Zoom.pdf'))"
349 | ]
350 | },
351 | {
352 | "cell_type": "markdown",
353 | "metadata": {},
354 | "source": [
355 | "## Dual Path Models Average Split by Sampling Rate"
356 | ]
357 | },
358 | {
359 | "cell_type": "code",
360 | "execution_count": null,
361 | "metadata": {},
362 | "outputs": [],
363 | "source": [
364 | "# Figure not used in paper\n",
365 | "fig = visualize_hist(dual_results, display_names=display_names, baseline_name='Baseline', \n",
366 | " text_size=23, width=700, height=450)\n",
367 | "fig.write_image(\"Dual Path Bins.pdf\")\n",
368 | "\n",
369 | "# Figure not used in paper\n",
370 | "fig = visualize_hist(dual_results, display_names=display_names, baseline_name='Baseline', \n",
371 | " text_size=23, width=700, height=450, y_range=[0.5, 3.2])\n",
372 | "fig.write_image(\"Dual Path Bins Zoom.pdf\")"
373 | ]
374 | },
375 | {
376 | "cell_type": "markdown",
377 | "metadata": {},
378 | "source": [
379 | "## UNet Models Average Error"
380 | ]
381 | },
382 | {
383 | "cell_type": "code",
384 | "execution_count": null,
385 | "metadata": {
386 | "id": "OaVoTPebHVA5"
387 | },
388 | "outputs": [],
389 | "source": [
390 | "unet_avg_df = get_average_error(unet_results)\n",
391 | "unet_avg_df = unet_avg_df.sort_values('Avg Error', ascending=False)\n",
392 | "\n",
393 | "# Figure not used in paper\n",
394 | "fig = visualize_average_error(unet_avg_df, display_names=display_names, baseline_name='Baseline', \n",
395 | " width=700, height=450, text_size=24)\n",
396 | "fig.write_image(os.path.join(viz_folder, 'UNet Avg.pdf'))\n",
397 | "\n",
398 | "# Figure not used in paper\n",
399 | "fig = visualize_average_error(unet_avg_df, display_names=display_names, baseline_name='Baseline', \n",
400 | " width=700, height=450, text_size=24, y_range=[1.5, 2.4])\n",
401 | "fig.write_image(os.path.join(viz_folder,'UNet Avg Zoom.pdf'))"
402 | ]
403 | },
404 | {
405 | "cell_type": "markdown",
406 | "metadata": {},
407 | "source": [
408 | "## UNet Models per-Sampling Rate Error"
409 | ]
410 | },
411 | {
412 | "cell_type": "code",
413 | "execution_count": null,
414 | "metadata": {
415 | "id": "M2oGLGYZE3mA"
416 | },
417 | "outputs": [],
418 | "source": [
419 | "dashes = ['solid', 'dash', 'dot']\n",
420 | "markers = ['circle', 'square', 'diamond']\n",
421 | "line_styles = [(d, m) for m in markers for d in dashes]\n",
422 | "\n",
423 | "# Full figure (used in paper)\n",
424 | "fig = visualize_sample_error(unet_results, display_names=display_names, consistent_colors=consistent_colors, line_styles=line_styles, text_size=24, width=700, height=450, marker_size=10)\n",
425 | "fig.write_image(os.path.join(viz_folder,\"Unet All.pdf\"))\n",
426 | "\n",
427 | "# Zoomed in figure\n",
428 | "fig = visualize_sample_error(unet_results, display_names=display_names, consistent_colors=consistent_colors, line_styles=line_styles, text_size=24, width=700, height=450, marker_size=10, y_range=[0.9, 3.3], x_range=[0,0.4])\n",
429 | "fig.write_image(os.path.join(viz_folder,\"Unet All Zoom.pdf\"))"
430 | ]
431 | },
432 | {
433 | "cell_type": "markdown",
434 | "metadata": {},
435 | "source": [
436 | "## UNet Models Average Error Split by Sampling Rate"
437 | ]
438 | },
439 | {
440 | "cell_type": "code",
441 | "execution_count": null,
442 | "metadata": {
443 | "id": "NH6sUy7p8Clv"
444 | },
445 | "outputs": [],
446 | "source": [
447 | "# Full figure\n",
448 | "fig = visualize_hist(unet_results, display_names=display_names, baseline_name='Baseline',\n",
449 | " text_size=23, width=700, height=450)\n",
450 | "fig.write_image(\"UNet Bins.pdf\")\n",
451 | "\n",
452 | "# Zoomed in figure (used in paper)\n",
453 | "fig = visualize_hist(unet_results, display_names=display_names, baseline_name='Baseline',\n",
454 | " text_size=23, width=700, height=450, y_range=[0.5, 3])\n",
455 | "fig.write_image(\"UNet Bins Zoom.pdf\")"
456 | ]
457 | },
458 | {
459 | "cell_type": "markdown",
460 | "metadata": {},
461 | "source": [
462 | "## All Models Average Error"
463 | ]
464 | },
465 | {
466 | "cell_type": "code",
467 | "execution_count": null,
468 | "metadata": {
469 | "id": "l-eqEUbArhd1"
470 | },
471 | "outputs": [],
472 | "source": [
473 | "vis_avg_df = avg_dfs.copy()\n",
474 | "vis_avg_df['Model'] = vis_avg_df.index\n",
475 | "vis_avg_df = vis_avg_df.sort_values('Avg Error', ascending=False)\n",
476 | "\n",
477 | "# Figure not used in paper\n",
478 | "fig = visualize_average_error(vis_avg_df, display_names=display_names, baseline_name='Baseline', \n",
479 | " width=1200, height=450, text_size=24)\n",
480 | "fig.write_image(os.path.join(viz_folder, \"All Models Average.pdf\"))"
481 | ]
482 | },
483 | {
484 | "cell_type": "markdown",
485 | "metadata": {},
486 | "source": [
487 | "## Dual Path vs UNet per Sampling Rate Error"
488 | ]
489 | },
490 | {
491 | "cell_type": "markdown",
492 | "metadata": {},
493 | "source": [
494 | "To compare Dual Path and UNet models without overly cluttering the graph, we visualize just the median performing models of each group and the Baseline. In fact, we include two medians for the Dual Path group, those that pass the sampled map to the Decoder (Top) and those that pass the environment mask (Bottom), since there is significant difference between these groups.\n",
495 | "\n",
496 | "To do this, we copy the median model performances to a new `group_folder` then pass this folder to the `visualize_sample_error` function along with a new `model_group_names` dictionary to rename each according to its group."
497 | ]
498 | },
499 | {
500 | "cell_type": "code",
501 | "execution_count": null,
502 | "metadata": {
503 | "id": "1LgsF1BUtJ16"
504 | },
505 | "outputs": [],
506 | "source": [
507 | "group_folder = '/Path/to/group/folder'\n",
508 | "model_group_names = {'Baseline':'Baseline', 'Dual Concat Mask Only':'Dual Path Models (Bottom)', \n",
509 | " 'Dual Concat Mask then Map':'Dual Path Models (Top)', 'UNet Concat Mask Only':'Skip Connection Models'}\n",
510 | "\n",
511 | "dashes = ['solid', 'dash', 'dot']\n",
512 | "markers = ['circle', 'square', 'diamond', 'star']\n",
513 | "line_styles = [(d, m) for d in dashes for m in markers]\n",
514 | "\n",
515 | "# Full figure (used in paper)\n",
516 | "fig = visualize_sample_error(group_folder, display_names=model_group_names, consistent_colors=consistent_colors, \n",
517 | " width=700, height=450, text_size=23, line_styles=line_styles, marker_size=10)\n",
518 | "fig.write_image(os.path.join(viz_folder, \"Model Groups All.pdf\"))\n",
519 | "\n",
520 | "# Zoomed in figure\n",
521 | "fig = visualize_sample_error(group_folder, display_names=model_group_names, consistent_colors=consistent_colors, \n",
522 | " width=700, height=450, text_size=23, line_styles=line_styles, marker_size=10, y_range=[0.9, 3.3], x_range=[0, 0.4])\n",
523 | "fig.write_image(os.path.join(viz_folder, \"Model Groups All Zoom.pdf\"))"
524 | ]
525 | }
526 | ],
527 | "metadata": {
528 | "colab": {
529 | "provenance": []
530 | },
531 | "gpuClass": "standard",
532 | "kernelspec": {
533 | "display_name": "Python 3",
534 | "name": "python3"
535 | },
536 | "language_info": {
537 | "name": "python"
538 | }
539 | },
540 | "nbformat": 4,
541 | "nbformat_minor": 0
542 | }
543 |
--------------------------------------------------------------------------------