├── .gitignore ├── Generators.py ├── LICENSE ├── README.md ├── data_loader.py ├── joint_model.py ├── modelADT.py ├── params.py ├── params_storage ├── README.md ├── params_fig10a.py ├── params_fig10b.py ├── params_fig11a.py ├── params_fig11b.py ├── params_fig13a_20MHz.py ├── params_fig13a_40MHz.py ├── params_fig13b.py ├── params_tab1_test2.py ├── params_tab1_test3.py └── params_tab1_test4.py ├── ref ├── atk_July22_1_ref.png ├── atk_July22_2_ref.png ├── atkinson.png ├── dloc_bib.md ├── jacobs.png ├── jacobs_aug16_1.png ├── jacobs_aug16_2.png ├── jacobs_aug16_3.png ├── jacobs_aug16_4_ref.png └── jacobs_default.png ├── requirements.txt ├── train_and_test.py ├── trainer.py ├── utils.py └── wild.md /.gitignore: -------------------------------------------------------------------------------- 1 | # custom gitignore 2 | *.DS_Store 3 | #Don't track content of these folders 4 | results/ 5 | logs/ 6 | runs/ 7 | matlab/test_* 8 | 9 | # Compiled source # 10 | ################### 11 | *.pt 12 | *.pth 13 | *.txt 14 | *.mat 15 | *.m~ 16 | *.DS_Store 17 | *.yaml 18 | 19 | # Byte-compiled / optimized / DLL files 20 | __pycache__/ 21 | *.py[cod] 22 | *$py.class 23 | 24 | # C extensions 25 | *.so 26 | 27 | # Distribution / packaging 28 | .Python 29 | build/ 30 | develop-eggs/ 31 | dist/ 32 | downloads/ 33 | eggs/ 34 | .eggs/ 35 | lib/ 36 | lib64/ 37 | parts/ 38 | sdist/ 39 | var/ 40 | wheels/ 41 | share/python-wheels/ 42 | *.egg-info/ 43 | .installed.cfg 44 | *.egg 45 | MANIFEST 46 | 47 | # PyInstaller 48 | # Usually these files are written by a python script from a template 49 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 50 | *.manifest 51 | *.spec 52 | 53 | # Installer logs 54 | pip-log.txt 55 | pip-delete-this-directory.txt 56 | 57 | # Unit test / coverage reports 58 | htmlcov/ 59 | .tox/ 60 | .nox/ 61 | .coverage 62 | .coverage.* 63 | .cache 64 | nosetests.xml 65 | coverage.xml 66 | *.cover 67 | *.py,cover 68 | .hypothesis/ 69 | .pytest_cache/ 70 | cover/ 71 | 72 | # Translations 73 | *.mo 74 | *.pot 75 | 76 | # Django stuff: 77 | *.log 78 | local_settings.py 79 | db.sqlite3 80 | db.sqlite3-journal 81 | 82 | # Flask stuff: 83 | instance/ 84 | .webassets-cache 85 | 86 | # Scrapy stuff: 87 | .scrapy 88 | 89 | # Sphinx documentation 90 | docs/_build/ 91 | 92 | # PyBuilder 93 | .pybuilder/ 94 | target/ 95 | 96 | # Jupyter Notebook 97 | .ipynb_checkpoints 98 | 99 | # IPython 100 | profile_default/ 101 | ipython_config.py 102 | 103 | # pyenv 104 | # For a library or package, you might want to ignore these files since the code is 105 | # intended to run in multiple environments; otherwise, check them in: 106 | # .python-version 107 | 108 | # pipenv 109 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 110 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 111 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 112 | # install all needed dependencies. 113 | #Pipfile.lock 114 | 115 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # matlab gitignore 159 | # Windows default autosave extension 160 | *.asv 161 | 162 | # OSX / *nix default autosave extension 163 | *.m~ 164 | 165 | # Compiled MEX binaries (all platforms) 166 | *.mex* 167 | 168 | # Packaged app and toolbox files 169 | *.mlappinstall 170 | *.mltbx 171 | 172 | # Generated helpsearch folders 173 | helpsearch*/ 174 | 175 | # Simulink code generation folders 176 | slprj/ 177 | sccprj/ 178 | 179 | # Matlab code generation folders 180 | codegen/ 181 | 182 | # Simulink autosave extension 183 | *.autosave 184 | 185 | # Simulink cache files 186 | *.slxc 187 | 188 | # Octave session info 189 | octave-workspace 190 | -------------------------------------------------------------------------------- /Generators.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Defines the Encoders and Decoder that make up the 4 | Enc+Dec/Enc+2Dec model defined in DLoc 5 | Idea and code originally from Justin Johnson's architecture. 6 | https://github.com/jcjohnson/fast-neural-style/ 7 | Code base expanded from Phillip Isola's pix2pix implementation 8 | https://github.com/phillipi/pix2pix 9 | 10 | You can add your custom network building blocks here to test various other architectures. 11 | ''' 12 | import torch 13 | import torch.nn as nn 14 | import functools 15 | 16 | 17 | # The base Encoder class defined for DLoc's Encoder 18 | class ResnetEncoder(nn.Module): 19 | def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='zero'): 20 | assert(n_blocks >= 0) 21 | super(ResnetEncoder, self).__init__() 22 | self.input_nc = input_nc 23 | self.output_nc = output_nc 24 | self.ngf = ngf 25 | if type(norm_layer) == functools.partial: 26 | use_bias = norm_layer.func == nn.InstanceNorm2d 27 | else: 28 | use_bias = norm_layer == nn.InstanceNorm2d 29 | 30 | model = [nn.Conv2d(input_nc, input_nc, kernel_size=7, padding=3, 31 | bias=use_bias), 32 | norm_layer(input_nc), 33 | nn.Tanh()] 34 | 35 | model += [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3, 36 | bias=use_bias), 37 | norm_layer(ngf), 38 | nn.ReLU(True)] 39 | 40 | n_downsampling = 2 41 | for i in range(n_downsampling): 42 | mult = 2**i 43 | model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, 44 | stride=2, padding=1, bias=use_bias), 45 | norm_layer(ngf * mult * 2), 46 | nn.ReLU(True)] 47 | 48 | mult = 2**n_downsampling 49 | for i in range(n_blocks): 50 | model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] 51 | 52 | self.model = nn.Sequential(*model) 53 | 54 | def forward(self, input): 55 | return self.model(input) 56 | 57 | 58 | # The base Decoder class defined for DLoc's Decoders. 
59 | # Depending upon the ModelADT wraper around the decoder, 60 | # the decoder would either be a Location Decoder or a Consistency decoder. 61 | # For more details refer to ModelADT.py wrapper implementation and params.py 62 | class ResnetDecoder(nn.Module): 63 | def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=9, padding_type='zero', encoder_blocks=6): 64 | assert(n_blocks >= 0) 65 | super(ResnetDecoder, self).__init__() 66 | self.input_nc = input_nc 67 | self.output_nc = output_nc 68 | self.ngf = ngf 69 | if type(norm_layer) == functools.partial: 70 | use_bias = norm_layer.func == nn.InstanceNorm2d 71 | else: 72 | use_bias = norm_layer == nn.InstanceNorm2d 73 | 74 | model = [] 75 | n_downsampling = 2 76 | for i in range(n_downsampling): 77 | mult = 2**i 78 | 79 | mult = 2**n_downsampling 80 | for i in range(n_blocks): 81 | if i <= encoder_blocks: 82 | continue 83 | model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] 84 | 85 | for i in range(n_downsampling): 86 | mult = 2**(n_downsampling - i) 87 | model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), 88 | kernel_size=3, stride=2, 89 | padding=1, output_padding=0, 90 | bias=use_bias), 91 | norm_layer(int(ngf * mult / 2)), 92 | nn.ReLU(True)] 93 | # model += [nn.ReflectionPad2d(3)] 94 | model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=[3,3])] 95 | model += [nn.Sigmoid()] 96 | 97 | self.model = nn.Sequential(*model) 98 | 99 | def forward(self, input): 100 | #print("decoder.input = ", input.shape) 101 | return self.model(input) 102 | # In[3]: 103 | 104 | 105 | # Define a resnet block 106 | class ResnetBlock(nn.Module): 107 | def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): 108 | super(ResnetBlock, self).__init__() 109 | self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) 110 | 111 | def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): 112 | conv_block = [] 113 | p = 0 114 | if padding_type == 'reflect': 115 | conv_block += [nn.ReflectionPad2d(1)] 116 | elif padding_type == 'replicate': 117 | conv_block += [nn.ReplicationPad2d(1)] 118 | elif padding_type == 'zero': 119 | p = 1 120 | else: 121 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 122 | 123 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), 124 | norm_layer(dim), 125 | nn.ReLU(True)] 126 | if use_dropout: 127 | conv_block += [nn.Dropout(0.25)] 128 | 129 | p = 0 130 | if padding_type == 'reflect': 131 | conv_block += [nn.ReflectionPad2d(1)] 132 | elif padding_type == 'replicate': 133 | conv_block += [nn.ReplicationPad2d(1)] 134 | elif padding_type == 'zero': 135 | p = 1 136 | else: 137 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 138 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), 139 | norm_layer(dim)] 140 | 141 | return nn.Sequential(*conv_block) 142 | 143 | def forward(self, x): 144 | out = x + self.conv_block(x) 145 | return out -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 WCSNG @ UC San Diego 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to 
deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DLoc Network Architecture Codes 2 | 3 | This repository contains the PyTorch implementation of DLoc from [Deep Learning based Wireless Localization for Indoor Navigation](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894). 4 | 5 | The datasets (features) required to run these codes can be downloaded from the [WILD](https://wcsng.ucsd.edu/wild/) website. You can also download the raw channels from the [WILD](https://wcsng.ucsd.edu/wild/) webpage to run your own algorithms on them. 6 | 7 | ## Requirements 8 | 9 | To install requirements: 10 | 11 | ```setup 12 | pip install -r requirements.txt 13 | ``` 14 | #### Note: 15 | The requirements have been tested on an Ubuntu 18.04 Docker image with PyTorch version 1.4.0. 16 | 17 | ## Training and Evaluation 18 | 19 | To train the model(s) in the paper and evaluate them, run this command: 20 | 21 | ```train_test 22 | python train_and_test.py 23 | ``` 24 | 25 | The script automatically imports the parameters from [params.py](params.py). 26 | 27 | The parameters and their descriptions can be found in the comments of the example implementation in the [params.py](params.py) file. 28 | 29 | To recreate the results from the [paper](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894), refer to the [README](./params_storage/README.md) of the **params_storage** folder. 
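If you would rather drive the building blocks from your own script instead of going through [params.py](params.py) alone, the sketch below shows how the pieces of this repository fit together. It is a minimal illustration, not the actual [train_and_test.py](train_and_test.py); it assumes a CUDA machine (the CPU path is unsupported, per the `gpu_ids` comment in params.py), and `dataset_train.mat` is a placeholder name for a feature file downloaded from [WILD](https://wcsng.ucsd.edu/wild/).

```python
# Minimal wiring sketch (not the actual train_and_test.py): one training step
# of the Enc+2Dec DLoc model on a single mini-batch.
from params import opt_exp, opt_encoder, opt_decoder, opt_offset_decoder
from modelADT import ModelADT
from joint_model import Enc_2Dec_Network
from data_loader import load_data

# Build the encoder, the location decoder and the consistency (offset) decoder
# from their parameter dictionaries defined in params.py.
enc, dec, off_dec = ModelADT(), ModelADT(), ModelADT()
for net, opt in ((enc, opt_encoder), (dec, opt_decoder), (off_dec, opt_offset_decoder)):
    net.initialize(opt)
    net.setup(opt)

# Join them into the 1-encoder / 2-decoder DLoc graph.
model = Enc_2Dec_Network()
model.initialize(opt_exp, enc, dec, off_dec, gpu_ids=opt_exp.gpu_ids)

# 'dataset_train.mat' is a placeholder for a WILD feature file.
features_wo_offset, features_w_offset, labels_gaussian_2d = load_data('dataset_train.mat')

# One mini-batch: network inputs, location targets, consistency targets.
b = opt_exp.batch_size
model.set_input(features_w_offset[:b], labels_gaussian_2d[:b], features_wo_offset[:b])
model.optimize_parameters()
```

In practice the training script additionally iterates over epochs and mini-batches and periodically saves checkpoints and outputs; see [trainer.py](trainer.py) and [train_and_test.py](train_and_test.py) for the full flow.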
30 | 31 | ##### MATLAB codes to transform the raw CSI channels open-sourced at [WILD](https://github.com/ucsdwcsng/DLoc_pt_code/blob/main/wild.md) into DLoc features can be accessed at [CSI-to-Features](https://github.com/ucsdwcsng/CSI_to_DLocFeatures) -------------------------------------------------------------------------------- /data_loader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # A simple data loader that imports the train and test mat files (MATLAB v7.3 / HDF5) 3 | # from the `filename` and converts them to torch tensors 4 | # to be loaded for training and testing the DLoc network 5 | # `features_wo_offset`: targets for the consistency decoder 6 | # `features_w_offset` : inputs for the network/encoder 7 | # `labels_gaussian_2d`: targets for the location decoder 8 | import torch 9 | import h5py 10 | import scipy.io 11 | import numpy as np 12 | 13 | def load_data(filename): 14 | print('Loading '+filename) 15 | arrays = {} 16 | f = h5py.File(filename,'r') 17 | features_wo_offset = torch.tensor(np.transpose(np.array(f.get('features_wo_offset'), dtype=np.float32)), dtype=torch.float32) 18 | features_w_offset = torch.tensor(np.transpose(np.array(f.get('features_w_offset'), dtype=np.float32)), dtype=torch.float32) 19 | labels_gaussian_2d = torch.tensor(np.transpose(np.array(f.get('labels_gaussian_2d'), dtype=np.float32)), dtype=torch.float32) 20 | 21 | return features_wo_offset, features_w_offset, labels_gaussian_2d -------------------------------------------------------------------------------- /joint_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Defines a PyTorch graph for forward and backward propagation 4 | within the network encoders and decoders. 
5 | ''' 6 | import torch 7 | from utils import * 8 | from Generators import * 9 | from data_loader import * 10 | from params import * 11 | 12 | 13 | ''' 14 | Class Definition for 1 Encoder and 1 Decoder joint model 15 | ''' 16 | class Enc_Dec_Network(): 17 | 18 | def initialize(self, opt, encoder, decoder, frozen_dec=False, frozen_enc=False, gpu_ids='1'): 19 | self.opt = opt 20 | self.isTrain = opt.isTrain 21 | self.encoder = encoder 22 | self.decoder = decoder 23 | self.frozen_dec = frozen_dec 24 | self.frozen_enc = frozen_enc 25 | self.device = torch.device('cuda:{}'.format(gpu_ids[0])) # if self.gpu_ids else torch.device('cpu') 26 | # self.encoder.net = encoder.net.to(self.device) 27 | # self.decoder.net = decoder.net.to(self.device) 28 | 29 | def set_input(self, input, target, convert_enc=True, shuffle_channel=True): 30 | self.input = input.to(self.device) 31 | self.target = target.to(self.device) 32 | self.encoder.set_data(self.input, self.input, convert=convert_enc, shuffle_channel=shuffle_channel) 33 | 34 | def save_networks(self, epoch): 35 | self.encoder.save_networks(epoch) 36 | self.decoder.save_networks(epoch) 37 | 38 | def save_outputs(self): 39 | self.encoder.save_outputs() 40 | self.decoder.save_outputs() 41 | 42 | def update_learning_rate(self): 43 | self.encoder.update_learning_rate() 44 | 45 | def forward(self): 46 | self.encoder.forward() 47 | self.decoder.set_data(self.encoder.output, self.target) 48 | self.decoder.forward() 49 | 50 | def test(self): 51 | self.encoder.test() 52 | self.decoder.set_data(self.encoder.output, self.target) 53 | self.decoder.test() 54 | 55 | def backward(self): 56 | self.decoder.backward() 57 | # self.encoder.backward() 58 | 59 | def optimize_parameters(self): 60 | self.forward() 61 | self.backward() 62 | if not self.frozen_enc: 63 | self.encoder.optimizer.step() 64 | if not self.frozen_dec: 65 | self.decoder.optimizer.step() 66 | self.encoder.optimizer.zero_grad() 67 | self.decoder.optimizer.zero_grad() 68 | 69 | def eval(self): 70 | self.encoder.forward() 71 | self.decoder.set_data(self.encoder.output, self.target) 72 | self.decoder.forward() 73 | 74 | ''' 75 | Class Definition for the final DLoc architecture with 76 | 1 Encoder and 2 Decoders joint model 77 | ''' 78 | class Enc_2Dec_Network(): 79 | 80 | def initialize(self, opt , encoder, decoder, offset_decoder, frozen_dec=False, frozen_enc=False, gpu_ids='1'): 81 | print('initializing Encoder and 2 Decoders Model') 82 | self.opt = opt 83 | self.isTrain = opt.isTrain 84 | self.encoder = encoder 85 | self.decoder = decoder 86 | self.offset_decoder = offset_decoder 87 | self.frozen_dec = frozen_dec 88 | self.frozen_enc = frozen_enc 89 | self.device = torch.device('cuda:{}'.format(gpu_ids[0])) # if self.gpu_ids else torch.device('cpu') 90 | # self.encoder.net = encoder.net.to(self.device) 91 | # self.decoder.net = decoder.net.to(self.device) 92 | self.results_save_dir = opt.results_dir 93 | 94 | def set_input(self, input, target ,offset_target ,convert_enc=True, shuffle_channel=True): 95 | # features_w_offset, labels_gaussian_2d, features_wo_offset 96 | # input, target, offset_target 97 | self.input = input.to(self.device) 98 | self.target = target.to(self.device) 99 | self.offset_target = offset_target.to(self.device) 100 | self.encoder.set_data(self.input, self.input, convert=convert_enc, shuffle_channel=shuffle_channel) 101 | 102 | def save_networks(self, epoch): 103 | self.encoder.save_networks(epoch) 104 | self.decoder.save_networks(epoch) 105 | self.offset_decoder.save_networks(epoch) 
106 | 107 | def save_outputs(self): 108 | self.encoder.save_outputs() 109 | self.decoder.save_outputs() 110 | self.offset_decoder.save_outputs() 111 | 112 | def update_learning_rate(self): 113 | self.encoder.update_learning_rate() 114 | self.decoder.update_learning_rate() 115 | self.offset_decoder.update_learning_rate() 116 | 117 | def forward(self): 118 | self.encoder.forward() 119 | self.decoder.set_data(self.encoder.output, self.target) 120 | self.offset_decoder.set_data(self.encoder.output, self.offset_target) 121 | self.decoder.forward() 122 | self.offset_decoder.forward() 123 | 124 | # Test the network once set into Evaluation mode! 125 | def test(self): 126 | self.encoder.test() 127 | self.decoder.set_data(self.encoder.output, self.target) 128 | self.offset_decoder.set_data(self.encoder.output, self.offset_target) 129 | self.decoder.test() 130 | self.offset_decoder.test() 131 | 132 | def backward(self): 133 | self.decoder.backward() 134 | self.offset_decoder.backward() 135 | # self.encoder.backward() 136 | 137 | def optimize_parameters(self): 138 | self.forward() 139 | self.backward() 140 | if not self.frozen_enc: 141 | self.encoder.optimizer.step() 142 | if not self.frozen_dec: 143 | self.decoder.optimizer.step() 144 | self.offset_decoder.optimizer.step() 145 | self.encoder.optimizer.zero_grad() 146 | self.decoder.optimizer.zero_grad() 147 | self.offset_decoder.optimizer.zero_grad() 148 | 149 | # set the models to evaluation mode 150 | def eval(self): 151 | self.encoder.eval() 152 | self.decoder.eval() 153 | self.offset_decoder.eval() -------------------------------------------------------------------------------- /modelADT.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Defines a generic wrapper class for all the network models 4 | Utilizes params.py to create, initiate, load and train the network. 
5 | ''' 6 | import torch 7 | from collections import OrderedDict 8 | import scipy.io 9 | from torch.autograd import Variable 10 | import os 11 | from Generators import * 12 | from utils import * 13 | 14 | class ModelADT(): 15 | def name(self): 16 | return 'Pix2PixModel' 17 | 18 | def initialize(self, opt): 19 | 20 | self.opt = opt 21 | # self.gpu_ids = opt.gpu_ids 22 | gpu_ids = [] 23 | for i in range(torch.cuda.device_count()): 24 | gpu_ids.append(str(i)) 25 | print(gpu_ids) 26 | self.gpu_ids = gpu_ids 27 | self.isTrain = opt.isTrain 28 | self.loss_weight = opt.lambda_L 29 | self.reg_loss_weight = opt.lambda_reg 30 | self.cross_loss_weight = opt.lambda_cross 31 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) # if self.gpu_ids else torch.device('cpu') 32 | 33 | print(self.device) 34 | self.model_name = self.opt.name 35 | self.save_dir = os.path.join(self.opt.checkpoints_save_dir, self.model_name) 36 | self.load_dir = os.path.join(self.opt.checkpoints_load_dir, self.model_name) 37 | self.results_save_dir = opt.results_dir 38 | 39 | self.loss_names = [] 40 | self.visual_names = [] 41 | self.image_paths = [] 42 | 43 | self.loss_names = ['loss_criterion'] 44 | self.visual_names = ['output'] 45 | 46 | 47 | self.net = get_model_funct(self.opt.net)(self.opt, self.gpu_ids) 48 | self.net = self.net.to(self.device) 49 | 50 | if self.isTrain: 51 | if self.opt.loss_type == "L2": 52 | self.loss_criterion = torch.nn.MSELoss() 53 | elif self.opt.loss_type == "L1": 54 | self.loss_criterion = torch.nn.L1Loss() 55 | elif self.opt.loss_type == "L1_sumL2": 56 | self.loss_criterion = torch.nn.L1Loss() 57 | elif self.opt.loss_type == "L2_sumL2": 58 | self.loss_criterion = torch.nn.MSELoss() 59 | elif self.opt.loss_type == "L2_sumL1": 60 | self.loss_criterion = torch.nn.MSELoss() 61 | elif self.opt.loss_type == "L2_offset_loss": 62 | self.loss_criterion = torch.nn.MSELoss() 63 | elif self.opt.loss_type == "L1_offset_loss": 64 | self.loss_criterion = torch.nn.L1Loss() 65 | elif self.opt.loss_type == "L1_sumL2_cross": 66 | self.loss_criterion = torch.nn.MSELoss() 67 | elif self.opt.loss_type == "L2_sumL2_cross": 68 | self.loss_criterion = torch.nn.MSELoss() 69 | self.cross_loss_criterion = torch.nn.NLLLoss() 70 | 71 | # initialize optimizers 72 | self.optimizers = [] 73 | self.optimizer = torch.optim.Adam(self.net.parameters(),lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) 74 | self.optimizers.append(self.optimizer) 75 | 76 | # load and print networks; create schedulers 77 | def setup(self, opt, parser=None): 78 | if self.isTrain: 79 | self.schedulers = [get_scheduler(optimizer, opt) for optimizer in self.optimizers] 80 | 81 | if not self.isTrain or opt.continue_train: 82 | self.load_networks(opt.starting_epoch_count) 83 | self.print_networks(opt.verbose) 84 | 85 | # make models eval mode during test time 86 | def eval(self): 87 | self.net.eval() 88 | 89 | # used in test time, wrapping `forward` in no_grad() so we don't save 90 | # intermediate steps for backprop 91 | def test(self): 92 | with torch.no_grad(): 93 | self.forward() 94 | 95 | def save_outputs(self): 96 | if not os.path.exists(self.results_save_dir): 97 | os.makedirs(self.results_save_dir, exist_ok=True) 98 | to_save_dict = {} 99 | for tensor in self.visual_names: 100 | tensor_val = getattr(self, tensor).data.cpu().numpy() 101 | to_save_dict[tensor] = tensor_val 102 | scipy.io.savemat(self.results_save_dir+".mat", mdict=to_save_dict) 103 | 104 | 105 | # update learning rate (called once every epoch) 106 | def 
update_learning_rate(self): 107 | for scheduler in self.schedulers: 108 | scheduler.step() 109 | lr = self.optimizers[0].param_groups[0]['lr'] 110 | print('learning rate = %.7f' % lr) 111 | 112 | # return visualization images. train.py will display these images, and save the images to a html 113 | def get_current_visuals(self): 114 | visual_ret = OrderedDict() 115 | for name in self.visual_names: 116 | if isinstance(name, str): 117 | visual_ret[name] = getattr(self, name) 118 | return visual_ret 119 | 120 | # return traning losses/errors. train.py will print out these errors as debugging information 121 | def get_current_losses(self): 122 | errors_ret = OrderedDict() 123 | for name in self.loss_names: 124 | if isinstance(name, str): 125 | # float(...) works for both scalar tensor and float number 126 | errors_ret[name] = float(getattr(self, 'loss_' + name)) 127 | return errors_ret 128 | 129 | # save models to the disk 130 | def save_networks(self, epoch): 131 | name = self.model_name 132 | save_filename = '%s_net_%s.pth' % (epoch, name) 133 | if not os.path.exists(self.save_dir): 134 | os.makedirs(self.save_dir) 135 | save_path = os.path.join(self.save_dir, save_filename) 136 | net = self.net 137 | print(save_path) 138 | print('net'+name) 139 | if len(self.gpu_ids) > 0 and torch.cuda.is_available() and hasattr(net, 'module'): 140 | torch.save(net.module.cpu().state_dict(), save_path) 141 | else: 142 | torch.save(net.cpu().state_dict(), save_path) 143 | net.to(self.device) 144 | 145 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 146 | key = keys[i] 147 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 148 | if module.__class__.__name__.startswith('InstanceNorm') and (key == 'running_mean' or key == 'running_var'): 149 | if getattr(module, key) is None: 150 | state_dict.pop('.'.join(keys)) 151 | if module.__class__.__name__.startswith('InstanceNorm') and (key == 'num_batches_tracked'): 152 | state_dict.pop('.'.join(keys)) 153 | else: 154 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 155 | 156 | # load models from the disk 157 | def load_networks(self, epoch, load_dir=""): 158 | """ 159 | epoch (int/str): epoch index / "best" / "latest" 160 | """ 161 | assert isinstance(epoch,int) or epoch=="best" or epoch=="latest" 162 | load_filename = f'{epoch}_net_{self.model_name}.pth' 163 | 164 | if load_dir: 165 | # use given load dir 166 | load_path = os.path.join(load_dir, self.model_name, load_filename) 167 | else: 168 | # use default load dir 169 | load_path = os.path.join(self.load_dir, load_filename) 170 | 171 | net = self.net 172 | if isinstance(net, torch.nn.DataParallel): 173 | net = net.module 174 | print('loading the model from %s' % load_path) 175 | # if you are using PyTorch newer than 0.4 (e.g., built from 176 | # GitHub source), you can remove str() on self.device 177 | state_dict = torch.load(load_path) 178 | if hasattr(state_dict, '_metadata'): 179 | del state_dict._metadata 180 | 181 | # patch InstanceNorm checkpoints prior to 0.4 182 | print(state_dict.keys()) 183 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop 184 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 185 | net.load_state_dict(state_dict) 186 | net = net.to(self.device) 187 | 188 | # print network information 189 | def print_networks(self, verbose): 190 | print('---------- Networks initialized -------------') 191 | net = self.net 192 | name = self.model_name 193 | 
num_params = 0 194 | for param in net.parameters(): 195 | num_params += param.numel() 196 | if verbose: 197 | print(net) 198 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 199 | print('-----------------------------------------------') 200 | 201 | # set requies_grad=Fasle to avoid computation 202 | def set_requires_grad(self, nets, requires_grad=False): 203 | if not isinstance(nets, list): 204 | nets = [nets] 205 | for net in nets: 206 | if net is not None: 207 | for param in net.parameters(): 208 | param.requires_grad = requires_grad 209 | 210 | # Set the input and target data for the network to train on/evaluate against 211 | def set_data(self, input, target, convert=False, shuffle_channel=False): 212 | shape_in = input.shape 213 | if shuffle_channel: 214 | self.input = input[:,torch.randperm(shape_in[1]),:,:] 215 | else: 216 | self.input = input 217 | self.target = target 218 | if convert: 219 | self.input, self.target = Variable(self.input), Variable(self.target) 220 | 221 | self.input = self.input.to(self.device) 222 | self.target = self.target.to(self.device) 223 | 224 | # Define the forward pass to compute loss 225 | def forward(self): 226 | self.output = self.net(self.input) 227 | if self.opt.loss_type != "NoLoss": 228 | self.loss = self.loss_weight*self.loss_criterion(self.output, self.target) 229 | 230 | if self.opt.loss_type == "L1_sumL2" or self.opt.loss_type == "L2_sumL2": 231 | self.loss += self.reg_loss_weight*torch.norm(self.output).div(self.output.numel()) 232 | self.reg_loss = self.reg_loss_weight*torch.norm(self.output).div(self.output.numel()) 233 | 234 | if self.opt.loss_type == "L2_sumL1": 235 | self.loss += self.reg_loss_weight*torch.norm(self.output,p=1).div(self.output.numel()) 236 | self.reg_loss = self.reg_loss_weight*torch.norm(self.output,p=1).div(self.output.numel()) 237 | 238 | if self.opt.loss_type == "L1_sumL2_cross" or self.opt.loss_type == "L2_sumL2_cross": 239 | self.loss += self.reg_loss_weight*torch.norm(self.output).div(self.output.numel()) 240 | self.loss += self.cross_loss_weight*self.cross_loss_criterion(self.output.flatten(start_dim=1),self.target.flatten(start_dim=1)) 241 | 242 | if self.opt.loss_type == "L1_offset_loss" or self.opt.loss_type == "L2_offset_loss": 243 | self.loss += self.reg_loss_weight*torch.norm(self.output).div(self.output.numel()) 244 | 245 | def backward(self): 246 | self.loss.backward(retain_graph=True) 247 | 248 | def optimize_parameters(self): 249 | self.forward() 250 | self.backward() 251 | self.optimizer.step() 252 | self.optimizer.zero_grad() 253 | -------------------------------------------------------------------------------- /params.py: -------------------------------------------------------------------------------- 1 | ''' 2 | All the other python files import this file by default 3 | to define the required parameters to create/load/initiate/train/test/save/log 4 | the networks, models, logs and results 5 | ''' 6 | from easydict import EasyDict as edict 7 | import time 8 | from os.path import join 9 | opt_exp = edict() 10 | 11 | # ---------- Global Experiment param -------------- 12 | opt_exp.isTrain = True #type=bool, default=True, help='enables backpropogation, else the network is only used for evlauation') 13 | opt_exp.continue_train = False #type=bool, default=False, help='continue training: load the latest model') 14 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 15 | opt_exp.save_latest_freq = 5000 
#type=int, default=5000, help='frequency of saving the latest results') 16 | opt_exp.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 17 | opt_exp.n_epochs = 50 #type=int, default=50, help='# of Epochs to run the training for') 18 | opt_exp.gpu_ids = ['1','2','3','0'] #type=tuple of char, default=['1','2','3','0'], help='gpu ids: e.g. ['0'] ['0','1','2'], ['0','2']. CPU implementation is not supported. gpu_ids[0] is used for loading the network and the rest for DataParellilization') 19 | opt_exp.data = "rw_to_rw" #type=str, default='rw_to_rw', help='Dataset loader, switch case system [rw_to_rw|rw_to_rw_atk|rw_to_rw_env2|rw_to_rw_env3|rw_to_rw_env4|rw_to_rw_40|rw_to_rw_20|data_segment]') 20 | opt_exp.n_decoders = 2 #type=int, default=2, help='# of Decoders to be used [1:Only Location Decoder|2:Both Location and Consistency Decoder]') 21 | 22 | opt_exp.batch_size = 32 #type=int, default=32, help='batch size for training and testing the network') 23 | opt_exp.ds_step_trn = 1 #type=int, default=1, help='data sub-sampling number for the training data') 24 | opt_exp.ds_step_tst = 1 #type=int, default=1, help='data sub-sampling number for the testing data') 25 | opt_exp.weight_decay = 1e-5 #type=float, default=1e-5, help='weight decay parameter for the Adam optimizer') 26 | 27 | # ------ name of experiment ---------- 28 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train_and_test.py is ran 29 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) # trained models are saved here 30 | opt_exp.results_dir = opt_exp.checkpoints_dir # the resulting images from the offset decoder and the decoder are saved here 31 | opt_exp.log_dir = opt_exp.checkpoints_dir # the logs of the median, 90th, 99th percentile errors, compensation ecoder and location decoder losses are saved for each epoch and each batch here 32 | opt_exp.load_dir = opt_exp.checkpoints_dir # when loading a pre-trained model it is loaded from here 33 | 34 | # ---------- offset encoder param -------------- 35 | opt_encoder = edict() 36 | opt_encoder.parent_exp = opt_exp 37 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 38 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 39 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_encoder', help='selects model to use for netG') 40 | opt_encoder.net = 'G' #type=str, default='G', help='selects model to use for netG') 41 | opt_encoder.resnet_blocks = 6 #type=int, default=6, help='# of resent blocks to use') 42 | opt_encoder.no_dropout = False #type=bool, default=False help='no dropout for the generator') 43 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 44 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 45 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 46 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 47 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 48 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 49 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters 
iterations') 50 | opt_encoder.lambda_L = 1 #type=float, default=1, help='weightage given to the Generator') 51 | opt_encoder.lambda_cross = 1e-5 #type=float, default=1e-4, help='weight for cross entropy loss') 52 | opt_encoder.lambda_reg = 5e-4 #type=float, default=5e-4, help='regularization for the two encoder case') 53 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 54 | 55 | 56 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 57 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 58 | opt_encoder.save_latest_freq = opt_encoder.parent_exp.save_latest_freq 59 | opt_encoder.save_epoch_freq = opt_encoder.parent_exp.save_epoch_freq 60 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 61 | opt_encoder.isTrain = opt_encoder.parent_exp.isTrain #type=bool, default=True, help='whether to train the network encoder or not') 62 | opt_encoder.continue_train = False #type=bool, default=False, help='continue training: load the latest model') 63 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 64 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 65 | opt_encoder.loss_type = "NoLoss" #type=string, default='NoLoss', help='Loss type for the network to enforce ['NoLoss'|'L1'|'L2'|'L1_sumL2'|'L2_sumL2'|'L2_sumL1'|'L2_offset_loss'|'L1_offset_loss'|'L1_sumL2_cross'|'L2_sumL2_cross']') 66 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 67 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 68 | 69 | 70 | 71 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=tuple of char, default=['1','2','3','0'], help='gpu ids: e.g. ['0'] ['0','1','2'], ['0','2']. CPU implementation is not supported. gpu_ids[0] is used for loading the network and the rest for DataParellilization') 72 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 73 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 74 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 75 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 76 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 77 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 78 | opt_encoder.verbose = False #type=bool, default=False, help='if specified, print more debugging information') 79 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 80 | 81 | 82 | # ---------- decoder param -------------- 83 | opt_decoder = edict() 84 | opt_decoder.parent_exp = opt_exp 85 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 86 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 87 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_decoder', help='selects model to use for netG') 88 | opt_decoder.net = 'G' #type=str, default='G', help='selects model to use for netG')opt_decoder.no_dropout = False #type=bool, default=False, help='no dropout for the generator') 89 | opt_decoder.resnet_blocks = 9 #type=int, default=9, help='total number of resent blocks including the ones in the encoder') 90 | opt_decoder.encoder_res_blocks = opt_encoder.resnet_blocks 91 | opt_decoder.no_dropout = False #type=bool, default=False, help='To not appply dropout layer') 92 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 93 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 94 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 95 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 96 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 97 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 98 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 99 | opt_decoder.lambda_L = 1 #type=float, default=1, help='weightage given to the Generator') 100 | opt_decoder.lambda_cross = 1e-5 #type=float, default=1e-5, help='weight given to cross entropy loss') 101 | opt_decoder.lambda_reg = 5e-4 #type=float, default=5e-4, help='regularization weight') 102 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 103 | 104 | 105 | opt_decoder.input_nc = 4 #type=int, default=4, help='# of input image channels') 106 | opt_decoder.output_nc = 1 #type=int, default=1, help='# of output image channels') 107 | opt_decoder.save_latest_freq = opt_decoder.parent_exp.save_latest_freq 108 | opt_decoder.save_epoch_freq = opt_decoder.parent_exp.save_epoch_freq 109 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 110 | opt_decoder.isTrain = opt_decoder.parent_exp.isTrain #type=bool, default=True, help='whether to train the network or not') 111 | opt_decoder.continue_train = False #type=bool, default=False, help='continue training: load the latest model') 112 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 113 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 114 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 115 | opt_decoder.loss_type = "L2_sumL1" #type=string, default='L2_sumL1', help='Loss type for the netowkr to enforce ['NoLoss'|'L1'|'L2'|'L1_sumL2'|'L2_sumL2'|'L2_sumL1'|'L2_offset_loss'|'L1_offset_loss'|'L1_sumL2_cross'|'L2_sumL2_cross']') 116 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 117 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 118 | 119 | 120 | 121 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=tuple of char, default=['1','2','3','0'], help='gpu ids: e.g. ['0'] ['0','1','2'], ['0','2']. CPU implementation is not supported. gpu_ids[0] is used for loading the network and the rest for DataParellilization') 122 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 123 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 124 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 125 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 126 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 127 | opt_decoder.verbose = False #type=bool, default=False, help='if specified, print more debugging information') 128 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 129 | 130 | 131 | # ---------- offset decoder param -------------- 132 | opt_offset_decoder = edict() 133 | opt_offset_decoder.parent_exp = opt_exp 134 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 135 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 136 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_decoder', help='selects model to use for netG') 137 | opt_offset_decoder.net = 'G' #type=str, default='G', help='selects model to use for netG') 138 | opt_offset_decoder.resnet_blocks = 12 #type=int, default=12, help='total number of resent blocks including the ones in the encoder') 139 | opt_offset_decoder.encoder_res_blocks = opt_encoder.resnet_blocks 140 | opt_offset_decoder.no_dropout = False #type=bool, default=False, help='To not appply dropout layer') 141 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 142 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 143 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 144 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 145 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 146 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 147 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 148 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 149 | opt_offset_decoder.lambda_cross = 0 #type=float, default=1e-5, 
help='weight given to cross entropy loss') 150 | opt_offset_decoder.lambda_reg = 0 #type=float, default=5e-4, help='regularization weight') 151 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 152 | 153 | 154 | opt_offset_decoder.input_nc = 4 #type=int, default=4, help='# of input image channels') 155 | opt_offset_decoder.output_nc = 4 #type=int, default=4, help='# of output image channels') 156 | opt_offset_decoder.save_latest_freq = opt_offset_decoder.parent_exp.save_latest_freq 157 | opt_offset_decoder.save_epoch_freq = opt_offset_decoder.parent_exp.save_epoch_freq 158 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 159 | opt_offset_decoder.isTrain = opt_offset_decoder.parent_exp.isTrain #type=bool, default=True, help='whether to train the network or not') 160 | opt_offset_decoder.continue_train = False #type=bool, default=False, help='continue training: load the latest model') 161 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 162 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 163 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 164 | opt_offset_decoder.loss_type = "L2_offset_loss" #type=string, default='L2_offset_loss', help='Loss type for the netowkr to enforce ['NoLoss'|'L1'|'L2'|'L1_sumL2'|'L2_sumL2'|'L2_sumL1'|'L2_offset_loss'|'L1_offset_loss'|'L1_sumL2_cross'|'L2_sumL2_cross']') 165 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 166 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 167 | 168 | 169 | 170 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default=['1','2','3','0'], help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 171 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 172 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 174 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 175 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 176 | opt_offset_decoder.verbose = False#type=bool, default=False, help='if specified, print more debugging information') 177 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 178 | -------------------------------------------------------------------------------- /params_storage/README.md: -------------------------------------------------------------------------------- 1 | The *.py* files in this folder are the parameter files that can be used to load the pre-tuned parameters to re-produce the results shown in the [paper](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894). 
2 | 3 | The [train_and_test.py](../train_and_test.py) script loads the required parameters to run, load and save the network and results from [params.py](../params.py). The [params.py](../params.py) file shows an example implementation of these parameters with a description of each parameter's use in the network. This folder contains the pre-tuned parameter files for reproducing the results shown in the [paper](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894). 4 | 5 | ### Description of the parameter files 6 | 7 | Here is a description of which parameters were used for each result presented in the [paper](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894). 8 | 9 | Please refer to the [DLoc paper](https://dl.acm.org/doi/pdf/10.1145/3372224.3380894) for the figure numbers and the corresponding results that you can reproduce. 10 | 11 | - **params_fig10a.py**: Generates the DLoc results to reproduce the plot in Figure 10a 12 | - **params_fig10b.py**: Generates the DLoc results to reproduce the plot in Figure 10b 13 | - **params_fig11a.py**: Generates the DLoc results to reproduce the plot in Figure 11a without the compensation decoder 14 | - **params_fig11b.py**: Generates the DLoc results to reproduce the plot in Figure 11b without the compensation decoder 15 | - **params_fig13a_20MHz.py**: Generates the DLoc results to reproduce the plot in Figure 13a for the 20MHz bandwidth 16 | - **params_fig13a_40MHz.py**: Generates the DLoc results to reproduce the plot in Figure 13a for the 40MHz bandwidth 17 | - **params_fig13b.py**: Generates the DLoc results to reproduce the plot in Figure 13b for the disjoint dataset 18 | - **params_tab1_test2.py**: Generates the DLoc results to reproduce the results in Table 1, where the network is trained on Env-1/3/4 and tested on Env-2 19 | - **params_tab1_test3.py**: Generates the DLoc results to reproduce the results in Table 1, where the network is trained on Env-1/2/4 and tested on Env-3 20 | - **params_tab1_test4.py**: Generates the DLoc results to reproduce the results in Table 1, where the network is trained on Env-1/2/3 and tested on Env-4 21 | 22 | ### Loading the parameters 23 | 24 | As mentioned earlier, the [train_and_test.py](../train_and_test.py) script loads the required parameters from [params.py](../params.py). So to load the pre-tuned parameters and regenerate the results from the paper, copy the corresponding parameter file from this folder over the params.py file in the parent folder, e.g. for Figure 10a: 25 | 26 | ```params 27 | cp params_fig10a.py ../params.py 28 | ``` 29 | 30 | Then you can run the [train_and_test.py](../train_and_test.py) script to reproduce the results from the paper. 
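If you already have a finished run under `./runs/` and only want to re-evaluate its checkpoints rather than retrain, the classes in [modelADT.py](../modelADT.py) and [joint_model.py](../joint_model.py) expose the needed hooks. Below is a minimal, hedged sketch with several assumptions: `2021-01-01-00:00:00` is a placeholder run-directory name, `dataset_test.mat` is a placeholder WILD feature file, and passing `"best"` assumes a checkpoint was saved under that tag (`ModelADT.load_networks` also accepts `"latest"` or an integer epoch).

```python
# Hedged evaluation sketch: load saved encoder/decoder weights and run a
# forward pass without backpropagation. Paths and the "best" tag are assumptions.
from os.path import join
from params import opt_exp, opt_encoder, opt_decoder, opt_offset_decoder
from modelADT import ModelADT
from joint_model import Enc_2Dec_Network
from data_loader import load_data

run_dir = join('./runs', '2021-01-01-00:00:00')  # placeholder: an existing run folder

enc, dec, off_dec = ModelADT(), ModelADT(), ModelADT()
for net, opt in ((enc, opt_encoder), (dec, opt_decoder), (off_dec, opt_offset_decoder)):
    net.initialize(opt)
    net.load_networks("best", load_dir=run_dir)  # or an epoch number / "latest"

model = Enc_2Dec_Network()
model.initialize(opt_exp, enc, dec, off_dec, gpu_ids=opt_exp.gpu_ids)
model.eval()  # puts all three sub-networks into evaluation mode

features_wo_offset, features_w_offset, labels_gaussian_2d = load_data('dataset_test.mat')
b = opt_exp.batch_size
model.set_input(features_w_offset[:b], labels_gaussian_2d[:b], features_wo_offset[:b])
model.test()          # forward passes wrapped in torch.no_grad()
model.save_outputs()  # dumps the network outputs to a .mat file via ModelADT.save_outputs()
```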
31 | -------------------------------------------------------------------------------- /params_storage/params_fig10a.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | 7 | # ---------- Global Experiment param -------------- 8 | opt_exp.isTrain = True 9 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 10 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 11 | opt_exp.n_epochs = 50 12 | opt_exp.gpu_ids = ['1','2','3','0'] 13 | opt_exp.data = "rw_to_rw_atk" 14 | opt_exp.n_decoders = 2 15 | 16 | opt_exp.batch_size = 32 17 | opt_exp.ds_step_trn = 1 18 | opt_exp.ds_step_tst = 1 19 | opt_exp.weight_decay = 1e-5 20 | opt_exp.confidence = False 21 | 22 | # ------ name of experiment ---------- 23 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 24 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 25 | opt_exp.results_dir = opt_exp.checkpoints_dir 26 | opt_exp.log_dir = opt_exp.checkpoints_dir 27 | opt_exp.load_dir = opt_exp.checkpoints_dir 28 | 29 | # ---------- offset decoder param -------------- 30 | opt_encoder = edict() 31 | opt_encoder.parent_exp = opt_exp 32 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 33 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 34 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 36 | opt_encoder.resnet_blocks = 6 37 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 38 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 39 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 40 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 41 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 42 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 43 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 44 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 45 | opt_encoder.lambda_L = 1 # weightage given to the Generator 46 | opt_encoder.lambda_cross = 1e-6 47 | opt_encoder.lambda_reg = 5e-4 48 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 49 | 50 | 51 | opt_encoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 52 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 53 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 54 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 55 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 56 | opt_encoder.isTrain = True 57 | opt_encoder.continue_train = False 
#action='store_true', help='continue training: load the latest model') 58 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 59 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 60 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 61 | opt_encoder.loss_type = "NoLoss" 62 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 63 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 64 | 65 | 66 | 67 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 68 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 69 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 71 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 72 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 73 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 74 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 75 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 76 | 77 | 78 | # ---------- decoder param -------------- 79 | opt_decoder = edict() 80 | opt_decoder.parent_exp = opt_exp 81 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 82 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 83 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 84 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 85 | opt_decoder.resnet_blocks = 9 86 | opt_decoder.encoder_res_blocks = 6 87 | opt_decoder.no_dropout = False 88 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 89 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 90 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 91 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 92 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 93 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 94 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 95 | opt_decoder.lambda_L = 1 # weightage given to the 
Generator 96 | opt_decoder.lambda_cross = 1e-6 97 | opt_decoder.lambda_reg = 5e-4 98 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 99 | 100 | 101 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 103 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 104 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 105 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 106 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 107 | opt_decoder.isTrain = True 108 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 109 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 110 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 111 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 112 | opt_decoder.loss_type = "L2_sumL1" 113 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 114 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 115 | 116 | 117 | 118 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 119 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 120 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 122 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 123 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 124 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 125 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 126 | 127 | 128 | # ---------- offset decoder param -------------- 129 | opt_offset_decoder = edict() 130 | opt_offset_decoder.parent_exp = opt_exp 131 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 132 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 133 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 134 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 135 | opt_offset_decoder.resnet_blocks = 12 136 | opt_offset_decoder.encoder_res_blocks = 6 137 | opt_offset_decoder.no_dropout = False 138 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 139 | 
opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 140 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 141 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 142 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 143 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 144 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 145 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 146 | opt_offset_decoder.lambda_cross = 0 147 | opt_offset_decoder.lambda_reg = 0 148 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 149 | 150 | 151 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 153 | opt_offset_decoder.output_nc = 3 #type=int, default=3, help='# of output image channels') 154 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 155 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 156 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 157 | opt_offset_decoder.isTrain = True 158 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 159 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 160 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 161 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 162 | opt_offset_decoder.loss_type = "L2_offset_loss" 163 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 164 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 165 | 166 | 167 | 168 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 169 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 170 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 172 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 173 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 174 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 175 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 176 | -------------------------------------------------------------------------------- /params_storage/params_fig10b.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- offset decoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate 
policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | 
opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_fig11a.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_atk" 13 | opt_exp.n_decoders = 1 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = 
opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | 29 | # ---------- offset decoder param -------------- 30 | opt_encoder = edict() 31 | opt_encoder.parent_exp = opt_exp 32 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 33 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 34 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 36 | opt_encoder.resnet_blocks = 6 37 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 38 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 39 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 40 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 41 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 42 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 43 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 44 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 45 | opt_encoder.lambda_L = 1 # weightage given to the Generator 46 | opt_encoder.lambda_cross = 1e-6 47 | opt_encoder.lambda_reg = 5e-4 48 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 49 | 50 | 51 | opt_encoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 52 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 53 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 54 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 55 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 56 | opt_encoder.isTrain = True 57 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 58 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 59 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 60 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 61 | opt_encoder.loss_type = "NoLoss" 62 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 63 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 64 | 65 | 66 | 67 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 68 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 69 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 71 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 72 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 73 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 74 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 75 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 76 | 77 | 78 | # ---------- decoder param -------------- 79 | opt_decoder = edict() 80 | opt_decoder.parent_exp = opt_exp 81 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 82 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 83 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 84 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 85 | opt_decoder.resnet_blocks = 9 86 | opt_decoder.encoder_res_blocks = 6 87 | opt_decoder.no_dropout = False 88 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 89 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 90 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 91 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 92 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 93 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 94 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 95 | opt_decoder.lambda_L = 1 # weightage given to the Generator 96 | opt_decoder.lambda_cross = 1e-6 97 | opt_decoder.lambda_reg = 5e-4 98 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 99 | 100 | 101 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 103 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 104 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 105 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 106 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 107 | opt_decoder.isTrain = True 108 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 109 | 
opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 110 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 111 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 112 | opt_decoder.loss_type = "L2_sumL1" 113 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 114 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 115 | 116 | 117 | 118 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 119 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 120 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 122 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 123 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 124 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 125 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 126 | 127 | 128 | # ---------- offset decoder param -------------- 129 | opt_offset_decoder = edict() 130 | opt_offset_decoder.parent_exp = opt_exp 131 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 132 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 133 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 134 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 135 | opt_offset_decoder.resnet_blocks = 12 136 | opt_offset_decoder.encoder_res_blocks = 6 137 | opt_offset_decoder.no_dropout = False 138 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 139 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 140 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 141 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 142 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 143 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 144 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 145 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 146 | opt_offset_decoder.lambda_cross = 0 147 | opt_offset_decoder.lambda_reg = 0 148 | 
opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 149 | 150 | 151 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.input_nc = 3 #type=int, default=3, help='# of input image channels') 153 | opt_offset_decoder.output_nc = 3 #type=int, default=3, help='# of output image channels') 154 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 155 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 156 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 157 | opt_offset_decoder.isTrain = True 158 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 159 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 160 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 161 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 162 | opt_offset_decoder.loss_type = "L2_offset_loss" 163 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 164 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 165 | 166 | 167 | 168 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 169 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 170 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 172 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 173 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 174 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 175 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 176 | -------------------------------------------------------------------------------- /params_storage/params_fig11b.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw" 13 | opt_exp.n_decoders = 1 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | 
opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- offset decoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | 
opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 
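# The optimizer fields above (lr, beta1, weight_decay) and the scheduler fields
# just below (lr_policy, lr_decay_iters) map onto standard torch.optim calls.
# A minimal, commented-out sketch of how a block like opt_offset_decoder is
# typically consumed -- `network` is only a placeholder for the offset-decoder
# module built elsewhere in the repository, beta2=0.999 is PyTorch's default
# (this file only sets beta1), and gamma=0.1 is an assumed decay factor that
# the file never pins down:
#
#   import torch
#   optimizer = torch.optim.Adam(network.parameters(),
#                                lr=opt_offset_decoder.lr,
#                                betas=(opt_offset_decoder.beta1, 0.999),
#                                weight_decay=opt_offset_decoder.weight_decay)
#   scheduler = torch.optim.lr_scheduler.StepLR(
#       optimizer,
#       step_size=opt_offset_decoder.lr_decay_iters,
#       gamma=0.1)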
142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_fig13a_20MHz.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_20" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- offset decoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', 
help='learning rate policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | 
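Aside on the `niter`/`niter_decay` pair set just above: they describe the 'lambda' learning-rate policy named in the `lr_policy` help string (hold the initial rate for `niter` epochs, then decay linearly to zero over the next `niter_decay` epochs). Every config in this folder actually selects the 'step' policy, so these two fields are effectively dormant here. A minimal sketch of the usual linear-decay rule, assuming the common pix2pix-style formulation rather than quoting modelADT.py:

```python
# Hedged sketch: the exact multiplier rule used by this codebase is assumed, not quoted.
import torch

def make_lambda_scheduler(optimizer, opt):
    def lr_lambda(epoch):
        # full LR for the first `niter` epochs, then linear decay to zero
        # over the following `niter_decay` epochs
        return 1.0 - max(0, epoch + 1 - opt.niter) / float(opt.niter_decay + 1)
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
```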
opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_fig13a_40MHz.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_40" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = 
opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | 29 | # ---------- encoder param -------------- 30 | opt_encoder = edict() 31 | opt_encoder.parent_exp = opt_exp 32 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 33 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 34 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 36 | opt_encoder.resnet_blocks = 6 37 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 38 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 39 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 40 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 41 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 42 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 43 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 44 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 45 | opt_encoder.lambda_L = 1 # weightage given to the Generator 46 | opt_encoder.lambda_cross = 1e-5 47 | opt_encoder.lambda_reg = 5e-4 48 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 49 | 50 | 51 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 52 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 53 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 54 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 55 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 56 | opt_encoder.isTrain = True 57 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 58 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 59 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 60 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 61 | opt_encoder.loss_type = "NoLoss" 62 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 63 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 64 | 65 | 66 | 67 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2.
use -1 for CPU') 68 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 69 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 71 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 72 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 73 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 74 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 75 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 76 | 77 | 78 | # ---------- decoder param -------------- 79 | opt_decoder = edict() 80 | opt_decoder.parent_exp = opt_exp 81 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 82 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 83 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 84 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 85 | opt_decoder.resnet_blocks = 9 86 | opt_decoder.encoder_res_blocks = 6 87 | opt_decoder.no_dropout = False 88 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 89 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 90 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 91 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 92 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 93 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 94 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 95 | opt_decoder.lambda_L = 1 # weightage given to the Generator 96 | opt_decoder.lambda_cross = 1e-5 97 | opt_decoder.lambda_reg = 5e-4 98 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 99 | 100 | 101 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 103 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 104 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 105 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 106 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 107 | opt_decoder.isTrain = True 108 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 109 | 
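Aside: `batch_size`, `num_threads` and `max_dataset_size` are loader-side knobs. One plausible way they feed a PyTorch `DataLoader` is sketched below; the dataset object and the actual wiring inside data_loader.py are assumptions, not quotations:

```python
# Hedged illustration only; `dataset` is whatever data_loader.py constructs.
from torch.utils.data import DataLoader, Subset

def make_loader(dataset, opt):
    # honour max_dataset_size by keeping only the first n samples
    n = len(dataset) if opt.max_dataset_size == float("inf") else min(len(dataset), int(opt.max_dataset_size))
    return DataLoader(Subset(dataset, range(n)),
                      batch_size=opt.batch_size,
                      shuffle=opt.isTrain,
                      num_workers=opt.num_threads)
```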
opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 110 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 111 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 112 | opt_decoder.loss_type = "L2_sumL1" 113 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 114 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 115 | 116 | 117 | 118 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 119 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 120 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 122 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 123 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 124 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 125 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 126 | 127 | 128 | # ---------- offset decoder param -------------- 129 | opt_offset_decoder = edict() 130 | opt_offset_decoder.parent_exp = opt_exp 131 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 132 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 133 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 134 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 135 | opt_offset_decoder.resnet_blocks = 12 136 | opt_offset_decoder.encoder_res_blocks = 6 137 | opt_offset_decoder.no_dropout = False 138 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 139 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 140 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 141 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 142 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 143 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 144 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 145 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 146 | opt_offset_decoder.lambda_cross = 0 147 | opt_offset_decoder.lambda_reg = 0 148 | 
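Aside: `lambda_L`, `lambda_cross` and `lambda_reg` read as per-term loss weights; note that the offset decoder zeroes the cross and regularisation terms while the encoder and consistency decoder keep small non-zero values. The actual objective is assembled in the training code, which is not reproduced here, so the following is only an assumed shape with hypothetical term names:

```python
# Assumed shape of the weighted objective; `l_main`, `l_cross` and `l_reg` are
# hypothetical placeholders for whatever per-term losses the trainer computes.
def combine_losses(opt, l_main, l_cross, l_reg):
    return opt.lambda_L * l_main + opt.lambda_cross * l_cross + opt.lambda_reg * l_reg
```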
opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 149 | 150 | 151 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 153 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 154 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 155 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 156 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 157 | opt_offset_decoder.isTrain = True 158 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 159 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 160 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 161 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 162 | opt_offset_decoder.loss_type = "L2_offset_loss" 163 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 164 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 165 | 166 | 167 | 168 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 169 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 170 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 172 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 173 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 174 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 175 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 176 | -------------------------------------------------------------------------------- /params_storage/params_fig13b.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "data_segment" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | 
opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is run 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- encoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment.
It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | 
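Aside: `input_nc`/`output_nc` are channel counts, not image sizes: the networks consume a 4-channel input volume, the consistency decoder emits a single-channel image, and the offset decoder further below keeps all 4 channels. A quick shape check, with the spatial size picked arbitrarily for illustration:

```python
# Shape check only; 256x256 is an arbitrary spatial size, not a value taken from the dataset.
# 32, 4 and 1 mirror batch_size, input_nc and output_nc from the fields above.
import torch

x = torch.randn(32, 4, 256, 256)   # network input:  [batch, input_nc, H, W]
y = torch.randn(32, 1, 256, 256)   # decoder target: [batch, output_nc, H, W]
print(x.shape, y.shape)
```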
opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 
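Aside: `beta1`, `lr`, `weight_decay`, the 'step' policy and `lr_decay_iters` map naturally onto an Adam optimizer paired with a `StepLR` schedule. A hedged sketch follows; the second Adam beta (0.999) and the decay factor `gamma=0.1` are assumed values, since the optimizer construction in modelADT.py is not shown here:

```python
# Hedged sketch: 0.999 and gamma=0.1 are assumptions, not values read from the DLoc code.
import torch

def make_optimizer_and_scheduler(net, opt):
    optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr,
                                 betas=(opt.beta1, 0.999),
                                 weight_decay=opt.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=opt.lr_decay_iters,
                                                gamma=0.1)
    return optimizer, scheduler
```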
142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_tab1_test2.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_env2" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is run 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- encoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda',
help='learning rate policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | 
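Aside: `init_type = 'xavier'` together with `init_gain = 0.02` points at Xavier-style weight initialisation scaled by the gain. A minimal sketch of that idea, assuming a plain `net.apply` pass rather than quoting the repository's actual initialisation helper:

```python
# Hedged sketch of 'xavier' initialisation with a configurable gain.
import torch.nn as nn

def init_weights(net, gain=0.02):
    def init_fn(m):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            nn.init.xavier_normal_(m.weight, gain=gain)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
    net.apply(init_fn)
```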
opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_tab1_test3.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_env3" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | 21 | # ------ name of experiment ---------- 22 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 23 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 24 | opt_exp.results_dir = opt_exp.checkpoints_dir 25 | opt_exp.log_dir = 
opt_exp.checkpoints_dir 26 | opt_exp.load_dir = opt_exp.checkpoints_dir 27 | 28 | # ---------- encoder param -------------- 29 | opt_encoder = edict() 30 | opt_encoder.parent_exp = opt_exp 31 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 32 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 33 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 35 | opt_encoder.resnet_blocks = 6 36 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 37 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 38 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 40 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 41 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 42 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 43 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 44 | opt_encoder.lambda_L = 1 # weightage given to the Generator 45 | opt_encoder.lambda_cross = 1e-5 46 | opt_encoder.lambda_reg = 5e-4 47 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 48 | 49 | 50 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 51 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 52 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 53 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 54 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 55 | opt_encoder.isTrain = True 56 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 57 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 58 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 59 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 60 | opt_encoder.loss_type = "NoLoss" 61 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 62 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 63 | 64 | 65 | 66 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2.
use -1 for CPU') 67 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 68 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 70 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 71 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 72 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 73 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 74 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 75 | 76 | 77 | # ---------- decoder param -------------- 78 | opt_decoder = edict() 79 | opt_decoder.parent_exp = opt_exp 80 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 81 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 82 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 83 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 84 | opt_decoder.resnet_blocks = 9 85 | opt_decoder.encoder_res_blocks = 6 86 | opt_decoder.no_dropout = False 87 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 88 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 89 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 90 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 91 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 92 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 93 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 94 | opt_decoder.lambda_L = 1 # weightage given to the Generator 95 | opt_decoder.lambda_cross = 1e-5 96 | opt_decoder.lambda_reg = 5e-4 97 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 98 | 99 | 100 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 102 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 103 | opt_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 104 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 105 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 106 | opt_decoder.isTrain = True 107 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 108 | 
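Aside: `continue_train` and `starting_epoch_count` describe resuming training from a checkpoint stored under `checkpoints_load_dir`. The sketch below only illustrates that behaviour; the checkpoint filename pattern is a hypothetical placeholder, not the naming actually used by modelADT.py:

```python
# Hedged sketch; the "<epoch>_<name>.pth" pattern is a hypothetical placeholder.
import os
import torch

def maybe_resume(net, opt):
    if opt.continue_train:
        ckpt = os.path.join(opt.checkpoints_load_dir,
                            f"{opt.starting_epoch_count}_{opt.name}.pth")
        net.load_state_dict(torch.load(ckpt, map_location="cpu"))
    return net
```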
opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 109 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 110 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 111 | opt_decoder.loss_type = "L2_sumL1" 112 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 113 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 114 | 115 | 116 | 117 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 118 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 119 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 121 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 122 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 123 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 124 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 125 | 126 | 127 | # ---------- offset decoder param -------------- 128 | opt_offset_decoder = edict() 129 | opt_offset_decoder.parent_exp = opt_exp 130 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 131 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 132 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 133 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 134 | opt_offset_decoder.resnet_blocks = 12 135 | opt_offset_decoder.encoder_res_blocks = 6 136 | opt_offset_decoder.no_dropout = False 137 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 138 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 139 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 140 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 141 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 142 | opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 143 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 144 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 145 | opt_offset_decoder.lambda_cross = 0 146 | opt_offset_decoder.lambda_reg = 0 147 | 
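Aside: `gpu_ids` is a list of GPU indices kept as strings, with -1 reserved for CPU per the help text. One plausible way to turn it into devices is sketched below; whether this codebase uses `DataParallel` exactly like this is an assumption:

```python
# Hedged sketch of device placement from a string list such as ['1','2','3','0'].
import torch
import torch.nn as nn

def to_devices(net, opt):
    ids = [int(i) for i in opt.gpu_ids if int(i) >= 0]  # drop -1 (CPU) entries
    if ids and torch.cuda.is_available():
        net = net.to(f"cuda:{ids[0]}")
        if len(ids) > 1:
            net = nn.DataParallel(net, device_ids=ids)
    return net
```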
opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 148 | 149 | 150 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 152 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 153 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 154 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 155 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 156 | opt_offset_decoder.isTrain = True 157 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 158 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 159 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 160 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 161 | opt_offset_decoder.loss_type = "L2_offset_loss" 162 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 163 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 164 | 165 | 166 | 167 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 168 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 169 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 171 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 172 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 173 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 174 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 175 | -------------------------------------------------------------------------------- /params_storage/params_tab1_test4.py: -------------------------------------------------------------------------------- 1 | from easydict import EasyDict as edict 2 | import time 3 | from os.path import join 4 | opt_exp = edict() 5 | 6 | # ---------- Global Experiment param -------------- 7 | opt_exp.isTrain = True 8 | opt_exp.continue_train = False #action='store_true', help='continue training: load the latest model') 9 | opt_exp.starting_epoch_count = 0 #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 10 | opt_exp.n_epochs = 50 11 | opt_exp.gpu_ids = ['1','2','3','0'] 12 | opt_exp.data = "rw_to_rw_env4" 13 | opt_exp.n_decoders = 2 14 | 15 | opt_exp.batch_size = 32 16 | opt_exp.ds_step_trn = 1 17 | opt_exp.ds_step_tst = 1 18 | 
opt_exp.weight_decay = 1e-5 19 | opt_exp.confidence = False 20 | # ------ name of experiment ---------- 21 | opt_exp.save_name = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) # experiment name when train.py is ran 22 | opt_exp.checkpoints_dir = join('./runs', opt_exp.save_name) #models are saved here 23 | opt_exp.results_dir = opt_exp.checkpoints_dir 24 | opt_exp.log_dir = opt_exp.checkpoints_dir 25 | opt_exp.load_dir = opt_exp.checkpoints_dir 26 | 27 | # ---------- offset decoder param -------------- 28 | opt_encoder = edict() 29 | opt_encoder.parent_exp = opt_exp 30 | opt_encoder.batch_size = opt_encoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 31 | opt_encoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 32 | opt_encoder.base_model = 'resnet_encoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 33 | opt_encoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG') 34 | opt_encoder.resnet_blocks = 6 35 | opt_encoder.no_dropout = False #action='store_true', help='no dropout for the generator') 36 | opt_encoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 37 | opt_encoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 38 | opt_encoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 39 | opt_encoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 40 | opt_encoder.lr = 0.00001 #type=float, default=0.0002, help='initial learning rate for adam') 41 | opt_encoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 42 | opt_encoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 43 | opt_encoder.lambda_L = 1 # weightage given to the Generator 44 | opt_encoder.lambda_cross = 1e-5 45 | opt_encoder.lambda_reg = 5e-4 46 | opt_encoder.weight_decay = opt_encoder.parent_exp.weight_decay 47 | 48 | 49 | opt_encoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 50 | opt_encoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 51 | opt_encoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 52 | opt_encoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 53 | opt_encoder.n_epochs = opt_encoder.parent_exp.n_epochs 54 | opt_encoder.isTrain = True 55 | opt_encoder.continue_train = False #action='store_true', help='continue training: load the latest model') 56 | opt_encoder.starting_epoch_count = opt_encoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 57 | # opt_encoder.phase = opt_encoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 58 | opt_encoder.name = 'encoder' #type=str, default='experiment_name', help='name of the experiment. 
It decides where to store samples and models') 59 | opt_encoder.loss_type = "NoLoss" 60 | opt_encoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 61 | opt_encoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 62 | 63 | 64 | 65 | opt_encoder.gpu_ids = opt_encoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 66 | opt_encoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 67 | opt_encoder.checkpoints_load_dir = opt_encoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 68 | opt_encoder.checkpoints_save_dir = opt_encoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 69 | opt_encoder.results_dir = opt_encoder.parent_exp.results_dir 70 | opt_encoder.log_dir = opt_encoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 71 | opt_encoder.max_dataset_size = float("inf") #type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 72 | opt_encoder.verbose = False #action='store_true', help='if specified, print more debugging information') 73 | opt_encoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 74 | 75 | 76 | # ---------- decoder param -------------- 77 | opt_decoder = edict() 78 | opt_decoder.parent_exp = opt_exp 79 | opt_decoder.batch_size = opt_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 80 | opt_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 81 | opt_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 82 | opt_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 83 | opt_decoder.resnet_blocks = 9 84 | opt_decoder.encoder_res_blocks = 6 85 | opt_decoder.no_dropout = False 86 | opt_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 87 | opt_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 88 | opt_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 89 | opt_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 90 | opt_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 91 | opt_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 92 | opt_decoder.lr_decay_iters = 20 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 93 | opt_decoder.lambda_L = 1 # weightage given to the Generator 94 | opt_decoder.lambda_cross = 1e-5 95 | opt_decoder.lambda_reg = 5e-4 96 | opt_decoder.weight_decay = opt_decoder.parent_exp.weight_decay 97 | 98 | 99 | #opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 100 | opt_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 101 | opt_decoder.output_nc = 1 #type=int, default=3, help='# of output image channels') 102 | opt_decoder.save_latest_freq 
= 5000 #type=int, default=5000, help='frequency of saving the latest results') 103 | opt_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 104 | opt_decoder.n_epochs = opt_decoder.parent_exp.n_epochs 105 | opt_decoder.isTrain = True 106 | opt_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 107 | opt_decoder.starting_epoch_count = opt_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 108 | # opt_decoder.phase = opt_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 109 | opt_decoder.name = 'decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 110 | opt_decoder.loss_type = "L2_sumL1" 111 | opt_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 112 | opt_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 113 | 114 | 115 | 116 | opt_decoder.gpu_ids = opt_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 117 | opt_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 118 | opt_decoder.checkpoints_load_dir = opt_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 119 | opt_decoder.checkpoints_save_dir = opt_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 120 | opt_decoder.results_dir = opt_decoder.parent_exp.results_dir 121 | opt_decoder.log_dir = opt_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 122 | opt_decoder.verbose = False #action='store_true', help='if specified, print more debugging information') 123 | opt_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 124 | 125 | 126 | # ---------- offset decoder param -------------- 127 | opt_offset_decoder = edict() 128 | opt_offset_decoder.parent_exp = opt_exp 129 | opt_offset_decoder.batch_size = opt_offset_decoder.parent_exp.batch_size #type=int, default=1, help='input batch size') 130 | opt_offset_decoder.ngf = 64 #type=int, default=64, help='# of gen filters in first conv layer') 131 | opt_offset_decoder.base_model = 'resnet_decoder' #type=str, default='resnet_9blocks', help='selects model to use for netG') 132 | opt_offset_decoder.net = 'G' #type=str, default='resnet_9blocks', help='selects model to use for netG')opt_offset_decoder.no_dropout = False #action='store_true', help='no dropout for the generator') 133 | opt_offset_decoder.resnet_blocks = 12 134 | opt_offset_decoder.encoder_res_blocks = 6 135 | opt_offset_decoder.no_dropout = False 136 | opt_offset_decoder.init_type = 'xavier' #type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') 137 | opt_offset_decoder.init_gain = 0.02 #type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 138 | opt_offset_decoder.norm = 'instance' #type=str, default='instance', help='instance normalization or batch normalization') 139 | opt_offset_decoder.beta1 = 0.5 #type=float, default=0.5, help='momentum term of adam') 140 | opt_offset_decoder.lr = opt_encoder.lr #type=float, default=0.0002, help='initial learning rate for adam') 141 | 
opt_offset_decoder.lr_policy = 'step' #type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine') 142 | opt_offset_decoder.lr_decay_iters = 50 #type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 143 | opt_offset_decoder.lambda_L = 1 # weightage given to the Generator 144 | opt_offset_decoder.lambda_cross = 0 145 | opt_offset_decoder.lambda_reg = 0 146 | opt_offset_decoder.weight_decay = opt_offset_decoder.parent_exp.weight_decay 147 | 148 | 149 | #opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 150 | opt_offset_decoder.input_nc = 4 #type=int, default=3, help='# of input image channels') 151 | opt_offset_decoder.output_nc = 4 #type=int, default=3, help='# of output image channels') 152 | opt_offset_decoder.save_latest_freq = 5000 #type=int, default=5000, help='frequency of saving the latest results') 153 | opt_offset_decoder.save_epoch_freq = 1 #type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 154 | opt_offset_decoder.n_epochs = opt_offset_decoder.parent_exp.n_epochs 155 | opt_offset_decoder.isTrain = True 156 | opt_offset_decoder.continue_train = False #action='store_true', help='continue training: load the latest model') 157 | opt_offset_decoder.starting_epoch_count = opt_offset_decoder.parent_exp.starting_epoch_count #type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 158 | # opt_offset_decoder.phase = opt_offset_decoder.parent_exp.phase #type=str, default='train', help='train, val, test, etc') 159 | opt_offset_decoder.name = 'offset_decoder' #type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 160 | opt_offset_decoder.loss_type = "L2_offset_loss" 161 | opt_offset_decoder.niter = 20 #type=int, default=100, help='# of iter at starting learning rate') 162 | opt_offset_decoder.niter_decay = 100 #type=int, default=100, help='# of iter to linearly decay learning rate to zero') 163 | 164 | 165 | 166 | opt_offset_decoder.gpu_ids = opt_offset_decoder.parent_exp.gpu_ids #type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 167 | opt_offset_decoder.num_threads = 4 #default=4, type=int, help='# threads for loading data') 168 | opt_offset_decoder.checkpoints_load_dir = opt_offset_decoder.parent_exp.load_dir #type=str, default='./checkpoints', help='models are saved here') 169 | opt_offset_decoder.checkpoints_save_dir = opt_offset_decoder.parent_exp.checkpoints_dir #type=str, default='./checkpoints', help='models are saved here') 170 | opt_offset_decoder.results_dir = opt_offset_decoder.parent_exp.results_dir 171 | opt_offset_decoder.log_dir = opt_offset_decoder.parent_exp.log_dir #type=str, default='./checkpoints', help='models are saved here') 172 | opt_offset_decoder.verbose = False#action='store_true', help='if specified, print more debugging information') 173 | opt_offset_decoder.suffix ='' #default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') 174 | -------------------------------------------------------------------------------- /ref/atk_July22_1_ref.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/atk_July22_1_ref.png -------------------------------------------------------------------------------- /ref/atk_July22_2_ref.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/atk_July22_2_ref.png -------------------------------------------------------------------------------- /ref/atkinson.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/atkinson.png -------------------------------------------------------------------------------- /ref/dloc_bib.md: -------------------------------------------------------------------------------- 1 | @inproceedings{ayyalasomayajula2020deep, 2 | title={Deep learning based wireless localization for indoor navigation}, 3 | author={Ayyalasomayajula, Roshan and Arun, Aditya and Wu, Chenfeng and Sharma, Sanatan and Sethi, Abhishek Rajkumar and Vasisht, Deepak and Bharadia, Dinesh}, 4 | booktitle={Proceedings of the 26th Annual International Conference on Mobile Computing and Networking}, 5 | pages={1--14}, 6 | year={2020} 7 | } 8 | -------------------------------------------------------------------------------- /ref/jacobs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs.png -------------------------------------------------------------------------------- /ref/jacobs_aug16_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs_aug16_1.png -------------------------------------------------------------------------------- /ref/jacobs_aug16_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs_aug16_2.png -------------------------------------------------------------------------------- /ref/jacobs_aug16_3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs_aug16_3.png -------------------------------------------------------------------------------- /ref/jacobs_aug16_4_ref.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs_aug16_4_ref.png -------------------------------------------------------------------------------- /ref/jacobs_default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsdwcsng/DLoc_pt_code/a74c2998d677cd4d79404742e1f9123b758260ef/ref/jacobs_default.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | easydict==1.9 2 | h5py==2.7.0 3 | hdf5storage==0.1.15 4 | ipython==5.8.0 5 | ipython-genutils==0.2.0 6 | Markdown==2.6.9 7 | MarkupSafe==1.1.1 8 | mat4py==0.4.2 9 | mxnet-cu80==0.11.0 10 | numpy==1.16.5 11 | protobuf==3.11.2 12 | pycuda==2017.1.1 13 | python-dateutil==2.8.1 14 | pytools==2017.4 15 | scikit-cuda==0.5.1 16 | scipy==1.1.0 17 | sklearn==0.0 18 | torch==1.4.0 19 | torchvision==0.2.1 20 | virtualenv==16.7.9 21 | -------------------------------------------------------------------------------- /train_and_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Script for both training and evaluating the DLoc network 4 | Automatically imports the parameters from params.py. 5 | For further details onto which params file to load 6 | read the README in `params_storage` folder. 7 | ''' 8 | 9 | import torch 10 | import warnings 11 | with warnings.catch_warnings(): 12 | warnings.filterwarnings("ignore",category=FutureWarning) 13 | from utils import * 14 | from modelADT import ModelADT 15 | from Generators import * 16 | from data_loader import load_data 17 | from joint_model import Enc_2Dec_Network 18 | from joint_model import Enc_Dec_Network 19 | from params import * 20 | import trainer 21 | torch.manual_seed(0) 22 | np.random.seed(0) 23 | 24 | ''' 25 | Defining the paths from where to Load Data. 
26 | Assumes that the data is stored in a subfolder called data in the current data folder 27 | ''' 28 | 29 | #####################################Final Simple Space Results################################################ 30 | if "data" in opt_exp and opt_exp.data == "rw_to_rw_atk": 31 | # Training and testing data loaded for the Final results For Env-1 (The smaller space) in the paper (Figure 10a) 32 | trainpath = ['./data/dataset_non_fov_train_July18.mat', 33 | './data/dataset_fov_train_July18.mat'] 34 | testpath = ['./data/dataset_non_fov_test_July18.mat', 35 | './data/dataset_fov_test_July18.mat'] 36 | print('Real World to Real World experiments started') 37 | 38 | #####################################Final Complex Space Results################################################ 39 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw": 40 | # Training and testing data loaded for the Final results For Env-2 (The larger space) in the paper (Figure 10b) 41 | trainpath = ['./data/dataset_jacobs_July28.mat', 42 | './data/dataset_non_fov_train_jacobs_July28_2.mat', 43 | './data/dataset_fov_train_jacobs_July28_2.mat'] 44 | testpath = ['./data/dataset_fov_test_jacobs_July28_2.mat', 45 | './data/dataset_non_fov_test_jacobs_July28_2.mat'] 46 | print('Real World to Real World experiments started') 47 | 48 | #########################################Generalization across Scenarios########################################### 49 | 50 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw_env2": 51 | # Training and testing data loaded for the Final results For Env-2 52 | # for Generalization across scenarios (Table-1) train on 1/3/4 and test on 2 53 | trainpath = ['./data/dataset_jacobs_July28.mat', 54 | './data/dataset_non_fov_train_jacobs_July28_2.mat', 55 | './data/dataset_fov_train_jacobs_July28_2.mat', 56 | './data/dataset_train_jacobs_Aug16_3.mat', 57 | './data/dataset_train_jacobs_Aug16_4_ref.mat'] 58 | testpath = ['./data/dataset_train_jacobs_Aug16_1.mat'] 59 | print('Real World to Real World experiments started') 60 | 61 | 62 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw_env3": 63 | # Training and testing data loaded for the Final results For Env-2 64 | # for Generalization across scenarios (Table-1) train on 1/2/4 and test on 3 65 | trainpath = ['./data/dataset_jacobs_July28.mat', 66 | './data/dataset_non_fov_train_jacobs_July28_2.mat', 67 | './data/dataset_fov_train_jacobs_July28_2.mat', 68 | './data/dataset_train_jacobs_Aug16_1.mat', 69 | './data/dataset_train_jacobs_Aug16_4_ref.mat'] 70 | testpath = ['./data/dataset_train_jacobs_Aug16_3.mat'] 71 | print('Real World to Real World experiments started') 72 | 73 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw_env4": 74 | # Training and testing data loaded for the Final results For Env-2 75 | # for Generalization across scenarios (Table-1) train on 1/2/3 and test on 4 76 | trainpath = ['./data/dataset_jacobs_July28.mat', 77 | './data/dataset_non_fov_train_jacobs_July28_2.mat', 78 | './data/dataset_fov_train_jacobs_July28_2.mat', 79 | './data/dataset_train_jacobs_Aug16_1.mat', 80 | './data/dataset_train_jacobs_Aug16_3.mat'] 81 | testpath = ['./data/dataset_train_jacobs_Aug16_4_ref.mat'] 82 | print('Real World to Real World experiments started') 83 | 84 | ######################################Generalization Across Bandwidth########################################## 85 | 86 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw_40": 87 | # Training and testing data loaded for the Generalization results For Env-2 (The larger space) 
in the paper (Figure 13a) at 40MHz 88 | trainpath = ['./data/dataset40_jacobs_July28.mat', 89 | './data/dataset40_non_fov_train_jacobs_July28_2.mat', 90 | './data/dataset40_fov_train_jacobs_July28_2.mat'] 91 | testpath = ['./data/dataset40_fov_test_jacobs_July28_2.mat', 92 | './data/dataset40_non_fov_test_jacobs_July28_2.mat'] 93 | print('Real World to Real World experiments started') 94 | 95 | elif "data" in opt_exp and opt_exp.data == "rw_to_rw_20": 96 | # Training and testing data loaded for the Generalization results For Env-2 (The larger space) in the paper (Figure 13a) at 20MHz 97 | trainpath = ['./data/dataset20_jacobs_July28.mat', 98 | './data/dataset20_non_fov_train_jacobs_July28_2.mat', 99 | './data/dataset20_fov_train_jacobs_July28_2.mat'] 100 | testpath = ['./data/dataset20_fov_test_jacobs_July28_2.mat', 101 | './data/dataset20_non_fov_test_jacobs_July28_2.mat'] 102 | print('Real World to Real World experiments started') 103 | 104 | ######################################Generalization Across Space########################################## 105 | 106 | elif "data" in opt_exp and opt_exp.data == "data_segment": 107 | # Training and testing data loaded for the Final results For Env-2 108 | # for Disjoint Training and Testing(The larger space) in the paper (Figure 13b) 109 | trainpath = ['./data/dataset_train_jacobs_July28.mat', 110 | './data/dataset_train_jacobs_July28_2.mat'] 111 | testpath = ['./data/dataset_test_jacobs_July28.mat', 112 | './data/dataset_test_jacobs_July28_2.mat'] 113 | print('non-FOV to non-FOV experiments started') 114 | 115 | ###################################################################################################################### 116 | ''' 117 | Loading Training and Evaluation Data into their respective Dataloaders 118 | ''' 119 | # load traning data 120 | B_train,A_train,labels_train = load_data(trainpath[0]) 121 | 122 | for i in range(len(trainpath)-1): 123 | f,f1,l = load_data(trainpath[i+1]) 124 | B_train = torch.cat((B_train, f), 0) 125 | A_train = torch.cat((A_train, f1), 0) 126 | labels_train = torch.cat((labels_train, l), 0) 127 | 128 | labels_train = torch.unsqueeze(labels_train, 1) 129 | 130 | train_data = torch.utils.data.TensorDataset(B_train, A_train, labels_train) 131 | train_loader =torch.utils.data.DataLoader(train_data, batch_size=opt_exp.batch_size, shuffle=True) 132 | 133 | print(f"A_train.shape: {A_train.shape}") 134 | print(f"B_train.shape: {B_train.shape}") 135 | print(f"labels_train.shape: {labels_train.shape}") 136 | print('# training mini batch = %d' % len(train_loader)) 137 | 138 | # load testing data 139 | B_test,A_test,labels_test = load_data(testpath[0]) 140 | 141 | for i in range(len(testpath)-1): 142 | f,f1,l = load_data(testpath[i+1]) 143 | B_test = torch.cat((B_test, f), 0) 144 | A_test = torch.cat((A_test, f1), 0) 145 | labels_test = torch.cat((labels_test, l), 0) 146 | 147 | labels_test = torch.unsqueeze(labels_test, 1) 148 | 149 | # create data loader 150 | test_data = torch.utils.data.TensorDataset(B_test, A_test, labels_test) 151 | test_loader =torch.utils.data.DataLoader(test_data, batch_size=opt_exp.batch_size, shuffle=False) 152 | print(f"A_test.shape: {A_test.shape}") 153 | print(f"B_test.shape: {B_test.shape}") 154 | print(f"labels_test.shape: {labels_test.shape}") 155 | print('# testing mini batch = %d' % len(test_loader)) 156 | print('Test Data Loaded') 157 | 158 | ''' 159 | Initiate the Network and build the graph 160 | ''' 161 | 162 | # init encoder 163 | enc_model = ModelADT() 164 | 
enc_model.initialize(opt_encoder) 165 | enc_model.setup(opt_encoder) 166 | 167 | # init decoder1 168 | dec_model = ModelADT() 169 | dec_model.initialize(opt_decoder) 170 | dec_model.setup(opt_decoder) 171 | 172 | if opt_exp.n_decoders == 2: 173 | # init decoder2 174 | offset_dec_model = ModelADT() 175 | offset_dec_model.initialize(opt_offset_decoder) 176 | offset_dec_model.setup(opt_offset_decoder) 177 | 178 | # join all models 179 | print('Making the joint_model') 180 | joint_model = Enc_2Dec_Network() 181 | joint_model.initialize(opt_exp, enc_model, dec_model, offset_dec_model, gpu_ids=opt_exp.gpu_ids) 182 | 183 | elif opt_exp.n_decoders == 1: 184 | # join all models 185 | print('Making the joint_model') 186 | joint_model = Enc_Dec_Network() 187 | joint_model.initialize(opt_exp, enc_model, dec_model, gpu_ids=opt_exp.gpu_ids) 188 | 189 | else: 190 | print('Incorrect number of Decoders specified in the parameters') 191 | return -1 192 | 193 | if opt_exp.isFrozen: 194 | enc_model.load_networks(opt_encoder.starting_epoch_count) 195 | dec_model.load_networks(opt_decoder.starting_epoch_count) 196 | if opt_exp.n_decoders == 2: 197 | offset_dec_model.load_networks(opt_offset_decoder.starting_epoch_count) 198 | 199 | # train the model 200 | ''' 201 | Trainig the network 202 | ''' 203 | trainer.train(joint_model, train_loader, test_loader) 204 | 205 | ''' 206 | Model Evaluation at the best epoch 207 | ''' 208 | 209 | epoch = "best" # int/"best"/"last" 210 | # load network 211 | enc_model.load_networks(epoch, load_dir=eval_name) 212 | dec_model.load_networks(epoch, load_dir=eval_name) 213 | if opt_exp.n_decoders == 2: 214 | offset_dec_model.load_networks(epoch, load_dir=eval_name) 215 | joint_model.initialize(opt_exp, enc_model, dec_model, offset_dec_model, gpu_ids = opt_exp.gpu_ids) 216 | elif opt_exp.n_decoders == 1: 217 | joint_model.initialize(opt_exp, enc_model, dec_model, gpu_ids = opt_exp.gpu_ids) 218 | 219 | # pass data through model 220 | total_loss, median_error = trainer.test(joint_model, 221 | test_loader, 222 | save_output=True, 223 | save_dir=eval_name, 224 | save_name=f"decoder_test_result_epoch_{epoch}", 225 | log=False) 226 | print(f"total_loss: {total_loss}, median_error: {median_error}") 227 | -------------------------------------------------------------------------------- /trainer.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Scripts for the training and testing functions 3 | train() function is called for training the network 4 | test() function is called to evaluate the network 5 | Both the function logs and saves the results in the files 6 | as mentioned in the params.py file 7 | ''' 8 | import warnings 9 | with warnings.catch_warnings(): 10 | warnings.filterwarnings("ignore",category=FutureWarning) 11 | import numpy as np 12 | import os 13 | import time 14 | import hdf5storage 15 | from utils import * 16 | from Generators import * 17 | from params import * 18 | 19 | def train(model, train_loader, test_loader): 20 | """Traning pipeline 21 | 22 | Args: 23 | model (torch.module): pytorch model 24 | train_loader (torch.dataloader): dataloader 25 | test_loader (torch.dataloader): dataloader 26 | """ 27 | # set data index 28 | offset_output_index=0 29 | input_index=1 30 | output_index=2 31 | 32 | # initialization 33 | total_steps = 0 34 | print('Training called') 35 | stopping_count = 0 36 | 37 | for epoch in range(model.opt.starting_epoch_count+1, model.opt.n_epochs+1): # opt.niter + opt.niter_decay + 1): 38 | epoch_start_time = 
time.time() 39 | epoch_loss = 0 40 | epoch_offset_loss = 0 41 | error =[] 42 | 43 | for i, data in enumerate(train_loader): 44 | total_steps += model.opt.batch_size 45 | if opt_exp.n_decoders == 2: 46 | model.set_input(data[input_index], data[output_index], data[offset_output_index], shuffle_channel=False) 47 | elif opt_exp.n_decoders == 1: 48 | model.set_input(data[input_index], data[output_index], shuffle_channel=False) 49 | model.optimize_parameters() 50 | dec_outputs = model.decoder.output 51 | # print(f"dec_outputs size is : {dec_outputs.shape}") 52 | error.extend(localization_error(dec_outputs.data.cpu().numpy(),data[output_index].cpu().numpy(),scale=0.1)) 53 | 54 | write_log([str(model.decoder.loss.item())], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='loss') 55 | if opt_exp.n_decoders == 2: 56 | write_log([str(model.offset_decoder.loss.item())], model.offset_decoder.model_name, log_dir=model.offset_decoder.opt.log_dir, log_type='offset_loss') 57 | if total_steps % model.decoder.opt.save_latest_freq == 0: 58 | print('saving the latest model (epoch %d, total_steps %d)' % 59 | (epoch, total_steps)) 60 | model.save_networks('latest') 61 | 62 | epoch_loss += model.decoder.loss.item() 63 | if opt_exp.n_decoders == 2: 64 | epoch_offset_loss += model.offset_decoder.loss.item() 65 | 66 | median_error_tr = np.median(error) 67 | error_90th_tr = np.percentile(error,90) 68 | error_99th_tr = np.percentile(error,99) 69 | nighty_percentile_error_tr = np.percentile(error,90) 70 | epoch_loss /= i 71 | if opt_exp.n_decoders == 2: 72 | epoch_offset_loss /= i 73 | write_log([str(epoch_loss)], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='epoch_decoder_loss') 74 | if opt_exp.n_decoders == 2: 75 | write_log([str(epoch_offset_loss)], model.offset_decoder.model_name, log_dir=model.offset_decoder.opt.log_dir, log_type='epoch_offset_decoder_loss') 76 | write_log([str(median_error_tr)], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='train_median_error') 77 | write_log([str(error_90th_tr)], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='train_90th_error') 78 | write_log([str(error_99th_tr)], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='train_99th_error') 79 | write_log([str(nighty_percentile_error_tr)], model.decoder.model_name, log_dir=model.decoder.opt.log_dir, log_type='train_90_error') 80 | if (epoch==1): 81 | min_eval_loss, median_error = test(model, test_loader, save_output=False) 82 | else: 83 | new_eval_loss, new_med_error = test(model, test_loader, save_output=False) 84 | if (median_error>=new_med_error): 85 | stopping_count = stopping_count+1 86 | median_error = new_med_error 87 | 88 | # generated_outputs = temp_generator_outputs 89 | if epoch % model.encoder.opt.save_epoch_freq == 0: 90 | print('saving the model at the end of epoch %d, iters %d' %(epoch, total_steps)) 91 | model.save_networks('latest') 92 | model.save_networks(epoch) 93 | if (stopping_count==2): 94 | print('Saving best model at %d epoch' %(epoch)) 95 | model.save_networks('best') 96 | stopping_count=0 97 | 98 | print('End of epoch %d / %d \t Time Taken: %d sec' % 99 | (epoch, model.decoder.opt.niter + model.decoder.opt.niter_decay, time.time() - epoch_start_time)) 100 | model.decoder.update_learning_rate() 101 | model.encoder.update_learning_rate() 102 | if opt_exp.n_decoders == 2: 103 | model.offset_decoder.update_learning_rate() 104 | 105 | 106 | def test(model, test_loader, save_output=True, 
save_name="decoder_test_result", save_dir="", log=True): 107 | """Test and evaluation pipeline 108 | 109 | Args: 110 | model (torch.module): pytorch model 111 | test_loader (torch.dataloader): dataloader 112 | save_output (bool, optional): whether to save output to mat file. Defaults to True. 113 | save_name (str, optional): name of the mat file. Defaults to "decoder_test_result". 114 | save_dir (str, optional): directory where output mat file is saved. Defaults to "". 115 | log (bool, optional): whether to log output. Defaults to True. 116 | 117 | Returns: 118 | tuple: (total_loss -> float, median_error -> float) 119 | """ 120 | print('Evaluation Called') 121 | model.eval() 122 | 123 | # set data index 124 | offset_output_index=0 125 | input_index=1 126 | output_index=2 127 | 128 | # create containers 129 | generated_outputs = [] 130 | offset_outputs = [] 131 | total_loss = 0 132 | total_offset_loss = 0 133 | error =[] 134 | for i, data in enumerate(test_loader): 135 | if opt_exp.n_decoders == 2: 136 | model.set_input(data[input_index], data[output_index], data[offset_output_index], shuffle_channel=False) 137 | elif opt_exp.n_decoders == 1: 138 | model.set_input(data[input_index], data[output_index], shuffle_channel=False) 139 | model.test() 140 | 141 | # get model outputs 142 | gen_outputs = model.decoder.output # gen_outputs.size = (N,1,H,W) 143 | if opt_exp.n_decoders == 2: 144 | off_outputs = model.offset_decoder.output # off_outputs.size = (N,n_ap,H,W) 145 | 146 | generated_outputs.extend(gen_outputs.data.cpu().numpy()) 147 | if opt_exp.n_decoders == 2: 148 | offset_outputs.extend(off_outputs.data.cpu().numpy()) 149 | error.extend(localization_error(gen_outputs.data.cpu().numpy(),data[output_index].cpu().numpy(),scale=0.1)) 150 | total_loss += model.decoder.loss.item() 151 | if opt_exp.n_decoders == 2: 152 | total_offset_loss += model.offset_decoder.loss.item() 153 | total_loss /= i 154 | if opt_exp.n_decoders == 2: 155 | total_offset_loss /= i 156 | median_error = np.median(error) 157 | nighty_percentile_error = np.percentile(error,90) 158 | error_99th = np.percentile(error,99) 159 | 160 | if log: 161 | write_log([str(median_error)], model.decoder.model_name, log_dir=model.opt.log_dir, log_type='test_median_error') 162 | write_log([str(nighty_percentile_error)], model.decoder.model_name, log_dir=model.opt.log_dir, log_type='test_90_error') 163 | write_log([str(error_99th)], model.decoder.model_name, log_dir=model.opt.log_dir, log_type='test_99_error') 164 | write_log([str(total_loss)], model.decoder.model_name, log_dir=model.opt.log_dir, log_type='test_loss') 165 | if opt_exp.n_decoders == 2: 166 | write_log([str(total_offset_loss)], model.decoder.model_name, log_dir=model.opt.log_dir, log_type='test_offset_loss') 167 | 168 | if save_output: 169 | if not save_dir: 170 | save_dir = model.decoder.results_save_dir # default save directory 171 | 172 | if not os.path.exists(save_dir): 173 | os.makedirs(save_dir, exist_ok=True) 174 | 175 | save_path = f"{save_dir}/{save_name}.mat" 176 | hdf5storage.savemat(save_path, 177 | mdict={"outputs":generated_outputs,"wo_outputs":offset_outputs, "error": error}, 178 | appendmat=True, 179 | format='7.3', 180 | truncate_existing=True) 181 | print(f"result saved in {save_path}") 182 | return total_loss, median_error -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Contains the utilities used for 
4 | loading, initating and running up the networks 5 | for all training, validation and evaluation. 6 | ''' 7 | import torch 8 | import torch.nn as nn 9 | from torch.nn import init 10 | import functools 11 | from torch.optim import lr_scheduler 12 | import numpy as np 13 | import os 14 | from Generators import * 15 | 16 | def write_log(log_values, model_name, log_dir="", log_type='loss', type_write='a'): 17 | if not os.path.exists(log_dir): 18 | os.makedirs(log_dir) 19 | with open(log_dir+"/"+model_name+"_"+log_type+".txt", type_write) as f: 20 | f.write(','.join(log_values)+"\n") 21 | 22 | def get_model_funct(model_name): 23 | if model_name == "G": 24 | return define_G 25 | 26 | def define_G(opt, gpu_ids): 27 | net = None 28 | input_nc = opt.input_nc 29 | output_nc = opt.output_nc 30 | ngf = opt.ngf 31 | net_type = opt.base_model 32 | norm = opt.norm 33 | use_dropout = opt.no_dropout 34 | init_type = opt.init_type 35 | init_gain = opt.init_gain 36 | 37 | norm_layer = get_norm_layer(norm_type=norm) 38 | 39 | if net_type == 'resnet_encoder': 40 | n_blocks = opt.resnet_blocks 41 | net = ResnetEncoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_blocks) 42 | elif net_type == 'resnet_decoder': 43 | n_blocks = opt.resnet_blocks 44 | net = ResnetDecoder(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_blocks, encoder_blocks=opt.encoder_res_blocks) 45 | else: 46 | raise NotImplementedError('Generator model name [%s] is not recognized' % net_type) 47 | return init_net(net, init_type, init_gain, gpu_ids) 48 | 49 | 50 | def get_scheduler(optimizer, opt): 51 | if opt.starting_epoch_count=='best' and opt.lr_policy == 'lambda': 52 | def lambda_rule(epoch): 53 | print("lambda update %s, %s, %s", (epoch, 0)) 54 | lr_l = 1.0 - max(0, epoch + 1 - opt.niter) / float(opt.niter_decay + 1) 55 | return lr_l 56 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) 57 | elif opt.starting_epoch_count!='best' and opt.lr_policy == 'lambda': 58 | def lambda_rule(epoch): 59 | print("lambda update %s, %s, %s", (epoch, opt.starting_epoch_count)) 60 | lr_l = 1.0 - max(0, epoch + 1 + opt.starting_epoch_count - opt.niter) / float(opt.niter_decay + 1) 61 | return lr_l 62 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) 63 | elif opt.starting_epoch_count!='best' and opt.lr_policy == 'step': 64 | scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.9) 65 | elif opt.starting_epoch_count!='best' and opt.lr_policy == 'plateau': 66 | scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) 67 | elif opt.starting_epoch_count!='best' and opt.lr_policy == 'cosine': 68 | scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0) 69 | else: 70 | return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) 71 | return scheduler 72 | 73 | 74 | def get_norm_layer(norm_type='instance'): 75 | if norm_type == 'batch': 76 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True) 77 | elif norm_type == 'instance': 78 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) 79 | elif norm_type == 'none': 80 | norm_layer = None 81 | else: 82 | raise NotImplementedError('normalization layer [%s] is not found' % norm_type) 83 | return norm_layer 84 | 85 | 86 | def init_weights(net, init_type='normal', gain=1): 87 | def init_func(m): 88 | classname = m.__class__.__name__ 
89 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 90 | if init_type == 'normal': 91 | init.normal_(m.weight.data, 0, gain) 92 | elif init_type == 'xavier': 93 | init.xavier_normal_(m.weight.data, gain=gain) 94 | elif init_type == 'kaiming': 95 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 96 | elif init_type == 'orthogonal': 97 | init.orthogonal_(m.weight.data, gain=gain) 98 | else: 99 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 100 | if hasattr(m, 'bias') and m.bias is not None: 101 | init.constant_(m.bias.data, 0.0) 102 | elif classname.find('BatchNorm2d') != -1: 103 | init.normal_(m.weight.data, 1.0, gain) 104 | init.constant_(m.bias.data, 0.0) 105 | 106 | print('initialize network with %s' % init_type) 107 | net.apply(init_func) 108 | 109 | 110 | def init_net(net, init_type='normal', init_gain=1, gpu_ids=[]): 111 | if len(gpu_ids) > 0: 112 | assert(torch.cuda.is_available()) 113 | device_ = torch.device('cuda:{}'.format(gpu_ids[0])) 114 | # net.to(d) 115 | gpu_ids_int = list(map(int,gpu_ids)) 116 | net = torch.nn.DataParallel(net, gpu_ids_int) 117 | net.to(device_) 118 | init_weights(net, init_type, gain=init_gain) 119 | return net 120 | 121 | def localization_error(output_predictions,input_labels,scale=0.1): 122 | """ 123 | output_predictions: (N,1,H,W), model prediction 124 | input_labels: (N,1,H,W), ground truth target 125 | """ 126 | image_size = output_predictions.shape 127 | error = np.zeros(image_size[0]) 128 | 129 | for i in range(image_size[0]): 130 | label_temp = input_labels[i,:,:,:].squeeze() # ground truth label 131 | pred_temp = output_predictions[i,:,:,:].squeeze() # model prediction 132 | label_index = np.asarray(np.unravel_index(np.argmax(label_temp), label_temp.shape)) 133 | prediction_index = np.asarray(np.unravel_index(np.argmax(pred_temp),pred_temp.shape)) 134 | error[i] = np.sqrt( np.sum( np.power(np.multiply( label_index-prediction_index, scale ), 2)) ) 135 | 136 | return error 137 | 138 | class Flatten(nn.Module): 139 | def __init__(self): 140 | super(Flatten, self).__init__() 141 | 142 | def forward(self, x): 143 | return x.view(x.size(0), -1) 144 | 145 | def RGB2Gray(img): 146 | return 0.2125*img[:,:,0] + 0.7154*img[:,:,1] + 0.0721*img[:,:,2] 147 | -------------------------------------------------------------------------------- /wild.md: -------------------------------------------------------------------------------- 1 | # Wireless Indoor Localization Dataset (WILD) 2 | ``` 3 | Authors: Roshan Ayyalasomayajula, Aditya Arun, Chenfeng Wu, Dinesh Bharadia 4 | ``` 5 | 6 | --- 7 | 8 |
9 |
10 |

Documentation  Downloads  LICENSE  Updates  REFERENCE

11 |
12 |
13 | 14 | --- 15 | 16 | ## Updates ## 17 | December, 2022 18 |

The 2nd version of the WILD dataset has been released through a Kaggle Competition.

19 | June 20, 2020 20 |

First release of the location-labeled WiFi CSI data and the feature data used in DLoc.

21 | Jan, 2020 22 |

DLoc has been accepted at MobiCom 2020.

23 | 24 | --- 25 | 26 | ## Documentation ## 27 | 28 | ### Two Different Environments 29 |
30 | 31 |
32 | 33 |

1. Complex High-multipath and NLOS environment (1500 sq. ft.) with 5 different setups in Jacobs Hall UCSD

34 | 35 | 36 |

**jacobs_Jul28**: 18m X 8m setup with 4 APs in Jacobs Hall ground floor for data collected on July 28, 2019.

37 | 38 |

**jacobs_Jul28_2**: 18m X 8m setup with 4 APs in Jacobs Hall ground floor for data collected on July 28, 2019, one hour after **jacobs_Jul28**.

39 | 40 |

**jacobs_Aug16_1**: 18m X 8m setup with 4 APs in Jacobs Hall ground floor for data collected on August 16, 2019, with extra furniture placed randomly.

41 | 42 |

**jacobs_Aug16_3**: 18m X 8m setup with 4 APs in Jacobs Hall ground floor for data collected on August 16, 2019, with extra furniture placed randomly.

43 | 44 |

**jacobs_Aug16_4_ref**: 18m X 8m setup with 4 APs in Jacobs Hall ground floor for data collected on August 16, 2019, with extra furniture placed randomly with an added reflector. (*a huge aluminum plated board*)

45 |
46 | 47 |
48 |

2. Simple LOS-based environment (500 sq. ft.) with 3 different setups in Atkinson Hall UCSD

49 | 50 | 51 |

**July16**: 8m X 5m setup with 3 APs in Atkinson Hall ground floor for data collected on July 16, 2019.

52 | 53 |

**July18**: 8m X 5m setup with 3 APs in Atkinson Hall ground floor for data collected on July 18, 2019.

54 | 55 |

**July22_2_ref**: 8m X 5m setup with 3 APs and 2 additional reflectors (*a huge aluminum plated board*) placed in Atkinson Hall ground floor for data collected on July 22, 2019.

56 |
57 |
58 | 59 | --- 60 | 61 | We provide both the CSI data for all the above setups and the post-processed features for running our DLoc network. **All the corresponding links can be found below.** 62 | 63 | ### Channels 64 | 65 | The CSI data is named as **channels_<dataset_name>.mat**. These MATLAB files are stored using the **HDF5** file structure and contain the following variables: 66 | 67 | - **channels**: *[ n_datapoints x n_frequency x n_ant x n_ap ]* 4D complex channel matrix. 68 | - **RSSI**: *[ n_datapoints x n_ap ]* 2D received signal strength matrix. 69 | - **labels**: *[ n_datapoints x 2 ]* 2D XY labels. 70 | - **opt**: various options specific to the generated data 71 | - *opt.freq*: *[ n_frequency x 1 ]* 1D vector that describes the frequency of the subcarriers 72 | - *opt.lambda*: *[ n_frequency x 1 ]* 1D vector that describes the wavelength of the subcarriers 73 | - *ant_sep*: antenna separation used on all of our APs 74 | - **ap**: *n_ap* cell matrix. Each element corresponds to the *[ n_ant x 2 ]* XY locations of the n_ant antennas on each AP. 75 | - **ap_aoa**: *[ n_ap x 1 ]* vector that contains the rotation that needs to be added to the AoA measured at each AP (assumes that the AoA is measured about the normal to the AP's antenna array) 76 | - **d1**: The sampled x-axis of the space under consideration 77 | - **d2**: The sampled y-axis of the space under consideration 78 | 79 | ### Features 80 | 81 | The 2D heatmap features data used in [DLoc](https://wcsng.ucsd.edu/dloc) is named as **features_<dataset_name>.mat**. These MATLAB files are stored using the **HDF5** file structure and contain the following variables: 82 | 83 | - **features_with_offset**: *[ n_datapoints x n_ap x n_d1_points x n_d2_points ]* 4D feature matrix for the n_ap access points **with offsets** in time 84 | - **features_without_offset**: *[ n_datapoints x n_ap x n_d1_points x n_d2_points ]* 4D feature matrix for the n_ap access points **without offsets** in time 85 | - **labels_gaussian_2d**: *[ n_datapoints x n_d1_points x n_d2_points ]* 3D labels matrix that contains the target images for the location network. 86 | - **labels**: *[ n_datapoints x 2 ]* 2D XY labels. 87 | 88 | --- 89 | 90 | ## Downloads ## 91 | 92 |
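All of the channel and feature files linked below are stored in the HDF5-based MATLAB v7.3 format (per the documentation above), so they can be inspected directly from Python once downloaded. The snippet below is a minimal sketch, assuming `h5py` is installed; the file name and path are illustrative and should be adjusted to wherever the archive was extracted. The training scripts in this repository instead load the prepared datasets through `data_loader.load_data` (see `train_and_test.py`).

```python
# Minimal inspection sketch (illustrative path; adjust to your download location).
import h5py
import numpy as np

path = './data/channels_jacobs_Jul28.mat'  # hypothetical local copy of a channel file

with h5py.File(path, 'r') as f:
    # Top-level keys should mirror the variables documented above
    # (plus HDF5 bookkeeping groups such as '#refs#').
    print(list(f.keys()))

    labels = np.array(f['labels'])   # MATLAB arrays are column-major, so axes
    rssi = np.array(f['RSSI'])       # appear reversed relative to the shapes above
    print('labels:', labels.shape)   # e.g. (2, n_datapoints)
    print('RSSI  :', rssi.shape)     # e.g. (n_ap, n_datapoints)

# The feature files follow the same pattern, e.g.:
# with h5py.File('./data/features_jacobs_Jul28.mat', 'r') as g:
#     x = np.array(g['features_with_offset'])  # axes again appear reversed in h5py's view
```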
93 | 94 |
95 | 96 | ### Channel Downloads: 97 | 98 | Cumulative Downloads: [All channels](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EfERb1sUk65CjMrSIyD1b9kB0MFKf-d57gO3d7jRses6BQ?download=1) 99 | 100 | Individual Downloads: 101 | - [jacobs_Jul28](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/Ede931QqxmxFmHwiYz_H5dwBHVH8SnB02BjfAAWpD9FXXQ?download=1) 102 | - [jacobs_Jul28_2](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/ESiVhglOHNNPh7h5IjGSz3ABzuzyDVI-XCzWBJFouu5IoA?download=1) 103 | - [jacobs_Aug16_1](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/ER16mpDpebhMof2Gqd-hwoEB5koMPqkf7WKFbnGzsXaoRQ?download=1) 104 | - [jacobs_Aug16_3](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EbkEtMzmNU5Em9knfGr2iLABOyNOEjfXwXeBRncGYQABww?download=1) 105 | - [jacobs_Aug16_4_ref](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EdxQp9YoxtNBm3pZeMfiw1gBSYaC9FoXUaukNSEn8dV9Mw?download=1) 106 | - [July16](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EUSLpysLge9EsVAe-r96ToUB_DWHcmMs2-kM_ANeFYWYcg?download=1) 107 | - [July18](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/Eela0I6LUQJNpwj_nBSD4B0BMnbt2ZQnrgqIKFTkznWcaw?download=1) 108 | - [July22_2_ref](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EZySLl-lUIBIiGdpR9tjgksBIEP2jqq4pRshkHxekcPNaA?download=1) 109 | 110 |
111 | 112 |
113 | 114 | ### Feature Downloads: 115 | 116 | Cumulative Donloads: [Features Atkinson](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/ET3bKqoYpExCmanla2bLUJEBlC8TXhLL9U2ygNLzMWOrYg?download=1). [Features Jacobs-1](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EbkyAttzBLFMtPlYgkxGMaYB_J6tBExvs1qw8DazkzSdQQ?download=1), [Features Jacobs-2](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/Eeku7wzVdL9FrvhMasCr3D8Ba_YeoZZo7pvU3wkCMglaVA?download=1), [Features Jacobs-3](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/Eb82hHQ5SVdJqqFi4fkC1dYB9o76v43jNT1Df5WHC4tm5A?download=1). 117 | 118 | Individual Downloads: 119 | 120 | - [jacobs_Jul28](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EYRJAe2dHaRNt6AGTpA9bEkBp7N0lEYScmEzT4HNaNbx1Q?download=1) 121 | - [jacobs_Jul28_2](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EeQf1sXiWehGsD5BCQ08ui8BiDOdhNyq_f7Bf3OHe-lXZw?download=1) 122 | - [jacobs_Aug16_1](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EQ3xv70aECdDguiYTA3px0cBUQCJc5T7WFFrjeb67Ww2CA?download=1) 123 | - [jacobs_Aug16_3](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EUxwVT0kyFBKh2LY45vfrMYBlDApo4Alyr3xzyxOUsf0cQ?download=1) 124 | - [jacobs_Aug16_4_ref](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/ETdyRRQe7UlJg5Aa5VsFf1ABsLj-aQWnRINB2VpHX_6XNw?download=1) 125 | - [July16](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/Eakj2NQkpHRAjOtNUeV6y58B0tgFRVDuRpRnA6os5EXhBw?download=1) 126 | - [July18](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EVI3UwbFH9ZMuk3N0sORXpgBXswXPXJWb5VMl6HW-Tl5ng?download=1) 127 | - [July22_2_ref](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EVkpRrl4ZaxBvyqXN5nWOLYBfPWIYfSvhWN6YeNfKdOXFA?download=1) 128 | 129 |
130 | 131 |
132 | 133 | ### Data Split: 134 | 135 | Dataset Split IDs to replicate results from [DLoc](https://wcsng.ucsd.edu/dloc/) using the open-sourced [code](https://github.com/ucsdwcsng/DLoc_pt_code) can be downloaded at [split_ids](https://ucsdcloud-my.sharepoint.com/:u:/g/personal/sayyalas_ucsd_edu/EUHy8MzAmkJAtV5K9CdmpzUBGUs8zU6vl-FQNCmtyNv2Fg?download=1). 136 | 137 | Folder Metadata: 138 | data_split_idx 139 | 140 | - data_split_ids_<dataset_name>.mat 141 | - fov_test_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_fov_test_<dataset_name>.mat** 142 | - fov_train_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_fov_train_<dataset_name>.mat** 143 | - non_fov_test_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_non_fov_test_<dataset_name>.mat** 144 | - non_fov_train_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_non_fov_train_<dataset_name>.mat** 145 | - data_split_ids_<dataset_name>_space_gen.mat 146 | - test_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_test_<dataset_name>.mat**. Usually test_idx = [fov_test_idx;non_fov_test_idx] 147 | - train_idx: MATLAB indices of the points that are selected in **dataset_<dataset_name>.mat** to generate **dataset_train_<dataset_name>.mat**. Usually train_idx = [fov_train_idx;non_fov_train_idx] 148 | 149 |
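To reproduce the published splits, these index vectors are applied along the `n_datapoints` axis of the corresponding dataset file. A minimal sketch follows; it assumes the split files open with `scipy.io.loadmat` (if a file turns out to be saved as MATLAB v7.3/HDF5, read it with `h5py` as in the earlier snippet), and the file name is illustrative. The one subtle point is that the stored indices are MATLAB-style and therefore 1-based.

```python
# Minimal sketch: turn the published MATLAB split indices into 0-based numpy indices.
import numpy as np
from scipy.io import loadmat

split = loadmat('./data/data_split_ids_jacobs_Jul28.mat')  # hypothetical local path

def to_python_idx(v):
    """Flatten a MATLAB index vector and shift from 1-based to 0-based."""
    return np.asarray(v).ravel().astype(int) - 1

fov_train_idx = to_python_idx(split['fov_train_idx'])
fov_test_idx = to_python_idx(split['fov_test_idx'])

# A loaded feature tensor of shape (n_datapoints, ...) could then be sliced as:
# train_features = features_with_offset[fov_train_idx]
# test_features  = features_with_offset[fov_test_idx]
```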
150 | 151 | 152 | 153 |
154 | 155 | #### All the dataset downloads are **PASSWORD** protected. To get the password, please read and agree to the [terms and conditions](https://forms.gle/6mvdGq9Nw69Tnhn99). You can then go ahead and download datasets from the links above. 156 | 157 | --- 158 | 159 | ## CITATION ## 160 | 161 | - Ayyalasomayajula R, Arun A, Wu C, Sharma S, Sethi AR, Vasisht D, Bharadia D. Deep learning based wireless localization for indoor navigation. In Proceedings of the 26th Annual International Conference on Mobile Computing and Networking 2020 Apr 16 (pp. 1-14). 162 | 163 | [Bibtex](/ref/dloc_bib.md) 164 | 165 | --------------------------------------------------------------------------------