├── util
│   ├── graph
│   │   ├── __init__.py
│   │   └── core.py
│   ├── features
│   │   ├── __init__.py
│   │   ├── core.py
│   │   └── building.py
│   ├── spatial
│   │   ├── __init__.py
│   │   └── core.py
│   ├── __init__.py
│   ├── image_pool.py
│   ├── html.py
│   ├── merge.py
│   ├── util.py
│   ├── get_data.py
│   ├── tiles.py
│   └── visualizer.py
├── images
│   ├── logo.jpg
│   └── Summary.jpg
├── data
│   ├── single_dataset.py
│   ├── image_folder.py
│   ├── aligned_dataset.py
│   ├── __init__.py
│   └── base_dataset.py
├── models
│   ├── __init__.py
│   ├── test_model.py
│   ├── template_model.py
│   ├── pix2pix_model.py
│   ├── cycle_gan_model.py
│   ├── base_model.py
│   └── networks.py
├── options
│   ├── __init__.py
│   ├── test_options.py
│   ├── train_options.py
│   └── base_options.py
├── .gitignore
├── .polyaxonignore
├── environment.yml
├── CITATION.cff
├── extract.py
├── predict.py
├── test.py
├── LICENSE
├── README.md
└── train.py

--------------------------------------------------------------------------------
/util/graph/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/util/features/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/util/spatial/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/images/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ualsg/GANmapper/HEAD/images/logo.jpg
--------------------------------------------------------------------------------
/images/Summary.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ualsg/GANmapper/HEAD/images/Summary.jpg
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
"""This package includes a miscellaneous collection of useful helper functions."""
--------------------------------------------------------------------------------
/options/__init__.py:
--------------------------------------------------------------------------------
"""This package includes option modules: training options, test options, and basic options (used in both training and test)."""
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
*.pyc
debug*
.polyaxon/
datasets/
checkpoints/
results/
build/
dist/
polyaxon/
notebook/
*.png
.polyaxonignore
--------------------------------------------------------------------------------
/.polyaxonignore:
--------------------------------------------------------------------------------
.git
.eggs
eggs
lib
lib64
parts
sdist
var
*.pyc
*.swp
.DS_Store
./.polyaxon
datasets
checkpoints
./datasets
./checkpoints
results
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
name: GANmapper
channels:
  - pytorch
  - defaults
dependencies:
  - python
  - pytorch
  - torchvision
  - cudatoolkit=10.2
  - scipy
  - pip
  - pip:
      - dominate==2.4.0
      - Pillow==6.1.0
      - numpy==1.16.4
      - visdom==0.1.8
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
  - family-names: "Wu"
    given-names: "Abraham Noah"
    orcid: "https://orcid.org/0000-0001-9586-3201"
    affiliation: "National University of Singapore, Singapore"
  - family-names: "Biljecki"
    given-names: "Filip"
    orcid: "https://orcid.org/0000-0002-6229-7749"
    affiliation: "National University of Singapore, Singapore"
title: "GANmapper: geographical data translation"
version: 1.0
date-released: 2022-03-08
url: "https://github.com/ualsg/GANmapper"
preferred-citation:
  type: article
  authors:
    - family-names: "Wu"
      given-names: "Abraham Noah"
      orcid: "https://orcid.org/0000-0001-9586-3201"
      affiliation: "National University of Singapore, Singapore"
    - family-names: "Biljecki"
      given-names: "Filip"
      orcid: "https://orcid.org/0000-0002-6229-7749"
      affiliation: "National University of Singapore, Singapore"
  doi: "10.1080/13658816.2022.2041643"
  journal: "International Journal of Geographical Information Science"
  title: "GANmapper: geographical data translation"
  year: 2022
--------------------------------------------------------------------------------
/options/test_options.py:
--------------------------------------------------------------------------------
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm behave differently during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=10000, help='how many test images to run')
        # override default values
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
--------------------------------------------------------------------------------
/data/single_dataset.py:
--------------------------------------------------------------------------------
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image


class SingleDataset(BaseDataset):
    """This dataset class can load a set of images specified by the path --dataroot /path/to/data.

    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.transform = get_transform(opt, grayscale=(input_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dictionary that contains A and A_paths
            A (tensor) -- an image in one domain
            A_paths (str) -- the path of the image
        """
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert('RGB')
        A = self.transform(A_img)
        return {'A': A, 'A_paths': A_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.A_paths)
--------------------------------------------------------------------------------
/extract.py:
--------------------------------------------------------------------------------
from tqdm import tqdm
import cv2
import numpy as np
import geojson
from util.tiles import tiles_from_slippy_map
from util.features.building import Building_features
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("tile_dir", type=str, help="img dir containing predicted tiles")
parser.add_argument("out", type=str, help="path to GeoJSON to save merged features to")
parser.add_argument("--input_folder_name", type=str, default='input', help="input folder name in the same root folder as predicted tile")


def convert_binary(img_path):
    '''Converts an RGB image to a binary image containing only the values (0, 255).'''
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, img = cv2.threshold(img, 10, 255, cv2.THRESH_BINARY)
    return img


def mask_to_feature(mask_dir):
    handler = Building_features()

    tiles = list(tiles_from_slippy_map(mask_dir))

    for tile, path in tqdm(tiles, ascii=True, unit="mask"):
        predicted_tile = convert_binary(path)
        street_tile = convert_binary(path.replace("fake", "input"))
        # keep only building footprints: the difference between the street
        # network and the predicted image
        building_only = cv2.absdiff(street_tile, predicted_tile)
        mask = (building_only == 255).astype(np.uint8)
        handler.apply(tile, mask)

    # output feature collection
    feature = handler.jsonify()

    return feature


if __name__ == "__main__":
    args = parser.parse_args()
    features = mask_to_feature(args.tile_dir)
    with open(args.out, "w") as fp:
        geojson.dump(features, fp)
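As an illustrative aside (not a file in this repository), the extraction step can also be driven from Python rather than the command line; the tile directory and output path below are hypothetical placeholders.

# Illustrative sketch, not part of the repository. Paths are hypothetical.
import geojson
from extract import mask_to_feature

# Vectorise the predicted tiles and write them out as GeoJSON.
features = mask_to_feature("results/ganmapper/test_latest/images")
with open("buildings.geojson", "w") as fp:
    geojson.dump(features, fp)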
--------------------------------------------------------------------------------
/predict.py:
--------------------------------------------------------------------------------
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images_predict
from util import html
from tqdm import tqdm

if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
    opt.norm = 'batch'
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers

    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    if opt.load_iter > 0:  # load_iter is 0 by default
        web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
    print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))

    if opt.eval:
        model.eval()

    for data in tqdm(dataset):
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths (original dir)
        save_images_predict(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
--------------------------------------------------------------------------------
/data/image_folder.py:
--------------------------------------------------------------------------------
"""A modified image folder class

We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both the current directory and its subdirectories.
"""

import torch.utils.data as data

from PIL import Image
import os

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.tif', '.TIF', '.tiff', '.TIFF',
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir, max_dataset_size=float("inf")):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)
    return images[:min(max_dataset_size, len(images))]


def default_loader(path):
    return Image.open(path).convert('RGB')


class ImageFolder(data.Dataset):

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        return len(self.imgs)
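As an illustrative aside (not a file in this repository), here is a minimal sketch of using the helpers above directly; the dataset path is a hypothetical placeholder.

# Illustrative sketch, not part of the repository. The path is hypothetical.
import torchvision.transforms as transforms
from data.image_folder import ImageFolder

dataset = ImageFolder("datasets/example/trainA",
                      transform=transforms.ToTensor(),
                      return_paths=True)
img, path = dataset[0]  # first image as a tensor, plus its file path
print(len(dataset), path)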
--------------------------------------------------------------------------------
/util/image_pool.py:
--------------------------------------------------------------------------------
import random
import torch


class ImagePool():
    """This class implements an image buffer that stores previously generated images.

    This buffer enables us to update discriminators using a history of generated images
    rather than the ones produced by the latest generators.
    """

    def __init__(self, pool_size):
        """Initialize the ImagePool class

        Parameters:
            pool_size (int) -- the size of the image buffer; if pool_size = 0, no buffer will be created
        """
        self.pool_size = pool_size
        if self.pool_size > 0:  # create an empty pool
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return an image from the pool.

        Parameters:
            images: the latest generated images from the generator

        Returns images from the buffer.

        With 50% probability, the buffer returns the input images.
        With 50% probability, the buffer returns images previously stored in the buffer,
        and inserts the current images into the buffer.
        """
        if self.pool_size == 0:  # if the buffer size is 0, do nothing
            return images
        return_images = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:  # if the buffer is not full, keep inserting current images into the buffer
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:  # with 50% chance, the buffer returns a previously stored image and inserts the current image into the buffer
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:  # with the other 50% chance, the buffer returns the current image
                    return_images.append(image)
        return_images = torch.cat(return_images, 0)  # collect all the images and return
        return return_images
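As an illustrative aside (not a file in this repository), this hedged sketch shows where the pool typically sits in a discriminator update; netD, fake_images, and criterion are hypothetical stand-ins.

# Illustrative sketch, not part of the repository. netD, fake_images and
# criterion are hypothetical stand-ins for real training objects.
from util.image_pool import ImagePool

pool = ImagePool(pool_size=50)

def discriminator_fake_loss(netD, fake_images, criterion):
    # Mix freshly generated images with the buffered history, then score
    # them as fake; detach so no gradient flows back into the generator.
    history = pool.query(fake_images)
    pred_fake = netD(history.detach())
    return criterion(pred_fake, False)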
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
from tqdm import tqdm

if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers

    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    if opt.load_iter > 0:  # load_iter is 0 by default
        web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
    print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: it should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()

    for i, data in tqdm(enumerate(dataset)):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths (original dir)
        if i % 20 == 0:  # print progress every 20 images
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
--------------------------------------------------------------------------------
/data/aligned_dataset.py:
--------------------------------------------------------------------------------
import os
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image


class AlignedDataset(BaseDataset):
    """A dataset class for paired image datasets.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
        assert(self.opt.load_size >= self.opt.crop_size)  # crop_size should be smaller than the size of the loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) -- an image in the input domain
            B (tensor) -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        # read an image given a random integer index
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        # split the AB image into A and B
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B = AB.crop((w2, 0, w, h))

        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

        A = A_transform(A)
        B = B_transform(B)

        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
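As an illustrative aside (not a file in this repository), the side-by-side {A,B} layout that AlignedDataset expects can be reproduced with a few lines of PIL; the file name is a hypothetical placeholder.

# Illustrative sketch, not part of the repository. The file name is hypothetical.
from PIL import Image

AB = Image.open("datasets/example/train/0001.jpg").convert("RGB")
w, h = AB.size
A = AB.crop((0, 0, w // 2, h))  # left half: input domain
B = AB.crop((w // 2, 0, w, h))  # right half: target domain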
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2021, Abraham Noah Wu
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


--------------------------- LICENSE FOR cyclegan --------------------------------

Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

--------------------------- LICENSE FOR pix2pix --------------------------------
BSD License

For pix2pix software
Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
--------------------------------------------------------------------------------
/util/spatial/core.py:
--------------------------------------------------------------------------------
import functools

import pyproj
import shapely.ops

from rtree.index import Index, Property


def project(shape, source, target):
    """Projects a geometry from one coordinate system into another.

    Args:
        shape: the geometry to project.
        source: the source EPSG spatial reference system identifier.
        target: the target EPSG spatial reference system identifier.

    Returns:
        The projected geometry in the target coordinate system.
    """

    transformer = pyproj.Transformer.from_crs(source, target)
    return shapely.ops.transform(transformer.transform, shape)


def union(shapes):
    """Returns the union of all shapes.

    Args:
        shapes: the geometries to merge into one.

    Returns:
        The union of all shapes as one shape.
    """

    assert shapes

    def fn(lhs, rhs):
        return lhs.union(rhs)

    return functools.reduce(fn, shapes)


ea_transformer = pyproj.Transformer.from_crs("epsg:4326", "esri:54009")
wgs_ellipsoid_transformer = pyproj.Transformer.from_crs("epsg:4326", "epsg:3395")
ellipsoid_wgs_transformer = pyproj.Transformer.from_crs("epsg:3395", "epsg:4326")


def project_ea(shape):
    return shapely.ops.transform(ea_transformer.transform, shape)


def project_wgs_el(shape):
    return shapely.ops.transform(wgs_ellipsoid_transformer.transform, shape)


def project_el_wgs(shape):
    return shapely.ops.transform(ellipsoid_wgs_transformer.transform, shape)


def iou(lhs, rhs):
    """Calculates the intersection-over-union metric between two shapes.

    Args:
        lhs: first shape for IoU calculation.
        rhs: second shape for IoU calculation.

    Returns:
        IoU metric in range [0, 1]
    """

    # equal-area projection for comparing shape areas
    lhs = project_ea(lhs)
    rhs = project_ea(rhs)

    intersection = lhs.intersection(rhs)
    union = lhs.union(rhs)

    rv = intersection.area / union.area
    assert 0 <= rv <= 1

    return rv


def make_index(shapes):
    """Creates an index for fast and efficient spatial queries.

    Args:
        shapes: shapely shapes to bulk-insert bounding boxes for into the spatial index.

    Returns:
        The spatial index created from the shapes' bounding boxes.
    """

    # Todo: benchmark these for our use-cases
    prop = Property()
    prop.dimension = 2
    prop.leaf_capacity = 1000
    prop.fill_factor = 0.9

    def bounded():
        for i, shape in enumerate(shapes):
            yield (i, shape.bounds, None)

    return Index(bounded(), properties=prop)
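As an illustrative aside (not a file in this repository), a hedged sketch of how make_index and iou combine for a nearest-candidate query; the coordinates are arbitrary WGS84 boxes.

# Illustrative sketch, not part of the repository. Coordinates are arbitrary.
import shapely.geometry
from util.spatial.core import make_index, iou

a = shapely.geometry.box(103.800, 1.300, 103.810, 1.310)
b = shapely.geometry.box(103.805, 1.305, 103.815, 1.315)

idx = make_index([a, b])
candidates = list(idx.intersection(a.bounds))  # indices whose bounding boxes touch a's
print(candidates, iou(a, b))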
--------------------------------------------------------------------------------
/util/graph/core.py:
--------------------------------------------------------------------------------
import collections


class UndirectedGraph:
    """Simple undirected graph.

    Note: stores edges; cannot store vertices without edges.
    """

    def __init__(self):
        """Creates an empty `UndirectedGraph` instance."""

        # Todo: We might need a compressed sparse row graph (i.e. adjacency array)
        # to make this scale. Let's circle back when we run into this limitation.
        self.edges = collections.defaultdict(set)

    def add_edge(self, s, t):
        """Adds an edge to the graph.

        Args:
            s: the source vertex.
            t: the target vertex.

        Note: because this is an undirected graph, for every edge `s, t` an edge `t, s` is added.
        """

        self.edges[s].add(t)
        self.edges[t].add(s)

    def targets(self, v):
        """Returns all outgoing targets for a vertex.

        Args:
            v: the vertex to return targets for.

        Returns:
            A list of all outgoing targets for the vertex.
        """

        return self.edges[v]

    def vertices(self):
        """Returns all vertices in the graph.

        Returns:
            A set of all vertices in the graph.
        """

        return self.edges.keys()

    def empty(self):
        """Returns true if the graph is empty, false otherwise.

        Returns:
            True if the graph has no edges or vertices, false otherwise.
        """
        return len(self.edges) == 0

    def dfs(self, v):
        """Applies a depth-first search to the graph.

        Args:
            v: the vertex to start the depth-first search at.

        Yields:
            The visited graph vertices in depth-first search order.

        Note: does not include the start vertex `v` (except if an edge targets it).
        """

        stack = []
        stack.append(v)

        seen = set()

        while stack:
            s = stack.pop()

            if s not in seen:
                seen.add(s)

                for t in self.targets(s):
                    stack.append(t)

                yield s

    def components(self):
        """Computes connected components for the graph.

        Yields:
            The connected component sub-graphs consisting of vertices; in no particular order.
        """

        seen = set()

        for v in self.vertices():
            if v not in seen:
                component = set(self.dfs(v))
                component.add(v)

                seen.update(component)

                yield component
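As an illustrative aside (not a file in this repository), a short example of the graph utilities above: two edges create one connected component, a third edge a second.

# Illustrative sketch, not part of the repository.
from util.graph.core import UndirectedGraph

g = UndirectedGraph()
g.add_edge(1, 2)
g.add_edge(2, 3)  # {1, 2, 3} form one connected component
g.add_edge(4, 5)  # {4, 5} form another

print(list(g.components()))  # e.g. [{1, 2, 3}, {4, 5}]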
--------------------------------------------------------------------------------
/models/test_model.py:
--------------------------------------------------------------------------------
1 | from .base_model import BaseModel
2 | from . import networks
3 | 
4 | 
5 | class TestModel(BaseModel):
6 |     """ This TestModel can be used to generate CycleGAN results for only one direction.
7 |     This model will automatically set '--dataset_mode single', which only loads the images from one collection.
8 | 
9 |     See the test instruction for more details.
10 |     """
11 |     @staticmethod
12 |     def modify_commandline_options(parser, is_train=True):
13 |         """Add new dataset-specific options, and rewrite default values for existing options.
14 | 
15 |         Parameters:
16 |             parser -- original option parser
17 |             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
18 | 
19 |         Returns:
20 |             the modified parser.
21 | 
22 |         The model can only be used during test time. It requires '--dataset_mode single'.
23 |         You need to specify the network using the option '--model_suffix'.
24 |         """
25 |         assert not is_train, 'TestModel cannot be used during training time'
26 |         parser.set_defaults(dataset_mode='single')
27 |         parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
28 | 
29 |         return parser
30 | 
31 |     def __init__(self, opt):
32 |         """Initialize the TestModel class.
33 | 
34 |         Parameters:
35 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
36 |         """
37 |         assert(not opt.isTrain)
38 |         BaseModel.__init__(self, opt)
39 |         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
40 |         self.loss_names = []
41 |         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
42 |         self.visual_names = ['real', 'fake']
43 |         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
44 |         self.model_names = ['G' + opt.model_suffix]  # only generator is needed.
45 |         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
46 |                                       opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
47 | 
48 |         # assigns the model to self.netG_[suffix] so that it can be loaded
49 |         # please see <BaseModel.load_networks>
50 |         setattr(self, 'netG' + opt.model_suffix, self.netG)  # store netG in self.
51 | 
52 |     def set_input(self, input):
53 |         """Unpack input data from the dataloader and perform necessary pre-processing steps.
54 | 
55 |         Parameters:
56 |             input: a dictionary that contains the data itself and its metadata information.
57 | 
58 |         We need to use 'single_dataset' dataset mode. It only loads images from one domain.
59 |         """
60 |         self.real = input['A'].to(self.device)
61 |         self.image_paths = input['A_paths']
62 | 
63 |     def forward(self):
64 |         """Run forward pass."""
65 |         self.fake = self.netG(self.real)  # G(real)
66 | 
67 |     def optimize_parameters(self):
68 |         """No optimization for test model."""
69 |         pass
70 | 
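A hedged sketch of how this model might be invoked through the test pipeline; the data path and experiment name are placeholders, and the exact flags come from the options classes further below:

```sh
# TestModel sets --dataset_mode single by itself; --model_suffix picks the generator weights.
python test.py --dataroot <path to source tiles> --name <experiment name> --model test
```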
--------------------------------------------------------------------------------
/util/html.py:
--------------------------------------------------------------------------------
1 | import dominate
2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br
3 | import os
4 | 
5 | 
6 | class HTML:
7 |     """This HTML class allows us to save images and write texts into a single HTML file.
8 | 
9 |     It consists of functions such as <add_header> (add a text header to the HTML file),
10 |     <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
11 |     It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
12 |     """
13 | 
14 |     def __init__(self, web_dir, title, refresh=0):
15 |         """Initialize the HTML classes
16 | 
17 |         Parameters:
18 |             web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
19 |             title (str) -- the webpage name
20 |             refresh (int) -- how often the website refreshes itself; if 0, no refreshing
21 |         """
22 |         self.title = title
23 |         self.web_dir = web_dir
24 |         self.img_dir = os.path.join(self.web_dir, 'images')
25 |         if not os.path.exists(self.web_dir):
26 |             os.makedirs(self.web_dir)
27 |         if not os.path.exists(self.img_dir):
28 |             os.makedirs(self.img_dir)
29 | 
30 |         self.doc = dominate.document(title=title)
31 |         if refresh > 0:
32 |             with self.doc.head:
33 |                 meta(http_equiv="refresh", content=str(refresh))
34 | 
35 |     def get_image_dir(self):
36 |         """Return the directory that stores images"""
37 |         return self.img_dir
38 | 
39 |     def add_header(self, text):
40 |         """Insert a header to the HTML file
41 | 
42 |         Parameters:
43 |             text (str) -- the header text
44 |         """
45 |         with self.doc:
46 |             h3(text)
47 | 
48 |     def add_images(self, ims, txts, links, width=400):
49 |         """Add images to the HTML file
50 | 
51 |         Parameters:
52 |             ims (str list) -- a list of image paths
53 |             txts (str list) -- a list of image names shown on the website
54 |             links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
55 |         """
56 |         self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
57 |         self.doc.add(self.t)
58 |         with self.t:
59 |             with tr():
60 |                 for im, txt, link in zip(ims, txts, links):
61 |                     with td(style="word-wrap: break-word;", halign="center", valign="top"):
62 |                         with p():
63 |                             with a(href=os.path.join('images', link)):
64 |                                 img(style="width:%dpx" % width, src=os.path.join('images', im))
65 |                             br()
66 |                             p(txt)
67 | 
68 |     def save(self):
69 |         """Save the current content to the HTML file"""
70 |         html_file = '%s/index.html' % self.web_dir
71 |         f = open(html_file, 'wt')
72 |         f.write(self.doc.render())
73 |         f.close()
74 | 
75 | 
76 | if __name__ == '__main__':  # we show an example usage here.
77 |     html = HTML('web/', 'test_html')
78 |     html.add_header('hello world')
79 | 
80 |     ims, txts, links = [], [], []
81 |     for n in range(4):
82 |         ims.append('image_%d.png' % n)
83 |         txts.append('text_%d' % n)
84 |         links.append('image_%d.png' % n)
85 |     html.add_images(ims, txts, links)
86 |     html.save()
87 | 
--------------------------------------------------------------------------------
/util/merge.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 | 
4 | import geojson
5 | 
6 | from tqdm import tqdm
7 | import shapely.geometry
8 | 
9 | from util.spatial.core import make_index, union, project_ea, project_wgs_el, project_el_wgs
10 | from util.graph.core import UndirectedGraph
11 | 
12 | 
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--features", type=str, help="GeoJSON file to read features from")
15 | parser.add_argument("--threshold", type=int, required=True, help="minimum distance to adjacent features, in m")
16 | parser.add_argument("--out", type=str, help="path to GeoJSON to save merged features to")
17 | 
18 | 
19 | 
20 | def main(args):
21 |     with open(args.features) as fp:
22 |         collection = geojson.load(fp)
23 | 
24 |     shapes = [shapely.geometry.shape(feature["geometry"]) for feature in collection["features"]]
25 |     del collection
26 | 
27 |     graph = UndirectedGraph()
28 |     idx = make_index(shapes)
29 | 
30 |     def buffered(shape, args):
31 |         projected = project_wgs_el(shape)
32 |         buffered = projected.buffer(args.threshold)
33 |         unprojected = project_el_wgs(buffered)
34 |         return unprojected
35 | 
36 |     def unbuffered(shape, args):
37 |         projected = project_wgs_el(shape)
38 |         unbuffered = projected.buffer(-1 * args.threshold)
39 |         unprojected = project_el_wgs(unbuffered)
40 |         return unprojected
41 | 
42 |     for i, shape in enumerate(tqdm(shapes, desc="Building graph", unit="shapes", ascii=True)):
43 |         embiggened = buffered(shape, args)
44 | 
45 |         graph.add_edge(i, i)
46 | 
47 |         nearest = [j for j in idx.intersection(embiggened.bounds, objects=False) if i != j]
48 | 
49 |         for t in nearest:
50 |             if embiggened.intersects(shapes[t]):
51 |                 graph.add_edge(i, t)
52 | 
53 |     components = list(graph.components())
54 |     assert sum([len(v) for v in components]) == len(shapes), "components capture all shape indices"
55 | 
56 |     features = []
57 | 
58 |     for component in tqdm(components, desc="Merging components", unit="component", ascii=True):
59 |         embiggened = [buffered(shapes[v], args) for v in component]
60 |         merged = unbuffered(union(embiggened), args)
61 | 
62 |         if merged.is_valid:
63 |             # Orient exterior ring of the polygon in counter-clockwise direction.
64 |             if isinstance(merged, shapely.geometry.polygon.Polygon):
65 |                 merged = shapely.geometry.polygon.orient(merged, sign=1.0)
66 |             elif isinstance(merged, shapely.geometry.multipolygon.MultiPolygon):
67 |                 merged = [shapely.geometry.polygon.orient(geom, sign=1.0) for geom in merged.geoms]
68 |                 merged = shapely.geometry.MultiPolygon(merged)
69 |             else:
70 |                 print("Warning: merged feature is neither Polygon nor MultiPolygon, skipping", file=sys.stderr)
71 |                 continue
72 | 
73 |             # equal-area projection; round to full m^2, we're not that precise anyway
74 |             area = int(round(project_ea(merged).area))
75 | 
76 |             feature = geojson.Feature(geometry=shapely.geometry.mapping(merged), properties={"area": area})
77 |             features.append(feature)
78 |         else:
79 |             print("Warning: merged feature is not valid, skipping", file=sys.stderr)
80 | 
81 |     collection = geojson.FeatureCollection(features)
82 | 
83 |     with open(args.out, "w") as fp:
84 |         geojson.dump(collection, fp)
85 | 
86 | if __name__ == "__main__":
87 |     args = parser.parse_args()
88 | 
89 |     main(args)
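A minimal invocation sketch for this merging step; it assumes the script is run as a module from the repository root (so the `util` imports resolve), and the file names are placeholders:

```sh
python -m util.merge --features buildings.geojson --threshold 5 --out merged.geojson
```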
--------------------------------------------------------------------------------
/options/train_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 | 
3 | 
4 | class TrainOptions(BaseOptions):
5 |     """This class includes training options.
6 | 
7 |     It also includes shared options defined in BaseOptions.
8 |     """
9 | 
10 |     def initialize(self, parser):
11 |         parser = BaseOptions.initialize(self, parser)
12 |         # visdom and HTML visualization parameters
13 |         parser.add_argument('--display_freq', type=int, default=10, help='frequency of showing training results on screen')
14 |         parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
15 |         parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
16 |         parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
17 |         parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
18 |         parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
19 |         parser.add_argument('--update_html_freq', type=int, default=2000, help='frequency of saving training results to html')
20 |         parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
21 |         parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
22 |         # network saving and loading parameters
23 |         parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
24 |         parser.add_argument('--save_epoch_freq', type=int, default=20, help='frequency of saving checkpoints at the end of epochs')
25 | 
26 |         parser.add_argument('--val_metric_freq', type=int, default=1, help='frequency of evaluating validation metrics, in epochs')
27 |         parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
28 |         parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
29 |         parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
30 |         parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
31 |         # training parameters
32 |         parser.add_argument('--n_epochs', type=int, default=150, help='number of epochs with the initial learning rate')
33 |         parser.add_argument('--n_epochs_decay', type=int, default=150, help='number of epochs to linearly decay learning rate to zero')
34 |         parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
35 |         parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
36 |         parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
37 |         parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
38 |         parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
39 |         parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
40 | 
41 |         self.isTrain = True
42 |         return parser
43 | 
--------------------------------------------------------------------------------
/util/util.py:
--------------------------------------------------------------------------------
1 | """This module contains simple helper functions """
2 | from __future__ import print_function
3 | import torch
4 | import numpy as np
5 | from PIL import Image
6 | import os
7 | 
8 | 
9 | def tensor2im(input_image, imtype=np.uint8):
10 |     """Converts a Tensor array into a numpy image array.
11 | 
12 |     Parameters:
13 |         input_image (tensor) -- the input image tensor array
14 |         imtype (type) -- the desired type of the converted numpy array
15 |     """
16 |     if not isinstance(input_image, np.ndarray):
17 |         if isinstance(input_image, torch.Tensor):  # get the data from a variable
18 |             image_tensor = input_image.data
19 |         else:
20 |             return input_image
21 |         image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
22 |         if image_numpy.shape[0] == 1:  # grayscale to RGB
23 |             image_numpy = np.tile(image_numpy, (3, 1, 1))
24 |         image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
25 |     else:  # if it is a numpy array, do nothing
26 |         image_numpy = input_image
27 |     return image_numpy.astype(imtype)
28 | 
29 | 
30 | def diagnose_network(net, name='network'):
31 |     """Calculate and print the mean of the absolute gradients
32 | 
33 |     Parameters:
34 |         net (torch network) -- Torch network
35 |         name (str) -- the name of the network
36 |     """
37 |     mean = 0.0
38 |     count = 0
39 |     for param in net.parameters():
40 |         if param.grad is not None:
41 |             mean += torch.mean(torch.abs(param.grad.data))
42 |             count += 1
43 |     if count > 0:
44 |         mean = mean / count
45 |     print(name)
46 |     print(mean)
47 | 
48 | 
49 | def save_image(image_numpy, image_path, aspect_ratio=1.0):
50 |     """Save a numpy image to the disk
51 | 
52 |     Parameters:
53 |         image_numpy (numpy array) -- input numpy array
54 |         image_path (str) -- the path of the image
55 |     """
56 | 
57 |     image_pil = Image.fromarray(image_numpy)
58 |     h, w, _ = image_numpy.shape
59 | 
60 |     if aspect_ratio > 1.0:
61 |         image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
62 |     if aspect_ratio < 1.0:
63 |         image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
64 |     image_pil.save(image_path)
65 | 
66 | 
67 | def print_numpy(x, val=True, shp=False):
68 |     """Print the mean, min, max, median, std, and size of a numpy array
69 | 
70 |     Parameters:
71 |         val (bool) -- if print the values of the numpy array
72 |         shp (bool) -- if print the shape of the numpy array
73 |     """
74 |     x = x.astype(np.float64)
75 |     if shp:
76 |         print('shape,', x.shape)
77 |     if val:
78 |         x = x.flatten()
79 |         print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
80 |             np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
81 | 
82 | 
83 | def mkdirs(paths):
84 |     """create empty directories if they don't exist
85 | 
86 |     Parameters:
87 |         paths (str list) -- a list of directory paths
88 |     """
89 |     if isinstance(paths, list) and not isinstance(paths, str):
90 |         for path in paths:
91 |             mkdir(path)
92 |     else:
93 |         mkdir(paths)
94 | 
95 | 
96 | def mkdir(path):
97 |     """create a single empty directory if it didn't exist
98 | 
99 |     Parameters:
100 |         path (str) -- a single directory path
101 |     """
102 |     if not os.path.exists(path):
103 |         os.makedirs(path)
104 | 
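To illustrate the two most-used helpers above, converting a generator output in `[-1, 1]` to an image file could look like this; the random tensor is a stand-in for a real network output:

```python
import torch
from util.util import tensor2im, save_image

fake = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in for G's output, values in [-1, 1]
image = tensor2im(fake)                    # H x W x 3 uint8 numpy array
save_image(image, 'fake.png')
```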
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
1 | """This package includes all the modules related to data loading and preprocessing
2 | 
3 | To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
4 | You need to implement four functions:
5 |     -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
6 |     -- <__len__>: return the size of dataset.
7 |     -- <__getitem__>: get a data point from data loader.
8 |     -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
9 | 
10 | Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
11 | See our template dataset class 'template_dataset.py' for more details.
12 | """
13 | import importlib
14 | import torch.utils.data
15 | from data.base_dataset import BaseDataset
16 | 
17 | 
18 | def find_dataset_using_name(dataset_name):
19 |     """Import the module "data/[dataset_name]_dataset.py".
20 | 
21 |     In the file, the class called DatasetNameDataset() will
22 |     be instantiated. It has to be a subclass of BaseDataset,
23 |     and it is case-insensitive.
24 |     """
25 |     dataset_filename = "data." + dataset_name + "_dataset"
26 |     datasetlib = importlib.import_module(dataset_filename)
27 | 
28 |     dataset = None
29 |     target_dataset_name = dataset_name.replace('_', '') + 'dataset'
30 |     for name, cls in datasetlib.__dict__.items():
31 |         if name.lower() == target_dataset_name.lower() \
32 |            and issubclass(cls, BaseDataset):
33 |             dataset = cls
34 | 
35 |     if dataset is None:
36 |         raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
37 | 
38 |     return dataset
39 | 
40 | 
41 | def get_option_setter(dataset_name):
42 |     """Return the static method <modify_commandline_options> of the dataset class."""
43 |     dataset_class = find_dataset_using_name(dataset_name)
44 |     return dataset_class.modify_commandline_options
45 | 
46 | 
47 | def create_dataset(opt):
48 |     """Create a dataset given the option.
49 | 
50 |     This function wraps the class CustomDatasetDataLoader.
51 |     This is the main interface between this package and 'train.py'/'test.py'
52 | 
53 |     Example:
54 |         >>> from data import create_dataset
55 |         >>> dataset = create_dataset(opt)
56 |     """
57 |     data_loader = CustomDatasetDataLoader(opt)
58 |     dataset = data_loader.load_data()
59 |     return dataset
60 | 
61 | 
62 | class CustomDatasetDataLoader():
63 |     """Wrapper class of Dataset class that performs multi-threaded data loading"""
64 | 
65 |     def __init__(self, opt):
66 |         """Initialize this class
67 | 
68 |         Step 1: create a dataset instance given the name [dataset_mode]
69 |         Step 2: create a multi-threaded data loader.
70 |         """
71 |         self.opt = opt
72 |         dataset_class = find_dataset_using_name(opt.dataset_mode)
73 |         self.dataset = dataset_class(opt)
74 |         print("dataset [%s] was created" % type(self.dataset).__name__)
75 |         self.dataloader = torch.utils.data.DataLoader(
76 |             self.dataset,
77 |             batch_size=opt.batch_size,
78 |             shuffle=not opt.serial_batches,
79 |             num_workers=int(opt.num_threads))
80 | 
81 |     def load_data(self):
82 |         return self
83 | 
84 |     def __len__(self):
85 |         """Return the number of data in the dataset"""
86 |         return min(len(self.dataset), self.opt.max_dataset_size)
87 | 
88 |     def __iter__(self):
89 |         """Return a batch of data"""
90 |         for i, data in enumerate(self.dataloader):
91 |             if i * self.opt.batch_size >= self.opt.max_dataset_size:
92 |                 break
93 |             yield data
94 | 
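A sketch of a typical consumer of this loader (compare `train.py` further below); the command-line flags are assumed to supply `dataroot`, `dataset_mode`, and the batching options:

```python
from options.train_options import TrainOptions
from data import create_dataset

opt = TrainOptions().parse()   # e.g. run with --dataroot datasets/... --model pix2pix
dataset = create_dataset(opt)  # wraps CustomDatasetDataLoader
print('The number of training images = %d' % len(dataset))

for batch in dataset:          # dicts such as {'A': ..., 'B': ..., 'A_paths': ...}
    break
```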
--------------------------------------------------------------------------------
/util/get_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import os
3 | import tarfile
4 | import requests
5 | from warnings import warn
6 | from zipfile import ZipFile
7 | from bs4 import BeautifulSoup
8 | from os.path import abspath, isdir, join, basename
9 | 
10 | 
11 | class GetData(object):
12 |     """A Python script for downloading CycleGAN or pix2pix datasets.
13 | 
14 |     Parameters:
15 |         technique (str) -- One of: 'cyclegan' or 'pix2pix'.
16 |         verbose (bool) -- If True, print additional information.
17 | 
18 |     Examples:
19 |         >>> from util.get_data import GetData
20 |         >>> gd = GetData(technique='cyclegan')
21 |         >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.
22 | 
23 |     Alternatively, you can use bash scripts: 'scripts/download_pix2pix_model.sh'
24 |     and 'scripts/download_cyclegan_model.sh'.
25 |     """
26 | 
27 |     def __init__(self, technique='cyclegan', verbose=True):
28 |         url_dict = {
29 |             'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
30 |             'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
31 |         }
32 |         self.url = url_dict.get(technique.lower())
33 |         self._verbose = verbose
34 | 
35 |     def _print(self, text):
36 |         if self._verbose:
37 |             print(text)
38 | 
39 |     @staticmethod
40 |     def _get_options(r):
41 |         soup = BeautifulSoup(r.text, 'lxml')
42 |         options = [h.text for h in soup.find_all('a', href=True)
43 |                    if h.text.endswith(('.zip', 'tar.gz'))]
44 |         return options
45 | 
46 |     def _present_options(self):
47 |         r = requests.get(self.url)
48 |         options = self._get_options(r)
49 |         print('Options:\n')
50 |         for i, o in enumerate(options):
51 |             print("{0}: {1}".format(i, o))
52 |         choice = input("\nPlease enter the number of the "
53 |                        "dataset above you wish to download:")
54 |         return options[int(choice)]
55 | 
56 |     def _download_data(self, dataset_url, save_path):
57 |         if not isdir(save_path):
58 |             os.makedirs(save_path)
59 | 
60 |         base = basename(dataset_url)
61 |         temp_save_path = join(save_path, base)
62 | 
63 |         with open(temp_save_path, "wb") as f:
64 |             r = requests.get(dataset_url)
65 |             f.write(r.content)
66 | 
67 |         if base.endswith('.tar.gz'):
68 |             obj = tarfile.open(temp_save_path)
69 |         elif base.endswith('.zip'):
70 |             obj = ZipFile(temp_save_path, 'r')
71 |         else:
72 |             raise ValueError("Unknown File Type: {0}.".format(base))
73 | 
74 |         self._print("Unpacking Data...")
75 |         obj.extractall(save_path)
76 |         obj.close()
77 |         os.remove(temp_save_path)
78 | 
79 |     def get(self, save_path, dataset=None):
80 |         """
81 | 
82 |         Download a dataset.
83 | 84 | Parameters: 85 | save_path (str) -- A directory to save the data to. 86 | dataset (str) -- (optional). A specific dataset to download. 87 | Note: this must include the file extension. 88 | If None, options will be presented for you 89 | to choose from. 90 | 91 | Returns: 92 | save_path_full (str) -- the absolute path to the downloaded data. 93 | 94 | """ 95 | if dataset is None: 96 | selected_dataset = self._present_options() 97 | else: 98 | selected_dataset = dataset 99 | 100 | save_path_full = join(save_path, selected_dataset.split('.')[0]) 101 | 102 | if isdir(save_path_full): 103 | warn("\n'{0}' already exists. Voiding Download.".format( 104 | save_path_full)) 105 | else: 106 | self._print('Downloading Data...') 107 | url = "{0}/{1}".format(self.url, selected_dataset) 108 | self._download_data(url, save_path=save_path) 109 | 110 | return abspath(save_path_full) 111 | -------------------------------------------------------------------------------- /util/features/core.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from PIL import Image 4 | 5 | from util.tiles import pixel_to_location 6 | 7 | 8 | def visualize(mask, path): 9 | """Writes a visual representation `.png` file for a binary mask. 10 | 11 | Args: 12 | mask: the binary mask to visualize. 13 | path: the path to save the `.png` image to. 14 | """ 15 | 16 | out = Image.fromarray(mask, mode="P") 17 | out.putpalette([0, 0, 0, 255, 255, 255]) 18 | out.save(path) 19 | 20 | 21 | def contours_to_mask(contours, shape): 22 | """Creates a binary mask for contours. 23 | 24 | Args: 25 | contours: the contours to create a mask for. 26 | shape: the resulting mask's shape 27 | 28 | Returns: 29 | The binary mask with rasterized contours. 30 | """ 31 | 32 | canvas = np.zeros(shape, np.uint8) 33 | cv2.drawContours(canvas, contours, contourIdx=-1, color=1) 34 | return canvas 35 | 36 | 37 | def featurize(tile, polygon, shape): 38 | """Transforms polygons in image pixel coordinates into world coordinates. 39 | 40 | Args: 41 | tile: the tile this polygon is in for coordinate calculation. 42 | polygon: the polygon to transform from pixel to world coordinates. 43 | shape: the image's max x and y coordinates. 44 | 45 | Returns: 46 | The closed polygon transformed into world coordinates. 47 | """ 48 | 49 | xmax, ymax = shape 50 | 51 | feature = [] 52 | 53 | for point in polygon: 54 | px, py = point[0] 55 | dx, dy = px / xmax, py / ymax 56 | 57 | feature.append(pixel_to_location(tile, dx, 1. - dy)) 58 | 59 | assert feature, "at least one location in polygon" 60 | feature.append(feature[0]) # polygons are closed 61 | 62 | return feature 63 | 64 | 65 | def denoise(mask, eps): 66 | """Removes noise from a mask. 67 | 68 | Args: 69 | mask: the mask to remove noise from. 70 | eps: the morphological operation's kernel size for noise removal, in pixel. 71 | 72 | Returns: 73 | The mask after applying denoising. 74 | """ 75 | 76 | struct = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (eps, eps)) 77 | return cv2.morphologyEx(mask, cv2.MORPH_OPEN, struct) 78 | 79 | 80 | def grow(mask, eps): 81 | """Grows a mask to fill in small holes, e.g. to establish connectivity. 82 | 83 | Args: 84 | mask: the mask to grow. 85 | eps: the morphological operation's kernel size for growing, in pixel. 86 | 87 | Returns: 88 | The mask after filling in small holes. 
89 |     """
90 | 
91 |     struct = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (eps, eps))
92 |     return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, struct)
93 | 
94 | 
95 | def contours(mask):
96 |     """Extracts contours and the relationship between them from a binary mask.
97 | 
98 |     Args:
99 |         mask: the binary mask to find contours in.
100 | 
101 |     Returns:
102 |         The detected contours as a list of points and the contour hierarchy.
103 | 
104 |     Note: the hierarchy can be used to re-construct polygons with holes as one entity.
105 |     """
106 | 
107 |     contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
108 |     return contours, hierarchy
109 | 
110 | 
111 | # Todo: should work for lines, too, but then needs a different epsilon criterion than arc length
112 | def simplify(polygon, eps):
113 |     """Simplifies a polygon to minimize the polygon's vertices.
114 | 
115 |     Args:
116 |         polygon: the polygon made up of a list of vertices.
117 |         eps: the approximation accuracy as max. percentage of the arc length, in [0, 1]
118 | 
119 |     """
120 | 
121 |     assert 0 <= eps <= 1, "approximation accuracy is percentage in [0, 1]"
122 | 
123 |     epsilon = eps * cv2.arcLength(polygon, closed=True)
124 |     return cv2.approxPolyDP(polygon, epsilon=epsilon, closed=True)
125 | 
126 | 
127 | def parents_in_hierarchy(node, tree):
128 |     """Walks a hierarchy tree upwards from a starting node, collecting all nodes on the way.
129 | 
130 |     Args:
131 |         node: the index for the starting node in the hierarchy.
132 |         tree: the hierarchy tree containing tuples of (next, prev, first child, parent) ids.
133 | 
134 |     Yields:
135 |         The node ids on the upwards path in the hierarchy tree.
136 |     """
137 | 
138 |     def parent(n):
139 |         # next, prev, fst child, parent
140 |         return n[3]
141 | 
142 |     at = tree[node]
143 |     up = parent(at)
144 | 
145 |     while up != -1:
146 |         index = up
147 |         at = tree[index]
148 |         up = parent(at)
149 | 
150 |         assert index != node, "upward path does not include starting node"
151 | 
152 |         yield index
153 | 
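Taken together, these helpers form the mask-to-polygon pipeline used by the building extractor further below; a sketch with placeholder inputs:

```python
import cv2
import mercantile
from util.features.core import denoise, contours, simplify, featurize

mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # placeholder binary tile mask
tile = mercantile.Tile(x=51885, y=32626, z=16)       # placeholder tile indices

cleaned = denoise(mask, eps=3)
polygons, hierarchy = contours(cleaned)
rings = [featurize(tile, simplify(p, 0.001), cleaned.shape[:2]) for p in polygons]
```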
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | <p align="center">
2 |   <img src="images/logo.jpg" alt="Logo">
3 | </p>
4 | 
5 | <h3 align="center">GANmapper - Geospatial Content Filling</h3>
6 | 
7 | <p align="center">
8 |   <img src="images/Summary.jpg" alt="Summary">
9 | </p>
10 | 
11 | This is the official repo of GANmapper, a building footprint generator using Generative Adversarial Networks.
12 | 
13 | ## Running GANmapper
14 | ### 1. Install prerequisites
15 | 
16 | Use `environment.yml` to create a conda environment for GANmapper
17 | 
18 | ```sh
19 | conda env create -f environment.yml
20 | conda activate GANmapper
21 | ```
22 | 
23 | ### 2. Download weights
24 | The weight files are available on figshare, in the `Checkpoints` folder.
25 | 
26 | ```https://doi.org/10.6084/m9.figshare.15103128.v1```
27 | 
28 | Place the `Checkpoints` folder in the repo.
29 | ### 3. Prediction
30 | Predictions can be carried out by running the following sample code. The name of the city depends on the name of each dataset.
31 | ```sh
32 | python predict.py --dataroot <path to source tiles> --checkpoints_dir <path to checkpoints> --name <city name>
33 | ```
34 | 
35 | Testing an area in LA:
36 | ```sh
37 | python predict.py --dataroot datasets/Exp4/LA/Source --checkpoints_dir checkpoints/Exp3 --name LA
38 | ```
39 | 
40 | Testing an area in Singapore:
41 | ```sh
42 | python predict.py --dataroot datasets/Exp4/Singapore/Source --checkpoints_dir checkpoints/Exp3 --name Singapore
43 | ```
44 | 
45 | The results will be produced as `{z}/{x}/{y}` tile directories in `./results/<name>/test_latest/images/fake`
46 | 
47 | You can choose to visualise the tiles in QGIS using a local WMTS server.
48 | 
49 | For example, use the following URL and choose Zoom 16 only.
50 | 
51 | ```
52 | file:///D:/GANmapper/results/Singapore/test_latest/images/fake/{z}/{x}/{y}.png
53 | ```
54 | 
55 | ### 4. Vectorization
56 | 
57 | If you want the output to be in GeoJSON polygons, use `extract.py`
58 | 
59 | ```sh
60 | python extract.py <prediction tiles dir> <output geojson>
61 | ```
62 | 
63 | ```sh
64 | python extract.py results/Exp4/LA/test_latest/images/fake LA.geojson
65 | ```
66 | 
67 | 
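The resulting file can be inspected like any other GeoJSON, for instance with GeoPandas (here with the hypothetical `LA.geojson` produced by the command above):

```python
import geopandas as gpd

gdf = gpd.read_file('LA.geojson')
print(len(gdf), 'building footprints')
gdf.plot()
```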
79 | 
80 | ## License
81 | 
82 | Distributed under the MIT License. See `LICENSE` for more information.
83 | 
84 | 
85 | 
86 | 
91 | 
92 | ## Citation
93 | 
94 | A [paper](https://doi.org/10.1080/13658816.2022.2041643) about the work was published in _International Journal of Geographical Information Science_, and it is available open access [here](https://ual.sg/publication/2022-ijgis-ganmapper/2022-ijgis-ganmapper.pdf).
95 | 
96 | If you like this work and would like to use it in a scientific context, please cite this article.
97 | 
98 | Wu AN, Biljecki F (2022): GANmapper: geographical data translation. International Journal of Geographical Information Science, 36(7): 1394-1422. doi:10.1080/13658816.2022.2041643
99 | 
100 | ```
101 | @article{2022_ijgis_ganmapper,
102 |   author = {Wu, Abraham Noah and Biljecki, Filip},
103 |   doi = {10.1080/13658816.2022.2041643},
104 |   journal = {International Journal of Geographical Information Science},
105 |   title = {{GANmapper: geographical data translation}},
106 |   volume = {36},
107 |   issue = {7},
108 |   pages = {1394-1422},
109 |   year = {2022}
110 | }
111 | ```
112 | 
113 | ## Contact
114 | 
115 | [Abraham Noah Wu](https://ual.sg/authors/abraham/), [Urban Analytics Lab](https://ual.sg), National University of Singapore, Singapore
116 | 
117 | 
118 | ## Acknowledgements
119 | 
120 | This research is part of the project Large-scale 3D Geospatial Data for Urban Analytics, which is supported by the National University of Singapore under the Start-Up Grant R-295-000-171-133.
121 | 
122 | We gratefully acknowledge the sources of the used input data.
123 | 
124 | GANmapper is made possible by the following packages:
125 | 
126 | * [PyTorch](https://pytorch.org/)
127 | * [GeoPandas](https://geopandas.org/)
128 | * [Robosat](https://github.com/mapbox/robosat) -
129 | the mask-to-feature function is borrowed from Robosat
130 | * [pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) -
131 | the model architecture is heavily borrowed from the awesome repo by [junyanz](https://github.com/junyanz)
--------------------------------------------------------------------------------
/util/features/building.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import collections
3 | 
4 | import geojson
5 | 
6 | import shapely.geometry
7 | 
8 | from util.features.core import denoise, grow, contours, simplify, featurize, parents_in_hierarchy
9 | 
10 | 
11 | class Building_features:
12 |     # kernel_size_denoise = 4
13 |     # kernel_size_grow = 3
14 |     # simplify_threshold = 0.01
15 |     kernel_size_denoise = 3
16 |     kernel_size_grow = 5
17 |     simplify_threshold = 0.0000001
18 | 
19 | 
20 |     def __init__(self):
21 |         self.features = []
22 | 
23 |     def apply(self, tile, mask):
24 | 
25 |         # The post-processing pipeline removes noise and fills in smaller holes. We then
26 |         # extract contours, simplify them and transform tile pixels into coordinates.
27 | 
28 |         denoised = denoise(mask, self.kernel_size_denoise)
29 |         #grown = grow(denoised, self.kernel_size_grow)
30 | 
31 |         # Contours have a hierarchy: for example an outer ring, and an inner ring for a polygon with a hole.
32 |         #
33 |         # The ith hierarchy entry is a tuple with (next, prev, fst child, parent) for the ith polygon with:
34 |         # - next is the index into the polygons for the next polygon on the same hierarchy level
35 |         # - prev is the index into the polygons for the previous polygon on the same hierarchy level
36 |         # - fst child is the index into the polygons for the ith polygon's first child polygon
37 |         # - parent is the index into the polygons for the ith polygon's single parent polygon
38 |         #
39 |         # In case of non-existent indices their index value is -1.
40 | 
41 |         multipolygons, hierarchy = contours(denoised)
42 | 
43 |         if hierarchy is None:
44 |             return
45 | 
46 |         # In the following we re-construct the hierarchy walking from polygons up to the top-most polygon.
47 |         # We then create a GeoJSON polygon with a single outer ring and potentially multiple inner rings.
48 |         #
49 |         # Note: we currently do not handle multipolygons which are nested even deeper.
50 | 
51 |         # This seems to be a bug in the OpenCV Python bindings; the C++ interface
52 |         # returns a vector but here it's always wrapped in an extra list.
53 |         assert len(hierarchy) == 1, "always single hierarchy for all polygons in multipolygon"
54 |         hierarchy = hierarchy[0]
55 | 
56 |         assert len(multipolygons) == len(hierarchy), "polygons and hierarchy in sync"
57 | 
58 |         polygons = [simplify(polygon, self.simplify_threshold) for polygon in multipolygons]
59 | 
60 |         # Todo: generalize and move to features.core
61 | 
62 |         # All child ids in hierarchy tree, keyed by root id.
63 |         features = collections.defaultdict(set)
64 | 
65 |         for i, (polygon, node) in enumerate(zip(polygons, hierarchy)):
66 |             if len(polygon) < 3:
67 |                 #print("Warning: simplified feature no longer valid polygon, skipping", file=sys.stderr)
68 |                 continue
69 | 
70 |             _, _, _, parent_idx = node
71 | 
72 |             ancestors = list(parents_in_hierarchy(i, hierarchy))
73 | 
74 |             # Only handles polygons with a nesting of two levels for now => no multipolygons.
75 |             if len(ancestors) > 1:
76 |                 #print("Warning: polygon ring nesting level too deep, skipping", file=sys.stderr)
77 |                 continue
78 | 
79 |             # A single mapping: i => {i} implies single free-standing polygon, no inner rings.
80 |             # Otherwise: i => {i, j, k, l} implies: outer ring i, inner rings j, k, l.
81 |             root = ancestors[-1] if ancestors else i
82 | 
83 |             features[root].add(i)
84 | 
85 |         for outer, inner in features.items():
86 |             rings = [featurize(tile, polygons[outer], mask.shape[:2])]
87 | 
88 |             # In mapping i => {i, ..} i is not a child.
89 |             children = inner.difference(set([outer]))
90 | 
91 |             for child in children:
92 |                 rings.append(featurize(tile, polygons[child], mask.shape[:2]))
93 | 
94 |             assert 0 < len(rings), "at least one outer ring in a polygon"
95 | 
96 |             geometry = geojson.Polygon(rings)
97 |             shape = shapely.geometry.shape(geometry)
98 | 
99 |             if shape.is_valid:
100 |                 self.features.append(geojson.Feature(geometry=geometry))
101 |             else:
102 |                 continue
103 | 
104 |     def save(self, out):
105 |         collection = geojson.FeatureCollection(self.features)
106 | 
107 |         with open(out, "w") as fp:
108 |             geojson.dump(collection, fp)
109 | 
110 |     def jsonify(self):
111 |         collection = geojson.FeatureCollection(self.features)
112 | 
113 |         return collection
114 | 
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | """General-purpose training script for image-to-image translation.
2 | 
3 | This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
4 | different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
5 | You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
6 | 
7 | It first creates the model, dataset, and visualizer given the options.
8 | It then does standard network training. During the training, it also visualizes/saves the images, prints/saves the loss plot, and saves models.
9 | The script supports continuing/resuming training. Use '--continue_train' to resume your previous training.
10 | 
11 | Example:
12 |     Train a CycleGAN model:
13 |         python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
14 |     Train a pix2pix model:
15 |         python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
16 | 
17 | See options/base_options.py and options/train_options.py for more training options.
18 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
19 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
20 | """
21 | import time
22 | from options.train_options import TrainOptions
23 | from data import create_dataset
24 | from models import create_model
25 | from util.visualizer import Visualizer
26 | 
27 | if __name__ == '__main__':
28 |     opt = TrainOptions().parse()   # get training options
29 |     dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
30 |     dataset_size = len(dataset)    # get the number of images in the dataset.
31 |     print('The number of training images = %d' % dataset_size)
32 | 
33 |     model = create_model(opt)      # create a model given opt.model and other options
34 |     model.setup(opt)               # regular setup: load and print networks; create schedulers
35 |     visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
36 |     total_iters = 0                # the total number of training iterations
37 | 
38 |     for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
39 |         epoch_start_time = time.time()  # timer for entire epoch
40 |         iter_data_time = time.time()    # timer for data loading per iteration
41 |         epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
42 |         visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch
43 |         model.update_learning_rate()    # update learning rates at the beginning of every epoch.
44 |         for i, data in enumerate(dataset):  # inner loop within one epoch
45 |             iter_start_time = time.time()   # timer for computation per iteration
46 |             if total_iters % opt.print_freq == 0:
47 |                 t_data = iter_start_time - iter_data_time
48 | 
49 |             total_iters += opt.batch_size
50 |             epoch_iter += opt.batch_size
51 |             model.set_input(data)          # unpack data from dataset and apply preprocessing
52 |             model.optimize_parameters()    # calculate loss functions, get gradients, update network weights
53 | 
54 |             if total_iters % opt.display_freq == 0:   # display images on visdom and save images to an HTML file
55 |                 save_result = total_iters % opt.update_html_freq == 0
56 |                 model.compute_visuals()
57 |                 visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
58 | 
59 |             if total_iters % opt.print_freq == 0:    # print training losses and save logging information to the disk
60 |                 losses = model.get_current_losses()
61 |                 t_comp = (time.time() - iter_start_time) / opt.batch_size
62 |                 visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
63 |                 if opt.display_id > 0:
64 |                     visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
65 | 
66 |             if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations
67 |                 print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
68 |                 save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
69 |                 model.save_networks(save_suffix)
70 | 
71 |             iter_data_time = time.time()
72 |         if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs
73 |             print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
74 |             model.save_networks('latest')
75 |             model.save_networks(epoch)
76 | 
77 |         print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
78 | 
--------------------------------------------------------------------------------
/data/base_dataset.py:
--------------------------------------------------------------------------------
1 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
2 | 
3 | It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
4 | """
5 | import random
6 | import numpy as np
7 | import torch.utils.data as data
8 | from PIL import Image
9 | import torchvision.transforms as transforms
10 | from abc import ABC, abstractmethod
11 | 
12 | 
13 | class BaseDataset(data.Dataset, ABC):
14 |     """This class is an abstract base class (ABC) for datasets.
15 | 
16 |     To create a subclass, you need to implement the following four functions:
17 |     -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
18 |     -- <__len__>: return the size of dataset.
19 |     -- <__getitem__>: get a data point.
20 |     -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
21 |     """
22 | 
23 |     def __init__(self, opt):
24 |         """Initialize the class; save the options in the class
25 | 
26 |         Parameters:
27 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
28 |         """
29 |         self.opt = opt
30 |         self.root = opt.dataroot
31 | 
32 |     @staticmethod
33 |     def modify_commandline_options(parser, is_train):
34 |         """Add new dataset-specific options, and rewrite default values for existing options.
35 | 
36 |         Parameters:
37 |             parser -- original option parser
38 |             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
39 | 
40 |         Returns:
41 |             the modified parser.
42 |         """
43 |         return parser
44 | 
45 |     @abstractmethod
46 |     def __len__(self):
47 |         """Return the total number of images in the dataset."""
48 |         return 0
49 | 
50 |     @abstractmethod
51 |     def __getitem__(self, index):
52 |         """Return a data point and its metadata information.
53 | 
54 |         Parameters:
55 |             index -- a random integer for data indexing
56 | 
57 |         Returns:
58 |             a dictionary of data with their names. It usually contains the data itself and its metadata information.
59 | """ 60 | pass 61 | 62 | 63 | def get_params(opt, size): 64 | w, h = size 65 | new_h = h 66 | new_w = w 67 | if opt.preprocess == 'resize_and_crop': 68 | new_h = new_w = opt.load_size 69 | elif opt.preprocess == 'scale_width_and_crop': 70 | new_w = opt.load_size 71 | new_h = opt.load_size * h // w 72 | 73 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) 74 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) 75 | 76 | flip = random.random() > 0.5 77 | 78 | return {'crop_pos': (x, y), 'flip': flip} 79 | 80 | 81 | def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): 82 | transform_list = [] 83 | if grayscale: 84 | transform_list.append(transforms.Grayscale(1)) 85 | if 'resize' in opt.preprocess: 86 | osize = [opt.load_size, opt.load_size] 87 | transform_list.append(transforms.Resize(osize, method)) 88 | elif 'scale_width' in opt.preprocess: 89 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) 90 | 91 | if 'crop' in opt.preprocess: 92 | if params is None: 93 | transform_list.append(transforms.RandomCrop(opt.crop_size)) 94 | else: 95 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) 96 | 97 | if opt.preprocess == 'none': 98 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) 99 | 100 | if not opt.no_flip: 101 | if params is None: 102 | transform_list.append(transforms.RandomHorizontalFlip()) 103 | elif params['flip']: 104 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) 105 | 106 | if convert: 107 | transform_list += [transforms.ToTensor()] 108 | if grayscale: 109 | transform_list += [transforms.Normalize((0.5,), (0.5,))] 110 | else: 111 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] 112 | return transforms.Compose(transform_list) 113 | 114 | 115 | def __make_power_2(img, base, method=Image.BICUBIC): 116 | ow, oh = img.size 117 | h = int(round(oh / base) * base) 118 | w = int(round(ow / base) * base) 119 | if h == oh and w == ow: 120 | return img 121 | 122 | __print_size_warning(ow, oh, w, h) 123 | return img.resize((w, h), method) 124 | 125 | 126 | def __scale_width(img, target_size, crop_size, method=Image.BICUBIC): 127 | ow, oh = img.size 128 | if ow == target_size and oh >= crop_size: 129 | return img 130 | w = target_size 131 | h = int(max(target_size * oh / ow, crop_size)) 132 | return img.resize((w, h), method) 133 | 134 | 135 | def __crop(img, pos, size): 136 | ow, oh = img.size 137 | x1, y1 = pos 138 | tw = th = size 139 | if (ow > tw or oh > th): 140 | return img.crop((x1, y1, x1 + tw, y1 + th)) 141 | return img 142 | 143 | 144 | def __flip(img, flip): 145 | if flip: 146 | return img.transpose(Image.FLIP_LEFT_RIGHT) 147 | return img 148 | 149 | 150 | def __print_size_warning(ow, oh, w, h): 151 | """Print warning information about image size(only print once)""" 152 | if not hasattr(__print_size_warning, 'has_printed'): 153 | print("The image size needs to be a multiple of 4. " 154 | "The loaded image size was (%d, %d), so it was adjusted to " 155 | "(%d, %d). 
This adjustment will be done to all images "
156 |               "whose sizes are not multiples of 4" % (ow, oh, w, h))
157 |         __print_size_warning.has_printed = True
158 | 
--------------------------------------------------------------------------------
/models/template_model.py:
--------------------------------------------------------------------------------
1 | """Model class template
2 | 
3 | This module provides a template for users to implement custom models.
4 | You can specify '--model template' to use this model.
5 | The class name should be consistent with both the filename and its model option.
6 | The filename should be <model>_model.py
7 | The class name should be <Model>Model
8 | It implements a simple image-to-image translation baseline based on regression loss.
9 | Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
10 |     min_netG ||netG(data_A) - data_B||_1
11 | You need to implement the following functions:
12 |     <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
13 |     <__init__>: Initialize this model class.
14 |     <set_input>: Unpack input data and perform data pre-processing.
15 |     <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
16 |     <optimize_parameters>: Update network weights; it will be called in every training iteration.
17 | """
18 | import torch
19 | from .base_model import BaseModel
20 | from . import networks
21 | 
22 | 
23 | class TemplateModel(BaseModel):
24 |     @staticmethod
25 |     def modify_commandline_options(parser, is_train=True):
26 |         """Add new model-specific options and rewrite default values for existing options.
27 | 
28 |         Parameters:
29 |             parser -- the option parser
30 |             is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
31 | 
32 |         Returns:
33 |             the modified parser.
34 |         """
35 |         parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
36 |         if is_train:
37 |             parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.
38 | 
39 |         return parser
40 | 
41 |     def __init__(self, opt):
42 |         """Initialize this model class.
43 | 
44 |         Parameters:
45 |             opt -- training/test options
46 | 
47 |         A few things can be done here.
48 |         - (required) call the initialization function of BaseModel
49 |         - define loss function, visualization images, model names, and optimizers
50 |         """
51 |         BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
52 |         # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
53 |         self.loss_names = ['G']  # get_current_losses looks up 'loss_' + name, i.e. self.loss_G defined below
54 |         # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
55 |         self.visual_names = ['data_A', 'data_B', 'output']
56 |         # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
57 |         # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
58 |         self.model_names = ['G']
59 |         # define networks; you can use opt.isTrain to specify different behaviors for training and test.
60 |         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
61 |         if self.isTrain:  # only defined during training time
62 |             # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
63 |             # We also provide a GANLoss class "networks.GANLoss": self.criterionGAN = networks.GANLoss().to(self.device)
64 |             self.criterionLoss = torch.nn.L1Loss()
65 |             # define and initialize optimizers. You can define one optimizer for each network.
66 |             # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
67 |             self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
68 |             self.optimizers = [self.optimizer]
69 | 
70 |         # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
71 | 
72 |     def set_input(self, input):
73 |         """Unpack input data from the dataloader and perform necessary pre-processing steps.
74 | 
75 |         Parameters:
76 |             input: a dictionary that contains the data itself and its metadata information.
77 |         """
78 |         AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
79 |         self.data_A = input['A' if AtoB else 'B'].to(self.device)  # get image data A
80 |         self.data_B = input['B' if AtoB else 'A'].to(self.device)  # get image data B
81 |         self.image_paths = input['A_paths' if AtoB else 'B_paths']  # get image paths
82 | 
83 |     def forward(self):
84 |         """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
85 |         self.output = self.netG(self.data_A)  # generate output image given the input data_A
86 | 
87 |     def backward(self):
88 |         """Calculate losses, gradients, and update network weights; called in every training iteration"""
89 |         # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
90 |         # calculate loss given the input and intermediate results
91 |         self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
92 |         self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G
93 | 
94 |     def optimize_parameters(self):
95 |         """Update network weights; it will be called in every training iteration."""
96 |         self.forward()               # first call forward to calculate intermediate results
97 |         self.optimizer.zero_grad()   # clear network G's existing gradients
98 |         self.backward()              # calculate gradients for network G
99 |         self.optimizer.step()        # update network G's weights
100 | 
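Assuming paired data in the usual layout, the template could then be exercised with a command along these lines (the dataset path and experiment name are placeholders):

```sh
python train.py --dataroot datasets/facades --name facades_template --model template
```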
--------------------------------------------------------------------------------
/models/pix2pix_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .base_model import BaseModel
3 | from . import networks
4 | 
5 | 
6 | class Pix2PixModel(BaseModel):
7 |     """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
8 | 
9 |     The model training requires '--dataset_mode aligned' dataset.
10 |     By default, it uses a '--netG unet_256' U-Net generator,
11 |     a '--netD basic' discriminator (PatchGAN),
12 |     and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
13 | 
14 |     pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
15 |     """
16 |     @staticmethod
17 |     def modify_commandline_options(parser, is_train=True):
18 |         """Add new dataset-specific options, and rewrite default values for existing options.
19 | 
20 |         Parameters:
21 |             parser -- original option parser
22 |             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
23 | 
24 |         Returns:
25 |             the modified parser.
26 | 
27 |         For pix2pix, we do not use the image buffer.
28 |         The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
29 |         By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
30 |         """
31 |         # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
32 |         parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
33 |         if is_train:
34 |             parser.set_defaults(pool_size=0, gan_mode='vanilla')
35 |             parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
36 | 
37 |         return parser
38 | 
39 |     def __init__(self, opt):
40 |         """Initialize the pix2pix class.
41 | 
42 |         Parameters:
43 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
44 |         """
45 |         BaseModel.__init__(self, opt)
46 |         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
47 |         self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
48 |         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
49 |         self.visual_names = ['real_A', 'fake_B', 'real_B']
50 |         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
51 |         if self.isTrain:
52 |             self.model_names = ['G', 'D']
53 |         else:  # during test time, only load G
54 |             self.model_names = ['G']
55 |         # define networks (both generator and discriminator)
56 |         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
57 |                                       not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
58 | 
59 |         if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
60 |             self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
61 |                                           opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
62 | 
63 |         if self.isTrain:
64 |             # define loss functions
65 |             self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
66 |             self.criterionL1 = torch.nn.L1Loss()
67 |             # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
68 |             self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
69 |             self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
70 |             self.optimizers.append(self.optimizer_G)
71 |             self.optimizers.append(self.optimizer_D)
72 | 
73 |     def set_input(self, input):
74 |         """Unpack input data from the dataloader and perform necessary pre-processing steps.
75 | 
76 |         Parameters:
77 |             input (dict): include the data itself and its metadata information.
78 | 
79 |         The option 'direction' can be used to swap images in domain A and domain B.
80 |         """
81 |         AtoB = self.opt.direction == 'AtoB'
82 |         self.real_A = input['A' if AtoB else 'B'].to(self.device)
83 |         self.real_B = input['B' if AtoB else 'A'].to(self.device)
84 |         self.image_paths = input['A_paths' if AtoB else 'B_paths']
85 | 
86 |     def forward(self):
87 |         """Run forward pass; called by both functions <optimize_parameters> and <test>."""
88 |         self.fake_B = self.netG(self.real_A)  # G(A)
89 | 
90 |     def backward_D(self):
91 |         """Calculate GAN loss for the discriminator"""
92 |         # Fake; stop backprop to the generator by detaching fake_B
93 |         fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
94 |         pred_fake = self.netD(fake_AB.detach())
95 |         self.loss_D_fake = self.criterionGAN(pred_fake, False)
96 |         # Real
97 |         real_AB = torch.cat((self.real_A, self.real_B), 1)
98 |         pred_real = self.netD(real_AB)
99 |         self.loss_D_real = self.criterionGAN(pred_real, True)
100 |         # combine loss and calculate gradients
101 |         self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
102 |         self.loss_D.backward()
103 | 
104 |     def backward_G(self):
105 |         """Calculate GAN and L1 loss for the generator"""
106 |         # First, G(A) should fake the discriminator
107 |         fake_AB = torch.cat((self.real_A, self.fake_B), 1)
108 |         pred_fake = self.netD(fake_AB)
109 |         self.loss_G_GAN = self.criterionGAN(pred_fake, True)
110 |         # Second, G(A) = B
111 |         self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
112 |         # combine loss and calculate gradients
113 |         self.loss_G = self.loss_G_GAN + self.loss_G_L1
114 |         self.loss_G.backward()
115 | 
116 |     def optimize_parameters(self):
117 |         self.forward()                             # compute fake images: G(A)
118 |         # update D
119 |         self.set_requires_grad(self.netD, True)    # enable backprop for D
120 |         self.optimizer_D.zero_grad()               # set D's gradients to zero
121 |         self.backward_D()                          # calculate gradients for D
122 |         self.optimizer_D.step()                    # update D's weights
123 |         # update G
124 |         self.set_requires_grad(self.netD, False)   # D requires no gradients when optimizing G
125 |         self.optimizer_G.zero_grad()               # set G's gradients to zero
126 |         self.backward_G()                          # calculate gradients for G
127 |         self.optimizer_G.step()                    # update G's weights
128 | 
--------------------------------------------------------------------------------
/util/tiles.py:
--------------------------------------------------------------------------------
1 | """Slippy Map Tiles.
2 | 
3 | The Slippy Map tile spec works with a directory structure of `z/x/y.png` where
4 | - `z` is the zoom level
5 | - `x` is the left / right index
6 | - `y` is the top / bottom index
7 | 
8 | See: https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
9 | """
10 | 
11 | import csv
12 | import io
13 | import os
14 | 
15 | from PIL import Image
16 | import mercantile
17 | 
18 | 
19 | def pixel_to_location(tile, dx, dy):
20 |     """Converts a pixel in a tile to a coordinate.
21 | 
22 |     Args:
23 |         tile: the mercantile tile to calculate the location in.
24 |         dx: the relative x offset in range [0, 1].
25 |         dy: the relative y offset in range [0, 1].
26 | 
27 |     Returns:
28 |         The coordinate for the pixel in the tile.
29 |     """
30 | 
31 |     assert 0 <= dx <= 1, "x offset is in [0, 1]"
32 |     assert 0 <= dy <= 1, "y offset is in [0, 1]"
33 | 
34 |     west, south, east, north = mercantile.bounds(tile)
35 | 
36 |     def lerp(a, b, c):
37 |         return a + c * (b - a)
38 | 
39 |     lon = lerp(west, east, dx)
40 |     lat = lerp(south, north, dy)
41 | 
42 |     return lon, lat
43 | 
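For instance, the centre of a tile maps to geographic coordinates as follows (the tile indices are placeholders):

```python
import mercantile
from util.tiles import pixel_to_location

tile = mercantile.Tile(x=51885, y=32626, z=16)  # placeholder tile indices
lon, lat = pixel_to_location(tile, 0.5, 0.5)    # centre of the tile
print(lon, lat)
```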
29 | """ 30 | 31 | assert 0 <= dx <= 1, "x offset is in [0, 1]" 32 | assert 0 <= dy <= 1, "y offset is in [0, 1]" 33 | 34 | west, south, east, north = mercantile.bounds(tile) 35 | 36 | def lerp(a, b, c): 37 | return a + c * (b - a) 38 | 39 | lon = lerp(west, east, dx) 40 | lat = lerp(south, north, dy) 41 | 42 | return lon, lat 43 | 44 | 45 | def fetch_image(session, url, timeout=10): 46 | """Fetches the image representation for a tile. 47 | 48 | Args: 49 | session: the HTTP session to fetch the image from. 50 | url: the tile imagery's url to fetch the image from. 51 | timeout: the HTTP timeout in seconds. 52 | 53 | Returns: 54 | The satellite imagery as bytes or None in case of error. 55 | """ 56 | 57 | try: 58 | resp = session.get(url, timeout=timeout) 59 | resp.raise_for_status() 60 | return io.BytesIO(resp.content) 61 | except Exception: 62 | return None 63 | 64 | 65 | def tiles_from_slippy_map(root): 66 | """Loads files from an on-disk slippy map directory structure. 67 | 68 | Args: 69 | root: the base directory with layout `z/x/y.*`. 70 | 71 | Yields: 72 | The mercantile tiles and file paths from the slippy map directory. 73 | """ 74 | 75 | # The Python string functions (.isdigit, .isdecimal, etc.) handle 76 | # unicode codepoints; we only care about digits convertible to int 77 | def isdigit(v): 78 | try: 79 | _ = int(v) # noqa: F841 80 | return True 81 | except ValueError: 82 | return False 83 | 84 | for z in os.listdir(root): 85 | if not isdigit(z): 86 | continue 87 | 88 | for x in os.listdir(os.path.join(root, z)): 89 | if not isdigit(x): 90 | continue 91 | 92 | for name in os.listdir(os.path.join(root, z, x)): 93 | y = os.path.splitext(name)[0] 94 | 95 | if not isdigit(y): 96 | continue 97 | 98 | tile = mercantile.Tile(x=int(x), y=int(y), z=int(z)) 99 | path = os.path.join(root, z, x, name) 100 | yield tile, path 101 | 102 | 103 | def tiles_from_csv(path): 104 | """Read tiles from a line-delimited csv file. 105 | 106 | Args: 107 | file: the path to read the csv file from. 108 | 109 | Yields: 110 | The mercantile tiles from the csv file. 111 | """ 112 | 113 | with open(path) as fp: 114 | reader = csv.reader(fp) 115 | 116 | for row in reader: 117 | if not row: 118 | continue 119 | 120 | yield mercantile.Tile(*map(int, row)) 121 | 122 | 123 | def stitch_image(into, into_box, image, image_box): 124 | """Stitches two images together in-place. 125 | 126 | Args: 127 | into: the image to stitch into and modify in-place. 128 | into_box: left, upper, right, lower image coordinates for where to place `image` in `into`. 129 | image: the image to stitch into `into`. 130 | image_box: left, upper, right, lower image coordinates for where to extract the sub-image from `image`. 131 | 132 | Note: 133 | Both boxes must be of same size. 134 | """ 135 | 136 | into.paste(image.crop(box=image_box), box=into_box) 137 | 138 | 139 | def adjacent_tile(tile, dx, dy, tiles): 140 | """Retrieves an adjacent tile from a tile store. 141 | 142 | Args: 143 | tile: the original tile to get an adjacent tile for. 144 | dx: the offset in tile x direction. 145 | dy: the offset in tile y direction. 146 | tiles: the tile store to get tiles from; must support `__getitem__` with tiles. 147 | 148 | Returns: 149 | The adjacent tile's image or `None` if it does not exist. 
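Example (illustrative; assumes the store was built with `tiles_from_slippy_map` from this module, and 'results/tiles' is a hypothetical slippy-map directory):

    tiles = dict(tiles_from_slippy_map('results/tiles'))
    right_neighbour = adjacent_tile(tile, +1, 0, tiles)  # None at the map edge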
150 | """ 151 | 152 | x, y, z = map(int, [tile.x, tile.y, tile.z]) 153 | other = mercantile.Tile(x=x + dx, y=y + dy, z=z) 154 | 155 | try: 156 | path = tiles[other] 157 | return Image.open(path).convert("RGB") 158 | except KeyError: 159 | return None 160 | 161 | 162 | def buffer_tile_image(tile, tiles, overlap, tile_size, nodata=0): 163 | """Buffers a tile image adding borders on all sides based on adjacent tiles. 164 | 165 | Args: 166 | tile: the tile to buffer. 167 | tiles: available tiles; must be a mapping of tiles to their filesystem paths. 168 | overlap: the tile border to add on every side; in pixel. 169 | tile_size: the tile size. 170 | nodata: the color value to use when no adjacent tile is available. 171 | 172 | Returns: 173 | The composite image containing the original tile plus tile overlap on all sides. 174 | It's size is `tile_size` + 2 * `overlap` pixel for each side. 175 | """ 176 | 177 | tiles = dict(tiles) 178 | x, y, z = map(int, [tile.x, tile.y, tile.z]) 179 | 180 | # Todo: instead of nodata we should probably mirror the center image 181 | composite_size = tile_size + 2 * overlap 182 | composite = Image.new(mode="RGB", size=(composite_size, composite_size), color=nodata) 183 | 184 | path = tiles[tile] 185 | center = Image.open(path).convert("RGB") 186 | composite.paste(center, box=(overlap, overlap)) 187 | 188 | top_left = adjacent_tile(tile, -1, -1, tiles) 189 | top_right = adjacent_tile(tile, +1, -1, tiles) 190 | bottom_left = adjacent_tile(tile, -1, +1, tiles) 191 | bottom_right = adjacent_tile(tile, +1, +1, tiles) 192 | 193 | top = adjacent_tile(tile, 0, -1, tiles) 194 | left = adjacent_tile(tile, -1, 0, tiles) 195 | bottom = adjacent_tile(tile, 0, +1, tiles) 196 | right = adjacent_tile(tile, +1, 0, tiles) 197 | 198 | def maybe_stitch(maybe_tile, composite_box, tile_box): 199 | if maybe_tile: 200 | stitch_image(composite, composite_box, maybe_tile, tile_box) 201 | 202 | maybe_stitch(top_left, (0, 0, overlap, overlap), (tile_size - overlap, tile_size - overlap, tile_size, tile_size)) 203 | maybe_stitch( 204 | top_right, (tile_size + overlap, 0, composite_size, overlap), (0, tile_size - overlap, overlap, tile_size) 205 | ) 206 | maybe_stitch( 207 | bottom_left, 208 | (0, composite_size - overlap, overlap, composite_size), 209 | (tile_size - overlap, 0, tile_size, overlap), 210 | ) 211 | maybe_stitch( 212 | bottom_right, 213 | (composite_size - overlap, composite_size - overlap, composite_size, composite_size), 214 | (0, 0, overlap, overlap), 215 | ) 216 | maybe_stitch(top, (overlap, 0, composite_size - overlap, overlap), (0, tile_size - overlap, tile_size, tile_size)) 217 | maybe_stitch(left, (0, overlap, overlap, composite_size - overlap), (tile_size - overlap, 0, tile_size, tile_size)) 218 | maybe_stitch( 219 | bottom, 220 | (overlap, composite_size - overlap, composite_size - overlap, composite_size), 221 | (0, 0, tile_size, overlap), 222 | ) 223 | maybe_stitch( 224 | right, (composite_size - overlap, overlap, composite_size, composite_size - overlap), (0, 0, overlap, tile_size) 225 | ) 226 | 227 | return composite 228 | -------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from util import util 4 | import torch 5 | import models 6 | import data 7 | 8 | 9 | class BaseOptions(): 10 | """This class defines options used during both training and test time. 
11 |
12 | It also implements several helper functions such as parsing, printing, and saving the options.
13 | It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
14 | """
15 |
16 | def __init__(self):
17 | """Reset the class; indicates the class hasn't been initialized"""
18 | self.initialized = False
19 |
20 | def initialize(self, parser):
21 | """Define the common options that are used in both training and test."""
22 | # basic parameters
23 | parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
24 | parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
25 | parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
26 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
27 | # model parameters
28 | parser.add_argument('--model', type=str, default='pix2pix')
29 | parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
30 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
31 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
32 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
33 | parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
34 | parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
35 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
36 | parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
37 | parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
38 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
39 | parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
40 | # dataset parameters
41 | parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
42 | parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
43 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
44 | parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
45 | parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
46 | parser.add_argument('--load_size', type=int, default=512, help='scale images to this size')
47 | parser.add_argument('--crop_size', type=int, default=512, help='then crop to this size')
48 | parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset.
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 49 | parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') 50 | parser.add_argument('--no_flip', action='store_false', help='if specified, do not flip the images for data augmentation') 51 | parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') 52 | # additional parameters 53 | parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 54 | parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') 55 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') 56 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') 57 | self.initialized = True 58 | return parser 59 | 60 | def gather_options(self): 61 | """Initialize our parser with basic options(only once). 62 | Add additional model-specific and dataset-specific options. 63 | These options are defined in the function 64 | in model and dataset classes. 65 | """ 66 | if not self.initialized: # check if it has been initialized 67 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 68 | parser = self.initialize(parser) 69 | 70 | # get the basic options 71 | opt, _ = parser.parse_known_args() 72 | 73 | # modify model-related parser options 74 | model_name = opt.model 75 | model_option_setter = models.get_option_setter(model_name) 76 | parser = model_option_setter(parser, self.isTrain) 77 | opt, _ = parser.parse_known_args() # parse again with new defaults 78 | 79 | # modify dataset-related parser options 80 | dataset_name = opt.dataset_mode 81 | dataset_option_setter = data.get_option_setter(dataset_name) 82 | parser = dataset_option_setter(parser, self.isTrain) 83 | 84 | # save and return the parser 85 | self.parser = parser 86 | return parser.parse_args() 87 | 88 | def print_options(self, opt): 89 | """Print and save options 90 | 91 | It will print both current options and default values(if different). 
92 | It will save options into a text file / [checkpoints_dir] / opt.txt 93 | """ 94 | message = '' 95 | message += '----------------- Options ---------------\n' 96 | for k, v in sorted(vars(opt).items()): 97 | comment = '' 98 | default = self.parser.get_default(k) 99 | if v != default: 100 | comment = '\t[default: %s]' % str(default) 101 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 102 | message += '----------------- End -------------------' 103 | print(message) 104 | 105 | # save to the disk 106 | expr_dir = os.path.join(opt.checkpoints_dir, opt.name) 107 | util.mkdirs(expr_dir) 108 | file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) 109 | with open(file_name, 'wt') as opt_file: 110 | opt_file.write(message) 111 | opt_file.write('\n') 112 | 113 | def parse(self): 114 | """Parse our options, create checkpoints directory suffix, and set up gpu device.""" 115 | opt = self.gather_options() 116 | opt.isTrain = self.isTrain # train or test 117 | 118 | # process opt.suffix 119 | if opt.suffix: 120 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' 121 | opt.name = opt.name + suffix 122 | 123 | self.print_options(opt) 124 | 125 | # set gpu ids 126 | str_ids = opt.gpu_ids.split(',') 127 | opt.gpu_ids = [] 128 | for str_id in str_ids: 129 | id = int(str_id) 130 | if id >= 0: 131 | opt.gpu_ids.append(id) 132 | if len(opt.gpu_ids) > 0: 133 | torch.cuda.set_device(opt.gpu_ids[0]) 134 | 135 | self.opt = opt 136 | return self.opt 137 | -------------------------------------------------------------------------------- /models/cycle_gan_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from util.image_pool import ImagePool 4 | from .base_model import BaseModel 5 | from . import networks 6 | 7 | 8 | class CycleGANModel(BaseModel): 9 | """ 10 | This class implements the CycleGAN model, for learning image-to-image translation without paired data. 11 | 12 | The model training requires '--dataset_mode unaligned' dataset. 13 | By default, it uses a '--netG resnet_9blocks' ResNet generator, 14 | a '--netD basic' discriminator (PatchGAN introduced by pix2pix), 15 | and a least-square GANs objective ('--gan_mode lsgan'). 16 | 17 | CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf 18 | """ 19 | @staticmethod 20 | def modify_commandline_options(parser, is_train=True): 21 | """Add new dataset-specific options, and rewrite default values for existing options. 22 | 23 | Parameters: 24 | parser -- original option parser 25 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 26 | 27 | Returns: 28 | the modified parser. 29 | 30 | For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. 31 | A (source domain), B (target domain). 32 | Generators: G_A: A -> B; G_B: B -> A. 33 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. 34 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) 35 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper) 36 | Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) 37 | Dropout is not used in the original CycleGAN paper. 
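Putting these together, the full generator objective assembled in <backward_G> below is:
    L_G = L_GAN(G_A) + L_GAN(G_B)
          + lambda_A * ||G_B(G_A(A)) - A||_1
          + lambda_B * ||G_A(G_B(B)) - B||_1
          + lambda_identity * (lambda_B * ||G_A(B) - B||_1 + lambda_A * ||G_B(A) - A||_1)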
38 | """
39 | parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
40 | if is_train:
41 | parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
42 | parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
43 | parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
44 |
45 | return parser
46 |
47 | def __init__(self, opt):
48 | """Initialize the CycleGAN class.
49 |
50 | Parameters:
51 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
52 | """
53 | BaseModel.__init__(self, opt)
54 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
55 | self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
56 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
57 | visual_names_A = ['real_A', 'fake_B', 'rec_A']
58 | visual_names_B = ['real_B', 'fake_A', 'rec_B']
59 | if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
60 | visual_names_A.append('idt_B')
61 | visual_names_B.append('idt_A')
62 |
63 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
64 | # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
65 | if self.isTrain:
66 | self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
67 | else: # during test time, only load Gs
68 | self.model_names = ['G_A', 'G_B']
69 |
70 | # define networks (both Generators and discriminators)
71 | # The naming is different from those used in the paper.
72 | # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
73 | self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
74 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
75 | self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
76 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
77 |
78 | if self.isTrain: # define discriminators
79 | self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
80 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
81 | self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
82 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
83 |
84 | if self.isTrain:
85 | if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
86 | assert(opt.input_nc == opt.output_nc)
87 | self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
88 | self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
89 | # define loss functions
90 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
91 | self.criterionCycle = torch.nn.L1Loss()
92 | self.criterionIdt = torch.nn.L1Loss()
93 | # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
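# Note: each Adam below is shared by two networks -- the generators' parameters are joined with itertools.chain so a single optimizer step updates G_A and G_B together, and the two discriminators are grouped the same way.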
94 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 95 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 96 | self.optimizers.append(self.optimizer_G) 97 | self.optimizers.append(self.optimizer_D) 98 | 99 | def set_input(self, input): 100 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 101 | 102 | Parameters: 103 | input (dict): include the data itself and its metadata information. 104 | 105 | The option 'direction' can be used to swap domain A and domain B. 106 | """ 107 | AtoB = self.opt.direction == 'AtoB' 108 | self.real_A = input['A' if AtoB else 'B'].to(self.device) 109 | self.real_B = input['B' if AtoB else 'A'].to(self.device) 110 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] 111 | 112 | def forward(self): 113 | """Run forward pass; called by both functions and .""" 114 | self.fake_B = self.netG_A(self.real_A) # G_A(A) 115 | self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A)) 116 | self.fake_A = self.netG_B(self.real_B) # G_B(B) 117 | self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B)) 118 | 119 | def backward_D_basic(self, netD, real, fake): 120 | """Calculate GAN loss for the discriminator 121 | 122 | Parameters: 123 | netD (network) -- the discriminator D 124 | real (tensor array) -- real images 125 | fake (tensor array) -- images generated by a generator 126 | 127 | Return the discriminator loss. 128 | We also call loss_D.backward() to calculate the gradients. 129 | """ 130 | # Real 131 | pred_real = netD(real) 132 | loss_D_real = self.criterionGAN(pred_real, True) 133 | # Fake 134 | pred_fake = netD(fake.detach()) 135 | loss_D_fake = self.criterionGAN(pred_fake, False) 136 | # Combined loss and calculate gradients 137 | loss_D = (loss_D_real + loss_D_fake) * 0.5 138 | loss_D.backward() 139 | return loss_D 140 | 141 | def backward_D_A(self): 142 | """Calculate GAN loss for discriminator D_A""" 143 | fake_B = self.fake_B_pool.query(self.fake_B) 144 | self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) 145 | 146 | def backward_D_B(self): 147 | """Calculate GAN loss for discriminator D_B""" 148 | fake_A = self.fake_A_pool.query(self.fake_A) 149 | self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) 150 | 151 | def backward_G(self): 152 | """Calculate the loss for generators G_A and G_B""" 153 | lambda_idt = self.opt.lambda_identity 154 | lambda_A = self.opt.lambda_A 155 | lambda_B = self.opt.lambda_B 156 | # Identity loss 157 | if lambda_idt > 0: 158 | # G_A should be identity if real_B is fed: ||G_A(B) - B|| 159 | self.idt_A = self.netG_A(self.real_B) 160 | self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt 161 | # G_B should be identity if real_A is fed: ||G_B(A) - A|| 162 | self.idt_B = self.netG_B(self.real_A) 163 | self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt 164 | else: 165 | self.loss_idt_A = 0 166 | self.loss_idt_B = 0 167 | 168 | # GAN loss D_A(G_A(A)) 169 | self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) 170 | # GAN loss D_B(G_B(B)) 171 | self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) 172 | # Forward cycle loss || G_B(G_A(A)) - A|| 173 | self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A 174 | # Backward cycle loss || G_A(G_B(B)) - B|| 175 | self.loss_cycle_B = 
self.criterionCycle(self.rec_B, self.real_B) * lambda_B
176 | # combined loss and calculate gradients
177 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
178 | self.loss_G.backward()
179 |
180 | def optimize_parameters(self):
181 | """Calculate losses, gradients, and update network weights; called in every training iteration"""
182 | # forward
183 | self.forward() # compute fake images and reconstruction images.
184 | # G_A and G_B
185 | self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
186 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
187 | self.backward_G() # calculate gradients for G_A and G_B
188 | self.optimizer_G.step() # update G_A and G_B's weights
189 | # D_A and D_B
190 | self.set_requires_grad([self.netD_A, self.netD_B], True)
191 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
192 | self.backward_D_A() # calculate gradients for D_A
193 | self.backward_D_B() # calculate gradients for D_B
194 | self.optimizer_D.step() # update D_A and D_B's weights
195 |
-------------------------------------------------------------------------------- /models/base_model.py: --------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from collections import OrderedDict
4 | from abc import ABC, abstractmethod
5 | from . import networks
6 |
7 |
8 | class BaseModel(ABC):
9 | """This class is an abstract base class (ABC) for models.
10 | To create a subclass, you need to implement the following five functions:
11 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
12 | -- <set_input>: unpack data from dataset and apply preprocessing.
13 | -- <forward>: produce intermediate results.
14 | -- <optimize_parameters>: calculate losses, gradients, and update network weights.
15 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
16 | """
17 |
18 | def __init__(self, opt):
19 | """Initialize the BaseModel class.
20 |
21 | Parameters:
22 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
23 |
24 | When creating your custom class, you need to implement your own initialization.
25 | In this function, you should first call <BaseModel.__init__(self, opt)>
26 | Then, you need to define four lists:
27 | -- self.loss_names (str list): specify the training losses that you want to plot and save.
28 | -- self.model_names (str list): define networks used in our training.
29 | -- self.visual_names (str list): specify the images that you want to display and save.
30 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
31 | """
32 | self.opt = opt
33 | self.gpu_ids = opt.gpu_ids
34 | self.isTrain = opt.isTrain
35 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
36 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
37 | if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
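# Descriptive note: cudnn.benchmark lets cuDNN time several convolution algorithms on the first batches and cache the fastest one per input shape; with fixed-size inputs this is a free speed-up, but every new shape triggers a fresh search, hence the guard above.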
38 | torch.backends.cudnn.benchmark = True
39 | self.loss_names = []
40 | self.model_names = []
41 | self.visual_names = []
42 | self.optimizers = []
43 | self.image_paths = []
44 | self.metric = 0 # used for learning rate policy 'plateau'
45 |
46 | @staticmethod
47 | def modify_commandline_options(parser, is_train):
48 | """Add new model-specific options, and rewrite default values for existing options.
49 |
50 | Parameters:
51 | parser -- original option parser
52 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
53 |
54 | Returns:
55 | the modified parser.
56 | """
57 | return parser
58 |
59 | @abstractmethod
60 | def set_input(self, input):
61 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
62 |
63 | Parameters:
64 | input (dict): includes the data itself and its metadata information.
65 | """
66 | pass
67 |
68 | @abstractmethod
69 | def forward(self):
70 | """Run forward pass; called by both functions <optimize_parameters> and <test>."""
71 | pass
72 |
73 | @abstractmethod
74 | def optimize_parameters(self):
75 | """Calculate losses, gradients, and update network weights; called in every training iteration"""
76 | pass
77 |
78 | def setup(self, opt):
79 | """Load and print networks; create schedulers
80 |
81 | Parameters:
82 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
83 | """
84 | if self.isTrain:
85 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
86 | if not self.isTrain or opt.continue_train:
87 | load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
88 | self.load_networks(load_suffix)
89 | self.print_networks(opt.verbose)
90 |
91 | def eval(self):
92 | """Make models eval mode during test time"""
93 | for name in self.model_names:
94 | if isinstance(name, str):
95 | net = getattr(self, 'net' + name)
96 | net.eval()
97 |
98 | def test(self):
99 | """Forward function used in test time.
100 |
101 | This function wraps the <forward> function in no_grad() so we don't save intermediate steps for backprop
102 | It also calls <compute_visuals> to produce additional visualization results
103 | """
104 | with torch.no_grad():
105 | self.forward()
106 | self.compute_visuals()
107 |
108 | def compute_visuals(self):
109 | """Calculate additional output images for visdom and HTML visualization"""
110 | pass
111 |
112 | def get_image_paths(self):
113 | """ Return image paths that are used to load current data"""
114 | return self.image_paths
115 |
116 | def update_learning_rate(self):
117 | """Update learning rates for all the networks; called at the end of every epoch"""
118 | old_lr = self.optimizers[0].param_groups[0]['lr']
119 | for scheduler in self.schedulers:
120 | if self.opt.lr_policy == 'plateau':
121 | scheduler.step(self.metric)
122 | else:
123 | scheduler.step()
124 |
125 | lr = self.optimizers[0].param_groups[0]['lr']
126 | print('learning rate %.7f -> %.7f' % (old_lr, lr))
127 |
128 | def get_current_visuals(self):
129 | """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file"""
130 | visual_ret = OrderedDict()
131 | for name in self.visual_names:
132 | if isinstance(name, str):
133 | visual_ret[name] = getattr(self, name)
134 | return visual_ret
135 |
136 | def get_current_losses(self):
137 | """Return training losses / errors.
train.py will print out these errors on console, and save them to a file""" 138 | errors_ret = OrderedDict() 139 | for name in self.loss_names: 140 | if isinstance(name, str): 141 | errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number 142 | return errors_ret 143 | 144 | def save_networks(self, epoch): 145 | """Save all the networks to the disk. 146 | 147 | Parameters: 148 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 149 | """ 150 | for name in self.model_names: 151 | if isinstance(name, str): 152 | save_filename = '%s_net_%s.pth' % (epoch, name) 153 | save_path = os.path.join(self.save_dir, save_filename) 154 | net = getattr(self, 'net' + name) 155 | 156 | if len(self.gpu_ids) > 0 and torch.cuda.is_available(): 157 | torch.save(net.module.cpu().state_dict(), save_path) 158 | net.cuda(self.gpu_ids[0]) 159 | else: 160 | torch.save(net.cpu().state_dict(), save_path) 161 | 162 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 163 | """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" 164 | key = keys[i] 165 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 166 | if module.__class__.__name__.startswith('InstanceNorm') and \ 167 | (key == 'running_mean' or key == 'running_var'): 168 | if getattr(module, key) is None: 169 | state_dict.pop('.'.join(keys)) 170 | if module.__class__.__name__.startswith('InstanceNorm') and \ 171 | (key == 'num_batches_tracked'): 172 | state_dict.pop('.'.join(keys)) 173 | else: 174 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 175 | 176 | def load_networks(self, epoch): 177 | """Load all the networks from the disk. 178 | 179 | Parameters: 180 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 181 | """ 182 | for name in self.model_names: 183 | if isinstance(name, str): 184 | load_filename = '%s_net_%s.pth' % (epoch, name) 185 | load_path = os.path.join(self.save_dir, load_filename) 186 | net = getattr(self, 'net' + name) 187 | if isinstance(net, torch.nn.DataParallel): 188 | net = net.module 189 | print('loading the model from %s' % load_path) 190 | # if you are using PyTorch newer than 0.4 (e.g., built from 191 | # GitHub source), you can remove str() on self.device 192 | state_dict = torch.load(load_path, map_location=str(self.device)) 193 | if hasattr(state_dict, '_metadata'): 194 | del state_dict._metadata 195 | 196 | # patch InstanceNorm checkpoints prior to 0.4 197 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop 198 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 199 | net.load_state_dict(state_dict) 200 | 201 | def print_networks(self, verbose): 202 | """Print the total number of parameters in the network and (if verbose) network architecture 203 | 204 | Parameters: 205 | verbose (bool) -- if verbose: print the network architecture 206 | """ 207 | print('---------- Networks initialized -------------') 208 | for name in self.model_names: 209 | if isinstance(name, str): 210 | net = getattr(self, 'net' + name) 211 | num_params = 0 212 | for param in net.parameters(): 213 | num_params += param.numel() 214 | if verbose: 215 | print(net) 216 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 217 | print('-----------------------------------------------') 218 | 219 | def set_requires_grad(self, nets, requires_grad=False): 220 | """Set 
requires_grad=False for all the networks to avoid unnecessary computations
221 | Parameters:
222 | nets (network list) -- a list of networks
223 | requires_grad (bool) -- whether the networks require gradients or not
224 | """
225 | if not isinstance(nets, list):
226 | nets = [nets]
227 | for net in nets:
228 | if net is not None:
229 | for param in net.parameters():
230 | param.requires_grad = requires_grad
231 |
-------------------------------------------------------------------------------- /util/visualizer.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import sys
4 | import ntpath
5 | from pathlib import Path
6 | import time
7 | from . import util, html
8 | from subprocess import Popen, PIPE
9 |
10 | if sys.version_info[0] == 2:
11 | VisdomExceptionBase = Exception
12 | else:
13 | VisdomExceptionBase = ConnectionError
14 |
15 | def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
16 | """Save images to the disk.
17 |
18 | Parameters:
19 | webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
20 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
21 | image_path (str) -- the string is used to create image paths
22 | aspect_ratio (float) -- the aspect ratio of saved images
23 | width (int) -- the images will be resized to width x width
24 |
25 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
26 | """
27 | image_dir = webpage.get_image_dir()
28 | short_path = ntpath.basename(image_path[0])
29 | name = os.path.splitext(short_path)[0]
30 | try:
31 | os.makedirs(image_dir+'/real')
32 | os.makedirs(image_dir+'/fake')
33 | os.makedirs(image_dir+'/realA')
34 | except OSError: # the directories may already exist
35 | pass
36 | webpage.add_header(name)
37 | ims, txts, links = [], [], []
38 |
39 | for label, im_data in visuals.items():
40 | im = util.tensor2im(im_data)
41 | if label == 'real_A':
42 | image_name = 'realA/%s.png' % (name)
43 | if label == 'real_B':
44 | image_name = 'realB/%s.png' % (name)
45 | if label == 'fake_B':
46 | image_name = 'fakeB/%s.png' % (name)
47 |
48 | save_path = os.path.join(image_dir, image_name)
49 | dir_path = os.path.dirname(save_path)
50 | Path(dir_path).mkdir(parents=True, exist_ok=True) # create subdirs
51 |
52 | util.save_image(im, save_path, aspect_ratio=aspect_ratio)
53 | ims.append(image_name)
54 | txts.append(label)
55 | links.append(image_name)
56 | webpage.add_images(ims, txts, links, width=width)
57 |
58 | def save_images_predict(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
59 | """Save images to the disk in slippymap format.
60 |
61 | Parameters:
62 | webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
63 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
64 | image_path (str) -- the string is used to create image paths
65 | aspect_ratio (float) -- the aspect ratio of saved images
66 | width (int) -- the images will be resized to width x width
67 |
68 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
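Note (an assumption read off the implementation): the last 18 characters of the source path are reused as the relative tile path (see the [-18:] slice below), so the input is expected to end with a 'z/x/y.png'-style suffix, e.g. a hypothetical 'datasets/maps/16/51543/32186.png'.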
69 | """
70 | image_dir = webpage.get_image_dir()
71 |
72 | name = image_path[0][-18:] # find slippymap dir from source
73 | webpage.add_header(name)
74 | ims, txts, links = [], [], []
75 |
76 | for label, im_data in visuals.items():
77 | im = util.tensor2im(im_data)
78 | if label == 'real':
79 | image_name = 'input/' + str(name) # forward slashes keep the path portable and valid as an HTML link
80 | if label == 'fake':
81 | image_name = 'fake/' + str(name)
82 | save_path = os.path.join(image_dir, image_name)
83 | dir_path = os.path.dirname(save_path)
84 | Path(dir_path).mkdir(parents=True, exist_ok=True) # create subdirs
85 |
86 | util.save_image(im, save_path, aspect_ratio=aspect_ratio)
87 | ims.append(image_name)
88 | txts.append(label)
89 | links.append(image_name)
90 | webpage.add_images(ims, txts, links, width=width)
91 |
92 | class Visualizer():
93 | """This class includes several functions that can display/save images and print/save logging information.
94 |
95 | It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
96 | """
97 |
98 | def __init__(self, opt):
99 | """Initialize the Visualizer class
100 |
101 | Parameters:
102 | opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
103 | Step 1: Cache the training/test options
104 | Step 2: connect to a visdom server
105 | Step 3: create an HTML object for saving HTML files
106 | Step 4: create a logging file to store training losses
107 | """
108 | self.opt = opt # cache the option
109 | self.display_id = opt.display_id
110 | self.use_html = opt.isTrain and not opt.no_html
111 | self.win_size = opt.display_winsize
112 | self.name = opt.name
113 | self.port = opt.display_port
114 | self.saved = False
115 | if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
116 | import visdom
117 | self.ncols = opt.display_ncols
118 | self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
119 | if not self.vis.check_connection():
120 | self.create_visdom_connections()
121 |
122 | if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
123 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
124 | self.img_dir = os.path.join(self.web_dir, 'images')
125 | print('create web directory %s...' % self.web_dir)
126 | util.mkdirs([self.web_dir, self.img_dir])
127 | # create a logging file to store training losses
128 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
129 | with open(self.log_name, "a") as log_file:
130 | now = time.strftime("%c")
131 | log_file.write('================ Training Loss (%s) ================\n' % now)
132 |
133 | def reset(self):
134 | """Reset the self.saved status"""
135 | self.saved = False
136 |
137 | def create_visdom_connections(self):
138 | """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
139 | cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
140 | print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
141 | print('Command: %s' % cmd)
142 | Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
143 |
144 | def display_current_results(self, visuals, epoch, save_result):
145 | """Display current results on visdom; save current results to an HTML file.
146 |
147 | Parameters:
148 | visuals (OrderedDict) - - dictionary of images to display or save
149 | epoch (int) - - the current epoch
150 | save_result (bool) - - if True, save the current results to an HTML file
151 | """
152 | if self.display_id > 0: # show images in the browser using visdom
153 | ncols = self.ncols
154 | if ncols > 0: # show all the images in one visdom panel
155 | ncols = min(ncols, len(visuals))
156 | h, w = next(iter(visuals.values())).shape[:2]
157 | table_css = """<style>
158 | table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
159 | table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
160 | </style>""" % (w, h) # create a table css
161 | # create a table of images.
162 | title = self.name
163 | label_html = ''
164 | label_html_row = ''
165 | images = []
166 | idx = 0
167 | for label, image in visuals.items():
168 | image_numpy = util.tensor2im(image)
169 | label_html_row += '<td>%s</td>' % label
170 | images.append(image_numpy.transpose([2, 0, 1]))
171 | idx += 1
172 | if idx % ncols == 0:
173 | label_html += '<tr>%s</tr>' % label_html_row
174 | label_html_row = ''
175 | white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
176 | while idx % ncols != 0:
177 | images.append(white_image)
178 | label_html_row += '<td></td>'
179 | idx += 1
180 | if label_html_row != '':
181 | label_html += '<tr>%s</tr>' % label_html_row
182 | try:
183 | self.vis.images(images, nrow=ncols, win=self.display_id + 1,
184 | padding=2, opts=dict(title=title + ' images'))
185 | label_html = '<table>%s</table>
' % label_html
186 | self.vis.text(table_css + label_html, win=self.display_id + 2,
187 | opts=dict(title=title + ' labels'))
188 | except VisdomExceptionBase:
189 | self.create_visdom_connections()
190 |
191 | else: # show each image in a separate visdom panel;
192 | idx = 1
193 | try:
194 | for label, image in visuals.items():
195 | image_numpy = util.tensor2im(image)
196 | self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
197 | win=self.display_id + idx)
198 | idx += 1
199 | except VisdomExceptionBase:
200 | self.create_visdom_connections()
201 |
202 | if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
203 | self.saved = True
204 | # save images to the disk
205 | for label, image in visuals.items():
206 | image_numpy = util.tensor2im(image)
207 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
208 | util.save_image(image_numpy, img_path)
209 |
210 | # update website
211 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
212 | for n in range(epoch, 0, -1):
213 | webpage.add_header('epoch [%d]' % n)
214 | ims, txts, links = [], [], []
215 |
216 | for label, image_numpy in visuals.items():
217 | image_numpy = util.tensor2im(image_numpy) # convert each stored visual of this row (not a stale variable from an earlier loop)
218 | img_path = 'epoch%.3d_%s.png' % (n, label)
219 | ims.append(img_path)
220 | txts.append(label)
221 | links.append(img_path)
222 | webpage.add_images(ims, txts, links, width=self.win_size)
223 | webpage.save()
224 |
225 | def plot_current_losses(self, epoch, counter_ratio, losses):
226 | """Display the current losses on the visdom display: dictionary of error labels and values
227 |
228 | Parameters:
229 | epoch (int) -- current epoch
230 | counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
231 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
232 | """
233 | if not hasattr(self, 'plot_data'):
234 | self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
235 | self.plot_data['X'].append(epoch + counter_ratio)
236 | self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
237 | try:
238 | self.vis.line(
239 | X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
240 | Y=np.array(self.plot_data['Y']),
241 | opts={
242 | 'title': self.name + ' loss over time',
243 | 'legend': self.plot_data['legend'],
244 | 'xlabel': 'epoch',
245 | 'ylabel': 'loss'},
246 | win=self.display_id)
247 | except VisdomExceptionBase:
248 | self.create_visdom_connections()
249 |
250 | # losses: same format as |losses| of plot_current_losses
251 | def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
252 | """print current losses on console; also save the losses to the disk
253 |
254 | Parameters:
255 | epoch (int) -- current epoch
256 | iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
257 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
258 | t_comp (float) -- computational time per data point (normalized by batch_size)
259 | t_data (float) -- data loading time per data point (normalized by batch_size)
260 | """
261 | message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
262 | for k, v in losses.items():
263 | message += '%s: %.3f ' % (k, v)
264 |
265 | print(message) # print the message
266 | with open(self.log_name, "a") as log_file:
267 | log_file.write('%s\n' % message) # save the
message
268 |
-------------------------------------------------------------------------------- /models/networks.py: --------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import init
4 | import functools
5 | from torch.optim import lr_scheduler
6 |
7 |
8 | ###############################################################################
9 | # Helper Functions
10 | ###############################################################################
11 |
12 |
13 | class Identity(nn.Module):
14 | def forward(self, x):
15 | return x
16 |
17 |
18 | def get_norm_layer(norm_type='instance'):
19 | """Return a normalization layer
20 |
21 | Parameters:
22 | norm_type (str) -- the name of the normalization layer: batch | instance | none
23 |
24 | For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
25 | For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
26 | """
27 | if norm_type == 'batch':
28 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
29 | elif norm_type == 'instance':
30 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
31 | elif norm_type == 'none':
32 | def norm_layer(x): return Identity()
33 | else:
34 | raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
35 | return norm_layer
36 |
37 |
38 | def get_scheduler(optimizer, opt):
39 | """Return a learning rate scheduler
40 |
41 | Parameters:
42 | optimizer -- the optimizer of the network
43 | opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
44 | opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
45 |
46 | For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
47 | and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
48 | For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
49 | See https://pytorch.org/docs/stable/optim.html for more details.
50 | """
51 | if opt.lr_policy == 'linear':
52 | def lambda_rule(epoch):
53 | lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
54 | return lr_l
55 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
56 | elif opt.lr_policy == 'step':
57 | scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
58 | elif opt.lr_policy == 'plateau':
59 | scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
60 | elif opt.lr_policy == 'cosine':
61 | scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
62 | else:
63 | raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
64 | return scheduler
65 |
66 |
67 | def init_weights(net, init_type='normal', init_gain=0.02):
68 | """Initialize network weights.
69 |
70 | Parameters:
71 | net (network) -- network to be initialized
72 | init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
73 | init_gain (float) -- scaling factor for normal, xavier and orthogonal.
74 |
75 | We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
76 | work better for some applications. Feel free to try yourself.
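Example (a minimal sketch; any nn.Module with conv or linear layers works):

    net = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
    init_weights(net, init_type='kaiming')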
77 | """
78 | def init_func(m): # define the initialization function
79 | classname = m.__class__.__name__
80 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
81 | if init_type == 'normal':
82 | init.normal_(m.weight.data, 0.0, init_gain)
83 | elif init_type == 'xavier':
84 | init.xavier_normal_(m.weight.data, gain=init_gain)
85 | elif init_type == 'kaiming':
86 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
87 | elif init_type == 'orthogonal':
88 | init.orthogonal_(m.weight.data, gain=init_gain)
89 | else:
90 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
91 | if hasattr(m, 'bias') and m.bias is not None:
92 | init.constant_(m.bias.data, 0.0)
93 | elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
94 | init.normal_(m.weight.data, 1.0, init_gain)
95 | init.constant_(m.bias.data, 0.0)
96 |
97 | print('initialize network with %s' % init_type)
98 | net.apply(init_func) # apply the initialization function
99 |
100 |
101 | def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
102 | """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
103 | Parameters:
104 | net (network) -- the network to be initialized
105 | init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
106 | init_gain (float) -- scaling factor for normal, xavier and orthogonal.
107 | gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
108 |
109 | Return an initialized network.
110 | """
111 | if len(gpu_ids) > 0:
112 | assert(torch.cuda.is_available())
113 | net.to(gpu_ids[0])
114 | net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
115 | init_weights(net, init_type, init_gain=init_gain)
116 | return net
117 |
118 |
119 | def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
120 | """Create a generator
121 |
122 | Parameters:
123 | input_nc (int) -- the number of channels in input images
124 | output_nc (int) -- the number of channels in output images
125 | ngf (int) -- the number of filters in the last conv layer
126 | netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
127 | norm (str) -- the name of normalization layers used in the network: batch | instance | none
128 | use_dropout (bool) -- if use dropout layers.
129 | init_type (str) -- the name of our initialization method.
130 | init_gain (float) -- scaling factor for normal, xavier and orthogonal.
131 | gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
132 |
133 | Returns a generator
134 |
135 | Our current implementation provides two types of generators:
136 | U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
137 | The original U-Net paper: https://arxiv.org/abs/1505.04597
138 |
139 | Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
140 | Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
141 | We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
142 |
143 |
144 | The generator has been initialized by <init_net>. It uses RELU for non-linearity.
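Example (illustrative; mirrors the defaults pix2pix_model.py sets above):

    netG = define_G(3, 3, 64, 'unet_256', norm='batch', use_dropout=True)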
145 | """
146 | net = None
147 | norm_layer = get_norm_layer(norm_type=norm)
148 |
149 | if netG == 'resnet_9blocks':
150 | net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
151 | elif netG == 'resnet_6blocks':
152 | net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
153 | elif netG == 'unet_128':
154 | net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
155 | elif netG == 'unet_256':
156 | net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
157 | else:
158 | raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
159 | return init_net(net, init_type, init_gain, gpu_ids)
160 |
161 |
162 | def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
163 | """Create a discriminator
164 |
165 | Parameters:
166 | input_nc (int) -- the number of channels in input images
167 | ndf (int) -- the number of filters in the first conv layer
168 | netD (str) -- the architecture's name: basic | n_layers | pixel
169 | n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
170 | norm (str) -- the type of normalization layers used in the network.
171 | init_type (str) -- the name of the initialization method.
172 | init_gain (float) -- scaling factor for normal, xavier and orthogonal.
173 | gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
174 |
175 | Returns a discriminator
176 |
177 | Our current implementation provides three types of discriminators:
178 | [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
179 | It can classify whether 70×70 overlapping patches are real or fake.
180 | Such a patch-level discriminator architecture has fewer parameters
181 | than a full-image discriminator and can work on arbitrarily-sized images
182 | in a fully convolutional fashion.
183 |
184 | [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
185 | with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
186 |
187 | [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
188 | It encourages greater color diversity but has no effect on spatial statistics.
189 |
190 | The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
191 | """
192 | net = None
193 | norm_layer = get_norm_layer(norm_type=norm)
194 |
195 | if netD == 'basic': # default PatchGAN classifier
196 | net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
197 | elif netD == 'n_layers': # more options
198 | net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
199 | elif netD == 'pixel': # classify if each pixel is real or fake
200 | net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
201 | else:
202 | raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
203 | return init_net(net, init_type, init_gain, gpu_ids)
204 |
205 |
206 | ##############################################################################
207 | # Classes
208 | ##############################################################################
209 | class GANLoss(nn.Module):
210 | """Define different GAN objectives.
211 |
212 | The GANLoss class abstracts away the need to create the target label tensor
213 | that has the same size as the input.
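Example (a minimal sketch of the call pattern used by the models above):

    criterionGAN = GANLoss('lsgan')
    loss_D_real = criterionGAN(netD(real), True)    # labels broadcast to D's output size
    loss_D_fake = criterionGAN(netD(fake.detach()), False)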
214 | """
215 |
216 | def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
217 | """ Initialize the GANLoss class.
218 |
219 | Parameters:
220 | gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
221 | target_real_label (bool) - - label for a real image
222 | target_fake_label (bool) - - label of a fake image
223 |
224 | Note: Do not use sigmoid as the last layer of Discriminator.
225 | LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
226 | """
227 | super(GANLoss, self).__init__()
228 | self.register_buffer('real_label', torch.tensor(target_real_label))
229 | self.register_buffer('fake_label', torch.tensor(target_fake_label))
230 | self.gan_mode = gan_mode
231 | if gan_mode == 'lsgan':
232 | self.loss = nn.MSELoss()
233 | elif gan_mode == 'vanilla':
234 | self.loss = nn.BCEWithLogitsLoss()
235 | elif gan_mode in ['wgangp']:
236 | self.loss = None
237 | else:
238 | raise NotImplementedError('gan mode %s not implemented' % gan_mode)
239 |
240 | def get_target_tensor(self, prediction, target_is_real):
241 | """Create label tensors with the same size as the input.
242 |
243 | Parameters:
244 | prediction (tensor) - - typically the prediction from a discriminator
245 | target_is_real (bool) - - if the ground truth label is for real images or fake images
246 |
247 | Returns:
248 | A label tensor filled with ground truth label, and with the size of the input
249 | """
250 |
251 | if target_is_real:
252 | target_tensor = self.real_label
253 | else:
254 | target_tensor = self.fake_label
255 | return target_tensor.expand_as(prediction)
256 |
257 | def __call__(self, prediction, target_is_real):
258 | """Calculate loss given Discriminator's output and ground truth labels.
259 |
260 | Parameters:
261 | prediction (tensor) - - typically the prediction output from a discriminator
262 | target_is_real (bool) - - if the ground truth label is for real images or fake images
263 |
264 | Returns:
265 | the calculated loss.
266 | """
267 | if self.gan_mode in ['lsgan', 'vanilla']:
268 | target_tensor = self.get_target_tensor(prediction, target_is_real)
269 | loss = self.loss(prediction, target_tensor)
270 | elif self.gan_mode == 'wgangp':
271 | if target_is_real:
272 | loss = -prediction.mean()
273 | else:
274 | loss = prediction.mean()
275 | return loss
276 |
277 |
278 | def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
279 | """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
280 |
281 | Arguments:
282 | netD (network) -- discriminator network
283 | real_data (tensor array) -- real images
284 | fake_data (tensor array) -- generated images from the generator
285 | device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
286 | type (str) -- if we mix real and fake data or not [real | fake | mixed].
287 | constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
288 | lambda_gp (float) -- weight for this loss
289 |
290 | Returns the gradient penalty loss
291 | """
292 | if lambda_gp > 0.0:
293 | if type == 'real': # either use real images, fake images, or a linear interpolation of two.
294 |             interpolatesv = real_data
295 |         elif type == 'fake':
296 |             interpolatesv = fake_data
297 |         elif type == 'mixed':
298 |             alpha = torch.rand(real_data.shape[0], 1, device=device)
299 |             alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
300 |             interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
301 |         else:
302 |             raise NotImplementedError('{} not implemented'.format(type))
303 |         interpolatesv.requires_grad_(True)
304 |         disc_interpolates = netD(interpolatesv)
305 |         gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
306 |                                         grad_outputs=torch.ones(disc_interpolates.size()).to(device),
307 |                                         create_graph=True, retain_graph=True, only_inputs=True)
308 |         gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
309 |         gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps for numerical stability
310 |         return gradient_penalty, gradients
311 |     else:
312 |         return 0.0, None
313 | 
314 | 
315 | class ResnetGenerator(nn.Module):
316 |     """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
317 | 
318 |     We adapt the Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
319 |     """
320 | 
321 |     def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
322 |         """Construct a Resnet-based generator
323 | 
324 |         Parameters:
325 |             input_nc (int)      -- the number of channels in input images
326 |             output_nc (int)     -- the number of channels in output images
327 |             ngf (int)           -- the number of filters in the last conv layer
328 |             norm_layer          -- normalization layer
329 |             use_dropout (bool)  -- if use dropout layers
330 |             n_blocks (int)      -- the number of ResNet blocks
331 |             padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
332 |         """
333 |         assert(n_blocks >= 0)
334 |         super(ResnetGenerator, self).__init__()
335 |         if type(norm_layer) == functools.partial:
336 |             use_bias = norm_layer.func == nn.InstanceNorm2d
337 |         else:
338 |             use_bias = norm_layer == nn.InstanceNorm2d
339 | 
340 |         model = [nn.ReflectionPad2d(3),
341 |                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
342 |                  norm_layer(ngf),
343 |                  nn.ReLU(True)]
344 | 
345 |         n_downsampling = 2
346 |         for i in range(n_downsampling):  # add downsampling layers
347 |             mult = 2 ** i
348 |             model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
349 |                       norm_layer(ngf * mult * 2),
350 |                       nn.ReLU(True)]
351 | 
352 |         mult = 2 ** n_downsampling
353 |         for i in range(n_blocks):  # add ResNet blocks
354 | 
355 |             model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
356 | 
357 |         for i in range(n_downsampling):  # add upsampling layers
358 |             mult = 2 ** (n_downsampling - i)
359 |             model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
360 |                                          kernel_size=3, stride=2,
361 |                                          padding=1, output_padding=1,
362 |                                          bias=use_bias),
363 |                       norm_layer(int(ngf * mult / 2)),
364 |                       nn.ReLU(True)]
365 |         model += [nn.ReflectionPad2d(3)]
366 |         model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
367 |         model += [nn.Tanh()]
368 | 
369 |         self.model = nn.Sequential(*model)
370 | 
371 |     def forward(self, input):
372 |         """Standard forward"""
373 |         return self.model(input)
374 | 
375 | 
376 | class ResnetBlock(nn.Module):
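    # Each block computes out = x + conv_block(x) (see forward below): it
    # learns a residual on top of an identity path, and the two 3x3 convs in
    # conv_block preserve both spatial size and channel count, so the sum is
    # well defined.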
"""Define a Resnet block""" 378 | 379 | def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): 380 | """Initialize the Resnet block 381 | 382 | A resnet block is a conv block with skip connections 383 | We construct a conv block with build_conv_block function, 384 | and implement skip connections in function. 385 | Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf 386 | """ 387 | super(ResnetBlock, self).__init__() 388 | self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) 389 | 390 | def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): 391 | """Construct a convolutional block. 392 | 393 | Parameters: 394 | dim (int) -- the number of channels in the conv layer. 395 | padding_type (str) -- the name of padding layer: reflect | replicate | zero 396 | norm_layer -- normalization layer 397 | use_dropout (bool) -- if use dropout layers. 398 | use_bias (bool) -- if the conv layer uses bias or not 399 | 400 | Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) 401 | """ 402 | conv_block = [] 403 | p = 0 404 | if padding_type == 'reflect': 405 | conv_block += [nn.ReflectionPad2d(1)] 406 | elif padding_type == 'replicate': 407 | conv_block += [nn.ReplicationPad2d(1)] 408 | elif padding_type == 'zero': 409 | p = 1 410 | else: 411 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 412 | 413 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] 414 | if use_dropout: 415 | conv_block += [nn.Dropout(0.5)] 416 | 417 | p = 0 418 | if padding_type == 'reflect': 419 | conv_block += [nn.ReflectionPad2d(1)] 420 | elif padding_type == 'replicate': 421 | conv_block += [nn.ReplicationPad2d(1)] 422 | elif padding_type == 'zero': 423 | p = 1 424 | else: 425 | raise NotImplementedError('padding [%s] is not implemented' % padding_type) 426 | conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] 427 | 428 | return nn.Sequential(*conv_block) 429 | 430 | def forward(self, x): 431 | """Forward function (with skip connections)""" 432 | out = x + self.conv_block(x) # add skip connections 433 | return out 434 | 435 | 436 | class UnetGenerator(nn.Module): 437 | """Create a Unet-based generator""" 438 | 439 | def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): 440 | """Construct a Unet generator 441 | Parameters: 442 | input_nc (int) -- the number of channels in input images 443 | output_nc (int) -- the number of channels in output images 444 | num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, 445 | image of size 128x128 will become of size 1x1 # at the bottleneck 446 | ngf (int) -- the number of filters in the last conv layer 447 | norm_layer -- normalization layer 448 | 449 | We construct the U-Net from the innermost layer to the outermost layer. 450 | It is a recursive process. 
451 | """ 452 | super(UnetGenerator, self).__init__() 453 | # construct unet structure 454 | unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer 455 | for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters 456 | unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) 457 | # gradually reduce the number of filters from ngf * 8 to ngf 458 | unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) 459 | unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) 460 | unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) 461 | self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer 462 | 463 | def forward(self, input): 464 | """Standard forward""" 465 | return self.model(input) 466 | 467 | 468 | class UnetSkipConnectionBlock(nn.Module): 469 | """Defines the Unet submodule with skip connection. 470 | X -------------------identity---------------------- 471 | |-- downsampling -- |submodule| -- upsampling --| 472 | """ 473 | 474 | def __init__(self, outer_nc, inner_nc, input_nc=None, 475 | submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): 476 | """Construct a Unet submodule with skip connections. 477 | 478 | Parameters: 479 | outer_nc (int) -- the number of filters in the outer conv layer 480 | inner_nc (int) -- the number of filters in the inner conv layer 481 | input_nc (int) -- the number of channels in input images/features 482 | submodule (UnetSkipConnectionBlock) -- previously defined submodules 483 | outermost (bool) -- if this module is the outermost module 484 | innermost (bool) -- if this module is the innermost module 485 | norm_layer -- normalization layer 486 | use_dropout (bool) -- if use dropout layers. 
487 | """ 488 | super(UnetSkipConnectionBlock, self).__init__() 489 | self.outermost = outermost 490 | if type(norm_layer) == functools.partial: 491 | use_bias = norm_layer.func == nn.InstanceNorm2d 492 | else: 493 | use_bias = norm_layer == nn.InstanceNorm2d 494 | if input_nc is None: 495 | input_nc = outer_nc 496 | downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, 497 | stride=2, padding=1, bias=use_bias) 498 | downrelu = nn.LeakyReLU(0.2, True) 499 | downnorm = norm_layer(inner_nc) 500 | uprelu = nn.ReLU(True) 501 | upnorm = norm_layer(outer_nc) 502 | 503 | if outermost: 504 | upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, 505 | kernel_size=4, stride=2, 506 | padding=1) 507 | down = [downconv] 508 | up = [uprelu, upconv, nn.Tanh()] 509 | model = down + [submodule] + up 510 | elif innermost: 511 | upconv = nn.ConvTranspose2d(inner_nc, outer_nc, 512 | kernel_size=4, stride=2, 513 | padding=1, bias=use_bias) 514 | down = [downrelu, downconv] 515 | up = [uprelu, upconv, upnorm] 516 | model = down + up 517 | else: 518 | upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, 519 | kernel_size=4, stride=2, 520 | padding=1, bias=use_bias) 521 | down = [downrelu, downconv, downnorm] 522 | up = [uprelu, upconv, upnorm] 523 | 524 | if use_dropout: 525 | model = down + [submodule] + up + [nn.Dropout(0.5)] 526 | else: 527 | model = down + [submodule] + up 528 | 529 | self.model = nn.Sequential(*model) 530 | 531 | def forward(self, x): 532 | if self.outermost: 533 | return self.model(x) 534 | else: # add skip connections 535 | return torch.cat([x, self.model(x)], 1) 536 | 537 | 538 | class NLayerDiscriminator(nn.Module): 539 | """Defines a PatchGAN discriminator""" 540 | 541 | def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): 542 | """Construct a PatchGAN discriminator 543 | 544 | Parameters: 545 | input_nc (int) -- the number of channels in input images 546 | ndf (int) -- the number of filters in the last conv layer 547 | n_layers (int) -- the number of conv layers in the discriminator 548 | norm_layer -- normalization layer 549 | """ 550 | super(NLayerDiscriminator, self).__init__() 551 | if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters 552 | use_bias = norm_layer.func == nn.InstanceNorm2d 553 | else: 554 | use_bias = norm_layer == nn.InstanceNorm2d 555 | 556 | kw = 4 557 | padw = 1 558 | sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] 559 | nf_mult = 1 560 | nf_mult_prev = 1 561 | for n in range(1, n_layers): # gradually increase the number of filters 562 | nf_mult_prev = nf_mult 563 | nf_mult = min(2 ** n, 8) 564 | sequence += [ 565 | nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), 566 | norm_layer(ndf * nf_mult), 567 | nn.LeakyReLU(0.2, True) 568 | ] 569 | 570 | nf_mult_prev = nf_mult 571 | nf_mult = min(2 ** n_layers, 8) 572 | sequence += [ 573 | nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), 574 | norm_layer(ndf * nf_mult), 575 | nn.LeakyReLU(0.2, True) 576 | ] 577 | 578 | sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map 579 | self.model = nn.Sequential(*sequence) 580 | 581 | def forward(self, input): 582 | """Standard forward.""" 583 | return self.model(input) 584 | 585 | 586 | class PixelDiscriminator(nn.Module): 587 | """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" 588 | 589 
589 |     def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
590 |         """Construct a 1x1 PatchGAN discriminator
591 | 
592 |         Parameters:
593 |             input_nc (int)  -- the number of channels in input images
594 |             ndf (int)       -- the number of filters in the first conv layer
595 |             norm_layer      -- normalization layer
596 |         """
597 |         super(PixelDiscriminator, self).__init__()
598 |         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
599 |             use_bias = norm_layer.func == nn.InstanceNorm2d
600 |         else:
601 |             use_bias = norm_layer == nn.InstanceNorm2d
602 | 
603 |         self.net = [
604 |             nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
605 |             nn.LeakyReLU(0.2, True),
606 |             nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
607 |             norm_layer(ndf * 2),
608 |             nn.LeakyReLU(0.2, True),
609 |             nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
610 | 
611 |         self.net = nn.Sequential(*self.net)
612 | 
613 |     def forward(self, input):
614 |         """Standard forward."""
615 |         return self.net(input)
616 | 
--------------------------------------------------------------------------------
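A minimal usage sketch for the factory functions above (assumptions: this file is importable as models.networks, and the 3-channel 256x256 shapes are illustrative rather than GANmapper's actual training configuration):

import torch
from models.networks import define_G, define_D, GANLoss

netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='unet_256', norm='batch')
netD = define_D(input_nc=3 + 3, ndf=64, netD='basic', norm='batch')  # pix2pix-style: D sees (input, output) pairs
criterion = GANLoss('vanilla')                 # BCEWithLogitsLoss under the hood

x = torch.randn(1, 3, 256, 256)                # a source tile
fake = netG(x)                                 # translated tile, Tanh output in [-1, 1]
pred_fake = netD(torch.cat([x, fake], 1))      # PatchGAN prediction map, one logit per patch
loss_G = criterion(pred_fake, True)            # generator is rewarded for fooling D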