├── TODO.sh ├── src ├── trainers │ ├── utils │ │ ├── face_morph │ │ │ ├── img │ │ │ │ ├── 00000-after.jpeg │ │ │ │ ├── 00000-before.jpeg │ │ │ │ ├── 00003-after.jpeg │ │ │ │ └── 00003-before.jpeg │ │ │ └── face_morph.py │ │ ├── init_utils.py │ │ ├── gan_utils.py │ │ └── report_utils.py │ ├── base_trainer.py │ ├── gan_trainer.py │ ├── cyclegan_trainer.py │ └── pairedcyclegan_trainer.py ├── dataset │ ├── data │ │ ├── utility.py │ │ ├── extract_faces_parallel.sh │ │ ├── split_images.py │ │ └── extract_faces.py │ ├── download_images_parallel.sh │ ├── search │ │ ├── pinterest │ │ │ └── extract_pinterest_urls.py │ │ ├── instagram │ │ │ ├── README.md │ │ │ ├── extract_instagram_urls.py │ │ │ └── test_urls.csv │ │ └── searcher.py │ ├── transforms.py │ ├── README.md │ ├── download_images.py │ └── dataset.py ├── models │ ├── pairedcyclegan.py │ ├── cyclegan.py │ ├── residual.py │ ├── style.py │ ├── maskgan.py │ └── dcgan.py ├── config.yaml └── train.py ├── .gitignore └── README.md /TODO.sh: -------------------------------------------------------------------------------- 1 | # bash TODO.sh 2 | find . -type f -name "*.py" | xargs grep -n --color "TODO" 3 | find . -type f -name "*.py" | xargs grep -n --color "XXX" -------------------------------------------------------------------------------- /src/trainers/utils/face_morph/img/00000-after.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zeligism/PairedCycleGAN/HEAD/src/trainers/utils/face_morph/img/00000-after.jpeg -------------------------------------------------------------------------------- /src/trainers/utils/face_morph/img/00000-before.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zeligism/PairedCycleGAN/HEAD/src/trainers/utils/face_morph/img/00000-before.jpeg -------------------------------------------------------------------------------- /src/trainers/utils/face_morph/img/00003-after.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zeligism/PairedCycleGAN/HEAD/src/trainers/utils/face_morph/img/00003-after.jpeg -------------------------------------------------------------------------------- /src/trainers/utils/face_morph/img/00003-before.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zeligism/PairedCycleGAN/HEAD/src/trainers/utils/face_morph/img/00003-before.jpeg -------------------------------------------------------------------------------- /src/dataset/data/utility.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def files_iter(directory): 4 | 5 | for fname in os.listdir(directory): 6 | 7 | if fname[0] == ".": 8 | continue # skip files starting with `.` 9 | if os.path.isdir(os.path.join(directory, fname)): 10 | continue # skip directories 11 | 12 | yield fname -------------------------------------------------------------------------------- /src/dataset/data/extract_faces_parallel.sh: -------------------------------------------------------------------------------- 1 | SOURCE_DIR=${1:-"instagram/test_images"} 2 | DEST_DIR=${2:-"instagram/test_faces"} 3 | LOG="extract_faces.log" 4 | 5 | mkdir -p "$DEST_DIR" 6 | rm -f "$LOG" 7 | 8 | time (find "$SOURCE_DIR" -maxdepth 1 -type f -exec basename {} \; \ 9 | | parallel --bar "python extract_faces.py --source_dir $SOURCE_DIR --dest_dir $DEST_DIR --image {} 
1>>$LOG 2>&1")
--------------------------------------------------------------------------------
/src/dataset/download_images_parallel.sh:
--------------------------------------------------------------------------------
# I'm kinda embarrassed that the following shell script employing wget and
# parallel blows my download_images.py out of the water.
# Not sure why I even thought that writing that python script would be a good idea.

# Download
DOWNLOAD_DIR="downloaded_images"
echo "Downloading images in $1"
cat "$1" | parallel --gnu "wget {} -P ${DOWNLOAD_DIR}/"
echo "Done."
echo ""

# Rename
index=1
for file in ${DOWNLOAD_DIR}/*; do
    index_name=$(printf "${DOWNLOAD_DIR}/%05d.jpg" $index)
    echo "Renaming $file to $index_name"
    mv "$file" "$index_name"
    ((index++))
done
--------------------------------------------------------------------------------
/src/dataset/search/pinterest/extract_pinterest_urls.py:
--------------------------------------------------------------------------------
import re

def main():
    pre_p1, post_p1 = "3x, ", " 4x"
    pre_p2, post_p2 = '"orig": {"url": "', '", "width'
    p1 = re.compile("(?:"+pre_p1+")" + r"(.*?)" + "(?:"+post_p1+")")
    p2 = re.compile("(?:"+pre_p2+")" + r"(.*?)" + "(?:"+post_p2+")")

    image_urls = set()
    for i in range(1, 6):
        pinterest_html = "html_sources/pinterest{}.html".format(i)
        with open(pinterest_html, "r") as f:
            html = f.read()
        image_urls |= set(p1.findall(html))
        image_urls |= set(p2.findall(html))

    with open("pinterest_urls.csv", "w") as f:
        f.writelines(image_url+"\n" for image_url in image_urls)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/src/dataset/transforms.py:
--------------------------------------------------------------------------------

class MakeupSampleTransform:
    """A wrapper around torch transforms for
    transforming samples from MakeupDataset."""

    def __init__(self, transform):
        """
        Initializes the transform.

        Args:
            transform: A transform (such as `torchvision.transforms.ToTensor()`).
        """
        self.transform = transform

    def __call__(self, sample):
        """
        Transforms the given sample.

        Args:
            sample: A sample from MakeupDataset to be transformed.

        Returns:
            The sample transformed with `self.transform`.
        """

        sample["before"] = self.transform(sample["before"])
        sample["after"] = self.transform(sample["after"])

        return sample

--------------------------------------------------------------------------------
/src/dataset/search/instagram/README.md:
--------------------------------------------------------------------------------
# Crawling posts from an Instagram hashtag feed

Go to the hashtag crawler folder:
```
cd third_party/instagram-hashtag-crawler
```

Assuming the folder "hashtags" contains no feeds (if it does, move them into a separate folder or delete them first), we start crawling and group the raw feeds in a folder:
```
python __init__.py -u <username> -p <password> -f hashtag_files/<hashtag>.txt
mkdir hashtags/<hashtag> && mv hashtags/*.json hashtags/<hashtag>
```
where `<hashtag>` could be either 'makeup' or 'nomakeup'.
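
For example, a full crawl of the makeup feed would look like this (with real credentials in place of the placeholders):
```
python __init__.py -u <username> -p <password> -f hashtag_files/makeup.txt
mkdir hashtags/makeup && mv hashtags/*.json hashtags/makeup
```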

Now we go back and simply extract the urls from these raw feeds (note that I edited the crawler in instagram-hashtag-crawler to simply stop after getting the raw feed, without beautifying). The argument `--hashtag-dir` is "third_party/instagram-hashtag-crawler/hashtags" by default; change it as necessary. The command is:
```
python extract_instagram_urls.py -o "<hashtag>_urls.csv"
```

Now go back two directories and download the images using `download_images_parallel.sh` as follows:
```
./download_images_parallel.sh "search/instagram/test_urls.csv"
```
--------------------------------------------------------------------------------
/src/dataset/README.md:
--------------------------------------------------------------------------------
# Requirements for downloading and filtering utilities

If you're using a Mac, just do this:
```
brew install wget parallel fdupes
```
If you're not using a Mac, well, tough luck buddy, you've gotta figure it out yourself.
You're a grown-ass programmer by now and you should know how to do it.

## Why do I need these boring packages (aka where is my awesome deep learning stuff)?
They're not boring, ok. They're wonderful.
You need `wget` and `parallel` for `download_images_parallel.sh`, which is really much better than `download_images.py`.
Parallel downloads will result in duplicates that you'll have to remove later,
which is why you also need `fdupes`, a very convenient tool for removing "dupes" (i.e. duplicates).

I recommend manual interactive deletion with `fdupes --delete path/to/images`.
Just press 1 for each set, or use `SHIFT+RIGHT` and `SHIFT+LEFT` to tag images for keeping and deletion.
Pressing 1 and enter constantly is easier and faster. After you go through all sets of duplicates, type `prune` and then enter.
Yeah, tedious work, I know, but you'll have to scan that dataset anyway, won't you? (You really should.)
--------------------------------------------------------------------------------
/src/models/pairedcyclegan.py:
--------------------------------------------------------------------------------

import torch.nn as nn

from .dcgan import DCGAN_Discriminator
from .maskgan import MaskGAN


class PairedCycleGAN(nn.Module):
    def __init__(self,
                 num_features=64,
                 image_channels=3,
                 image_size=64,
                 gan_type="gan",
                 custom_remover=None,
                 custom_applier=None,
                 **kwargs):
        super().__init__()

        self.num_features = num_features
        self.image_channels = image_channels
        self.image_size = image_size
        self.gan_type = gan_type

        model_config = {
            "image_channels": image_channels,
            "num_features": num_features,
            "image_size": image_size,
            "gan_type": gan_type,
        }

        self.remover = custom_remover or MaskGAN(**model_config)
        self.applier = custom_applier or MaskGAN(**model_config, with_reference=True)
        self.style_D = StyleDiscriminator(**model_config)


class StyleDiscriminator(DCGAN_Discriminator):
    def __init__(self, *args, **kwargs):

        kwargs["image_channels"] *= 2  # XXX: extract features from img first?
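        # Doubling image_channels lets the style discriminator take a *pair*
        # of images (e.g., a face and a makeup reference) concatenated along
        # the channel dimension (6 channels for RGB) instead of a single image.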
40 | super().__init__(*args, **kwargs) 41 | 42 | -------------------------------------------------------------------------------- /src/models/cyclegan.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | 4 | from .dcgan import DCGAN 5 | from .maskgan import MaskGAN 6 | 7 | 8 | class CycleGAN(nn.Module): 9 | def __init__(self, 10 | num_features=64, 11 | image_channels=3, 12 | image_size=64, 13 | gan_type="gan", 14 | **kwargs): 15 | super().__init__() 16 | 17 | self.num_features = num_features 18 | self.image_channels = image_channels 19 | self.image_size = image_size 20 | self.gan_type = gan_type 21 | 22 | model_config = { 23 | "image_channels": image_channels, 24 | "num_features": num_features, 25 | "image_size": image_size, 26 | "gan_type": gan_type, 27 | } 28 | 29 | self.applier = DCGAN(**model_config) 30 | self.remover = DCGAN(**model_config) 31 | 32 | 33 | class MaskCycleGAN(nn.Module): 34 | def __init__(self, 35 | num_features=64, 36 | image_channels=3, 37 | image_size=64, 38 | gan_type="gan", 39 | with_reference=False, 40 | **kwargs): 41 | super().__init__() 42 | 43 | 44 | self.num_features = num_features 45 | self.image_channels = image_channels 46 | self.image_size = image_size 47 | self.gan_type = gan_type 48 | 49 | model_config = { 50 | "image_channels": image_channels, 51 | "num_features": num_features, 52 | "image_size": image_size, 53 | "gan_type": gan_type, 54 | "with_reference": with_reference, 55 | } 56 | 57 | self.applier = MaskGAN(**model_config) 58 | self.remover = MaskGAN(**model_config) 59 | 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | 107 | # My Stuff 108 | .DS_Store 109 | .ipynb 110 | secret* 111 | test*.py 112 | test*.ipynb 113 | searcher.json 114 | image_urls.csv 115 | *.html 116 | 117 | # Dataset 118 | src/dataset/data/*/ 119 | 120 | # Results 121 | results/ 122 | 123 | 124 | -------------------------------------------------------------------------------- /src/dataset/search/instagram/extract_instagram_urls.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import glob 4 | import json 5 | import argparse 6 | 7 | HASHTAG_DIR="third_party/instagram-hashtag-crawler/hashtags" 8 | IMAGE_URL_CSV="instagram_urls.csv" 9 | LOW_RES=1 10 | HIGH_RES=0 11 | 12 | def get_post_image_urls(post, res=HIGH_RES): 13 | if "image_versions2" in post: 14 | return [post["image_versions2"]["candidates"][res]["url"]] 15 | elif "carousel_media" in post: 16 | return [subpost["image_versions2"]["candidates"][res]["url"] 17 | for subpost in post["carousel_media"]] 18 | else: 19 | return [] 20 | 21 | def main(args): 22 | # Get JSON files in hashtag-crawling results directory 23 | json_files = os.path.join(args.hashtag_dir, "*.json") 24 | hashtag_json_fs = [f for f in glob.glob(json_files) if "rawfeed" in f] 25 | 26 | # Get the posts from each hashtag crawl file 27 | hashtag_posts = [] 28 | for hashtag_json_f in hashtag_json_fs: 29 | with open(hashtag_json_f, "r") as json_f: 30 | posts = json.load(json_f) 31 | hashtag_posts.append(posts) 32 | 33 | # Get the image url of each post 34 | image_urls = [get_post_image_urls(post) for posts in hashtag_posts for post in posts] 35 | image_urls = [url for post_urls in image_urls for url in post_urls] # flatten 36 | 37 | # Write the urls in the output file 38 | with open(args.out, "w") as f: 39 | f.writelines(image_url+"\n" for image_url in image_urls) 40 | 41 | if __name__ == '__main__': 42 | 43 | parser = argparse.ArgumentParser( 44 | description="Extract URLs of images from the JSON crawl files of instagram-hashtag-crawler.") 45 | 46 | parser.add_argument("--hashtag-dir", type=str, default=HASHTAG_DIR, 47 | help="directory containing hashtag crawling results.") 48 | parser.add_argument("-o", "--out", type=str, default=IMAGE_URL_CSV, 49 | help="text file containing image urls.") 50 | 51 | args = parser.parse_args() 52 | 53 | main(args) 54 | 55 | -------------------------------------------------------------------------------- 
/src/trainers/utils/init_utils.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | def create_weights_init(conv_std=0.01, batchnorm_std=0.01): 7 | """ 8 | A function that returns the weights initialization function for a net, 9 | which can be used as `net.apply(create_weights_init())`, for example. 10 | 11 | Args: 12 | conv_std: the standard deviation of the conv/up-conv layers. 13 | batchnorm_std: the standard deviation of the batch-norm layers. 14 | """ 15 | 16 | def weights_init(module): 17 | classname = module.__class__.__name__ 18 | if classname.find('Conv') != -1: 19 | nn.init.normal_(module.weight.data, 0.0, conv_std) 20 | elif classname.find('BatchNorm') != -1: 21 | nn.init.normal_(module.weight.data, 1.0, batchnorm_std) 22 | nn.init.constant_(module.bias.data, 0) 23 | 24 | def weights_init_kaiming(module): 25 | if isinstance(module, nn.Conv2d): 26 | #nn.init.kaiming_normal_(module.weight, nonlinearity="leaky_relu") 27 | nn.init.normal_(module.weight, 0.0, conv_std) 28 | elif isinstance(module, nn.ConvTranspose2d): 29 | nn.init.kaiming_normal_(module.weight, nonlinearity="relu") 30 | elif isinstance(module, nn.BatchNorm2d): 31 | nn.init.constant_(module.weight, 1) 32 | nn.init.constant_(module.bias, 0) 33 | 34 | return weights_init_kaiming 35 | 36 | 37 | def init_optim(params, optim_choice="sgd", lr=1e-4, momentum=0.0, betas=(0.9, 0.999)): 38 | """ 39 | Initializes the optimizer. 40 | 41 | Args: 42 | params: Parameters the optimizer will optimize. 43 | choice: The choice of the optimizer. 44 | optim_configs: Configurations for the optimizer. 45 | 46 | Returns: 47 | The optimizer (torch.optim). 48 | """ 49 | 50 | if optim_choice == "adam": 51 | optim = torch.optim.Adam(params, lr=lr, betas=betas) 52 | elif optim_choice == "adamw": 53 | optim = torch.optim.AdamW(params, lr=lr, betas=betas) 54 | elif optim_choice == "rmsprop": 55 | optim = torch.optim.RMSprop(params, lr=lr) 56 | elif optim_choice == "sgd": 57 | optim = torch.optim.SGD(params, lr=lr, momentum=momentum) 58 | else: 59 | raise ValueError(f"Optimizer '{optim_choice}' not recognized.") 60 | 61 | return optim 62 | 63 | -------------------------------------------------------------------------------- /src/models/residual.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .style import ChannelNoise 6 | 7 | 8 | class ResidualBlock(nn.Module): 9 | 10 | def __init__(self, 11 | in_channels, 12 | out_channels, 13 | dilation=(1,1), 14 | downsample=None, 15 | dropout_p=0.0): 16 | super().__init__() 17 | 18 | self.dilation = dilation 19 | self.downsample = downsample 20 | 21 | self.main = nn.Sequential( 22 | ### Conv 3x3 ### 23 | nn.Conv2d(in_channels, out_channels, 3, 24 | padding=dilation[0], dilation=dilation[0], bias=False), 25 | nn.BatchNorm2d(out_channels), 26 | nn.ReLU(), 27 | ChannelNoise(out_channels), 28 | ### Conv 3x3 ### 29 | nn.Conv2d(out_channels, out_channels, 3, 30 | padding=dilation[1], dilation=dilation[1], bias=False), 31 | nn.BatchNorm2d(out_channels), 32 | ) 33 | 34 | 35 | def forward(self, x): 36 | 37 | residual = x if self.downsample is None else self.downsample(x) 38 | 39 | return F.relu(self.main(x) + residual) 40 | 41 | 42 | class ResidualBottleneck(nn.Module): 43 | 44 | def __init__(self, 45 | in_channels, 46 | out_channels, 47 | downsample=None, 48 | dilation=1, 49 | dropout_p=0.0): 50 | super().__init__() 
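
        # Bottleneck block: a 1x1 conv mixes channels, a (possibly dilated)
        # 3x3 conv follows, and a final 1x1 conv expands to 4 * out_channels
        # (ResNet-style). Any `downsample` module passed in must project the
        # input to that same shape for the residual addition to work.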
        self.downsample = downsample
        self.dilation = dilation

        # Accept either an int or a pair for `dilation`
        # (the default is an int, but the conv below indexes into it)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)

        self.main = nn.Sequential(

            ### Conv 1x1 ###
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),

            ### Conv 3x3 ###
            nn.Conv2d(out_channels, out_channels, 3,
                      padding=dilation[1], dilation=dilation[1], bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(p=dropout_p),

            ### Conv 1x1 ###
            nn.Conv2d(out_channels, out_channels * 4, 1, bias=False),
            nn.BatchNorm2d(out_channels * 4),
        )


    def forward(self, x):

        residual = x if self.downsample is None else self.downsample(x)

        return F.relu(self.main(x) + residual)

--------------------------------------------------------------------------------
/src/trainers/utils/gan_utils.py:
--------------------------------------------------------------------------------

import torch
import torch.nn.functional as F


def get_D_loss(gan_type="gan"):
    if gan_type in ("gan", "gan-gp"):
        return D_loss_GAN
    elif gan_type in ("wgan", "wgan-gp"):
        return D_loss_WGAN
    else:
        raise ValueError(f"gan_type {gan_type} not supported")


def get_G_loss(gan_type="gan"):
    if gan_type in ("gan", "gan-gp"):
        return G_loss_GAN
    elif gan_type in ("wgan", "wgan-gp"):
        return G_loss_WGAN
    else:
        raise ValueError(f"gan_type {gan_type} not supported")


def D_loss_GAN(D_on_real, D_on_fake, label_smoothing=True):

    # Create real and fake labels XXX
    if label_smoothing:
        # Noisy real labels in [0.7, 1.0] (BCE targets must lie in [0, 1])
        real_label = 0.7 + 0.3 * torch.rand_like(D_on_real)
    else:
        # One-sided label smoothing at 0.9
        real_label = torch.ones_like(D_on_real) - 0.1
    fake_label = torch.zeros_like(D_on_fake)

    # Calculate binary cross entropy loss
    D_loss_on_real = F.binary_cross_entropy(D_on_real, real_label)
    D_loss_on_fake = F.binary_cross_entropy(D_on_fake, fake_label)

    # Loss is: -log(D(x)) - log(1 - D(x_g)),
    # which is equiv. to maximizing: log(D(x)) + log(1 - D(x_g))
    D_loss = D_loss_on_real + D_loss_on_fake

    return D_loss.mean()


def D_loss_WGAN(D_on_real, D_on_fake, grad_penalty=0.0):

    # Maximize: D(x) - D(x_g) - const * (|| grad of D(x_i) wrt x_i || - 1)^2,
    # where x_i <- eps * x + (1 - eps) * x_g, and eps ~ rand(0,1)
    D_loss = -1 * (D_on_real - D_on_fake - grad_penalty)

    return D_loss.mean()


def G_loss_GAN(D_on_fake):

    # Use "real" labels (ones) as targets for the generator's fake samples
    real_label = torch.ones_like(D_on_fake)

    # Loss is the non-saturating generator loss: -log(D(G(z))).
    # We use it instead of the original minimax loss log(1 - D(G(z)))
    # because it gives stronger gradients early in training.
    G_loss = F.binary_cross_entropy(D_on_fake, real_label)

    return G_loss.mean()


def G_loss_WGAN(D_on_fake):

    # Minimize: -D(G(z))
    G_loss = -D_on_fake

    return G_loss.mean()


def random_interpolate(real, fake):
    eps = torch.rand(real.size(0), 1, 1, 1).to(real)
    return eps * real + (1 - eps) * fake


def simple_gradient_penalty(D, x, center=0.):
    x.requires_grad_()
    D_on_x = D(x)
    D_grad = torch.autograd.grad(D_on_x, x, torch.ones_like(D_on_x), create_graph=True)
    D_grad_norm = D_grad[0].view(x.size(0), -1).norm(dim=1)
    return (D_grad_norm - center).pow(2).mean()
--------------------------------------------------------------------------------
/src/models/style.py:
--------------------------------------------------------------------------------

import torch
import torch.nn as nn

EPS = 1e-8

# TODO: Style modules.

class LatentMapper(nn.Module):
    """
    Latent mapper module.
    """

    def __init__(self, latent_dim, layer_dim, interlatent_dim, num_layers):
        super().__init__()

        dims = [latent_dim] + num_layers * [layer_dim] + [interlatent_dim]
        # Unpack the layers (nn.Sequential does not accept a generator)
        self.main = nn.Sequential(*(nn.Linear(in_dim, out_dim)
                                    for in_dim, out_dim in zip(dims, dims[1:])))


    def forward(self, z):
        return self.main(z)


class ChannelNoise(nn.Module):
    """
    Channel noise injection module.
    Adds a linearly transformed noise to a convolution layer.
    """

    def __init__(self, num_channels, std=0.02):
        super().__init__()
        self.std = std
        self.scale = nn.Parameter(torch.ones(1, num_channels, 1, 1))


    def forward(self, x):
        noise_size = [x.size()[0], 1, *x.size()[2:]]  # single channel
        noise = self.std * torch.randn(noise_size).to(x)

        return x + self.scale * noise


class AdaIN(nn.Module):
    """
    Adaptive Instance Normalization.
    """

    def __init__(self, num_channels):
        super().__init__()
        self.num_channels = num_channels
        self.linear = nn.Linear(2*num_channels, 2*num_channels)


    def forward(self, x, transformed_latent):

        # Unpack dims of x
        batch_size, num_channels, height, width = x.size()

        # Group height and width dims and get their per-channel mean and std,
        # reshaped so they broadcast against x
        x_flat = x.view(batch_size, num_channels, -1)
        x_mean = x_flat.mean(dim=2).view(batch_size, num_channels, 1, 1)
        x_std = x_flat.std(dim=2).view(batch_size, num_channels, 1, 1)

        # Calculate the normalized x (EPS guards against division by zero)
        normalized_x = (x - x_mean) / (x_std + EPS)

        # Transform intermediate latent representation to style,
        # then unpack a per-channel scale and bias (slice the feature dim,
        # not the batch dim)
        style = self.linear(transformed_latent)
        style_scale = style[:, :num_channels].view(batch_size, num_channels, 1, 1)
        style_bias = style[:, num_channels:].view(batch_size, num_channels, 1, 1)

        return style_scale * normalized_x + style_bias


class PixelNorm(nn.Module):
    """
    Pixel norm.
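    Normalizes each pixel's channel vector to roughly unit magnitude,
    as in ProGAN/StyleGAN: x / sqrt(mean over channels of x^2 + eps).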
    """

    def __init__(self):
        super().__init__()


    def forward(self, x):
        return pixel_norm(x)


def pixel_norm(x):
    # Normalize each pixel's channel vector: x / sqrt(mean_c(x^2) + eps)
    mean_sq = x.pow(2).mean(dim=1, keepdim=True)
    return x / (mean_sq + EPS).sqrt()

--------------------------------------------------------------------------------
/src/models/maskgan.py:
--------------------------------------------------------------------------------

import torch
import torch.nn as nn

from .dcgan import DCGAN_Discriminator
from .residual import ResidualBlock


class MaskGAN(nn.Module):
    def __init__(self,
                 num_features=64,
                 image_channels=3,
                 image_size=64,
                 gan_type="gan",
                 with_reference=False):
        super().__init__()

        D_params = {
            "num_features": num_features,
            "image_channels": image_channels,
            "image_size": image_size,
            "gan_type": gan_type,
        }
        G_params = {
            "num_features": 3*num_features,  # XXX: due to parameter imbalance
            "with_reference": with_reference,
        }

        self.D = DCGAN_Discriminator(**D_params)
        self.G = MaskGenerator(**G_params)


class MaskGenerator(nn.Module):
    """A neural network that generates a mask to apply."""
    def __init__(self, num_features=64, with_reference=False):
        super().__init__()

        self.num_features = num_features
        self.with_reference = with_reference

        def make_features_extractor(num_features):
            return nn.Sequential(
                nn.Conv2d(3, num_features, 7, padding=3, bias=False),
                nn.ReLU(),
            )

        # Extract features from source
        self.source_features_extractor = make_features_extractor(self.num_features)

        # Extract features from reference
        if self.with_reference:
            self.reference_features_extractor = make_features_extractor(self.num_features)

        # Double the number of features in the mask generator if with reference
        if self.with_reference:
            num_features *= 2

        self.mask_generator = nn.Sequential(
            ResidualBlock(num_features, num_features),
            ResidualBlock(num_features, num_features, dilation=(2,2)),
            ResidualBlock(num_features, num_features, dilation=(4,4)),
            ResidualBlock(num_features, num_features, dilation=(8,8)),
            nn.Conv2d(num_features, num_features, 3, padding=2, dilation=2, bias=False),
            nn.ReLU(),
            nn.Conv2d(num_features, 3, 3, padding=1, bias=False),
            nn.Tanh(),
        )


    def forward(self, source, reference=None):

        # A reference must be given if and only if the model expects one
        assert (reference is not None) == self.with_reference

        features = self.source_features_extractor(source)

        if self.with_reference:
            reference_features = self.reference_features_extractor(reference)
            features = torch.cat([features, reference_features], dim=1)

        mask = self.mask_generator(features)

        return (source + mask).clamp(-1, 1)  # XXX: range could go outside [-1, 1] !!!
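
# A minimal smoke-test sketch (hypothetical shapes, not part of the training code):
#
#   gan = MaskGAN(num_features=64, image_size=64, with_reference=True)
#   source = torch.rand(4, 3, 64, 64) * 2 - 1      # fake "before" batch in [-1, 1]
#   reference = torch.rand(4, 3, 64, 64) * 2 - 1   # fake makeup reference batch
#   fake_after = gan.G(source, reference)          # -> (4, 3, 64, 64), in [-1, 1]
#   D_on_fake = gan.D(fake_after)                  # realness scores for the batch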

--------------------------------------------------------------------------------
/src/dataset/data/split_images.py:
--------------------------------------------------------------------------------

import os
import argparse
from PIL import Image

from utility import files_iter


# Get absolute path of this file and force relative-to-file paths
FILE_DIR = os.path.dirname(os.path.realpath(__file__))

### NOTE: we assume that all visible files in source dir are images ###
SOURCE_DIR = os.path.join(FILE_DIR, "processing", "cleaned")
DEST_DIR = os.path.join(FILE_DIR, "processing", "splits")


def split_image(file_name, source_dir, dest_dir):
    """
    Splits the image `file_name` into left and right halves and saves the splits.

    Args:
        file_name: The name of the file (image) to be split.
        source_dir: Directory of source images.
        dest_dir: Directory where split images will be saved.
    """

    with Image.open(os.path.join(source_dir, file_name)) as img:

        # Remove extension from file name and rename split images
        img_name = file_name.split(".")[0]
        ext = img.format.lower()
        img_path_left = os.path.join(dest_dir, "{}-before.{}".format(img_name, ext))
        img_path_right = os.path.join(dest_dir, "{}-after.{}".format(img_name, ext))

        if os.path.exists(img_path_left) or os.path.exists(img_path_right):
            return  # the image was already split

        # Create left and right crops for splitting the image
        (left, upper, right, lower) = img.getbbox()
        mid_x = left + (right - left) // 2
        left_box = (left, upper, mid_x, lower)
        right_box = (mid_x, upper, right, lower)

        # Save left and right crops of image
        img.crop(left_box).save(img_path_left, format=img.format)
        img.crop(right_box).save(img_path_right, format=img.format)


def split_images(source_dir, dest_dir):
    """
    Try to split the images in source_dir and save them to dest_dir.

    Args:
        source_dir: Directory of source images.
        dest_dir: Directory where processed images will be saved.
    """

    # Create destination directory if it doesn't exist
    if not os.path.isdir(dest_dir): os.mkdir(dest_dir)

    for file_name in files_iter(source_dir):
        try:
            # We assume that file_name has no dots except the one before its extension
            print("Splitting image {}... ".format(file_name.split(".")[0]), end="")
            split_image(file_name, source_dir, dest_dir)
            print("Done.")

        except Exception:
            print("Failed.")


def main(args):
    split_images(args.source_dir, args.dest_dir)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description="split makeup images into before and after.")

    parser.add_argument('--source_dir', type=str, default=SOURCE_DIR,
                        help="source directory of images to be split in half.")
    parser.add_argument('--dest_dir', type=str, default=DEST_DIR,
                        help="destination directory where split images will be saved.")

    args = parser.parse_args()

    main(args)

--------------------------------------------------------------------------------
/src/trainers/utils/report_utils.py:
--------------------------------------------------------------------------------

import torch
import torchvision.utils as vutils
import matplotlib.pyplot as plt
import matplotlib.animation as animation


def plot_lines(losses_dict, filename=None, title=""):
    """
    Plots the losses of the discriminator and the generator.

    Args:
        losses_dict: A dict mapping a label to a list of loss values.
        filename: The plot's filename. If None, plot won't be saved.
        title: The plot's title.
    """

    plt.figure(figsize=(10,5))
    plt.title(title)
    for label, losses in losses_dict.items():
        plt.plot(losses, label=label)
    plt.xlabel("t")
    plt.legend()

    if filename is not None:
        plt.savefig(filename)

    plt.show()
    plt.close()


def create_progress_animation(frames, filename):
    """
    Creates a video of the progress of the generator on a fixed latent vector.

    Args:
        frames: A list of image tensors, one per recorded step.
        filename: The animation's filename.
    """

    fig = plt.figure(figsize=(8,8))
    plt.axis("off")
    ims = [[plt.imshow(img.permute(1,2,0), animated=True)]
           for img in frames]
    ani = animation.ArtistAnimation(fig, ims, blit=True)

    ani.save(filename)

    plt.close()


def generate_grid(generator, latent):
    """
    Check generator's output on latent vectors and return it.

    Args:
        generator: The generator.
        latent: Latent vector from which an image grid will be generated.

    Returns:
        A grid of images generated by `generator` from `latent`.
    """

    with torch.no_grad():
        fake = generator(latent).detach()

    image_grid = vutils.make_grid(fake.cpu(), padding=2, normalize=True, range=(-1,1))

    return image_grid


def generate_G_grid(generator, before):
    """
    Generate a grid of pairs of images, where each pair shows a before-after
    transition when applying G on before.
    """

    if len(before.size()) == 3:
        before = before.unsqueeze(0)  # add a batch dimension

    batch_size = before.size()[0]
    img_dim = before.size()[1:]

    with torch.no_grad():
        after = generator(before)

    row = torch.zeros([2 * batch_size, *img_dim])
    row[0::2] = before.detach()
    row[1::2] = after.detach()

    image_grid = vutils.make_grid(row.cpu(), nrow=8, padding=2, normalize=True, range=(-1,1))

    return image_grid


def generate_makeup_grid(applier_ref, remover, before, after_ref):
    """
    Generate a grid, 8 images per row, as follows:
        Image #1: real photo of a face WITHOUT makeup (call it face #1).
        Image #2: real (makeup reference) photo of a face WITH makeup (call it face #2).
        Image #3: fake photo of face #1 WITH makeup style from face #2 (applied).
        Image #4: fake photo of face #2 WITHOUT makeup (removed).
        Image #5: Repeat the same pattern from Image #1...

    In case only 4 images are needed per row, change `nrow` below to 4.
    """

    if len(before.size()) == 3:
        before = before.unsqueeze(0)  # add a batch dimension

    batch_size = before.size()[0]
    img_dim = before.size()[1:]

    with torch.no_grad():
        fake_after = applier_ref(before, after_ref)
        fake_before_ref = remover(after_ref)

    row = torch.zeros([4 * batch_size, *img_dim])
    row[0::4] = before.detach()
    row[1::4] = after_ref.detach()
    row[2::4] = fake_after.detach()
    row[3::4] = fake_before_ref.detach()

    image_grid = vutils.make_grid(row.cpu(), nrow=8, padding=2, normalize=True, range=(-1,1))

    return image_grid

--------------------------------------------------------------------------------
/src/config.yaml:
--------------------------------------------------------------------------------
####################################
makeup:
  dataset:
    dataset_dir: "dataset/data/instagram"

  model:
    num_features: 7
    image_channels: 3
    image_size: 128
    gan_type: gan

  trainer:
    num_gpu: 1
    num_workers: 32
    results_dir: "results/"

    batch_size: 128

    D_optim_config:
      optim_choice: adam
      lr: 2.0e-4
      momentum: 0.9
      betas: [0.5, 0.9]

    G_optim_config:
      optim_choice: adam
      lr: 1.0e-4
      momentum: 0.9
      betas: [0.5, 0.9]

    constants:
      applier_adversarial: 2.
      remover_adversarial: 2.
      style_adversarial: 1.
      before_identity_robustness: 2.
      after_identity_robustness: 0.
      style_identity_robustness: 0.
      applier_mask_sparsity: 2.
      remover_mask_sparsity: 0.
      applier_D_grad_penalty: 10.
      remover_D_grad_penalty: 10.
      style_D_grad_penalty: 10.

    D_iters: 3

    report_interval: 10
    generate_grid_interval: 6

####################################
makeup-test:
  dataset:
    dataset_dir: "dataset/data/instagram"

  model:
    num_features: 5
    image_channels: 3
    image_size: 32
    gan_type: wgan-gp

  trainer:
    num_gpu: 0
    num_workers: 4
    results_dir: "results/"
    batch_size: 4
    D_iters: 1
    report_interval: 10
    generate_grid_interval: 10

    constants:
      applier_adversarial: 1
      remover_adversarial: 1
      style_adversarial: 1
      before_identity_robustness: 1
      after_identity_robustness: 1
      style_identity_robustness: 1
      applier_mask_sparsity: 1
      remover_mask_sparsity: 1
      applier_D_grad_penalty: 1
      remover_D_grad_penalty: 1
      style_D_grad_penalty: 1

####################################
dcgan:
  dataset:
    dataset_dir: "dataset/data/processing/faces"

  model:
    num_latents: 128
    num_features: 64
    image_channels: 3
    image_size: 64
    gan_type: gan

  trainer:
    name: "trainer"
    results_dir: "results/"
    load_model_path:

    num_gpu: 1
    num_workers: 2
    batch_size: 4

    D_optim_config:
      optim_choice: sgd
      lr: 1.0e-4
      momentum: 0.
      betas: [0.9, 0.999]

    G_optim_config:
      optim_choice: sgd
      lr: 1.0e-4
      momentum: 0.
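      # Note: `betas` is read only by adam/adamw and `momentum` only by sgd
      # (see init_utils.init_optim), so the unused entries here are ignored.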
      betas: [0.9, 0.999]

    D_iters: 5
    clamp: [-0.01, 0.01]
    gp_coeff: 10.

    report_interval: 50
    generate_grid_interval: 200

####################################
wgan:
  dataset:
    dataset_dir: "dataset/data/processing/faces"

  model:
    num_latents: 128
    num_features: 64
    image_channels: 3
    image_size: 64
    gan_type: wgan

  trainer:
    name: "trainer"
    results_dir: "results/"
    load_model_path:

    num_gpu: 1
    num_workers: 2
    batch_size: 4

    D_optim_config:
      optim_choice: rmsprop
      lr: 5.0e-5
      momentum: 0.
      betas: [0.5, 0.9]

    G_optim_config:
      optim_choice: rmsprop
      lr: 5.0e-5
      momentum: 0.
      betas: [0.5, 0.9]

    D_iters: 5
    clamp: [-0.01, 0.01]
    gp_coeff: 10.

    report_interval: 50
    generate_grid_interval: 200

####################################
wgan-gp:
  dataset:
    dataset_dir: "dataset/data/processing/faces"

  model:
    num_latents: 128
    num_features: 64
    image_channels: 3
    image_size: 64
    gan_type: wgan-gp

  trainer:
    name: "trainer"
    results_dir: "results/"
    load_model_path:

    num_gpu: 1
    num_workers: 2
    batch_size: 4

    D_optim_config:
      optim_choice: adam
      lr: 1.0e-4
      momentum: 0.
      betas: [0.0, 0.9]

    G_optim_config:
      optim_choice: adam
      lr: 1.0e-4
      momentum: 0.
      betas: [0.0, 0.9]

    D_iters: 5
    clamp: [-0.01, 0.01]
    gp_coeff: 10.

    report_interval: 50
    generate_grid_interval: 200

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# What is this?
This repository contains an implementation of [PairedCycleGAN](https://gfx.cs.princeton.edu/pubs/Chang_2018_PAS/Chang-CVPR-2018.pdf) plus its components (GAN, CycleGAN, residual blocks, etc.). The GANs can also be trained with the Wasserstein loss and gradient penalty. The codebase is designed so that it can easily be adapted to future deep learning projects. The code starts from the `train.py` script. First, we have to define three things:
* Dataset
* Model
* Model trainer (+ optimizer)

These three things should be defined separately, but we have to make sure that their points of interaction, if any, are compatible. For example, the dataset has to generate images with sizes compatible with the model. The trainer is, of course, deeply intertwined with the model itself, but the philosophy of this design is that direct access to the inner parts of the model should be minimized. The trainer is designed to separate data pre-processing, logging, reporting, loading, and saving from the training algorithm as much as possible. Therefore, we should try to design our methods in a way that emphasizes algorithmic clarity over efficiency.

For this project, I created two datasets of before-and-after makeup images. One dataset is paired (pairs of before-and-after images of the same person), on the order of 1,000 pairs; the other is unpaired, on the order of 5,000 images (about 10,000 in total).
Of course, curating these datasets cost me some painstakingly long stretches of boring time, in addition to short bursts of depression from having to watch all of the hot girls that I will never get to hang out with. After that, I created a few models with their corresponding trainers, ending up with a trained PairedCycleGAN model. Algorithmically, it follows the paper, but the engineering is different. I didn't bother extracting face parts and just went ahead with the whole face, but I did do some face-morphing stuff. Anyway, my main motivation for creating a project like this was to understand the whole pipeline of creating a deep learning project from scratch (minus the coming-up-with-the-idea-in-the-first-place part).

The code in this project is missing a few important things. One of them is logging. Another is good docs everywhere. One more is good design choices and software engineering stuff. This is all doable, but I'm not feeling motivated enough to fix any of it. The prettiest scenario that could happen is for someone to code this stuff for me.

## Creating the dataset

I will explain here the dataset creation pipeline, which is pretty boring.

- Search using `dataset/search/searcher.py` to generate `image_urls.csv`.
- Optional: extract from Pinterest html sources with `pinterest/extract_pinterest_urls.py`, then run `cat pinterest/pinterest_urls.csv >> image_urls.csv`.
- Download using `dataset/download_images.py`.
- Clean the dataset manually (a little tedious but unavoidable).
- Split images into **before** and **after** makeup (just split vertically, fix the rest manually). Use `dataset/data/split_images.py` to split vertically.
- Extract faces with `dataset/data/extract_faces.py`.
- Now you can use your dataset by importing `MakeupDataset` from `dataset/dataset.py` and calling `MakeupDataset(dataset_dir)`, where `dataset_dir` is the path to the directory containing the processed images.

## Training

Just choose the model you want (in our case, it is the PairedCycleGAN), and then train it using its corresponding trainer. I created these trainers with rapid experimentation and debugging in mind.

I will add more details here later...

## Requirements
First, you need conda. Then do this:
```
conda create -n automakeup -c conda-forge -c pytorch python=3.7 pip pyyaml pillow=6.1 ffmpeg matplotlib opencv pytorch torchvision tensorboard
conda activate automakeup
python -m pip install cmake
python -m pip install face_recognition
```
The last step takes some time because it installs dlib.
When running pip, make sure you're running the one installed in the `automakeup` env.
To ensure that, I activate the `automakeup` env and use `python -m pip install` instead of simply `pip install`.
Also, `cmake` needs to be installed in a separate step before `face_recognition` for some reason. I'm thinking about using another lightweight face recognition library at the moment (this one is lightweight and simple in terms of API, but installing dlib is annoying). If you still face an error from dlib, check whether you have gcc, g++, and make installed. If you are using a fresh Ubuntu container, for example, do this:
```
apt-get install gcc g++ make
```
And you'll be good to go.
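
To sanity-check the environment before training (assuming the conda setup above), you can try:
```
conda activate automakeup
python -c "import torch, torchvision, cv2, face_recognition; print(torch.__version__)"
```
If that prints a version without complaining, the heavy dependencies (PyTorch, OpenCV, dlib) are all in place.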
47 | 48 | ## Running the program 49 | 50 | To run the training experiment, do this: 51 | ``` 52 | conda activate automakeup 53 | cd src 54 | python train.py 55 | ``` 56 | -------------------------------------------------------------------------------- /src/dataset/download_images.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import requests 4 | import os 5 | import argparse 6 | 7 | # The file where image_urls were exported to 8 | DOWNLOAD_DIR = os.path.join("data", "downloaded") 9 | IMAGE_URLS = os.path.join("search", "image_urls.csv") 10 | 11 | # Get absolute path of this file and force relative-to-file paths 12 | FILE_DIR = os.path.dirname(os.path.realpath(__file__)) 13 | DOWNLOAD_DIR = os.path.join(FILE_DIR, DOWNLOAD_DIR) 14 | IMAGE_URLS = os.path.join(FILE_DIR, IMAGE_URLS) 15 | 16 | # Variables to deal with errors occuring during download 17 | TRY_AGAIN = False # retry previously failed requests (for a second run of download.py) 18 | ERROR_TAG = b"(error)" # The error tag is always prepended to an error file 19 | IS_ERROR_FILE = lambda f: f.read()[:len(ERROR_TAG)] == ERROR_TAG 20 | IMAGE_NAME_FORMAT = lambda index: "{:05d}".format(index) # The format of image names 21 | 22 | 23 | def download_image(image_url, image_path="untitled"): 24 | """ 25 | Download an image from `image_url` and save it to `image_path`. 26 | 27 | Args: 28 | image_url: The url of the image to be downloaded. 29 | image_path: The path where the image will be saved. 30 | 31 | Returns: 32 | "Success" or the exception in case of an error. 33 | """ 34 | 35 | try: 36 | # Download image in chunks 37 | with requests.get(image_url, stream=True, timeout=30) as image_response: 38 | image_response.raise_for_status() 39 | with open(image_path, 'wb') as f: 40 | chunk_size = 1 << 10 41 | for chunk in image_response.iter_content(chunk_size): 42 | f.write(chunk) 43 | return "Success" 44 | except Exception as e: 45 | # Image will be text describing the error message 46 | with open(image_path, 'w') as f: 47 | f.write("(error) {}".format(e)) 48 | return e 49 | 50 | 51 | def download_images(image_urls, download_dir): 52 | """ 53 | Download the images from `image_urls` and save them in `download_dir`. 54 | 55 | Args: 56 | image_urls: The urls of the images to be downloaded. 57 | download_dir: The directory where the images will be saved. 58 | """ 59 | 60 | # Download images 61 | for index, image_url in enumerate(image_urls): 62 | 63 | # Create image name and path 64 | image_name = IMAGE_NAME_FORMAT(index) 65 | image_path = os.path.join(download_dir, image_name) 66 | 67 | # If a file called 'image_name' already exists, open it and find whether 68 | # it has an '(error)' tag in it. If it doesn't, then we already downloaded 69 | # it successfully, so we skip it. If it does, then we skip it only if we 70 | # don't want to try downloading it again. 71 | if os.path.exists(image_path): 72 | with open(image_path, "rb") as image: 73 | if not IS_ERROR_FILE(image): 74 | continue # skip because we already downloaded this image 75 | if not TRY_AGAIN: 76 | continue # skip because we don't want to try again 77 | 78 | # Download image and check download success 79 | print("[{:05d}] Downloading {} ... ".format(index, image_url), end="") 80 | status = download_image(image_url, image_path) 81 | print(status) 82 | 83 | 84 | def delete_error_files(download_dir): 85 | """ 86 | Delete error files, i.e. images that failed to download. 
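    Scans image indices sequentially from 0 and stops after 10 consecutive
    missing files (see the XXX-marked heuristic in the loop below).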
87 | 88 | Args: 89 | download_dir: The directory where the images are saved. 90 | """ 91 | 92 | num_errors_files = 0 93 | index = 0 94 | notexist_tally = 0 95 | 96 | while notexist_tally < 10: # XXX: bad heuristic check 97 | # Create image name and path 98 | image_name = IMAGE_NAME_FORMAT(index) 99 | image_path = os.path.join(download_dir, image_name) 100 | 101 | # Delete error file, if any 102 | if os.path.exists(image_path): 103 | notexist_tally = 0 104 | with open(image_path, "rb") as image: 105 | if IS_ERROR_FILE(image): 106 | print("Removing %s" % image_name) 107 | os.remove(image_path) 108 | num_errors_files += 1 109 | else: 110 | notexist_tally += 1 111 | 112 | index += 1 113 | 114 | print("Deleted %d error files." % num_errors_files) 115 | return num_errors_files 116 | 117 | 118 | def main(args): 119 | 120 | start_time = time.time() 121 | 122 | # Create dataset directory if it doesn't exist 123 | if not os.path.isdir(args.download_dir): 124 | os.mkdir(args.download_dir) 125 | 126 | # Download images 127 | with open(args.image_urls, "r") as f: 128 | image_urls = (line.rstrip() for line in f) 129 | download_images(image_urls, args.download_dir) 130 | 131 | delete_error_files(args.download_dir) 132 | 133 | print("Time elapsed = {:.3f}".format(time.time() - start_time)) 134 | 135 | 136 | if __name__ == '__main__': 137 | 138 | parser = argparse.ArgumentParser(description="Download images from a file of image urls.") 139 | 140 | parser.add_argument("-o", "--download_dir", type=str, default=DOWNLOAD_DIR, 141 | help="the directory where the images will be downloaded.") 142 | parser.add_argument("-i", "--image_urls", type=str, default=IMAGE_URLS, 143 | help="the output file where the urls of the images are saved.") 144 | 145 | args = parser.parse_args() 146 | 147 | main(args) 148 | 149 | -------------------------------------------------------------------------------- /src/dataset/dataset.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import torch 4 | import torch.utils.data as data_utils 5 | 6 | import random 7 | import glob 8 | from PIL import Image 9 | 10 | try: 11 | from face_recognition import face_landmarks 12 | except ImportError: 13 | print("Could not import face_recognition module. Can't use MakeupDataset.") 14 | def face_landmarks(_): 15 | raise NotImplementedError("face_recognition module is not available.") 16 | 17 | 18 | def dict_to_list(d): 19 | return [x for l in d.values() for x in l] 20 | 21 | 22 | def files_iter(directory): 23 | return filter(os.path.isfile, glob.iglob(os.path.join(directory, "*"))) 24 | 25 | 26 | class MakeupDataset(data_utils.Dataset): 27 | """A dataset of before-and-after makeup images.""" 28 | 29 | def __init__(self, dataset_dir, 30 | transform=None, 31 | with_landmarks=False, 32 | paired=False, 33 | reverse=False): 34 | """ 35 | Initializes MakeupDataset. 36 | 37 | Args: 38 | dataset_dir: The directory of the dataset. 39 | transform: The transform used on the data. 40 | with_landmarks: A flag indicating whether landmarks should be used or not. 41 | paired: Indicates whether images should be paired when sampled or not. 42 | reverse: Reverses sample if True (before = with makeup, after = no makeup). 
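
        Raises:
            FileNotFoundError: If `dataset_dir` does not exist.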
43 | """ 44 | 45 | if not os.path.isdir(dataset_dir): 46 | raise FileNotFoundError(f"Dataset directory '{dataset_dir}' does not exist.") 47 | 48 | self.dataset_dir = dataset_dir 49 | self.with_landmarks = with_landmarks 50 | self.transform = transform 51 | self.paired = paired 52 | self.reverse = reverse 53 | 54 | self.images_before, self.images_after = self.get_images() 55 | self.landmarks_cache = {} 56 | self.landmarks_size = [72, 2] 57 | 58 | 59 | def get_images(self): 60 | """ 61 | Return a list of pairs of (before, after) makeup images name in `dataset_dir`. 62 | 63 | Returns: 64 | A list of tuples of the names of before and after makeup images in `dataset_dir`. 65 | """ 66 | 67 | all_images = list(files_iter(self.dataset_dir)) 68 | before_images = list(filter(lambda s: s.find("before") != -1, all_images)) 69 | after_images = list(filter(lambda s: s.find("after") != -1, all_images)) 70 | 71 | return sorted(before_images), sorted(after_images) 72 | 73 | 74 | def __len__(self): 75 | """Returns the length of the dataset.""" 76 | return min(len(self.images_before), len(self.images_after)) 77 | 78 | 79 | def __getitem__(self, index): 80 | """ 81 | Get the next data point from the dataset. 82 | 83 | Args: 84 | index: the index of the data point. 85 | 86 | Returns: 87 | The data point transformed and ready for consumption. 88 | """ 89 | 90 | # Shuffle the other list/dataset every time we reiterate from the beginning 91 | if not self.paired and index == 0: 92 | random.shuffle(self.images_after) 93 | 94 | # Sample before and after images 95 | image_before = self.images_before[index] 96 | image_after = self.images_after[index] 97 | 98 | # Get path of before and after images 99 | path_before = image_before #os.path.join(self.dataset_dir, image_before) 100 | path_after = image_after #os.path.join(self.dataset_dir, image_after) 101 | 102 | # Create sample 103 | sample = { 104 | "before": Image.open(path_before).convert("RGB"), 105 | "after": Image.open(path_after).convert("RGB"), 106 | } 107 | 108 | # Apply transformations on images 109 | if self.transform is not None: 110 | sample = self.transform(sample) 111 | 112 | # Find landmarks, use cache if already done 113 | if self.with_landmarks: 114 | sample["landmarks"] = { 115 | "before": self.get_landmarks(image_before, sample["before"]), 116 | "after": self.get_landmarks(image_after, sample["after"]), 117 | } 118 | 119 | # Reverse direction of sample 120 | if self.reverse: 121 | sample = self.reverse_sample(sample) 122 | 123 | return sample 124 | 125 | 126 | def get_landmarks(self, label, image): 127 | """ 128 | Get the landmarks associated with the label and image. 129 | If label is not in landmarks' cache, find the landmarks in image. 130 | 131 | Args: 132 | label: The label of the image. 133 | image: Image in PyTorch tensor format. 134 | 135 | Returns: 136 | Landmarks in PyTorch tensor format. 137 | """ 138 | 139 | if label in self.landmarks_cache: 140 | landmarks = self.landmarks_cache[label] 141 | else: 142 | landmarks = self.find_landmarks(image) 143 | self.landmarks_cache[label] = landmarks 144 | 145 | return landmarks 146 | 147 | 148 | def find_landmarks(self, image): 149 | """ 150 | Find the landmarks of an image. 151 | 152 | Args: 153 | image: image in PyTorch tensor format. 154 | 155 | Returns: 156 | The landmarks of the image as a tensor. 
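            The tensor has shape `self.landmarks_size` (i.e. [72, 2]); a zero
            tensor of that shape is returned when no face is found.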
        """

        unnormalize = lambda t: t * 0.5 + 0.5  # XXX: hard-coded un-normalization
        to_uint8_rgb = lambda t: (255 * t).round().to(torch.uint8)
        torch_to_numpy = lambda t: t.permute(1, 2, 0).numpy()

        # Image is a pytorch tensor, prepare it as an image in standard numpy format
        image = torch_to_numpy(to_uint8_rgb(unnormalize(image)))

        # Find landmarks in the image
        landmarks_found = face_landmarks(image)
        # If found any, return first one as a tensor, else return zeros
        if len(landmarks_found) > 0:
            landmarks = torch.tensor(dict_to_list(landmarks_found[0]), dtype=torch.int)
        else:
            landmarks = torch.zeros(self.landmarks_size, dtype=torch.int)

        return landmarks


    def reverse_sample(self, sample):
        """
        Reverse direction of sample (swap "before" and "after").

        Args:
            sample: A sample from the dataset.

        Returns:
            The reversed sample.
        """

        reversed_sample = {
            "before": sample["after"],
            "after": sample["before"],
        }

        # Check the original sample (not the new dict) for landmarks
        if "landmarks" in sample:
            reversed_sample["landmarks"] = {
                "before": sample["landmarks"]["after"],
                "after": sample["landmarks"]["before"],
            }

        return reversed_sample


    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self.dataset_dir)


class MakeupDataset2(MakeupDataset):
    """A new, expanded, unpaired version of MakeupDataset."""

    def __init__(self, dataset_dir,
                 transform=None,
                 with_landmarks=False,
                 reverse=False):
        # Initialize as an unpaired MakeupDataset
        super().__init__(dataset_dir, transform=transform,
                         with_landmarks=with_landmarks, paired=False, reverse=reverse)

    def get_images(self):
        """
        Return the (before, after) makeup image names in `dataset_dir`.

        Returns:
            Two sorted lists: the paths of the no-makeup ("before") images
            and the paths of the makeup ("after") images in `dataset_dir`.
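
        Unlike MakeupDataset, images are read from the `nomakeup` and `makeup`
        subdirectories of `dataset_dir`, and before/after samples are drawn unpaired.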
221 | """ 222 | nomakeup_dir = os.path.join(self.dataset_dir, "nomakeup") 223 | makeup_dir = os.path.join(self.dataset_dir, "makeup") 224 | 225 | if not os.path.isdir(nomakeup_dir): 226 | raise FileNotFoundError(f"No-Makeup directory '{nomakeup_dir}' does not exist.") 227 | if not os.path.isdir(makeup_dir): 228 | raise FileNotFoundError(f"Makeup directory '{makeup_dir}' does not exist.") 229 | 230 | before_images = list(files_iter(nomakeup_dir)) 231 | after_images = list(files_iter(makeup_dir)) 232 | 233 | return sorted(before_images), sorted(after_images) 234 | 235 | 236 | -------------------------------------------------------------------------------- /src/dataset/data/extract_faces.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import argparse 4 | import pickle 5 | import cv2 6 | import numpy as np 7 | import face_recognition 8 | from PIL import Image, ImageDraw 9 | 10 | from utility import files_iter 11 | 12 | 13 | # Get absolute path and force relative-to-file paths 14 | FILE_DIR = os.path.dirname(os.path.realpath(__file__)) 15 | 16 | ### NOTE: we assume that all visible files in source dir are images ### 17 | SOURCE_DIR = os.path.join(FILE_DIR, "processing", "splits") 18 | DEST_DIR = os.path.join(FILE_DIR, "processing", "faces") 19 | 20 | 21 | #################### FACES #################### 22 | 23 | 24 | def centroid(points): 25 | xs, ys = zip(*points) 26 | xc = sum(xs) / len(xs) 27 | yc = sum(ys) / len(ys) 28 | return round(xc), round(yc) 29 | 30 | 31 | def align_eyes_horizontally(face_img): 32 | landmarks = face_recognition.face_landmarks(np.array(face_img)) 33 | if len(landmarks) == 0: raise Exception(" Couldn't extract any faces.") 34 | 35 | left_eye = centroid(landmarks[0]["left_eye"]) 36 | right_eye = centroid(landmarks[0]["right_eye"]) 37 | h = right_eye[1] - left_eye[1] 38 | w = right_eye[0] - left_eye[0] 39 | angle = np.arcsin(h / np.sqrt(h*h + w*w)) * 180.0 / np.pi 40 | 41 | return face_img.rotate(angle) 42 | 43 | 44 | def zoom_on_face(face_img, scale=1.2): 45 | landmarks = face_recognition.face_landmarks(np.array(face_img)) 46 | if len(landmarks) == 0: raise Exception(" Couldn't extract any faces.") 47 | 48 | landmarks_list = [x for xs in landmarks[0].values() for x in xs] 49 | x, y, w, h = cv2.boundingRect(np.array(landmarks_list)) 50 | y -= (scale - 1) * 0.7 * h 51 | x -= (scale - 1) * 0.5 * w 52 | h *= scale 53 | w *= scale 54 | 55 | return face_img.crop((x, y, x+w, y+h)) 56 | 57 | 58 | def extract_face(file_name, source_dir, dest_dir): 59 | """ 60 | Extract the first detected face from the image in `file_name` and save it. 61 | 62 | Args: 63 | file_name: The name of the file (image). 64 | source_dir: Directory of source images. 65 | dest_dir: Directory where processed images will be saved. 66 | 67 | Returns: 68 | The name of the face image. 69 | """ 70 | print("Extracting face from {}... ".format(file_name), end="") 71 | face_image_name = file_name 72 | 73 | # Check if destination image already exists (i.e. 
processed previously) 74 | face_image_path = os.path.join(dest_dir, face_image_name) 75 | if not os.path.exists(face_image_path): 76 | # load image and extract faces from it 77 | source_path = os.path.join(source_dir, file_name) 78 | #image = face_recognition.load_image_file(source_path) 79 | face_img = Image.open(source_path).convert("RGB") 80 | 81 | face_img = align_eyes_horizontally(face_img) 82 | face_img = zoom_on_face(face_img) 83 | 84 | # Crop image and save as PIL 85 | face_img.save(face_image_path) 86 | 87 | print("Done.") 88 | return face_image_name 89 | 90 | 91 | def extract_faces(source_dir, faces_dir, with_landmarks=True, ensure_pairs=True): 92 | """ 93 | Try to extract faces from the images in source_dir and save them to faces_dir. 94 | 95 | Args: 96 | source_dir: Directory of source images. 97 | faces_dir: Directory where face images will be saved. 98 | with_landmarks: Extract faces landmarks as well 99 | ensure_pairs: Ensure only paired images by removing unpaired ones. 100 | """ 101 | 102 | landmarks_dir = os.path.join(faces_dir, "landmarks") 103 | 104 | # Create destination directory if it doesn't exist 105 | if not os.path.isdir(faces_dir): os.mkdir(faces_dir) 106 | if with_landmarks and not os.path.isdir(landmarks_dir): os.mkdir(landmarks_dir) 107 | 108 | for file_name in files_iter(source_dir): 109 | # Try to extract face from file (image) 110 | try: 111 | face_image_name = extract_face(file_name, source_dir, faces_dir) 112 | 113 | # Extract landmarks if needed 114 | if with_landmarks: 115 | extract_landmarks(face_image_name, faces_dir, landmarks_dir) 116 | 117 | except Exception as e: 118 | print("Failed."); print(f" {str(e)}") 119 | 120 | # Delete useless files 121 | if ensure_pairs: clean_incomplete_face_pairs(faces_dir) 122 | if with_landmarks: clean_landmarks(faces_dir, landmarks_dir) 123 | 124 | 125 | def clean_incomplete_face_pairs(faces_dir): 126 | """ 127 | Clean incomplete face pairs (either before or after image is missing) 128 | 129 | Args: 130 | source_dir: Directory of the examples. 131 | """ 132 | 133 | for file_name in files_iter(faces_dir): 134 | 135 | image_name, ext = file_name.split(".") 136 | index, which = image_name.split("-") 137 | 138 | other_which = "after" if which == "before" else "before" 139 | other_file = "{}-{}.{}".format(index, other_which, ext) 140 | 141 | if not os.path.exists(os.path.join(faces_dir, other_file)): 142 | # Remove this file if the other does not exist 143 | os.remove(os.path.join(faces_dir, file_name)) 144 | print("Removed face: {}".format(file_name)) 145 | 146 | 147 | #################### END FACES #################### 148 | 149 | 150 | #################### LANDMARKS #################### 151 | 152 | def extract_landmarks(file_name, source_dir, dest_dir): 153 | """ 154 | Extract the first detected face from the image in `file_name` and save it. 155 | 156 | Args: 157 | file_name: The path to the face image 158 | source_dir: Directory of images. 159 | dest_dir: Directory where processed images will be saved. 160 | 161 | Returns: 162 | The name of the landmarks file. 
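
    Note:
        Two files are written to `dest_dir` per face: a PNG with the landmarks
        drawn on a black canvas, and a pickle of the raw landmarks dict
        returned by face_recognition.face_landmarks.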
163 | """ 164 | 165 | landmarks_name = file_name.split(".")[0] + ".png" 166 | 167 | # Check if landmarks already exists 168 | landmarks_path = os.path.join(dest_dir, landmarks_name) 169 | if not os.path.exists(landmarks_path): 170 | 171 | # load image and extract landmarks from it 172 | face_image = Image.open(os.path.join(source_dir, file_name)) 173 | face_landmarks = face_recognition.face_landmarks(np.array(face_image)) 174 | 175 | print("Extracted {} face landmarks... ".format(len(face_landmarks)), end="") 176 | if len(face_landmarks) == 0: 177 | raise Exception(" Couldn't extract any landmarks.") 178 | 179 | # Draw landmarks on an empty PIL image 180 | landmarks_image = Image.new("RGB", face_image.size) 181 | draw_landmarks(landmarks_image, face_landmarks[0]) 182 | 183 | # Save landmarks 184 | landmarks_image.save(os.path.join(dest_dir, landmarks_name)) 185 | 186 | # Pickle landmarks 187 | landmarks_path = os.path.join(dest_dir, os.path.splitext(landmarks_name)[0] + ".pickle") 188 | with open(landmarks_path, "wb") as f: 189 | pickle.dump(face_landmarks[0], f) 190 | 191 | return landmarks_name 192 | 193 | 194 | def draw_landmarks(landmarks_image, landmarks, fill=None, width=3): 195 | """ 196 | Draws the landmarks on an empty image. 197 | 198 | Args: 199 | landmarks_image: PIL image on which we will draw the landmarks. 200 | landmarks: A dict of the landmarks coordinates, as in {"part": [coords, ...]}. 201 | fill: Color of the lines. 202 | width: Width of the lines. 203 | """ 204 | 205 | d = ImageDraw.Draw(landmarks_image) 206 | 207 | for part, xy in landmarks.items(): 208 | d.line(xy, fill=fill, width=3) 209 | 210 | # For the eyes, close the loop (sounds a little poetic, i know) 211 | if part == "right_eye" or part == "left_eye": 212 | closing_line = [xy[-1], xy[0]] 213 | d.line(closing_line, fill=fill, width=3) 214 | 215 | 216 | def clean_landmarks(faces_dir, landmarks_dir): 217 | """ 218 | Clean landmarks not associated to any images in faces_dir. 219 | 220 | Args: 221 | faces_dir: Directory of the faces. 222 | landmarks_dir: Directory of the landmarks. 
223 | """ 224 | 225 | faces_set = set(f.split(".")[0] for f in files_iter(faces_dir)) 226 | for landmarks in files_iter(landmarks_dir): 227 | landmarks_name = landmarks.split(".")[0] 228 | if landmarks_name not in faces_set: 229 | os.remove(os.path.join(landmarks_dir, landmarks)) 230 | print("Removed landmarks {}".format(landmarks)) 231 | 232 | 233 | #################### END LANDMARKS #################### 234 | 235 | 236 | def main(args): 237 | if args.image: 238 | extract_face(args.image, args.source_dir, args.dest_dir) 239 | else: 240 | extract_faces(args.source_dir, args.dest_dir, args.with_landmarks, args.ensure_pairs) 241 | 242 | 243 | if __name__ == '__main__': 244 | 245 | parser = argparse.ArgumentParser(description="extract faces and save them.") 246 | 247 | parser.add_argument('--source_dir', type=str, default=SOURCE_DIR, 248 | help="source directory of images from which faces will be extracted.") 249 | parser.add_argument('--dest_dir', type=str, default=DEST_DIR, 250 | help="destination directory where face images will be saved.") 251 | parser.add_argument('-i', '--image', type=str, default="", 252 | help="path to the image, relative to --source_dir (if specified, only this image will be processed).") 253 | parser.add_argument("--with_landmarks", action="store_true", 254 | help="extract faces landmarks as well") 255 | parser.add_argument("--ensure_pairs", action="store_true", 256 | help="ensure only paired images (remove images with no corresponding paired image)") 257 | 258 | args = parser.parse_args() 259 | 260 | main(args) 261 | 262 | -------------------------------------------------------------------------------- /src/models/dcgan.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | 4 | 5 | class DCGAN(nn.Module): 6 | """Deep Convolutional Generative Adversarial Network""" 7 | 8 | def __init__(self, 9 | num_latents=100, 10 | num_features=64, 11 | image_channels=3, 12 | image_size=64, 13 | gan_type="gan", 14 | fully_convolutional=True, 15 | activation=None, 16 | use_batchnorm=True, 17 | use_spectralnorm=False,): 18 | """ 19 | Initializes DCGAN. 20 | 21 | Args: 22 | num_latents: Number of latent factors. 23 | num_features: Number of features in the convolutions. 24 | image_channels: Number of channels in the input image. 25 | image_size: Size (i.e. height or width) of image. 26 | gan_type: Type of GAN (e.g. "gan" or "wgan-gp"). 
27 | """ 28 | super().__init__() 29 | 30 | self.num_latents = num_latents 31 | self.num_features = num_features 32 | self.image_channels = image_channels 33 | self.image_size = image_size 34 | self.gan_type = gan_type 35 | self.fully_convolutional = fully_convolutional 36 | self.activation = activation 37 | self.use_batchnorm = use_batchnorm 38 | self.use_spectralnorm = use_spectralnorm 39 | 40 | D_params = { 41 | "num_latents": 1, # XXX 42 | "num_features": num_features, 43 | "image_channels": image_channels, 44 | "image_size": image_size, 45 | "gan_type": gan_type, 46 | "fully_convolutional": fully_convolutional, 47 | "activation": activation, 48 | "use_batchnorm": use_batchnorm, 49 | "use_spectralnorm": use_spectralnorm, 50 | } 51 | G_params = { 52 | "num_latents": num_latents, 53 | "num_features": num_features, 54 | "image_channels": image_channels, 55 | "image_size": image_size, 56 | "gan_type": gan_type, 57 | "fully_convolutional": fully_convolutional, 58 | "activation": activation, 59 | "use_batchnorm": use_batchnorm, 60 | "use_spectralnorm": use_spectralnorm, 61 | } 62 | 63 | self.D = DCGAN_Discriminator(**D_params) 64 | self.G = DCGAN_Generator(**G_params) 65 | 66 | 67 | class DCGAN_DiscriminatorBlock(nn.Module): 68 | """ 69 | A discriminator convolutional block. 70 | Default stride and padding half the size of features, 71 | e.g. if input is [in_channels, 64, 64], output will be [out_channels, 32, 32]. 72 | """ 73 | 74 | def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, 75 | use_batchnorm=True, use_spectralnorm=False, activation=None): 76 | super().__init__() 77 | 78 | self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, 79 | stride=stride, padding=padding, bias=False) 80 | if use_spectralnorm: 81 | self.conv = nn.utils.spectral_norm(self.conv) 82 | self.batchnorm = nn.BatchNorm2d(out_channels) if use_batchnorm else None 83 | self.activation = nn.LeakyReLU(0.2, inplace=True) if activation is None else activation() 84 | 85 | def forward(self, x): 86 | x = self.conv(x) 87 | if self.batchnorm: 88 | x = self.batchnorm(x) 89 | x = self.activation(x) 90 | return x 91 | 92 | 93 | class DCGAN_GeneratorBlock(nn.Module): 94 | """ 95 | A generator convolutional block. 96 | Default stride and padding double the size of features, 97 | e.g. if input is [in_channels, 32, 32], output will be [out_channels, 64, 64]. 98 | """ 99 | 100 | def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, 101 | use_batchnorm=True, use_spectralnorm=False, activation=None): 102 | super().__init__() 103 | 104 | self.convT = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, 105 | stride=stride, padding=padding, bias=False) 106 | if use_spectralnorm: 107 | self.convT = nn.utils.spectral_norm(self.convT) 108 | self.batchnorm = nn.BatchNorm2d(out_channels) if use_batchnorm else None 109 | self.activation = nn.LeakyReLU(0.2, inplace=True) if activation is None else activation() # XXX: ReLU? 
110 | 111 | def forward(self, x): 112 | x = self.convT(x) 113 | if self.batchnorm: 114 | x = self.batchnorm(x) 115 | x = self.activation(x) 116 | return x 117 | 118 | 119 | class DCGAN_Discriminator(nn.Module): 120 | """The discriminator of a DCGAN""" 121 | 122 | def __init__(self, 123 | num_latents=1, 124 | num_features=64, 125 | image_channels=3, 126 | image_size=64, 127 | max_features=512, 128 | gan_type="gan", 129 | fully_convolutional=True, 130 | activation=None, 131 | use_batchnorm=True, 132 | use_spectralnorm=False, 133 | D_block=DCGAN_DiscriminatorBlock): 134 | super().__init__() 135 | 136 | using_grad_penalty = gan_type in ("gan-gp", "wgan-gp") 137 | output_sigmoid = gan_type in ("gan", "gan-gp") 138 | 139 | block_config = { 140 | "activation": activation, 141 | "use_batchnorm": use_batchnorm and not using_grad_penalty, 142 | "use_spectralnorm": use_spectralnorm, 143 | } 144 | 145 | # Calculate intermediate image sizes 146 | image_sizes = [image_size] 147 | while image_sizes[-1] > 5: 148 | image_sizes.append(image_sizes[-1] // 2) 149 | latent_kernel = image_sizes[-1] # should be either 3, 4, or 5 150 | num_layers = len(image_sizes) - 1 151 | 152 | # Calculate feature sizes 153 | features = [min(num_features * 2**i, max_features) for i in range(num_layers)] 154 | 155 | # Input layer 156 | self.input_layer = D_block(image_channels, features[0], **block_config) 157 | 158 | # Intermediate layers 159 | self.main_layers = nn.Sequential(*[ 160 | D_block(in_features, out_features, **block_config) 161 | for in_features, out_features in zip(features, features[1:]) 162 | ]) 163 | 164 | # Output layer (feature_size = 3, 4, or 5 -> 1) 165 | if fully_convolutional: 166 | self.output_layer = nn.Sequential( 167 | nn.Conv2d(features[-1], num_latents, latent_kernel, bias=False), 168 | nn.Flatten(), 169 | ) 170 | else: 171 | self.output_layer = nn.Sequential( 172 | nn.Flatten(), 173 | nn.Linear(features[-1] * latent_kernel**2, num_latents, bias=False) 174 | ) 175 | 176 | # Add sigmoid activation if using regular GAN loss 177 | self.output_activation = nn.Sigmoid() if output_sigmoid else None 178 | 179 | def forward(self, x): 180 | x = self.input_layer(x) 181 | x = self.main_layers(x) 182 | x = self.output_layer(x) 183 | if self.output_activation: 184 | x = self.output_activation(x) 185 | # Remove H and W dimensions, infer channels dim (remove if 1) 186 | x = x.view(x.size(0), -1).squeeze(1) 187 | return x 188 | 189 | 190 | class DCGAN_Generator(nn.Module): 191 | """The generator of a DCGAN""" 192 | 193 | def __init__(self, 194 | num_latents=100, 195 | num_features=64, 196 | image_channels=3, 197 | image_size=64, 198 | max_features=512, 199 | gan_type="gan", 200 | fully_convolutional=True, 201 | activation=None, 202 | use_batchnorm=True, 203 | use_spectralnorm=False, 204 | G_block=DCGAN_GeneratorBlock): 205 | super().__init__() 206 | 207 | block_config = { 208 | "activation": activation, 209 | "use_batchnorm": use_batchnorm, 210 | "use_spectralnorm": use_spectralnorm 211 | } 212 | 213 | # Calculate intermediate image sizes 214 | image_sizes = [image_size] 215 | while image_sizes[-1] > 5: 216 | image_sizes.append(image_sizes[-1] // 2) 217 | latent_kernel = image_sizes[-1] # should be either 3, 4, or 5 218 | num_layers = len(image_sizes) - 1 219 | 220 | # Calculate feature sizes 221 | features = [min(num_features * 2**i, max_features) for i in range(num_layers)] 222 | 223 | # Reverse order of image sizes and features for generator 224 | image_sizes = image_sizes[::-1] 225 | features = features[::-1] 
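        # Worked example (with the defaults image_size=64, num_features=64,
        # max_features=512):
        #   image_sizes: [64, 32, 16, 8, 4]  -> reversed: [4, 8, 16, 32, 64]
        #   features:    [64, 128, 256, 512] -> reversed: [512, 256, 128, 64]
        # i.e. the generator grows 4 -> 8 -> 16 -> 32 -> 64 while halving features.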
226 | 227 | # Input layer 228 | if fully_convolutional: 229 | self.input_layer = G_block(num_latents, features[0], kernel_size=latent_kernel, 230 | stride=1, padding=0, **block_config) 231 | else: 232 | self.input_layer = nn.Sequential( 233 | nn.Flatten(), 234 | nn.Linear(num_latents, features[0] * image_sizes[0]**2, bias=False), 235 | View(features[0], image_sizes[0], image_sizes[0]) 236 | ) 237 | 238 | # Intermediate layers 239 | self.main_layers = nn.Sequential(*[ 240 | G_block(in_features, out_features, kernel_size=4+(expected_size%2), **block_config) 241 | for in_features, out_features, expected_size in zip(features, features[1:], image_sizes[1:]) 242 | ]) 243 | 244 | # Output layer 245 | self.output_layer = nn.ConvTranspose2d(features[-1], image_channels, kernel_size=4+(image_size%2), 246 | stride=2, padding=1, bias=False) 247 | self.output_activation = nn.Tanh() 248 | 249 | def forward(self, x): 250 | # Add H and W dimensions, infer channels dim (add if none) 251 | x = x.view(x.size(0), -1, 1, 1) 252 | x = self.input_layer(x) 253 | x = self.main_layers(x) 254 | x = self.output_layer(x) 255 | x = self.output_activation(x) 256 | return x 257 | 258 | 259 | class View(nn.Module): 260 | def __init__(self, *shape, including_batch=False): 261 | super().__init__() 262 | self.shape = shape 263 | self.including_batch = including_batch 264 | 265 | def forward(self, x): 266 | if self.including_batch: 267 | return x.view(*self.shape) 268 | else: 269 | return x.view(x.size(0), *self.shape) 270 | 271 | -------------------------------------------------------------------------------- /src/trainers/utils/face_morph/face_morph.py: -------------------------------------------------------------------------------- 1 | 2 | import cv2 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | import matplotlib.animation as animation 6 | 7 | from math import pi, atan2 8 | from PIL import Image, ImageDraw 9 | from face_recognition import face_landmarks 10 | 11 | dict_to_list = lambda d: [x for l in d.values() for x in l] 12 | 13 | 14 | def get_face_mask(img_array, landmarks): 15 | # Get the convex hull containing face landmarks 16 | hull = cv2.convexHull(np.array(landmarks)).squeeze() 17 | 18 | # Draw the contours on the mask (thickness -1 fills the inside of the contours) 19 | mask = np.zeros((img_array.shape[0], img_array.shape[1]), dtype=np.uint8) 20 | cv2.drawContours(mask, [hull], 0, color=(255, 255, 255), thickness=-1) 21 | 22 | # Apply the mask to the face to get the face only 23 | mask = mask.reshape(mask.shape + (1,)) 24 | face_mask = (mask > 0) * img_array 25 | 26 | return face_mask 27 | 28 | def get_face_tone(img_array, landmarks=None): 29 | if landmarks: 30 | img_array = get_face_mask(img_array, landmarks) 31 | return np.mean(img_array, axis=(0,1)).reshape(1,1,-1) 32 | 33 | def adjust_face_tone(img_array1, img_array2, landmarks1=None, landmarks2=None): 34 | face_tone1 = get_face_tone(img_array1, landmarks1) 35 | face_tone2 = get_face_tone(img_array2, landmarks2) 36 | img_array1 = img_array1 * face_tone2 / face_tone1 37 | img_array1 = img_array1.round().clip(0, 255).astype(np.uint8) 38 | 39 | return img_array1 40 | 41 | def delauney(points, img, draw=False): 42 | """ 43 | Finds Delauney's triangulation of the list of points 44 | in an img, and draw it if necessary. 45 | 46 | Args: 47 | points: A list of 2D points as tuples. 48 | img: The image of containing the points as a PIL or numpy. 49 | draw: Draw the triangulation on the img if True. 
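
    Returns:
        A sorted list of triangles, each a tuple of three (x, y) points
        in ascending order.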
50 | """ 51 | 52 | # Init subdiv and insert points 53 | h, w, _ = np.array(img).shape 54 | xs, ys = zip(*points) 55 | rect_x = min(0, min(xs)) 56 | rect_y = min(0, min(ys)) 57 | rect_w = max(w, max(xs) - rect_x) + 1 58 | rect_h = max(h, max(ys) - rect_y) + 1 59 | 60 | rect = (rect_x, rect_y, rect_w, rect_h) 61 | subdiv = cv2.Subdiv2D(rect) 62 | subdiv.insert(points) 63 | 64 | triangles = [] 65 | for t in subdiv.getTriangleList(): 66 | # Get triangle point 67 | t = np.int32(t) 68 | p1 = (t[0], t[1]) 69 | p2 = (t[2], t[3]) 70 | p3 = (t[4], t[5]) 71 | t = tuple(sorted([p1, p2, p3])) 72 | # Sort the rest of the points in clockwise order XXX: overkill? 73 | # angle = lambda p: (-0.5*pi-atan2(p[1] - t[0][1], p[0] - t[0][0])) % (2*pi) 74 | # t = tuple([t[0]] + sorted(t[1:], key=angle)) 75 | triangles.append(t) 76 | 77 | return sorted(triangles) 78 | 79 | 80 | def warp_triangle(t1, t2, img1, img2, alpha=0.8): 81 | """ 82 | Warps triangle1 in img1 to triangle2 in img2 by a factor of alpha. 83 | 84 | Args: 85 | t1: A list of 3 points (or tuples) of the triangle in img1. 86 | t2: A list of 3 points (or tuples) of the triangle in img2. 87 | img1: Tthe source image as a numpy array. 88 | img2: The destination image as a numpy array. 89 | alpha: Interpolation factor of the warp. 90 | """ 91 | 92 | # Convert source and destination triangle to numpy 93 | t1 = np.float32(t1) 94 | t2 = np.float32(t2) 95 | 96 | # Get the bounding rectangles (patches) of the triangles 97 | x1, y1, w1, h1 = cv2.boundingRect(t1) 98 | x2, y2, w2, h2 = cv2.boundingRect(t2) 99 | 100 | # Sometimes, landmarks reside slightly outside the image... XXX: hacky. 101 | x1, y1, x2, y2 = max(0, x1), max(0, y1), max(0, x2), max(0, y2) 102 | 103 | # Offset triangles' coordinates by the bounding rect's coordinates 104 | t1[:, 0] = t1[:, 0] - x1 105 | t1[:, 1] = t1[:, 1] - y1 106 | t2[:, 0] = t2[:, 0] - x2 107 | t2[:, 1] = t2[:, 1] - y2 108 | 109 | # Get the rectangles from the images 110 | patch1 = img1[y1:y1+h1, x1:x1+w1].copy() 111 | patch2 = img2[y2:y2+h2, x2:x2+w2].copy() 112 | 113 | # If one of the patches have length 0 in any dimension, skip this warp 114 | if 0 in patch1.shape or 0 in patch2.shape: 115 | return 116 | 117 | # Get the affine transformation between the triangles in the new coordinates 118 | affine1to2 = cv2.getAffineTransform(t1, t2) 119 | 120 | # Affine-warp patch1 to patch2 121 | #print("t1:", t1) 122 | #print(x1, y1, w1, h1) 123 | #print("patch1.shape:", patch1.shape) 124 | patch1_warped = cv2.warpAffine(patch1, affine1to2, (w2,h2), 125 | borderMode=cv2.BORDER_REFLECT_101) 126 | 127 | # Crop out points outside image on the max side. XXX: hacky but ok. 128 | if patch1_warped.shape != patch2.shape: 129 | patch1_warped = patch1_warped[0:patch2.shape[0], 0:patch2.shape[1]] 130 | 131 | # Create a mask to get a t2-like triangle out of patch1_warped 132 | mask = cv2.fillConvexPoly(np.zeros_like(patch2), np.int32(t2), (1.0, 1.0, 1.0)) 133 | 134 | # Now interpolate the warped t1 in patch1_warped to t2 by a factor of alpha 135 | mask = alpha * np.float32(mask) 136 | warped_patch = mask * patch1_warped + (1 - mask) * patch2 137 | 138 | # Paste the warped patch to img2 in place of patch2 139 | img2[y2:y2+h2, x2:x2+w2] = warped_patch 140 | 141 | return t1, t2, patch1, patch2, patch1_warped 142 | 143 | 144 | # XXX 145 | def face_morph(img1, img2, landmarks1=None, landmarks2=None, alpha=0.95, adjust_tone=False): 146 | """ 147 | Morph face in img1 to face in img2 by a factor of alpha, given their landmarks. 
148 | 149 | Args: 150 | img1: The source image as a PIL image or a numpy array. 151 | img2: The destination image as a PIL image or a numpy array. 152 | landmarks1: List of landmarks points from img1. 153 | landmarks2: List of landmarks points from img2. 154 | alpha: Factor of interpolation from face in img1 to face in img2. 155 | adjust_tone: adjust the morphed face to the skin tone of the target face. 156 | """ 157 | 158 | # Convert PIL images to np arrays 159 | img_array1, img_array2 = np.array(img1), np.array(img2) 160 | 161 | # Find landmarks if none were given 162 | if landmarks1 is None: landmarks1 = find_landmarks(img1) 163 | if landmarks2 is None: landmarks2 = find_landmarks(img2) 164 | 165 | # Adjust skin tone 166 | if adjust_tone: 167 | img_array1 = adjust_face_tone(img_array1, img_array2) 168 | 169 | # Create a map that links the vertices of the two landmarks together 170 | points_map = dict(zip(landmarks1, landmarks2)) 171 | 172 | # Warp each triangle in img1 to its corresponding one in img2 173 | for t1 in delauney(landmarks1, img1): 174 | t2 = tuple(points_map[p] for p in t1 if p in points_map) 175 | if len(t2) == 3: 176 | warp_triangle(t1, t2, img_array1, img_array2, alpha=alpha) 177 | 178 | return img_array2 179 | 180 | 181 | def face_morph_video(filename, img1, img2, landmarks1=None, landmarks2=None, adjust_tone=True, plt_saving=False): 182 | 183 | # Convert PIL images to np arrays 184 | img_array1, img_array2 = np.array(img1), np.array(img2) 185 | 186 | # Find landmarks if none were given 187 | if landmarks1 is None: landmarks1 = find_landmarks(img1) 188 | if landmarks2 is None: landmarks2 = find_landmarks(img2) 189 | 190 | # Adjust skin tone 191 | if adjust_tone: 192 | img_array1 = adjust_face_tone(img_array1, img_array2) 193 | 194 | # Create a map that links the vertices of the two landmarks together 195 | points_map = dict(zip(landmarks1, landmarks2)) 196 | 197 | # Find Delauney triangles of both landmarks 198 | triangles1 = delauney(landmarks1, img1) 199 | # triangles2 = delauney(landmarks2, img2) 200 | 201 | # Prepare figure and frames of morphed images 202 | fig = plt.figure() 203 | plt.axis('off') 204 | morphed_imgs = [] 205 | 206 | # For each morphing factor `alpha`... 
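    # (alpha = 0 leaves img2's face untouched; values approaching 1 warp fully
    #  toward img1's face, so successive frames animate the morph)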
207 | step = 0.05 208 | for alpha in np.arange(0, 1.0, step): 209 | 210 | # Start morphed img from the original img2 211 | morphed_img = img_array2.copy() 212 | 213 | # Warp each triangle in img1 to its corresponding one in img2 214 | for t1 in triangles1: 215 | t2 = tuple(points_map[p] for p in t1 if p in points_map) 216 | if len(t2) == 3: 217 | warp_triangle(t1, t2, img_array1, morphed_img, alpha=alpha) # morphed_img is in-place op 218 | 219 | # save frame of morphed image to animate later 220 | if plt_saving: # using matplotlib to save the morphing video 221 | morphed_imgs.append([plt.imshow(morphed_img, animated=True)]) 222 | else: # using cv2 instead 223 | morphed_imgs.append(morphed_img) 224 | 225 | # Make animation and play 226 | if plt_saving: 227 | ani = animation.ArtistAnimation(fig, morphed_imgs, interval=200, blit=True, repeat_delay=1000) 228 | ani.save(filename) 229 | else: 230 | out = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc(*'mp4v'), 2, (morphed_imgs[0].shape[1],morphed_imgs[0].shape[0])) 231 | for frame in morphed_imgs: 232 | out.write(frame[:,:,::-1]) # RGB to BGR 233 | out.release() 234 | 235 | 236 | def find_landmarks(img): 237 | landmarks_found = face_landmarks(np.array(img)) 238 | if len(landmarks_found) > 0: 239 | return dict_to_list(landmarks_found[0]) 240 | else: 241 | raise Exception("Failed to find face landmarks for one of the images.") 242 | 243 | ############################################# 244 | 245 | def test(test_num, reverse=False): 246 | 247 | # Just a test 248 | img1_name = f"img/{test_num}-before.jpeg" 249 | img2_name = f"img/{test_num}-after.jpeg" 250 | 251 | # Reverse morph direction (morph 2 -> 1 instead of 1 -> 2) 252 | if reverse: 253 | img1_name, img2_name = img2_name, img1_name 254 | 255 | # Load images 256 | img1 = Image.open(img1_name) 257 | img2 = Image.open(img2_name) 258 | 259 | # Extract landmarks 260 | landmarks1 = find_landmarks(img1) 261 | landmarks2 = find_landmarks(img2) 262 | 263 | # Morph face 264 | makeup_process = "apply" if reverse else "remove" 265 | filename = f"morphing_face_{test_num}_{makeup_process}.mp4" 266 | face_morph_video(filename, img1, img2, landmarks1, landmarks2) 267 | 268 | 269 | def main(): 270 | test("00000") 271 | test("00003") 272 | test("mixed1") 273 | test("mixed2") 274 | test("00000", reverse=True) 275 | test("00003", reverse=True) 276 | test("mixed1", reverse=True) 277 | test("mixed2", reverse=True) 278 | 279 | 280 | if __name__ == '__main__': 281 | main() 282 | 283 | -------------------------------------------------------------------------------- /src/trainers/base_trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import datetime 4 | import torch 5 | import torch.utils.tensorboard as tensorboard 6 | 7 | from pprint import pformat 8 | from collections import defaultdict 9 | from .utils.report_utils import plot_lines 10 | 11 | 12 | class BaseTrainer: 13 | """The base trainer class.""" 14 | 15 | def __init__(self, model, dataset, 16 | name="trainer", 17 | results_dir="results/", 18 | load_model_path=None, 19 | num_gpu=1, 20 | num_workers=0, 21 | batch_size=4, 22 | report_interval=10, 23 | save_interval=100000, 24 | use_tensorboard=False, # XXX: not implemented yet 25 | description="no description given", 26 | **kwargs): 27 | """ 28 | Initializes BaseTrainer. 29 | 30 | Args: 31 | model: The model or net. 32 | dataset: The dataset on which the model will be training. 33 | name: Name of this trainer. 
34 |             results_dir: Directory in which results will be saved for each run.
35 |             load_model_path: Path to the model that will be loaded, if any.
36 |             num_gpu: Number of GPUs to use for training.
37 |             num_workers: Number of workers sampling from the dataset.
38 |             batch_size: Size of the batch. Must be > num_gpu.
39 |             report_interval: Report stats every `report_interval` iters.
40 |             save_interval: Save model every `save_interval` iters.
41 |             description: Description of the experiment the trainer is running.
42 |         """
43 | 
44 |         self.model = model
45 |         self.dataset = dataset
46 | 
47 |         self.name = name
48 |         self.results_dir = results_dir
49 |         self.load_model_path = load_model_path
50 | 
51 |         self.num_gpu = num_gpu
52 |         self.num_workers = num_workers
53 |         self.batch_size = batch_size
54 | 
55 |         self.report_interval = report_interval
56 |         self.save_interval = save_interval
57 |         self.description = description
58 |         self.save_results = False
59 | 
60 |         self.start_time = datetime.datetime.now()
61 |         self.stop_time = datetime.datetime.now()
62 |         self.iters = 1  # current iteration (i.e. # of batches processed so far)
63 |         self.batch = 1  # current batch
64 |         self.epoch = 1  # current epoch
65 |         self.num_batches = -(-len(self.dataset) // self.batch_size)  # batches per epoch (ceiling division)
66 |         self.num_epochs = 0  # number of epochs to run
67 | 
68 |         self._dataset_sampler = iter(())  # generates samples from the dataset
69 |         self._data = defaultdict(list)  # contains data of experiment
70 | 
71 |         self.writer = None
72 |         self.use_tensorboard = use_tensorboard
73 | 
74 |         # Load model if necessary
75 |         if load_model_path is not None:
76 |             self.load_model(load_model_path)
77 | 
78 |         # Initialize device
79 |         using_cuda = torch.cuda.is_available() and self.num_gpu > 0
80 |         self.device = torch.device("cuda:0" if using_cuda else "cpu")
81 | 
82 |         # Move model to device and parallelize model if possible
83 |         self.model = self.model.to(self.device)
84 |         if self.device.type == "cuda" and self.num_gpu > 1:
85 |             self.model = torch.nn.DataParallel(self.model, list(range(self.num_gpu)))  # nn.DataParallel (DistributedDataParallel lives in torch.nn.parallel and needs a process group)
86 | 
87 | 
88 |     def load_model(self, model_path):
89 |         if not os.path.isfile(model_path):
90 |             print(f"Couldn't load model: file '{model_path}' does not exist")
91 |             print("Training model from scratch.")
92 |         else:
93 |             print("Loading model...")
94 |             self.model.load_state_dict(torch.load(model_path))
95 | 
96 | 
97 |     def save_model(self, model_path):
98 |         print("Saving model...")
99 |         torch.save(self.model.state_dict(), model_path)
100 | 
101 | 
102 |     def time_since_start(self):
103 |         elapsed_time = datetime.datetime.now() - self.start_time
104 |         return elapsed_time.total_seconds()
105 | 
106 | 
107 |     def run(self, num_epochs, save_results=False):
108 |         """
109 |         Runs the trainer. Trainer will train the model and then save it.
110 |         Note that running the trainer more than once will accumulate the results.
111 | 
112 |         Args:
113 |             num_epochs: Number of epochs to run.
114 |             save_results: A flag indicating whether we should save the results this run.
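
        Example (illustrative; `model` and `dataset` are whatever the subclass expects):
            >>> trainer = BaseTrainer(model, dataset, name="demo", batch_size=8)
            >>> trainer.run(num_epochs=5, save_results=True)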
115 | """ 116 | self.start_time = datetime.datetime.now() 117 | self.num_epochs = num_epochs + self.epoch - 1 118 | self.save_results = save_results 119 | 120 | # Create experiment directory 121 | experiment_name = self.get_experiment_name() 122 | experiment_dir = os.path.join(self.results_dir, experiment_name) 123 | if self.save_results: 124 | if not os.path.isdir(self.results_dir): os.mkdir(self.results_dir) 125 | if not os.path.isdir(experiment_dir): os.mkdir(experiment_dir) 126 | 127 | with tensorboard.SummaryWriter(f"runs/{experiment_name}") as self.writer: 128 | # Try training the model, then stop the training when an exception is thrown 129 | try: 130 | self.train() 131 | finally: 132 | self.stop_time = datetime.datetime.now() 133 | self.stop() 134 | 135 | 136 | def train(self): 137 | """ 138 | Train model on dataset for `num_epochs` epochs. 139 | 140 | Args: 141 | num_epochs: Number of epochs to run. 142 | """ 143 | 144 | # Train until dataset sampler is exhausted (i.e. until it throws StopIteration) 145 | self.init_dataset_sampler() 146 | 147 | try: 148 | print(f"Starting training {self.name}...") 149 | while True: 150 | # One training step/iteration 151 | self.pre_train_step() 152 | self.train_step() 153 | self.post_train_step() 154 | self.iters += 1 155 | 156 | except StopIteration: 157 | print("Finished training.") 158 | 159 | 160 | def init_dataset_sampler(self): 161 | """ 162 | Initializes the sampler (or iterator) of the dataset. 163 | 164 | Args: 165 | num_epochs: Number of epochs. 166 | """ 167 | loader_config = { 168 | "batch_size": self.batch_size, 169 | "shuffle": True, 170 | "num_workers": self.num_workers, 171 | } 172 | self._dataset_sampler = iter(self.sample_loader(loader_config)) 173 | 174 | 175 | def sample_loader(self, loader_config): 176 | """ 177 | A generator that yields samples from the dataset, exhausting it `num_epochs` times. 178 | 179 | Args: 180 | num_epochs: Number of epochs. 181 | loader_config: Configuration for pytorch's data loader. 182 | """ 183 | 184 | for self.epoch in range(self.epoch, self.num_epochs + 1): 185 | data_loader = torch.utils.data.DataLoader(self.dataset, **loader_config) 186 | for self.batch, sample in enumerate(data_loader, 1): 187 | yield sample 188 | 189 | self.epoch += 1 190 | 191 | 192 | def sample_dataset(self): 193 | """ 194 | Samples the dataset. To be called by the client. 195 | 196 | Returns: 197 | A sample from the dataset. 198 | """ 199 | return next(self._dataset_sampler) 200 | 201 | 202 | def pre_train_step(self): 203 | """ 204 | The training preparation, or what happens before each training step. 205 | """ 206 | pass 207 | 208 | 209 | def train_step(self): 210 | """ 211 | Makes one training step. 212 | """ 213 | pass 214 | 215 | 216 | def post_train_step(self): 217 | """ 218 | The training checkpoint, or what happens after each training step. 219 | """ 220 | should_report_stats = self.iters % self.report_interval == 0 221 | should_save_progress = self.iters % self.save_interval == 0 222 | finished_epoch = self.batch == self.num_batches 223 | 224 | # Report training stats 225 | if should_report_stats or finished_epoch: 226 | self.report_stats() 227 | 228 | if self.save_results and should_save_progress: 229 | model_path = os.path.join(self.results_dir, 230 | self.get_experiment_name(), 231 | f"model@{self.iters}.pt") 232 | self.save_model(model_path) 233 | 234 | 235 | def stop(self): 236 | """ 237 | Stops the trainer, or what happens when the trainer stops. 238 | Note: This will run even on keyboard interrupts. 
239 | """ 240 | 241 | # plot losses, if any 242 | plot_lines(self.get_data_containing("loss"), title="Losses") 243 | 244 | 245 | def get_experiment_name(self, delimiter=", "): 246 | """ 247 | Get the name of trainer's training train... 248 | 249 | Args: 250 | delimiter: The delimiter between experiment's parameters. Pretty useless. 251 | """ 252 | info = { 253 | "name": self.name, 254 | "batch_size": self.batch_size, 255 | } 256 | 257 | timestamp = self.start_time.strftime("%y%m%d-%H%M%S") 258 | experiment = delimiter.join(f"{k}={v}" for k,v in info.items()) 259 | 260 | return "[{}] {}".format(timestamp, experiment) 261 | 262 | 263 | def report_stats(self, precision=3): 264 | """ 265 | Default training stats report. 266 | Prints the current value of each data list recorded. 267 | """ 268 | 269 | # Progress of training 270 | progress = f"[{self.epoch}/{self.num_epochs}][{self.batch}/{self.num_batches}] " 271 | 272 | # Show the stat of an item 273 | item_stat = lambda item: f"{item[0]} = {item[1][-1]:.{precision}f}" 274 | # Join the stats separated by tabs 275 | stats = ", ".join(map(item_stat, self._data.items())) 276 | 277 | report = progress + stats 278 | 279 | print(report) 280 | 281 | 282 | def get_current_value(self, label): 283 | """ 284 | Get the current value of the quantity given by `label`. 285 | 286 | Args: 287 | label: Name/label of the data/quantity. 288 | 289 | Returns: 290 | The current value of the quantity given by `label`. 291 | """ 292 | return self._data[label][-1] if len(self._data[label]) > 0 else None 293 | 294 | 295 | def get_data_containing(self, phrase): 296 | """ 297 | Get the data lists that contain `phrase` in their names/labels. 298 | 299 | Args: 300 | phrase: A phrase to find in the label of the data, such as "loss". 301 | 302 | Returns: 303 | A dict containing the data lists that contain `phrase` in their labels. 304 | """ 305 | return {k: v for k, v in self._data.items() if k.find(phrase) != -1} 306 | 307 | 308 | def add_data(self, **kwargs): 309 | """ 310 | Adds/appends a value to the list given by `label`. 311 | 312 | Args: 313 | kwargs: Dict of values to be added to data lists corresponding to their labels. 314 | """ 315 | for key, value in kwargs.items(): 316 | self._data[key].append(value) 317 | 318 | 319 | def __repr__(self): 320 | 321 | self_dict = dict({k:v for k,v in self.__dict__.items() if k[0] != "_"}) 322 | pretty_dict = pformat(self_dict) 323 | 324 | return self.__class__.__name__ + "(**" + pretty_dict + ")" 325 | 326 | -------------------------------------------------------------------------------- /src/trainers/gan_trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import torch 4 | 5 | from .base_trainer import BaseTrainer 6 | from .utils.init_utils import init_optim 7 | from .utils.gan_utils import * 8 | from .utils.report_utils import * 9 | 10 | 11 | class GANTrainer(BaseTrainer): 12 | """A trainer for a GAN.""" 13 | 14 | def __init__(self, model, dataset, 15 | D_optim_config={}, 16 | G_optim_config={}, 17 | D_iters=5, 18 | clamp=0.01, 19 | gp_coeff=10.0, 20 | generate_grid_interval=200, 21 | **kwargs): 22 | """ 23 | Initializes GANTrainer. 24 | 25 | Note: 26 | Optimizer's configurations/parameters must be passable to the 27 | optimizer (in torch.optim). It should also include a parameter 28 | `optim_choice` for the choice of the optimizer (e.g. "sgd" or "adam"). 29 | 30 | Args: 31 | model: The model. 32 | dataset: The dataset. 
33 | D_optim_config: Configurations for the discriminator's optimizer. 34 | G_optim_config: Configurations for the generator's optimizer. 35 | D_iters: Number of iterations to train discriminator every batch. 36 | clamp: Range on which the discriminator's weight will be clamped after each update. 37 | gp_coeff: A coefficient for the gradient penalty (gp) of the discriminator. 38 | generate_grid_interval: Check progress every `generate_grid_interval` batch. 39 | """ 40 | super().__init__(model, dataset, **kwargs) 41 | 42 | self.D_iters = D_iters 43 | self.clamp = clamp 44 | self.gp_coeff = gp_coeff 45 | self.generate_grid_interval = generate_grid_interval 46 | 47 | # Initialize optimizers for generator and discriminator 48 | self.D_optim = init_optim(self.model.D.parameters(), **D_optim_config) 49 | self.G_optim = init_optim(self.model.G.parameters(), **G_optim_config) 50 | 51 | # Initialize list of image grids generated from a fixed latent variable 52 | grid_size = 8 * 8 53 | self._fixed_latent = torch.randn([grid_size, self.model.num_latents], device=self.device) 54 | self._generated_grids = [] 55 | 56 | 57 | #################### Training Methods #################### 58 | 59 | def train_step(self): 60 | """ 61 | Makes one training step. 62 | Throughout this doc, we will denote a sample from the real data 63 | distribution, fake data distribution, and latent variables respectively 64 | as follows: 65 | x ~ real, x_g ~ fake, z ~ latent 66 | 67 | Now recall that in order to train a GAN, we try to find a solution to 68 | a min-max game of the form `min_G max_D V(G,D)`, where G is the generator, 69 | D is the discriminator, and V(G,D) is the score function. 70 | For a regular GAN, V(G,D) = log(D(x)) + log(1 - D(x_g)), 71 | which is the Jensen-Shannon (JS) divergence between the probability 72 | distributions P(x) and P(x_g), where P(x_g) is parameterized by G. 73 | 74 | When it comes to Wasserstein GAN (WGAN), the objective is to minimize 75 | the Wasserstein (or Earth-Mover) distance instead of the JS-divergence. 76 | See Theorem 3 and Algorithm 1 in the original paper for more details. 77 | We can achieve that (thanks to the Kantorovich-Rubinstein duality) 78 | by first maximizing `D(x) - D(x_g)` in the space of 1-Lipschitz 79 | discriminators D, where x ~ data and x_g ~ fake. 80 | Then, we have the gradient wrt G of the Wasserstein distance equal 81 | to the gradient of -D(G(z)). 82 | Since we assumed that D should be 1-Lipschitz, we can enforce 83 | k-Lipschitzness by clamping the weights of D to be in some fixed box, 84 | which would be approximate up to a scaling factor. 85 | 86 | Enforcing Lipschitzness is done more elegantly in WGAN-GP, 87 | which is just WGAN with gradient penalty (GP). The gradient penalty 88 | is used because of the statement that a differentiable function is 89 | 1-Lipschitz iff it has gradient norm equal to 1 almost everywhere 90 | under P(x) and P(x_g). Hence, the objective will be similar to WGAN, 91 | which is `min_G max_D of D(x) - D(x_g)`, but now we add the gradient 92 | penalty in the D_step such that it will be minimized. 
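
        Concretely, a sketch of the WGAN-GP discriminator objective from the
        paper cited below (the actual loss is computed in get_D_loss):
            L_D = D(x_g) - D(x) + gp_coeff * (||grad_{x_hat} D(x_hat)||_2 - 1)^2,
            x_hat = u * x + (1 - u) * x_g,  u ~ Uniform(0, 1).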
93 | 
94 |         Links to the papers:
95 |             GAN: https://arxiv.org/pdf/1406.2661.pdf
96 |             WGAN: https://arxiv.org/pdf/1701.07875.pdf
97 |             WGAN-GP: https://arxiv.org/pdf/1704.00028.pdf
98 |         """
99 | 
100 |         for _ in range(self.D_iters):
101 |             # Sample real data from the dataset
102 |             sample = self.sample_dataset()
103 |             real = sample["before"].to(self.device)
104 | 
105 |             # Sample latent and train discriminator
106 |             latent = self.sample_latent()
107 |             D_results = self.D_step(real, latent)
108 | 
109 |         # Sample latent and train generator (once per D_iters discriminator steps)
110 |         latent = self.sample_latent()
111 |         G_results = self.G_step(latent)
112 | 
113 |         # Record data
114 |         self.add_data(**D_results, **G_results)
115 |         results = {**D_results, **G_results}
116 |         losses = {k: v for k, v in results.items() if k.find("loss") != -1}
117 |         D_evals = {k: v for k, v in results.items() if k.find("D_on") != -1}
118 |         self.writer.add_scalars("losses", losses, self.iters)
119 |         self.writer.add_scalars("D_evals", D_evals, self.iters)
120 | 
121 | 
122 |     def D_step(self, real, latent):
123 |         """
124 |         Makes a training step for the discriminator of the model.
125 | 
126 |         Args:
127 |             real: Sample from the dataset.
128 |             latent: Sample from the latent space.
129 | 
130 |         Returns:
131 |             D loss and evaluation of D on real and on fake.
132 |         """
133 | 
134 |         D, G = self.model.D, self.model.G
135 | 
136 |         # Zero gradients
137 |         self.D_optim.zero_grad()
138 | 
139 |         # Sample fake data from a latent (ignore gradients)
140 |         with torch.no_grad():
141 |             fake = G(latent)
142 | 
143 |         # Classify real and fake data
144 |         D_on_real = D(real)
145 |         D_on_fake = D(fake)
146 | 
147 |         # Calculate loss and its gradients
148 |         D_loss = get_D_loss(D, real, fake, gan_type=self.model.gan_type, gp_coeff=self.gp_coeff)
149 |         D_loss.backward()
150 | 
151 |         # Minimize the loss with one optimizer step
152 |         self.D_optim.step()
153 | 
154 |         # If WGAN, clamp D's weights to ensure k-Lipschitzness
155 |         if self.model.gan_type == "wgan":
156 |             for p in D.parameters(): p.data.clamp_(*self.clamp)  # `self.clamp` is a (min, max) pair
157 | 
158 |         return {
159 |             "D_loss": D_loss.mean().item(),
160 |             "D_on_real": D_on_real.mean().item(),
161 |             "D_on_fake1": D_on_fake.mean().item()
162 |         }
163 | 
164 | 
165 |     def G_step(self, latent):
166 |         """
167 |         Makes a training step for the generator of the model.
168 | 
169 |         Args:
170 |             latent: Sample from the latent space.
171 | 
172 |         Returns:
173 |             G loss and evaluation of D on fake.
174 |         """
175 | 
176 |         D, G = self.model.D, self.model.G
177 | 
178 |         # Zero gradients
179 |         self.G_optim.zero_grad()
180 | 
181 |         # Sample fake data from latent
182 |         fake = G(latent)
183 | 
184 |         # Classify fake data
185 |         D_on_fake = D(fake)
186 | 
187 |         # Calculate loss and its gradients
188 |         G_loss = get_G_loss(D, fake, gan_type=self.model.gan_type)
189 |         G_loss.backward()
190 | 
191 |         # Optimize
192 |         self.G_optim.step()
193 | 
194 |         # Record results
195 |         return {
196 |             "G_loss": G_loss.mean().item(),
197 |             "D_on_fake2": D_on_fake.mean().item(),
198 |         }
199 | 
200 | 
201 |     def sample_latent(self):
202 |         """
203 |         Samples from the latent space (i.e. input space of the generator).
204 | 
205 |         Returns:
206 |             Sample from the latent space.
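            A tensor of shape [batch_size, num_latents] drawn from a standard
            normal distribution (e.g. torch.Size([4, 100]) for batch_size=4
            and num_latents=100).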
207 | """ 208 | 209 | # Calculate latent size and sample from normal distribution 210 | latent_size = [self.batch_size, self.model.num_latents] 211 | latent = torch.randn(latent_size, device=self.device) 212 | 213 | return latent 214 | 215 | 216 | #################### Reporting and Tracking Methods #################### 217 | 218 | 219 | def stop(self): 220 | """ 221 | Stops the trainer and report the result of the experiment. 222 | """ 223 | 224 | losses = self.get_data_containing("loss") 225 | evals = self.get_data_containing("D_on") 226 | 227 | if not self.save_results: 228 | plot_lines(losses, title="Losses") 229 | plot_lines(evals, title="Evals") 230 | return 231 | 232 | # Create experiment directory in the model's directory 233 | experiment_dir = os.path.join(self.results_dir, self.get_experiment_name()) 234 | 235 | # Save model 236 | model_path = os.path.join(experiment_dir, "model.pt") 237 | self.save_model(model_path) 238 | 239 | # Plot losses of D and G 240 | losses_file = os.path.join(experiment_dir, "losses.png") 241 | plot_lines(losses, filename=losses_file, title="Losses of D and G") 242 | 243 | # Plot evals of D on real and fake data 244 | evals_file = os.path.join(experiment_dir, "evals.png") 245 | plot_lines(evals, filename=evals_file, title="Evaluations of D on real and fake data") 246 | 247 | # Create an animation of the generator's progress 248 | animation_file = os.path.join(experiment_dir, "progress.mp4") 249 | create_progress_animation(self._generated_grids, animation_file) 250 | 251 | # Write details of experiment 252 | details_txt = os.path.join(experiment_dir, "repr.txt") 253 | with open(details_txt, "w") as f: 254 | f.write(self.__repr__()) 255 | 256 | 257 | def post_train_step(self): 258 | """ 259 | The post-training step. 260 | """ 261 | super().post_train_step() 262 | 263 | # Check generator's progress by recording its output on a fixed input 264 | if should_generate_grid: 265 | grid = generate_grid(self.model.G, self._fixed_latent) 266 | self._generated_grids.append(grid) 267 | self.writer.add_image("grid", grid, self.iters) 268 | 269 | 270 | def report_stats(self, precision=3): 271 | """ 272 | Reports/prints the training stats to the console. 273 | 274 | Args: 275 | precision: Precision of the float numbers reported. 
276 | """ 277 | 278 | report = \ 279 | "[{epoch}/{num_epochs}][{batch}/{num_batches}]\t" \ 280 | "Loss of D = {D_loss:.{p}f}\t" \ 281 | "Loss of G = {G_loss:.{p}f}\t" \ 282 | "D(x) = {D_on_real:.{p}f}\t" \ 283 | "D(G(z)) = {D_on_fake1:.{p}f} / {D_on_fake2:.{p}f}" 284 | 285 | stats = { 286 | "epoch": self.epoch, 287 | "num_epochs": self.num_epochs, 288 | "batch": self.batch, 289 | "num_batches": self.num_batches, 290 | "D_loss": self.get_current_value("D_loss"), 291 | "G_loss": self.get_current_value("G_loss"), 292 | "D_on_real": self.get_current_value("D_on_real"), 293 | "D_on_fake1": self.get_current_value("D_on_fake1"), 294 | "D_on_fake2": self.get_current_value("D_on_fake2"), 295 | "p": precision, 296 | } 297 | 298 | print(report.format(**stats)) 299 | 300 | -------------------------------------------------------------------------------- /src/train.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import yaml 4 | import argparse 5 | import random 6 | import numpy as np 7 | import torch 8 | import torchvision.transforms as transforms 9 | 10 | from dataset.dataset import MakeupDataset, MakeupDataset2 11 | from dataset.transforms import MakeupSampleTransform 12 | 13 | from models.cyclegan import MaskCycleGAN 14 | from models.pairedcyclegan import PairedCycleGAN 15 | 16 | from trainers.cyclegan_trainer import CycleGANTrainer 17 | from trainers.pairedcyclegan_trainer import PairedCycleGANTrainer 18 | from trainers.utils.init_utils import create_weights_init 19 | 20 | 21 | FILE_DIR = os.path.dirname(os.path.realpath(__file__)) 22 | DATASET_DIR = os.path.join(FILE_DIR, "dataset", "data", "processing", "faces") 23 | 24 | 25 | def parse_args(): 26 | """ 27 | Parse training args. 28 | """ 29 | 30 | # Adding positive and non-negative types for arguments type check 31 | def positive(type): 32 | def positive_number(value): 33 | typed_value = type(value) 34 | if not (typed_value > 0): 35 | raise argparse.ArgumentTypeError(f"{value} is not a positive {type.__name__}.") 36 | return typed_value 37 | return positive_number 38 | 39 | def nonnegative(type): 40 | def nonnegative_number(value): 41 | typed_value = type(value) 42 | if not (typed_value >= 0): 43 | raise argparse.ArgumentTypeError(f"{value} is not a non-negative {type.__name__}.") 44 | return typed_value 45 | return nonnegative_number 46 | 47 | 48 | # Initialize parser and add arguments 49 | parser = argparse.ArgumentParser(description="Train a model on a dataset using a trainer.") 50 | 51 | parser.add_argument("-c", "--config", type=str, 52 | help="The key of the configurations of dataset, model, and trainer as defined in 'config.yaml'. 
" 53 | "This will override all given args for dataset, model, and trainer.") 54 | 55 | parser.add_argument("-r", "--random-seed", type=int, default=123, 56 | help="random seed.") 57 | 58 | ### Dataset Args ### 59 | parser.add_argument("--dataset-dir", type=str, default=DATASET_DIR, 60 | help="directory of the makeup dataset.") 61 | 62 | ### Model Args ### 63 | parser.add_argument("--num-latents", type=positive(int), default=128, 64 | help="number of latent factors from which an image will be generated.") 65 | parser.add_argument("--num-features", type=positive(int), default=64, 66 | help="number of features on the layers of the discriminator (and the generator as well).") 67 | parser.add_argument("--image-channels", type=positive(int), default=3, 68 | help="number of image channels in the dataset.") 69 | parser.add_argument("--image-size", type=positive(int), default=64, 70 | help="resize images to be of dimensions (image_size x image_size).") 71 | parser.add_argument("--gan-type", type=str.lower, default="gan", 72 | choices=("gan", "wgan", "wgan-gp"), 73 | help="type of gan among GAN (default), WGAN (Wasserstein GAN), and WGAN-GP (WGAN with gradient penalty).") 74 | 75 | ### Trainer Args ### 76 | parser.add_argument("--results-dir", type=str, default="results/", 77 | help="directory where the results for each run will be saved.") 78 | parser.add_argument("--pretrained-model-path", type=str, 79 | help="the path of the pretrained part (e.g. makeup remover) of the model.") 80 | parser.add_argument("--model-path", type=str, 81 | help="the path of the model to be loaded (e.g. MakeupGAN).") 82 | 83 | parser.add_argument("--num-gpu", type=nonnegative(int), default=0, 84 | help="number of GPUs to use, if any.") 85 | parser.add_argument("--num-workers", type=nonnegative(int), default=0, 86 | help="number of workers that will be loading the dataset.") 87 | parser.add_argument("--batch-size", type=positive(int), default=4, 88 | help="size of the batch sample from the dataset.") 89 | 90 | parser.add_argument("--D-optimizer", type=str.lower, default="sgd", 91 | help="the name of the optimizer used for training (SGD, Adam, RMSProp)", 92 | choices=("sgd", "adam", "rmsprop"),) 93 | parser.add_argument("--D-lr", type=float, default=1.0e-4, 94 | help="the learning rate, which controls the size of the optimization update.") 95 | parser.add_argument("--D-momentum", type=positive(float), default=0.0, 96 | help="used in SGD and RMSProp optimizers.") 97 | parser.add_argument("--D-betas", type=float, nargs=2, default=(0.9, 0.999), 98 | help="used in Adam optimizer (see torch.optim.Adam for details).") 99 | 100 | parser.add_argument("--G-optimizer", type=str.lower, default="sgd", 101 | help="the name of the optimizer used for training (SGD, Adam, RMSProp)", 102 | choices=("sgd", "adam", "rmsprop"),) 103 | parser.add_argument("--G-lr", type=float, default=1.0e-4, 104 | help="the learning rate, which controls the size of the optimization update.") 105 | parser.add_argument("--G-momentum", type=positive(float), default=0.0, 106 | help="used in SGD and RMSProp optimizers.") 107 | parser.add_argument("--G-betas", type=float, nargs=2, default=(0.9, 0.999), 108 | help="used in Adam optimizer (see torch.optim.Adam for details).") 109 | 110 | parser.add_argument("--D-iters", type=positive(int), default=5, 111 | help="number of iterations to train discriminator every batch.") 112 | parser.add_argument("--clamp", type=float, nargs=2, default=(-0.01, 0.01), 113 | help="used in WGAN for clamping the weights of the 
discriminator.") 114 | parser.add_argument("--gp-coeff", type=float, default=10.0, 115 | help="a coefficient to multiply with the gradient penalty in the loss of WGAN-GP.") 116 | 117 | parser.add_argument("--report-interval", type=positive(int), default=50, 118 | help="the interval in which a report of the training stats will be shown to the console.") 119 | parser.add_argument("--save-interval", type=positive(int), default=10000, 120 | help="the interval in which the model will be saved.") 121 | parser.add_argument("--generate-grid-interval", type=positive(int), default=200, 122 | help="the interval in which the progress of the generator will be checked and recorded.") 123 | 124 | ### Trainer.run() ### 125 | parser.add_argument("-p", "--pretrain-epochs", type=nonnegative(int), default=0, 126 | help="number of training epochs (i.e. full runs on the dataset).") 127 | parser.add_argument("-n", "--num-epochs", type=nonnegative(int), default=5, 128 | help="number of training epochs (i.e. full runs on the dataset).") 129 | parser.add_argument("-s", "--save-results", action="store_true", 130 | help="save the results of the experiment.") 131 | 132 | # Parse arguments 133 | args = parser.parse_args() 134 | 135 | return args 136 | 137 | 138 | def load_config(config_key, config_file="config.yaml"): 139 | """ 140 | Load a configuration given by config_key from config_file. 141 | 142 | Args: 143 | config_key: Name/label/key of the configuration. 144 | config_file: Name of the config (yaml) file. Should be in current dir. 145 | 146 | Returns: 147 | The configurations as a dict. 148 | 149 | Throws: 150 | KeyError: if config_key is not found in the root level of the config. 151 | """ 152 | with open(config_file) as f: 153 | all_configs = yaml.full_load(f) 154 | try: 155 | return all_configs[config_key] 156 | except KeyError: 157 | exit(f"Config key '{config_key}' is not defined in config file '{config_file}'.") 158 | 159 | 160 | def get_dataset_args(args): 161 | """ 162 | Construct dataset's parameters from args. 163 | 164 | Args: 165 | args: Parsed arguments from command line. 166 | """ 167 | dataset_args = { 168 | "dataset_dir": args.dataset_dir, 169 | } 170 | 171 | return dataset_args 172 | 173 | 174 | def get_model_args(args): 175 | """ 176 | Construct model's parameters from args. 177 | 178 | Args: 179 | args: Parsed arguments from command line. 180 | """ 181 | model_args = { 182 | "num_latents": args.num_latents, 183 | "num_features": args.num_features, 184 | "image_channels": args.image_channels, 185 | "image_size": args.image_size, 186 | "gan_type": args.gan_type, 187 | } 188 | 189 | return model_args 190 | 191 | 192 | def get_trainer_args(args): 193 | """ 194 | Construct trainer's parameters from args. 195 | 196 | Args: 197 | args: Parsed arguments from command line. 
198 | """ 199 | trainer_args = { 200 | "results_dir": args.results_dir, 201 | "num_gpu": args.num_gpu, 202 | "num_workers": args.num_workers, 203 | "batch_size": args.batch_size, 204 | 205 | "D_optim_config": { 206 | "optim_choice": args.D_optimizer, 207 | "lr": args.D_lr, 208 | "momentum": args.D_momentum, 209 | "betas": args.D_betas, 210 | }, 211 | "G_optim_config": { 212 | "optim_choice": args.G_optimizer, 213 | "lr": args.G_lr, 214 | "momentum": args.G_momentum, 215 | "betas": args.G_betas, 216 | }, 217 | 218 | "D_iters": args.D_iters, 219 | "clamp": args.clamp, 220 | "gp_coeff": args.gp_coeff, 221 | "report_interval": args.report_interval, 222 | "save_interval": args.save_interval, 223 | "generate_grid_interval": args.generate_grid_interval, 224 | } 225 | 226 | return trainer_args 227 | 228 | 229 | def set_random_seed(seed): 230 | """ 231 | Sets all random seeds to `seed`. 232 | 233 | Args: 234 | seed: Initial random seed. 235 | """ 236 | random.seed(seed) 237 | np.random.seed(seed) 238 | torch.manual_seed(seed) 239 | torch.cuda.manual_seed(seed) 240 | torch.cuda.manual_seed_all(seed) 241 | torch.backends.cudnn.benchmark = False 242 | torch.backends.cudnn.deterministic = True 243 | 244 | 245 | def get_training_args(args): 246 | """ 247 | Return structured args for dataset, model, and trainer from parsed args. 248 | """ 249 | 250 | if args.config is not None: 251 | config = load_config(args.config) 252 | dataset_args = config["dataset"] 253 | model_args = config["model"] 254 | trainer_args = config["trainer"] 255 | else: 256 | dataset_args = get_dataset_args(args) 257 | model_args = get_model_args(args) 258 | trainer_args = get_trainer_args(args) 259 | 260 | return dataset_args, model_args, trainer_args 261 | 262 | 263 | def make_transform(image_size): 264 | """ 265 | Make data transform and return it. 266 | """ 267 | transform_sequence = [ 268 | transforms.Resize((image_size, image_size)), 269 | transforms.RandomAffine(degrees=(-3, 3)), 270 | transforms.RandomHorizontalFlip(), 271 | transforms.ToTensor(), 272 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 273 | ] 274 | transform_sequence = list(map(MakeupSampleTransform, transform_sequence)) 275 | transform = transforms.Compose(transform_sequence) 276 | 277 | return transform 278 | 279 | 280 | def main(args): 281 | """ 282 | Trains the MakeupNet on MakeupDataset using MakeupNetTrainer. 283 | 284 | Args: 285 | args: The arguments passed from the command prompt (see below for more info). 
286 | """ 287 | 288 | set_random_seed(args.random_seed) 289 | 290 | # Initialize args for dataset, model, and trainer 291 | dataset_args, model_args, trainer_args = get_training_args(args) 292 | 293 | # Define data transformation and weights initializer 294 | transform = make_transform(model_args["image_size"]) 295 | weights_init = create_weights_init() 296 | 297 | # Train makeup remover using CycleGAN 298 | makeup_gan_dataset = MakeupDataset2(**dataset_args, transform=transform) 299 | makeup_gan = MaskCycleGAN(**model_args) 300 | subtrainer = CycleGANTrainer(makeup_gan, makeup_gan_dataset, 301 | load_model_path=args.pretrained_model_path, 302 | name="makeup_gan", **trainer_args) 303 | subtrainer.run(num_epochs=args.pretrain_epochs, save_results=args.save_results) 304 | 305 | # Train PairedCycleGAN, and assign to it the pre-trained makeup remover 306 | makeup_pcgan_dataset = MakeupDataset2(**dataset_args, transform=transform, with_landmarks=True) 307 | makeup_pcgan = PairedCycleGAN(**model_args, custom_remover=makeup_gan.remover) 308 | trainer = PairedCycleGANTrainer(makeup_pcgan, makeup_pcgan_dataset, 309 | load_model_path=args.model_path, 310 | name="makeup_pcgan", **trainer_args) 311 | trainer.run(num_epochs=args.num_epochs, save_results=args.save_results) 312 | 313 | 314 | if __name__ == "__main__": 315 | args = parse_args() 316 | main(args) 317 | 318 | -------------------------------------------------------------------------------- /src/dataset/search/searcher.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import sys 4 | import time 5 | import argparse 6 | import json 7 | import requests 8 | 9 | # The search engines we are using 10 | SEARCH_ENGINES = ("bing", "google") 11 | 12 | # The endpoints for the API of the search engines 13 | BING_API_ENDPOINT = "https://api.cognitive.microsoft.com/bing/v7.0/images/search" 14 | GOOGLE_API_ENDPOINT = "https://www.googleapis.com/customsearch/v1" 15 | 16 | # The queries of the image search 17 | QUERIES = [ 18 | "makeup before after", 19 | "makeup before after instagram", 20 | "before and after makeup faces", 21 | "makeup transformation faces", 22 | ] 23 | 24 | # Search results limit (this limit is soft/approximate) 25 | MAX_RESULTS = 1e6 26 | 27 | # Get absolute path of this file and force relative-to-file paths 28 | FILE_DIR = os.path.dirname(os.path.realpath(__file__)) 29 | # The checkpoint file recording the search progress 30 | CHECKPOINT = os.path.join(FILE_DIR, "searcher.json") 31 | # The file where image_urls will be exported to 32 | IMAGE_URLS = os.path.join(FILE_DIR, "image_urls.csv") 33 | 34 | 35 | def init_api(search_engines): 36 | """ 37 | Initializes the necessary environment variables for the API requests. 38 | """ 39 | if "bing" in search_engines and "BING_API_KEY" not in os.environ: 40 | os.environ["BING_API_KEY"] = input("Please enter your Bing API Key: ") 41 | if "google" in search_engines and "GOOGLE_API_KEY" not in os.environ: 42 | os.environ["GOOGLE_API_KEY"] = input("Please enter your Google API Key: ") 43 | if "google" in search_engines and "GOOGLE_CX" not in os.environ: 44 | os.environ["GOOGLE_CX"] = input("Please enter your Google Custom Search CX: ") 45 | 46 | 47 | def api_search(endpoint, headers, params): 48 | """ 49 | Performs an API request to a search API (either Bing or Google in our case). 50 | 51 | Args: 52 | search_engine: The name of the search engine to use. 53 | headers: The headers of the API request. 54 | params: The parameters of the API request. 
55 | (Note: requests are throttled to at most two per second.)
56 |
57 | Returns:
58 | A tuple containing the response, as a dictionary, and its status code.
59 | """
60 |
61 | time.sleep(0.5) # this ensures at most 2 requests per second
62 | result = None
63 | status_code = -1
64 |
65 | try:
66 | response = requests.get(endpoint, headers=headers, params=params)
67 | status_code = response.status_code
68 | response.raise_for_status()
69 | result = response.json()
70 | except requests.exceptions.HTTPError as e:
71 | print("Bad request! ({})".format(status_code))
72 | print(e)
73 |
74 | return result, status_code
75 |
76 |
77 | ###########################
78 | class DataSearcher:
79 | def __init__(self, queries=[], checkpoint="checkpoint", load_from_checkpoint=True):
80 |
81 | # Sanity checks
82 | for query in queries: assert isinstance(query, str)
83 | assert isinstance(checkpoint, str) and checkpoint != ""
84 |
85 | self.checkpoint = checkpoint
86 | self.queries = queries # the search queries for building the dataset
87 | self.query_index = 0 # the index of the current search query
88 | self.image_urls = [] # the urls of the contents (images)
89 | self.reset_search_indices() # reset api-specific search index values
90 |
91 | # Load from checkpoint, if any
92 | if load_from_checkpoint:
93 | self.load()
94 |
95 | def reset_search_indices(self):
96 | """
97 | Reset the indices that describe the search progress of the current query.
98 | """
99 | self.bing_offset = 0 # offset value of the image search (for Bing)
100 | self.google_start = 1 # offset value of the image search (for Google)
101 |
102 |
103 | ###########################
104 | def search(self, search_engines=["bing"]):
105 | """
106 | Search for images from all the queries using Bing's and Google's API.
107 | """
108 |
109 | # Check that the given search engines are strings and make them lowercase
110 | for s in search_engines: assert isinstance(s, str)
111 | search_engines = [s.lower() for s in search_engines]
112 |
113 | # Initialize API if not done yet
114 | init_api(search_engines)
115 |
116 | # Try to search for images using the given search_engines
117 | try:
118 | start_time = time.time() # track time
119 | while self.query_index < len(self.queries):
120 | # Get current query
121 | query = self.queries[self.query_index]
122 |
123 | # Search for images
124 | if "bing" in search_engines: self.search_bing(query)
125 | if "google" in search_engines: self.search_google(query)
126 |
127 | # Finished search for this query
128 | self.query_index += 1
129 | self.reset_search_indices()
130 |
131 | print()
132 | print("Total image urls found = {}.".format(len(self.image_urls)))
133 | print("Time elapsed = {:.3f} seconds.".format(time.time() - start_time))
134 |
135 | # Save final results (image urls are exported separately)
136 | self.save()
137 |
138 | except (KeyboardInterrupt, SystemExit):
139 | print("Interrupted.")
140 | self.save()
141 |
142 | except Exception as e:
143 | # Intercept all other exceptions to save progress before re-raising
144 | print("Error!")
145 | self.save()
146 | print("\nRaising error:")
147 | raise e
148 |
149 |
150 | def search_bing(self, query):
151 | """
152 | Searches for `query` using Bing image search API.
153 |
154 | Args:
155 | query: The query of the search.
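
Pagination note (behavior as implemented below): the loop bounds the
search by the response's `totalEstimatedMatches` (or MAX_RESULTS) and
advances the offset via the response's `nextOffset` when present,
otherwise by the number of returned results.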
156 | """ 157 | 158 | old_image_urls = set(self.image_urls) # to avoid duplicates 159 | totalEstimatedMatches = 1e6 # to ensure that offset is smaller first 160 | 161 | # Define headers and default params of bing image search api 162 | headers = {"Ocp-Apim-Subscription-Key": os.environ["BING_API_KEY"]} 163 | params = { 164 | "q": query, 165 | "offset": self.bing_offset, 166 | "imageType": "photo", 167 | } # "size": "Medium", "imageContent": "Face" or "Portrait", 168 | 169 | # Continue the search until all results are exhausted 170 | print("\nStarting Bing image search for query '%s'." % query) 171 | while self.bing_offset < min(totalEstimatedMatches, MAX_RESULTS): 172 | 173 | # Search for images starting from the specified offset 174 | print("Searching from offset %d ... " % self.bing_offset, end="") 175 | params["offset"] = self.bing_offset 176 | result, status_code = api_search(BING_API_ENDPOINT, headers, params) 177 | 178 | # Checking result of api search 179 | if result is None or "value" not in result: 180 | print("Trying again...") 181 | continue 182 | print("Done.") 183 | 184 | # Search for image urls and filter out the already saved urls 185 | new_image_urls = [image["contentUrl"] for image in result["value"] 186 | if image["contentUrl"] not in old_image_urls] 187 | self.image_urls += new_image_urls 188 | print(" Retrieved {} new image urls.".format(len(new_image_urls))) 189 | 190 | # Update offset and estimated matches 191 | if "totalEstimatedMatches" in result: 192 | totalEstimatedMatches = result["totalEstimatedMatches"] 193 | if "nextOffset" in result: 194 | self.bing_offset = result["nextOffset"] 195 | else: 196 | self.bing_offset += len(result["value"]) 197 | 198 | print("Bing image search for query '{}' done.".format(query)) 199 | print("Retrieved {} new image urls in total.".format( 200 | len(self.image_urls) - len(old_image_urls))) 201 | 202 | 203 | def search_google(self, query): 204 | """ 205 | Searches for `query` using Google custom search API. 206 | 207 | Args: 208 | query: The query of the search. 209 | """ 210 | 211 | old_image_urls = set(self.image_urls) # to avoid duplicates 212 | 213 | # Define headers and default params of google custom search api 214 | params = { 215 | "key": os.environ["GOOGLE_API_KEY"], 216 | "q": query, 217 | "cx": os.environ["GOOGLE_CX"], 218 | "searchType": "image", 219 | "start": 1, 220 | "num": 10, 221 | } 222 | 223 | # Continue the search until all results are exhausted 224 | print("\nStarting Google image search for query: '%s'." % query) 225 | while self.google_start < min(100, MAX_RESULTS): 226 | 227 | # Search for images starting from start index 228 | print("Searching from start index %d ... " % self.google_start, end="") 229 | params["start"] = self.google_start 230 | result, status_code = api_search("google", {}, params) 231 | 232 | # Check results of api search 233 | if result is None: 234 | print("Trying again...") 235 | continue 236 | print("Done") 237 | 238 | # Search for image urls and filter out the already saved urls 239 | new_image_urls = [image["link"] for image in result["items"] 240 | if image["link"] not in old_image_urls] 241 | self.image_urls += new_image_urls 242 | print(" Retrieved {} new image urls.".format(len(new_image_urls))) 243 | 244 | # Update start index 245 | self.google_start += params["num"] 246 | 247 | print("Google image search done. 
") 248 | print("Retrieved {} new image urls in total.".format( 249 | len(self.image_urls) - len(old_image_urls))) 250 | 251 | 252 | ########################### 253 | def load(self, checkpoint=None): 254 | """ 255 | Loads the searcher from a json checkpoint file. 256 | 257 | Args: 258 | checkpoint: The name of the checkpoint file. 259 | """ 260 | 261 | if checkpoint is None: checkpoint = self.checkpoint 262 | 263 | print("[*] Loading search progress from '{}'... ".format(checkpoint), end="") 264 | if os.path.isfile(checkpoint): 265 | with open(checkpoint, "r") as f: 266 | dataset_metadata = json.load(f) 267 | print("Loaded.") 268 | self.from_json(dataset_metadata) 269 | else: 270 | print("Couldn't find file.") 271 | if "n" == input("Type anything to start a new search or 'n' to exit: "): 272 | sys.exit() 273 | 274 | 275 | def save(self, checkpoint=None): 276 | """ 277 | Saves the searcher to a json checkpoint file. 278 | 279 | Args: 280 | checkpoint: The name of the checkpoint file. 281 | """ 282 | 283 | if checkpoint is None: checkpoint = self.checkpoint 284 | 285 | print("[*] Saving search progress to '{}'... ".format(checkpoint), end="") 286 | with open(checkpoint, "w") as f: 287 | search_json = self.to_json() 288 | json.dump(search_json, f) 289 | print("Saved.") 290 | 291 | 292 | def from_json(self, search_json): 293 | """ 294 | Copy the data from `search_json` to `self`. 295 | 296 | Args: 297 | search_json: A dict holding the progress data of the given searcher. 298 | """ 299 | 300 | self.query_index = search_json["query_index"] 301 | self.queries = search_json["queries"] 302 | self.bing_offset = search_json["bing_offset"] 303 | self.google_start = search_json["google_start"] 304 | self.image_urls = search_json["image_urls"] 305 | 306 | 307 | def to_json(self): 308 | """ 309 | Copy the data from `self` to `search_json`. 310 | 311 | Returns: 312 | A dict holding the progress data of `self`. 313 | """ 314 | 315 | search_json = {} 316 | search_json["query_index"] = self.query_index 317 | search_json["queries"] = self.queries 318 | search_json["bing_offset"] = self.bing_offset 319 | search_json["google_start"] = self.google_start 320 | search_json["image_urls"] = self.image_urls 321 | 322 | return search_json 323 | 324 | 325 | def export_image_urls(self, fname=IMAGE_URLS): 326 | """ 327 | Creates a simple file of image urls, one url per line 328 | 329 | Args: 330 | fname: The name of the file where the urls will be written. 
331 | """ 332 | 333 | with open(fname, "w") as f: 334 | f.writelines(image_url + "\n" for image_url in self.image_urls) 335 | 336 | 337 | ########################### 338 | def main(args): 339 | 340 | searcher_params = { 341 | "queries": args.queries, 342 | "checkpoint": args.checkpoint, 343 | } 344 | 345 | searcher = DataSearcher(**searcher_params) 346 | searcher.search(args.search_engines) 347 | searcher.export_image_urls(args.out) 348 | 349 | 350 | if __name__ == '__main__': 351 | 352 | parser = argparse.ArgumentParser(description="Search images using Bing and Google.") 353 | 354 | parser.add_argument("--search_engines", nargs="+", type=str, default=SEARCH_ENGINES, 355 | help="the search engines to be used.", 356 | choices=SEARCH_ENGINES) 357 | parser.add_argument("-q", "--queries", nargs="+", type=str, default=QUERIES, 358 | help="list of queries to be searched.") 359 | parser.add_argument("--checkpoint", type=str, default=CHECKPOINT, 360 | help="name of checkpoint file.") 361 | parser.add_argument("-o", "--out", type=str, default=IMAGE_URLS, 362 | help="the output file where the urls of the images will be saved.") 363 | 364 | args = parser.parse_args() 365 | 366 | main(args) 367 | 368 | -------------------------------------------------------------------------------- /src/trainers/cyclegan_trainer.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import random 4 | import torch 5 | import torch.nn.functional as F 6 | 7 | from .base_trainer import BaseTrainer 8 | from .utils.init_utils import init_optim 9 | from .utils.gan_utils import * 10 | from .utils.report_utils import * 11 | 12 | 13 | class CycleGANTrainer(BaseTrainer): 14 | """The trainer for CycleGAN.""" 15 | 16 | def __init__(self, model, dataset, 17 | D_optim_config={}, 18 | G_optim_config={}, 19 | D_iters=5, 20 | clamp=(-0.01, 0.01), 21 | before_noise_std=0.01, 22 | after_noise_std=0.01, 23 | generate_grid_interval=200, 24 | constants={}, 25 | **kwargs): 26 | """ 27 | Constructor. 28 | 29 | Args: 30 | model: The model. 31 | dataset: The dataset. 
32 | """ 33 | super().__init__(model, dataset, **kwargs) 34 | 35 | self.D_iters = D_iters 36 | self.clamp = clamp 37 | self.before_noise_std = before_noise_std 38 | self.after_noise_std = after_noise_std 39 | self.generate_grid_interval = generate_grid_interval 40 | 41 | # Initialize optimizers for generator and discriminator 42 | self.optims = { 43 | "applier": { 44 | "D": init_optim(self.model.applier.D.parameters(), **D_optim_config), 45 | "G": init_optim(self.model.applier.G.parameters(), **G_optim_config), 46 | }, 47 | "remover": { 48 | "D": init_optim(self.model.remover.D.parameters(), **D_optim_config), 49 | "G": init_optim(self.model.remover.G.parameters(), **G_optim_config), 50 | }, 51 | } 52 | 53 | # TODO: Specify loss type instead (minimax or wasserstein) 54 | self.D_loss_fn = get_D_loss(self.model.gan_type) 55 | self.G_loss_fn = get_G_loss(self.model.gan_type) 56 | 57 | # Initialize all constants required for training 58 | self.constants = self._get_constants(**constants) 59 | 60 | # Generate makeup for a sample no-makeup faces and reference makeup faces 61 | num_test = 20 62 | self._applier_generated_grids = [] 63 | random_indices = random.sample(range(len(self.dataset)), num_test) 64 | self._fixed_before = torch.stack( 65 | [self.dataset[i]["before"] for i in random_indices], dim=0).to(self.device) 66 | 67 | self._remover_generated_grids = [] 68 | random_indices = random.sample(range(len(self.dataset)), num_test) 69 | self._fixed_after = torch.stack( 70 | [self.dataset[i]["after"] for i in random_indices], dim=0).to(self.device) 71 | 72 | 73 | def _get_constants(self, 74 | applier_adversarial=0.1, 75 | remover_adversarial=0.1, 76 | applier_D_grad_penalty=0., 77 | remover_D_grad_penalty=0., 78 | after_identity_robustness=0.0, 79 | before_identity_robustness=0.1, 80 | applier_mask_sparsity=0.1, 81 | remover_mask_sparsity=0.0, 82 | **kwargs): 83 | return { 84 | "applier_adversarial": applier_adversarial, 85 | "remover_adversarial": remover_adversarial, 86 | "applier_D_grad_penalty": applier_D_grad_penalty, 87 | "remover_D_grad_penalty": remover_D_grad_penalty, 88 | "after_identity_robustness": after_identity_robustness, 89 | "before_identity_robustness": before_identity_robustness, 90 | "applier_mask_sparsity": applier_mask_sparsity, 91 | "remover_mask_sparsity": remover_mask_sparsity, 92 | } 93 | 94 | def optims_zero_grad(self, D_or_G): 95 | """ 96 | Zero gradients in all D optimizers or G optimizers. 97 | 98 | Args: 99 | D_or_G: Indicates whether the operation is for D optims or G optims. 100 | Should be either "D" or "G". 101 | """ 102 | [optim[D_or_G].zero_grad() for optim in self.optims.values() if D_or_G in optim] 103 | 104 | 105 | def optims_step(self, D_or_G): 106 | """ 107 | Make an optimization step in all D optimizers or G optimizers. 108 | 109 | Args: 110 | D_or_G: Indicates whether the operation is for D optims or G optims. 111 | Should be either "D" or "G". 112 | """ 113 | [optim[D_or_G].step() for optim in self.optims.values() if D_or_G in optim] 114 | 115 | 116 | def train_step(self): 117 | """ 118 | Makes ones training step. 
119 | """ 120 | 121 | ### Train D ### 122 | for _ in range(self.D_iters): 123 | # Sample from dataset 124 | sample = self.sample_dataset() 125 | # Unpack 126 | real_after = sample["after"].to(self.device) 127 | real_before = sample["before"].to(self.device) 128 | # Train 129 | D_results = self.D_step(real_after, real_before) 130 | 131 | ### Train G ### 132 | # Sample from dataset 133 | sample = self.sample_dataset() 134 | # Unpack 135 | real_after = sample["after"].to(self.device) 136 | real_before = sample["before"].to(self.device) 137 | # Train 138 | G_results = self.G_step(real_after, real_before) 139 | 140 | # Record data 141 | self.add_data(**D_results, **G_results) 142 | losses = {"D_loss": D_results["D_loss"], "G_loss": G_results["G_loss"]} 143 | self.writer.add_scalars("Loss", losses, self.iters) 144 | 145 | 146 | def D_step(self, real_after, real_before): 147 | 148 | # Zero gradients and loss 149 | self.optims_zero_grad("D") 150 | 151 | # Sample noise 152 | noise_after = torch.randn_like(real_after) * self.after_noise_std 153 | noise_before = torch.randn_like(real_before) * self.before_noise_std 154 | 155 | # Add noise to real 156 | real_after += noise_after 157 | real_before += noise_before 158 | 159 | # Sample from generators 160 | with torch.no_grad(): 161 | fake_after = self.model.applier.G(real_before) 162 | fake_before = self.model.remover.G(real_after) 163 | 164 | # Add noise to fake 165 | fake_after += noise_after 166 | fake_before += noise_before 167 | 168 | # Classify real and fake images 169 | remover_D_on_real = self.model.remover.D(real_before) 170 | remover_D_on_fake = self.model.remover.D(fake_before) 171 | applier_D_on_real = self.model.applier.D(real_after) 172 | applier_D_on_fake = self.model.applier.D(fake_after) 173 | 174 | # Adversarial losses for after domain, before domain 175 | remover_adv_loss = self.D_loss_fn(remover_D_on_real, remover_D_on_fake) 176 | applier_adv_loss = self.D_loss_fn(applier_D_on_real, applier_D_on_fake) 177 | 178 | # Gradient penalty XXX ? 
179 | applier_D_grad_penalty = torch.tensor(0.0) 180 | if self.constants["applier_D_grad_penalty"] > 0: 181 | interpolated_after = random_interpolate(real_after, fake_after) 182 | applier_D_grad_penalty = simple_gradient_penalty(self.model.applier.D, interpolated_after, center=1.0) 183 | 184 | remover_D_grad_penalty = torch.tensor(0.0) 185 | if self.constants["remover_D_grad_penalty"] > 0: 186 | interpolated_before = random_interpolate(real_before, fake_before) 187 | remover_D_grad_penalty = simple_gradient_penalty(self.model.remover.D, interpolated_before, center=1.0) 188 | 189 | # Calculate gradients and minimize loss 190 | D_loss = self.constants["applier_adversarial"] * applier_adv_loss \ 191 | + self.constants["remover_adversarial"] * remover_adv_loss \ 192 | + self.constants["applier_D_grad_penalty"] * applier_D_grad_penalty \ 193 | + self.constants["remover_D_grad_penalty"] * remover_D_grad_penalty 194 | D_loss.backward() 195 | self.optims_step("D") 196 | 197 | return { 198 | "applier_D_on_real": applier_D_on_real.mean().item(), 199 | "remover_D_on_real": remover_D_on_real.mean().item(), 200 | "applier_D_on_fake": applier_D_on_fake.mean().item(), 201 | "remover_D_on_fake": remover_D_on_fake.mean().item(), 202 | "applier_D_grad_penalty": applier_D_grad_penalty.item(), 203 | "remover_D_grad_penalty": remover_D_grad_penalty.item(), 204 | "D_loss": D_loss.item(), 205 | } 206 | 207 | 208 | def G_step(self, real_after, real_before): 209 | 210 | # Zero gradients 211 | self.optims_zero_grad("G") 212 | 213 | # Sample noise 214 | noise_after = torch.randn_like(real_after) * self.after_noise_std 215 | noise_before = torch.randn_like(real_before) * self.before_noise_std 216 | 217 | # Add noise to real 218 | real_after += noise_after 219 | real_before += noise_before 220 | 221 | # Sample from generators 222 | fake_after = self.model.applier.G(real_before) 223 | fake_before = self.model.remover.G(real_after) 224 | 225 | # Add noise to fake 226 | fake_after += noise_after 227 | fake_before += noise_before 228 | 229 | # Classify fake images 230 | applier_D_on_fake = self.model.applier.D(fake_after) 231 | remover_D_on_fake = self.model.remover.D(fake_before) 232 | 233 | # Adversarial loss for after domain, before domain 234 | remover_adv_loss = self.G_loss_fn(remover_D_on_fake) 235 | applier_adv_loss = self.G_loss_fn(applier_D_on_fake) 236 | 237 | # Identity loss for applier.D's domain (after) 238 | after_identity_loss = torch.tensor(0.0) 239 | if self.constants["after_identity_robustness"] > 0: 240 | after_identity_loss = F.l1_loss(real_after, self.model.applier.G(fake_before)) 241 | # Identity loss for remover.D's domain (before) 242 | before_identity_loss = torch.tensor(0.0) 243 | if self.constants["before_identity_robustness"] > 0: 244 | before_identity_loss = F.l1_loss(real_before, self.model.remover.G(fake_after)) 245 | 246 | # Sparsity regularization for applier 247 | applier_sparsity_loss = torch.tensor(0.0) 248 | if self.constants["applier_mask_sparsity"] > 0: 249 | applier_sparsity_loss = F.l1_loss(real_before, fake_after) 250 | # Sparsity regularization for remover 251 | remover_sparsity_loss = torch.tensor(0.0) 252 | if self.constants["remover_mask_sparsity"] > 0: 253 | remover_sparsity_loss = F.l1_loss(real_after, fake_before) 254 | 255 | # Calculate gradients and minimize loss 256 | G_loss = self.constants["applier_adversarial"] * applier_adv_loss \ 257 | + self.constants["remover_adversarial"] * remover_adv_loss \ 258 | + self.constants["before_identity_robustness"] * 
before_identity_loss \ 259 | + self.constants["after_identity_robustness"] * after_identity_loss \ 260 | + self.constants["applier_mask_sparsity"] * applier_sparsity_loss \ 261 | + self.constants["remover_mask_sparsity"] * remover_sparsity_loss 262 | G_loss.backward() 263 | self.optims_step("G") 264 | 265 | return { 266 | "applier_D_on_fake2": applier_D_on_fake.mean().item(), 267 | "remover_D_on_fake2": remover_D_on_fake.mean().item(), 268 | "before_identity_loss": before_identity_loss.item(), 269 | "after_identity_loss": after_identity_loss.item(), 270 | "applier_sparsity_loss": applier_sparsity_loss.item(), 271 | "remover_sparsity_loss": remover_sparsity_loss.item(), 272 | "G_loss": G_loss.item(), 273 | } 274 | 275 | 276 | #################### Reporting and Tracking Methods #################### 277 | 278 | def post_train_step(self): 279 | """ 280 | The post-training step. 281 | """ 282 | super().post_train_step() 283 | 284 | should_generate_grid = self.iters % self.generate_grid_interval == 0 285 | 286 | # Check generator's progress by recording its output on a fixed input 287 | if should_generate_grid: 288 | applier_grid = generate_G_grid(self.model.applier.G, self._fixed_before) 289 | remover_grid = generate_G_grid(self.model.remover.G, self._fixed_after) 290 | self._applier_generated_grids.append(applier_grid) 291 | self._remover_generated_grids.append(remover_grid) 292 | self.writer.add_image("Grids/applier_grid", applier_grid, self.iters) 293 | self.writer.add_image("Grids/remover_grid", remover_grid, self.iters) 294 | 295 | 296 | def stop(self, lines_to_plot={}): 297 | """ 298 | Stops the trainer and report the result of the experiment. 299 | """ 300 | 301 | losses = {**self.get_data_containing("D_loss"), **self.get_data_containing("G_loss")} 302 | 303 | # XXX 304 | lines_to_plot = { 305 | "Discriminator Evaluations": "D_on", 306 | "Gradient Penalty": "grad_penalty", 307 | "Sparsity Loss": "sparsity", 308 | "Identity Loss": "identity", 309 | } 310 | 311 | if not self.save_results: 312 | plot_lines(losses, title="Losses") 313 | for title, keyword in lines_to_plot.items(): 314 | plot_lines(self.get_data_containing(keyword), title=title) 315 | return 316 | 317 | # Create experiment directory in the model's directory 318 | experiment_dir = os.path.join(self.results_dir, self.get_experiment_name()) 319 | 320 | # Save model 321 | model_path = os.path.join(experiment_dir, "model.pt") 322 | self.save_model(model_path) 323 | 324 | # Plot losses of D and G 325 | losses_file = os.path.join(experiment_dir, "Losses.png") 326 | plot_lines(losses, filename=losses_file, title="Losses") 327 | for title, keyword in lines_to_plot.items(): 328 | line_file = os.path.join(experiment_dir, f"{title}.png") 329 | plot_lines(self.get_data_containing(keyword), filename=line_file, title=title) 330 | 331 | # Create an animation of the generator's progress 332 | remover_animation_file = os.path.join(experiment_dir, "remover_progress.mp4") 333 | applier_animation_file = os.path.join(experiment_dir, "applier_progress.mp4") 334 | create_progress_animation(self._remover_generated_grids, remover_animation_file) 335 | create_progress_animation(self._applier_generated_grids, applier_animation_file) 336 | 337 | # Write details of experiment 338 | details_txt = os.path.join(experiment_dir, "repr.txt") 339 | with open(details_txt, "w") as f: 340 | f.write(self.__repr__()) 341 | 342 | -------------------------------------------------------------------------------- /src/trainers/pairedcyclegan_trainer.py: 
-------------------------------------------------------------------------------- 1 | 2 | import os 3 | import random 4 | import torch 5 | import torch.nn.functional as F 6 | 7 | from .base_trainer import BaseTrainer 8 | from .utils.init_utils import init_optim 9 | from .utils.gan_utils import * 10 | from .utils.report_utils import * 11 | from .utils.face_morph.face_morph import face_morph 12 | 13 | 14 | class PairedCycleGANTrainer(BaseTrainer): 15 | """ 16 | The trainer for PairedCycleGAN. 17 | https://adoberesearch.ctlprojects.com/wp-content/uploads/2018/04/CVPR2018_Paper3623_Chang.pdf 18 | """ 19 | 20 | def __init__(self, model, dataset, 21 | D_optim_config={}, 22 | G_optim_config={}, 23 | D_iters=5, 24 | clamp=(-0.01, 0.01), 25 | before_noise_std=0.01, 26 | after_noise_std=0.01, 27 | generate_grid_interval=200, 28 | skip_remover_interval=10, 29 | constants={}, 30 | **kwargs): 31 | """ 32 | Constructor. 33 | 34 | Args: 35 | model: The makeup net. 36 | dataset: The makeup dataset. 37 | """ 38 | super().__init__(model, dataset, **kwargs) 39 | 40 | self.D_iters = D_iters 41 | self.clamp = clamp 42 | self.before_noise_std = before_noise_std 43 | self.after_noise_std = after_noise_std 44 | self.generate_grid_interval = generate_grid_interval 45 | self.skip_remover_interval = skip_remover_interval 46 | 47 | # Initialize optimizers for generator and discriminator 48 | self.optims = { 49 | "applier": { 50 | "D": init_optim(self.model.applier.D.parameters(), **D_optim_config), 51 | "G": init_optim(self.model.applier.G.parameters(), **G_optim_config), 52 | }, 53 | "remover": { 54 | "D": init_optim(self.model.remover.D.parameters(), **D_optim_config), 55 | "G": init_optim(self.model.remover.G.parameters(), **G_optim_config), 56 | }, 57 | "style": { 58 | "D": init_optim(self.model.style_D.parameters(), **D_optim_config), 59 | } 60 | } 61 | 62 | # TODO: Specify loss type instead (minimax or wasserstein) 63 | self.D_loss_fn = get_D_loss(self.model.gan_type) 64 | self.G_loss_fn = get_G_loss(self.model.gan_type) 65 | 66 | # Initialize all constants required for training 67 | self.constants = self._get_constants(**constants) 68 | 69 | # Generate makeup for a sample no-makeup faces and reference makeup faces 70 | num_test = 12 71 | self._generated_grids = [] 72 | 73 | random_indices = random.sample(range(len(self.dataset)), num_test) 74 | self._fixed_before = torch.stack( 75 | [self.dataset[i]["before"] for i in random_indices], dim=0).to(self.device) 76 | 77 | random_indices = random.sample(range(len(self.dataset)), num_test) 78 | self._fixed_after = torch.stack( 79 | [self.dataset[i]["after"] for i in random_indices], dim=0).to(self.device) 80 | 81 | 82 | def _get_constants(self, 83 | applier_adversarial=0.1, # lambda_G (0.1 -> 0.5) 84 | remover_adversarial=0.1, # lambda_F (0.1 -> 0.5) 85 | style_adversarial=0.1, # lambda_P (0.1 -> 0.5) 86 | applier_D_grad_penalty=0., 87 | remover_D_grad_penalty=0., 88 | style_D_grad_penalty=0., 89 | after_identity_robustness=0., 90 | before_identity_robustness=0., # lambda_I (1.0) 91 | style_identity_robustness=0., # lambda_S (1.0) 92 | applier_mask_sparsity=0.1, # lambda_R (0.1) 93 | remover_mask_sparsity=0., 94 | **kwargs): 95 | return { 96 | "applier_adversarial": applier_adversarial, 97 | "remover_adversarial": remover_adversarial, 98 | "style_adversarial": style_adversarial, 99 | "applier_D_grad_penalty": applier_D_grad_penalty, 100 | "remover_D_grad_penalty": remover_D_grad_penalty, 101 | "style_D_grad_penalty": style_D_grad_penalty, 102 | 
"after_identity_robustness": after_identity_robustness, 103 | "before_identity_robustness": before_identity_robustness, 104 | "style_identity_robustness": style_identity_robustness, 105 | "applier_mask_sparsity": applier_mask_sparsity, 106 | "remover_mask_sparsity": remover_mask_sparsity, 107 | } 108 | 109 | 110 | def optims_zero_grad(self, D_or_G): 111 | """ 112 | Zero gradients in all D optimizers or G optimizers. 113 | 114 | Args: 115 | D_or_G: Indicates whether the operation is for D optims or G optims. 116 | Should be either "D" or "G". 117 | """ 118 | [optim[D_or_G].zero_grad() for optim in self.optims.values() if D_or_G in optim] 119 | 120 | 121 | def optims_step(self, D_or_G): 122 | """ 123 | Make an optimization step in all D optimizers or G optimizers. 124 | 125 | Args: 126 | D_or_G: Indicates whether the operation is for D optims or G optims. 127 | Should be either "D" or "G". 128 | """ 129 | [optim[D_or_G].step() for optim in self.optims.values() if D_or_G in optim] 130 | 131 | 132 | def train_step(self): 133 | """ 134 | Makes ones training step. 135 | """ 136 | 137 | ### Train D ### 138 | for _ in range(self.D_iters): 139 | # Sample from dataset 140 | sample = self.sample_dataset() 141 | # Unpack 142 | real_after = sample["after"].to(self.device) 143 | real_before = sample["before"].to(self.device) 144 | lm_after = sample["landmarks"]["after"] 145 | lm_before = sample["landmarks"]["before"] 146 | # Train 147 | D_results = self.D_step(real_after, real_before, lm_after, lm_before) 148 | 149 | ### Train G ### 150 | # Sample from dataset 151 | sample = self.sample_dataset() 152 | # Unpack 153 | real_after = sample["after"].to(self.device) 154 | real_before = sample["before"].to(self.device) 155 | # Train 156 | G_results = self.G_step(real_after, real_before) 157 | 158 | # Record data 159 | self.add_data(**D_results, **G_results) 160 | losses = {"D_loss": D_results["D_loss"], "G_loss": G_results["G_loss"]} 161 | self.writer.add_scalars("Loss", losses, self.iters) 162 | 163 | 164 | def D_step(self, real_after, real_before, lm_after, lm_before): 165 | 166 | # Zero gradients and loss 167 | self.optims_zero_grad("D") 168 | 169 | # Sample noise 170 | noise_after = torch.randn_like(real_after) * self.after_noise_std 171 | noise_before = torch.randn_like(real_before) * self.before_noise_std 172 | 173 | # Add noise to real 174 | real_after += noise_after 175 | real_before += noise_before 176 | 177 | # Sample from generators 178 | with torch.no_grad(): 179 | fake_after = self.model.applier.G(real_before, real_after) 180 | fake_before = self.model.remover.G(real_after) 181 | 182 | # Add noise to fake 183 | fake_after += noise_after 184 | fake_before += noise_before 185 | 186 | # Sample fake styles 187 | real_style = self.sample_real_style(real_after, real_before, lm_after, lm_before) 188 | fake_style = self.sample_fake_style(real_after, fake_after) 189 | # TODO: add noise to style? 
190 |
191 | # Classify real and fake images
192 | remover_D_on_real = self.model.remover.D(real_before)
193 | remover_D_on_fake = self.model.remover.D(fake_before)
194 | applier_D_on_real = self.model.applier.D(real_after)
195 | applier_D_on_fake = self.model.applier.D(fake_after)
196 | style_D_on_real = self.model.style_D(real_style)
197 | style_D_on_fake = self.model.style_D(fake_style)
198 |
199 | # Adversarial losses for after domain, before domain, and style pairs
200 | remover_adv_loss = self.D_loss_fn(remover_D_on_real, remover_D_on_fake)
201 | applier_adv_loss = self.D_loss_fn(applier_D_on_real, applier_D_on_fake)
202 | style_adv_loss = self.D_loss_fn(style_D_on_real, style_D_on_fake)
203 |
204 | # Gradient penalty XXX ?
205 | applier_D_grad_penalty = torch.tensor(0.0)
206 | if self.constants["applier_D_grad_penalty"] > 0:
207 | interpolated_after = random_interpolate(real_after, fake_after)
208 | applier_D_grad_penalty = simple_gradient_penalty(self.model.applier.D, interpolated_after, center=1.0)
209 | #applier_D_grad_penalty = simple_gradient_penalty(self.model.applier.D, real_after)
210 |
211 | remover_D_grad_penalty = torch.tensor(0.0)
212 | if self.constants["remover_D_grad_penalty"] > 0:
213 | interpolated_before = random_interpolate(real_before, fake_before)
214 | remover_D_grad_penalty = simple_gradient_penalty(self.model.remover.D, interpolated_before, center=1.0)
215 | #remover_D_grad_penalty = simple_gradient_penalty(self.model.remover.D, real_before)
216 |
217 | style_D_grad_penalty = torch.tensor(0.0)
218 | if self.constants["style_D_grad_penalty"] > 0:
219 | interpolated_style = random_interpolate(real_style, fake_style)
220 | style_D_grad_penalty = simple_gradient_penalty(self.model.style_D, interpolated_style, center=1.0)
221 | #style_D_grad_penalty = simple_gradient_penalty(self.model.style_D, real_style)
222 |
223 |
224 | # Calculate gradients and minimize loss
225 | D_loss = self.constants["applier_adversarial"] * applier_adv_loss \
226 | + self.constants["remover_adversarial"] * remover_adv_loss \
227 | + self.constants["style_adversarial"] * style_adv_loss \
228 | + self.constants["applier_D_grad_penalty"] * applier_D_grad_penalty \
229 | + self.constants["remover_D_grad_penalty"] * remover_D_grad_penalty \
230 | + self.constants["style_D_grad_penalty"] * style_D_grad_penalty
231 | D_loss.backward()
232 |
233 | # Make a step of minimizing D's loss
234 | if self.iters % self.skip_remover_interval == 0:
235 | self.optims_step("D")
236 | else:
237 | self.optims["applier"]["D"].step()
238 |
239 | return {
240 | "applier_D_on_real": applier_D_on_real.mean().item(),
241 | "applier_D_on_fake": applier_D_on_fake.mean().item(),
242 | "remover_D_on_real": remover_D_on_real.mean().item(),
243 | "remover_D_on_fake": remover_D_on_fake.mean().item(),
244 | "style_D_on_real": style_D_on_real.mean().item(),
245 | "style_D_on_fake": style_D_on_fake.mean().item(),
246 | "applier_D_grad_penalty": applier_D_grad_penalty.item(),
247 | "remover_D_grad_penalty": remover_D_grad_penalty.item(),
248 | "style_D_grad_penalty": style_D_grad_penalty.item(),
249 | "D_loss": D_loss.item(),
250 | }
251 |
252 |
253 | def G_step(self, real_after, real_before):
254 |
255 | # Zero gradients
256 | self.optims_zero_grad("G")
257 |
258 | # Sample noise
259 | noise_after = torch.randn_like(real_after) * self.after_noise_std
260 | noise_before = torch.randn_like(real_before) * self.before_noise_std
261 |
262 | # Add noise to real
263 | real_after += noise_after
264 | real_before += noise_before
265 |
266 | #
Sample from generators 267 | fake_after = self.model.applier.G(real_before, real_after) 268 | fake_before = self.model.remover.G(real_after) 269 | 270 | # Add noise to fake 271 | fake_after += noise_after 272 | fake_before += noise_before 273 | 274 | # Sample fake styles 275 | fake_style = self.sample_fake_style(real_after, fake_after) 276 | 277 | # Classify fake images 278 | remover_D_on_fake = self.model.remover.D(fake_before) 279 | applier_D_on_fake = self.model.applier.D(fake_after) 280 | style_D_on_fake = self.model.style_D(fake_style) 281 | 282 | # Adversarial losses for after domain, before domain 283 | remover_adv_loss = self.G_loss_fn(remover_D_on_fake) 284 | applier_adv_loss = self.G_loss_fn(applier_D_on_fake) 285 | style_adv_loss = self.G_loss_fn(style_D_on_fake) 286 | 287 | # Identity loss for applier.D's domain (after) 288 | after_identity_loss = torch.tensor(0.0) 289 | if self.constants["after_identity_robustness"] > 0: 290 | after_identity_loss = F.l1_loss(real_after, self.model.applier.G(fake_before, real_after)) 291 | # Identity loss for remover.D's domain (before) 292 | before_identity_loss = torch.tensor(0.0) 293 | if self.constants["before_identity_robustness"] > 0: 294 | before_identity_loss = F.l1_loss(real_before, self.model.remover.G(fake_after)) 295 | # Style loss (i.e. style is preserved in fake_after and removed in fake_before) 296 | style_identity_loss = torch.tensor(0.0) 297 | if self.constants["style_identity_robustness"] > 0: 298 | style_identity_loss = F.l1_loss(real_after, self.model.applier.G(fake_before, fake_after)) 299 | 300 | # Sparsity regularization for applier 301 | applier_sparsity_loss = torch.tensor(0.0) 302 | if self.constants["applier_mask_sparsity"] > 0: 303 | applier_sparsity_loss = F.l1_loss(real_before, fake_after) 304 | # Sparsity regularization for remover 305 | remover_sparsity_loss = torch.tensor(0.0) 306 | if self.constants["remover_mask_sparsity"] > 0: 307 | remover_sparsity_loss = F.l1_loss(real_after, fake_before) 308 | 309 | # Calculate gradients and minimize loss 310 | G_loss = self.constants["applier_adversarial"] * applier_adv_loss \ 311 | + self.constants["remover_adversarial"] * remover_adv_loss \ 312 | + self.constants["style_adversarial"] * style_adv_loss \ 313 | + self.constants["before_identity_robustness"] * before_identity_loss \ 314 | + self.constants["after_identity_robustness"] * after_identity_loss \ 315 | + self.constants["style_identity_robustness"] * style_identity_loss \ 316 | + self.constants["applier_mask_sparsity"] * applier_sparsity_loss \ 317 | + self.constants["remover_mask_sparsity"] * remover_sparsity_loss 318 | G_loss.backward() 319 | 320 | # Make a step of minimizing G's loss 321 | if self.iters % self.skip_remover_interval == 0: 322 | self.optims_step("G") 323 | else: 324 | self.optims["applier"]["G"].step() 325 | 326 | return { 327 | "applier_D_on_fake2": applier_D_on_fake.mean().item(), 328 | "remover_D_on_fake2": remover_D_on_fake.mean().item(), 329 | "before_identity_loss": before_identity_loss.item(), 330 | "after_identity_loss": after_identity_loss.item(), 331 | "style_identity_loss": style_identity_loss.item(), 332 | "applier_sparsity_loss": applier_sparsity_loss.item(), 333 | "remover_sparsity_loss": remover_sparsity_loss.item(), 334 | "G_loss": G_loss.item(), 335 | } 336 | 337 | 338 | def sample_real_style(self, real_after, real_before, lm_after, lm_before): 339 | # Morph makeup face to nomakeup face's facial structure for style loss calculation 340 | mask, after2before = 
self.morph_makeup(real_after, real_before, lm_after, lm_before) 341 | # Prepare real same style pair vs. fake same style pair 342 | return torch.cat([mask * real_after , mask * after2before], dim=1) 343 | 344 | def sample_fake_style(self, real_after, fake_after): 345 | return torch.cat([real_after , fake_after], dim=1) 346 | 347 | def morph_makeup(self, real_after, real_before, lm_after, lm_before): 348 | 349 | tensor2D_to_points = lambda t: [(p[0].item(), p[1].item()) for p in t] 350 | torch_to_numpy = lambda t: t.permute(1, 2, 0).cpu().numpy() 351 | numpy_to_torch = lambda t: torch.from_numpy(t).to(self.device).permute(2, 0, 1) 352 | 353 | batch_size = real_after.size()[0] 354 | mask = torch.ones([batch_size, 1, 1, 1]).to(real_after) 355 | morphed_batch = [] 356 | 357 | for i in range(batch_size): 358 | # Zero mask for no landmarks 359 | if lm_after[i].sum() == 0 or lm_before[i].sum() == 0: 360 | morphed_batch.append(torch.zeros_like(real_before[i])) 361 | mask[i] = 0 362 | else: 363 | morphed = face_morph(torch_to_numpy(real_after[i]), 364 | torch_to_numpy(real_before[i]), 365 | tensor2D_to_points(lm_after[i]), 366 | tensor2D_to_points(lm_before[i])) 367 | morphed_batch.append(numpy_to_torch(morphed)) 368 | 369 | return mask, torch.stack(morphed_batch).to(real_after) 370 | 371 | 372 | #################### Reporting and Tracking Methods #################### 373 | 374 | def post_train_step(self): 375 | """ 376 | The post-training step. 377 | """ 378 | super().post_train_step() 379 | 380 | should_generate_grid = self.iters % self.generate_grid_interval == 0 381 | 382 | # Check generator's progress by recording its output on a fixed input 383 | if should_generate_grid: 384 | grid = generate_makeup_grid(self.model.applier.G, self.model.remover.G, 385 | self._fixed_before, self._fixed_after) 386 | self._generated_grids.append(grid) 387 | self.writer.add_image("grid", grid, self.iters) 388 | 389 | 390 | def stop(self, lines_to_plot={}): 391 | """ 392 | Stops the trainer and report the result of the experiment. 
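
When save_results is set, the experiment directory receives: the
model checkpoint (model.pt), a Losses.png plot plus one plot per line
group below, a pcgan_progress.mp4 animation of the generator's grids,
and a repr.txt dump of the trainer's repr.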
393 | """ 394 | 395 | losses = {**self.get_data_containing("D_loss"), **self.get_data_containing("G_loss")} 396 | 397 | # XXX 398 | lines_to_plot = { 399 | "Discriminator Evaluations": "D_on", 400 | "Gradient Penalty": "grad_penalty", 401 | "Sparsity Loss": "sparsity", 402 | "Identity Loss": "identity", 403 | } 404 | 405 | if not self.save_results: 406 | plot_lines(losses, title="Losses") 407 | for title, keyword in lines_to_plot.items(): 408 | plot_lines(self.get_data_containing(keyword), title=title) 409 | return 410 | 411 | # Create experiment directory in the model's directory 412 | experiment_dir = os.path.join(self.results_dir, self.get_experiment_name()) 413 | 414 | # Save model 415 | model_path = os.path.join(experiment_dir, "model.pt") 416 | self.save_model(model_path) 417 | 418 | # Plot losses of D and G 419 | losses_file = os.path.join(experiment_dir, "Losses.png") 420 | plot_lines(losses, filename=losses_file, title="Losses") 421 | for title, keyword in lines_to_plot.items(): 422 | line_file = os.path.join(experiment_dir, f"{title}.png") 423 | plot_lines(self.get_data_containing(keyword), filename=line_file, title=title) 424 | 425 | # Create an animation of the generator's progress 426 | animation_file = os.path.join(experiment_dir, "pcgan_progress.mp4") 427 | create_progress_animation(self._generated_grids, animation_file) 428 | 429 | # Write details of experiment 430 | details_txt = os.path.join(experiment_dir, "repr.txt") 431 | with open(details_txt, "w") as f: 432 | f.write(self.__repr__()) 433 | 434 | -------------------------------------------------------------------------------- /src/dataset/search/instagram/test_urls.csv: -------------------------------------------------------------------------------- 1 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109023542_1048266978904094_8799263065077844703_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=9_UuKv5xXGEAX_Z4n1E&se=7&oh=a96fc3dddd92fc26086cd488e98185dd&oe=5F41A20A&ig_cache_key=MjM1ODE5MzI4MzYyMjgwMDA2OA%3D%3D.2 2 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/115823529_898542503884729_8882321682738076881_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=IIPefsUcIoMAX-TEf1u&se=7&oh=14026679506bb9244442cf4b4ce3bcaa&oe=5F3ED8BE&ig_cache_key=MjM1ODE5MzIzODYxODk4ODE2OA%3D%3D.2 3 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/115712474_631930961032176_2109389559577304544_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=me0oAhj_oikAX8ifyL8&se=7&oh=73fde53adf81d2adcc599ed97f3ac1b4&oe=5F408D17&ig_cache_key=MjM1ODE5MzIzODYwMjM5MDU4NA%3D%3D.2 4 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110022036_2652290438316703_6138000363887847999_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=107&_nc_ohc=PZbbi3TJbuQAX_myhff&se=7&oh=b3899d473654a88c45c30c4fc430fc76&oe=5F424AF1&ig_cache_key=MjM1ODE5MzIyMzE5MjgzNDE1Nw%3D%3D.2 5 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109565382_162464432067396_6692868526708305267_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=keCSzlP9y4QAX_edtYQ&se=7&oh=92b76b4a52148a3120cf00312df7f8ee&oe=5F3FDCD1&ig_cache_key=MjM1ODE5MzIyMzE1OTQwMzQ3OQ%3D%3D.2 6 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/112709192_2779490552155240_441635795208826613_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=0HPh8a37bGYAX96iPRD&se=7&oh=0f79ca8c8f3bb863dbc35fda58f38462&oe=5F40A02D&ig_cache_key=MjM1ODE5MzIyMzI1MTc3NjI5Ng%3D%3D.2 7 | 
https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110015498_2683186101929389_1043307852434097001_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=roFH-y00ii4AX-AQmNm&se=7&oh=1a94ac03f0b93925b5d6cff72bc058bd&oe=5F4204F2&ig_cache_key=MjM1ODE5MzIyMzE1MTA5MDc0MA%3D%3D.2 8 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/113357469_167269141520871_1512078067076254555_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=T_94Ewk4hd4AX8tIFOK&se=7&oh=2fe10418e1a06bf93867c7eeb164fe6a&oe=5F3F70E1&ig_cache_key=MjM1ODE5MzIyMzE3NjE5ODAxOA%3D%3D.2 9 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110436046_584877755550592_8571189079656954904_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=FuYnrgd8DEcAX9IRhqm&se=7&oh=8077904648a0fb4906ab73e9571a0695&oe=5F41E1DA&ig_cache_key=MjM1ODE5MzIyMzIwMTIzNzM3NQ%3D%3D.2 10 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109532628_1138978969802894_1255420135490048217_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=3J7M_-0_RiMAX8YEWGn&oh=c2f977526e216b28f08d99b82278e1f1&oe=5F3F1038&ig_cache_key=MjM1ODE5MzIyMzE2Nzg5MTIzNw%3D%3D.2 11 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110226013_982261208881350_3375817104494482570_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=yH_aIRHxoY8AX-P0ZX1&se=7&oh=7ca03aad9af2b635e909e0ceaa3a5c82&oe=5F41D72C&ig_cache_key=MjM1ODE5MzE5MzE2MTIzMjQwMw%3D%3D.2 12 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110025726_2983454491782371_6385544722100803011_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=5iAHRrWHH4IAX9im2rG&se=7&oh=4126932e38b53faf8b6ff31c7f28e4c2&oe=5F3E9C87&ig_cache_key=MjM1ODE5MzE5MzE4NjM5MTQ1MQ%3D%3D.2 13 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110290861_295056038215530_2616386739378519706_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=SajPxJpK3-0AX-AqpLM&oh=c8465c3938ad6c106232de1f5a2ea66d&oe=5F421F92&ig_cache_key=MjM1ODE5Mjc2NTcxNzg5MzA4Mw%3D%3D.2 14 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109721248_1691536871010854_3636867597305554682_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=-H8QrHDvJe4AX-ByF-H&se=7&oh=3a02399a92870cf46f213e82fc690728&oe=5F3EA1E1&ig_cache_key=MjM1ODE5MzE4ODkyMzA5MTQ2NA%3D%3D.2 15 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109465383_1279373899078461_7640397167589846487_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=109&_nc_ohc=GcdOIOvSZX8AX8HvveZ&se=7&oh=b8f340aa93439f0c8210d67f5a83b6fc&oe=5F40EC05&ig_cache_key=MjM1ODE5MzE2Mjg2MDkzNzgxOQ%3D%3D.2 16 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109293120_313534540017758_3325020615777581326_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=q0BVf7FXmpgAX9XYxzZ&se=7&oh=1ee5093723e29d55246aa1c49ae9693f&oe=5F41B46F&ig_cache_key=MjM1ODE5MzE2Mjg3NzczNTA2Nw%3D%3D.2 17 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110384296_3435101969846955_2842453792541667885_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=_ZMdWP5Wjp0AX82o2fc&se=7&oh=e2e8ebe786a3c2d5b1d8210ba5dff981&oe=5F40113B&ig_cache_key=MjM1ODE5MzE0MzU2ODE4ODA3Nw%3D%3D.2 18 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109733080_1566961816815791_4368218694864342957_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=nOxXzJkytBMAX8jlVLu&se=7&oh=e3ac586d9de177143aa6be0538bf884e&oe=5F4217A0&ig_cache_key=MjM1ODE5MzExMTI1MzgzNTY4MA%3D%3D.2 19 | 
https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/115567013_1003221293467113_4566918249642406076_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=109&_nc_ohc=6Y-BeC8FBXMAX-m8iEO&se=7&oh=237005f82d0b4a675a1ec5c324a2eb5e&oe=5F3F063C&ig_cache_key=MjM1ODE5MzA5NzI5NTE1OTIyNw%3D%3D.2 20 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109800039_167522618204429_7205822265837791979_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=_2MC6x1BV3sAX8bSDSm&se=7&oh=d09f7119ba70935e8947347aced87aba&oe=5F426729&ig_cache_key=MjM1ODE5MzA5NzI4NjY3MTQ0OQ%3D%3D.2 21 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109568394_321072085964982_6916669572910616233_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=uyRcfauCQ0MAX9Btt_v&se=7&oh=d6f93d638e894f3d068892d863cdf733&oe=5F4191A1&ig_cache_key=MjM1ODE5MzA3Njk0NDkyMTU0Nw%3D%3D.2 22 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109504882_2610820892465022_1822107516956855614_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=F-4mlrZN55gAX8f_yuL&se=7&oh=4e3946390890fab2681863da9d4a80b8&oe=5F3FE57A&ig_cache_key=MjM1ODE5MzA3Njk2MTYzNDA3MA%3D%3D.2 23 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/108009205_771017830374340_2849989725456680769_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=baB7JHkNI50AX_zUUB2&oh=4ed522c92d7b0f91cf7761f8f51bf76f&oe=5F3F503C&ig_cache_key=MjM1ODE5MjgxMTI3MDY3MDgzNw%3D%3D.2 24 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/115991972_2615536172001118_5869178628796887387_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=hhckywAH56kAX8EnjWT&se=7&oh=9904f53d181d3d404e83ff3eaa871db3&oe=5F405EB2&ig_cache_key=MjM1ODE5MzAzNTM5NTE3MjU2OQ%3D%3D.2 25 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110183502_733117717510363_3542182485949144384_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=107&_nc_ohc=JlYTti0wxrEAX949Az0&oh=4cde14acfe0c9038a81a64f31967d3d7&oe=5F423C57&ig_cache_key=MjM1ODE4NzI0NzYzMjkyMzAyOQ%3D%3D.2 26 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109769229_142744150792489_3687334175115679476_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=WTQ5jVtsjAUAX8B7AeD&se=7&oh=249bde827ea4dccbbb6948ca59071374&oe=5F423543&ig_cache_key=MjM1ODE5Mjk3Nzc3NDc4MzAzMA%3D%3D.2 27 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/115837185_117899153098694_1401210484251906320_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=EX2tO0uQHnAAX9iyZG1&se=7&oh=1c43d8178329c56abe0869615c3f326d&oe=5F3F15CE&ig_cache_key=MjM1ODE5Mjk3MzIyMDgwODU2OQ%3D%3D.2 28 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109820094_3071216956264747_8708644770846521_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=107&_nc_ohc=TUBWnjnCBDkAX_6aGUI&se=7&oh=a73e21693d65b478612cb1b4cc2311c2&oe=5F423F13&ig_cache_key=MjM1ODE5Mjk3MjU4MzM2NTcyMQ%3D%3D.2 29 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110236539_3137676252993843_1596596941307255222_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=HBRtQ8yrZKgAX_FfuEn&se=7&oh=908a4136c2f4b024cab67d3e03a48c26&oe=5F401767&ig_cache_key=MjM1ODE5Mjk2NDEwMDcxNDY1MA%3D%3D.2 30 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/111306590_1609183955914204_1834100253626382566_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=yEkSlu4QtucAX8YW8Fx&oh=b3543144d1c1f84304831feb980bcadf&oe=5F4044CC&ig_cache_key=MjM1ODE5Mjk1NzUyNTc1Nzk5Ng%3D%3D.2 31 | 
https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/113526368_161211678838455_1650663637394024723_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=yWsN1UhpfJoAX-QZC-k&se=7&oh=cf2230741e49769276bf7095d7c9cd1b&oe=5F411DCD&ig_cache_key=MjM1ODE5Mjk0NDE2MjE5NzAwMg%3D%3D.2 32 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/114017462_278332883590181_1532916439839417436_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=wM-rLgBjEqEAX8yuC8_&oh=c28000d76244f9c1fac6c674e43739a2&oe=5F409DE9&ig_cache_key=MjM1ODE5MjU5OTQzMjc5ODM4OQ%3D%3D.2 33 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/111700545_121444896305545_3881878700383035139_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=vrg3UtzWIX8AX9muDw6&oh=ea4e3d331ce4ecae7be02db30bf6fc77&oe=5F423B8C&ig_cache_key=MjM1ODE5Mjg1MTE5OTYzMzcwMw%3D%3D.2 34 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110293958_975880422861978_6973418548360021780_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=je1icjsgo2cAX8-jANN&se=7&oh=6b43cd0ce1c846e7cee20c21421971af&oe=5F3EDE32&ig_cache_key=MjM1ODE5Mjg1MDgwNDM2OTk4Mw%3D%3D.2 35 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/110516840_1931250840344184_3565288097598089463_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=uz4rV6UC_L0AX8kxW_C&se=7&oh=e664ca1a063263eaf81360614fae6a8c&oe=5F412DB6&ig_cache_key=MjM1ODE5Mjg1MDgxMjU5OTg3OQ%3D%3D.2 36 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/110022043_145902723783254_6390199469658474655_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=Vw8L5d__4IkAX9zCqIs&se=7&oh=413e213349f81bc4f7e170c06ba232e3&oe=5F3F60BA&ig_cache_key=MjM1ODE5Mjg1MDgyOTUxODc2OA%3D%3D.2 37 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110319604_1423988697795277_6004701666725992250_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=coEya2PTd7AAX_7Kwjs&se=7&oh=127dfb853f723d7c6f721d88c8a857f8&oe=5F3F1090&ig_cache_key=MjM1ODE5Mjg1MDg0NjI0MTQzNg%3D%3D.2 38 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109345300_284205686346877_4602614770197201226_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=9sgWid09XZgAX9M4Szz&se=7&oh=eda08b0239bbadc21ac6a43585b2ff39&oe=5F402043&ig_cache_key=MjM1ODE5Mjg1MDg1NDY3NDk0MA%3D%3D.2 39 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110394228_300747741276085_2966516460827873189_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=gfSvQtsTlkwAX8SCiqW&se=7&oh=7e566ababbb70bcbcdc510e52dbd352c&oe=5F3FAF08&ig_cache_key=MjM1ODE5Mjg1MDg4ODAzOTYxNg%3D%3D.2 40 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109726217_4109004535838702_7754373245983673884_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=r09USrY2YJIAX884Lwt&se=7&oh=4c7956fce5c0c90ef78b445d8ada586f&oe=5F3ED759&ig_cache_key=MjM1ODE5Mjg0MDI1MTUwNTc5NA%3D%3D.2 41 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/115698082_2667112460217831_5936970646537114650_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=FCCRn0rnDcwAX_dl0WS&se=7&oh=9934eece8a0de4dd2533122733cbd7c0&oe=5F3F5635&ig_cache_key=MjM1ODE5Mjg0MDIyNjE4ODU4NQ%3D%3D.2 42 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110030742_610736449571470_3461216467751559586_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=TlJu3rPAmEgAX-iUxFo&se=7&oh=7363a50a6c4dd78d17c24e15480f5166&oe=5F4073A0&ig_cache_key=MjM1ODE5Mjg0MDI0MzA0NTg4OA%3D%3D.2 43 | 
https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/113500310_621102721848546_7443896584389530778_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=hfQHW-2pVV8AX-C3tMG&oh=ff202ceaced1234eda37730c715d9236&oe=5F3F4E78&ig_cache_key=MjM1ODE5MjgzMzc5MzE3ODU2Nw%3D%3D.2 44 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/115885751_287333069041931_3217534486921446719_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=ORg-B4kr3jMAX_4YrIr&oh=5eeff8b5936fdf930bc1570f487cdaca&oe=5F417670&ig_cache_key=MjM1ODE5MjgzOTI2OTMyMDQ1Nw%3D%3D.2 45 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110719363_134653421621012_2141058574566790079_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=c14mX8cAkCYAX_mnPzN&se=7&oh=bec950a81287732351756878c6d02faa&oe=5F404708&ig_cache_key=MjM1ODE5Mjc5MzUxNzkwNTgyMg%3D%3D.2 46 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109759805_3196607333693637_1335882719450259487_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=109&_nc_ohc=dp6sgwjiWv0AX9tCesb&se=8&oh=c1e140cb37fd010a003cf6c06b53256f&oe=5F420351&ig_cache_key=MjM1ODE5Mjc5MjE2OTQ5MTE3Mw%3D%3D.2 47 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/111337147_2631430720293159_7048732582061557353_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=SCH5WRjCSUgAX-yWncY&oh=a685faf650d9efaa5995d17d5f1cbbba&oe=5F41463F&ig_cache_key=MjM1ODE5Mjc4Mzk5MDA1Mjk5Mg%3D%3D.2 48 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110206442_103689774723922_7430273381326749738_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=lKwUDK9OpcQAX80uJxC&oh=d75935dea30ce2f6f2a03ad48cacdf77&oe=5F3FCB31&ig_cache_key=MjM1ODE5Mjc4NDgwMjY5ODI2Mg%3D%3D.2 49 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110239105_165115995074481_6572753583538356139_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=109&_nc_ohc=lg23xnQB2qoAX9_MhCD&se=7&oh=54460b0b37b865c904f4b9e184f7e948&oe=5F409F0B&ig_cache_key=MjM1ODE5MjcwOTEwNDg2NDMwNQ%3D%3D.2 50 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109908759_183209463186328_4381605365016125129_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=9jCCc4B66ewAX_W0axw&se=7&oh=e76f3f62b10de38aeb347442893c15db&oe=5F40EA41&ig_cache_key=MjM1ODE5MjcwNDU5MDUyMTQ3MA%3D%3D.2 51 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110205970_2747686211998789_2906352538298112149_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=8LDHJw56BmQAX-BWo8A&oh=cd72c5304c22fbb7ec550962406275a2&oe=5F41FC07&ig_cache_key=MjM1ODE5MjUwMzEyMzI2NDEwNQ%3D%3D.2 52 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110038172_2493992284225204_4886788721042971756_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=4_3hEOudVHMAX-U-dnP&se=8&oh=07419cc5227e231889d52c0a9470fe8e&oe=5F3F1BF6&ig_cache_key=MjM1ODE5MjY4NjQyMjcwNDkwNw%3D%3D.2 53 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/106722160_936243263547454_1357851901133074186_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=9LHRny9aBQoAX_zQl3g&se=7&oh=8a7d588c3086647c63d0cca456ee97b9&oe=5F3F889B&ig_cache_key=MjM1ODE5MjY4MzE3NjI3MjQyOA%3D%3D.2 54 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110014517_176440890515375_8053197587414641037_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=BHOHPrVww7IAX_p2XVW&se=7&oh=13e378b75a1266a8e424aac61fb8367f&oe=5F415DF8&ig_cache_key=MjM1ODE5MjY4MzIwOTc0NzczOA%3D%3D.2 55 | 
https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109790869_2675531642719501_174038232347959437_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=3Vfg0c_mabMAX8iiS69&se=7&oh=9da9857a905f41d6ffba17ccb39f6258&oe=5F3EC61F&ig_cache_key=MjM1ODE5MjY4MzE4NDU4NDA0Mg%3D%3D.2 56 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/115740457_3086191868163925_9028878156873965302_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=W8v6ioaUxTsAX_5xRjX&se=7&oh=484834b6d50115f63dc133700601a547&oe=5F404806&ig_cache_key=MjM1ODE5MjY4MzE5MzA3NTk1NA%3D%3D.2 57 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109538552_2037019716442948_3438439919662551768_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=QbvgPXbOfRAAX9SdnzI&se=7&oh=b9f9cdf07528d60a770c3d800f88ea05&oe=5F421876&ig_cache_key=MjM1ODE5MjY3ODQ1MzUwMTU5Nw%3D%3D.2 58 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/111119625_288481812385349_6179780122840046848_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=wrzONQFl53YAX8kKI_z&se=7&oh=4deb953555323c6ed67856a8781f6cbc&oe=5F41C7B2&ig_cache_key=MjM1ODE5MjY3ODQyMDA1MTQ2Mw%3D%3D.2 59 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/111641659_728439281064167_761753113317162692_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=L2HIus_Tv0IAX-YbuDn&se=7&oh=1b411d6015c5ac13bfac432a559d2a11&oe=5F41C18E&ig_cache_key=MjM1ODE5MjY3ODQ0NTE3MDk0NQ%3D%3D.2 60 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110201933_172198110976191_5374286189853480412_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=yDm0qihKhbAAX_aNM3Y&se=7&oh=34fffac091f7d185fb3331f9656f72a6&oe=5F425627&ig_cache_key=MjM1ODE5MjY3ODQzNjY5MDcxNQ%3D%3D.2 61 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110034803_3246799812065559_9114674962075842774_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=4Z5NqWEq_dAAX-c0Mfy&oh=03a7adb43607cfaa377b090931cb3885&oe=5F412E83&ig_cache_key=MjM1ODE5MjY4MjEyNTU2NTczMA%3D%3D.2 62 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110033968_977381682706162_9125505874201518535_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=HJShP1-jfNsAX9YOBXE&se=7&oh=6d156219148c860434c0181422d43d3c&oe=5F40644D&ig_cache_key=MjM1ODE5MjYzNzY3NjM5NTMzMg%3D%3D.2 63 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109699545_162566905399312_6378811754371421063_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=A-7MFy8sFSoAX-9p8ZM&se=7&oh=d1631ba92af515f5e1dd0f6e5ba7ef05&oe=5F418173&ig_cache_key=MjM1ODE5MjU0MDUyNzQ2MzM1Ng%3D%3D.2 64 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110112075_710852236423717_1081748666098569035_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=WH3a8Xr2lKEAX9i6ffw&se=7&oh=ccec2030b31ec2b0adb19c48c7b2cd86&oe=5F3F7944&ig_cache_key=MjM1ODE5MjQ4NDg4NDAxMzk5MA%3D%3D.2 65 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109788641_285653259437952_517998588579603811_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=jcaWDdAf7q4AX9c2tt7&oh=777c63af8d6dee2fd16bc73e377e5531&oe=5F3E8F20&ig_cache_key=MjM1ODE5MjQ3NTAzNTk3NDgxMg%3D%3D.2 66 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/114150274_126023619178957_1088291940284819446_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=101&_nc_ohc=yT8hGXpVd2wAX8sIEVx&oh=435bec1cbe6486f2281f574860f5ed6e&oe=5F3F2B52&ig_cache_key=MjM1ODE5MjQ3NTA1MjkxNjAxMA%3D%3D.2 67 | 
https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109834220_182036973280656_3318894535529072090_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=Pf39uWtZdBUAX8vkvI7&se=7&oh=4c36ec0981c4e3c81c37005a4831f2dc&oe=5F42431C&ig_cache_key=MjM1ODE5MjQ3MDM2NTU5Nzk5MA%3D%3D.2 68 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/113448609_2761083874168596_7655122216201561893_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=9pBoCq4YcFMAX9waebP&oh=2c8ed7a949c1d5cf703cc9ac667514c6&oe=5F3EB237&ig_cache_key=MjM1ODE5MjQ1MzczNzMzNjc2Mg%3D%3D.2 69 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109448629_155173102841192_323495014509150525_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=iz7fR6XBATIAX9X3bE6&se=7&oh=01a678acf43b94d54d9ec3259686059f&oe=5F3E992F&ig_cache_key=MjM1ODE5MjQ0NjM4ODcwMTcyOQ%3D%3D.2 70 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109905909_882330152175536_1505474007901032518_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=MbovO2Y0tIAAX_g9Wyc&se=7&oh=f356215ddcd7bf0f47868f2a5dacd853&oe=5F426844&ig_cache_key=MjM1ODE5MjQ0NjQ2NDE3NTUwOQ%3D%3D.2 71 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110199418_150348383380990_6843513086231888571_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=K3ku6pDIAGcAX8mBO99&se=7&oh=fe2dc76c34681d7b7de6b80d8e601ac8&oe=5F3EC788&ig_cache_key=MjM1ODE5MjQ0NjQwNTQ5MTg3NQ%3D%3D.2 72 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/111064729_1151900861852993_6285721207785064742_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=W2A7WuhYAhcAX_1ZYkj&se=7&oh=408fd9b505d0af3dd08397831dab292c&oe=5F409F3A&ig_cache_key=MjM1ODE5MjQyMzQ2NTA5ODE5MQ%3D%3D.2 73 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/109444565_143540347398945_5103294438911895788_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=109&_nc_ohc=P5RDTjlXsFsAX8CCBO8&oh=370af9753a3e32fd583b0b0dcf2d80c0&oe=5F40D06D&ig_cache_key=MjM1ODE5MjM4OTIwNTMxMjAzNQ%3D%3D.2 74 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/110001032_283717109710935_5201985497548666847_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=iyuFo0MIccIAX_OMWjR&oh=31a9c1033f0cb7c53fc8200e427b8617&oe=5F413F65&ig_cache_key=MjM1ODE5MjM1NjQ4ODcxMTEwOA%3D%3D.2 75 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109926795_298185267955923_3292401273502469241_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=sfpujCsDIpYAX82jLOd&se=7&oh=42839e7f2e50cac1e3834e8a3a6725d7&oe=5F3E7EBD&ig_cache_key=MjM1ODE5MjM1MTE2MjU3OTY4OA%3D%3D.2 76 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109450355_592106551677250_7932071833615317959_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=17QxsyguOD8AX8mt3hc&se=7&oh=fbf1507ffdea1abeff51680c2731bf14&oe=5F409858&ig_cache_key=MjM1ODE5MjM1MTE0NTc1ODgyNQ%3D%3D.2 77 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109214538_666291570627558_7563599765006750588_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=X22YYJnZE8UAX_lYraJ&oh=a36dda4818ce78da94f5be24c4322f21&oe=5F3F7D50&ig_cache_key=MjM1ODE5MDYyNzY3MjYzNTY0OA%3D%3D.2 78 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/110394224_317944712675888_8108304761130447660_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=cefZ-o2gGOgAX8IV6Yx&oh=27d1d22681498dda7039b9e7a3e399a4&oe=5F417268&ig_cache_key=MjM1ODE5MjMxODIyOTQ2MzM1OA%3D%3D.2 79 | 
https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109811554_316664786200007_7600855018306679327_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=lPB78oy4WdAAX_TJPMs&oh=420468eefaa579146e408b0ddc80946c&oe=5F411190&ig_cache_key=MjM1ODE5MjI4OTY5ODI2NTA3OA%3D%3D.2 80 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109820097_3157889917660371_1636294710954425798_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=IIQGhjQu4dkAX-0mxXm&se=7&oh=f1aa9d91e591f56976273ab6821a84f4&oe=5F4115CF&ig_cache_key=MjM1ODE5MjE4NjA3NTc4ODQxMg%3D%3D.2 81 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109587979_276861756925532_3013726442028125122_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=OQZ6OPoeqhIAX8x1HUX&se=7&oh=9fb5f6ab79079f5b04fba52c24e8600e&oe=5F3EFEF4&ig_cache_key=MjM1ODE5MjE3NzA5MDQ3MTg5MA%3D%3D.2 82 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109120947_161934842094286_6747831048267401594_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=bmmwMNCTcGYAX-ish83&oh=2627f3b6db74275455a11e1f11a42517&oe=5F3F9741&ig_cache_key=MjM1ODE5MjE0MzQ1OTQ3NTQ2OQ%3D%3D.2 83 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110303233_1778865248921608_2069203707590440428_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=8wfaqSA1t9oAX_WwejJ&oh=dc203d94a9dfa801a102e62aa85ac493&oe=5F414F9F&ig_cache_key=MjM1ODE5MjA1NzM0MzAzNTMwNg%3D%3D.2 84 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110697315_146715520371718_8420708585688494346_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=Wl4I6bNDdUUAX8LAB-Z&oh=1a6e1f03bce91f760d5ca95df6c86b5d&oe=5F3F3CAE&ig_cache_key=MjM1ODE5MjA1NzM2MDAyODMyNQ%3D%3D.2 85 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/111064731_178755590352641_3665165690963706264_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=oN1uwDuJ70kAX9Jk6pA&oh=4263110d8254a3667177d1b983d6301a&oe=5F426B58&ig_cache_key=MjM1ODE5MjAzMzc5NjIwNjQ2MQ%3D%3D.2 86 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109788914_1190032384662914_1133192431757037704_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=vbuqCWcMdY4AX8n3nPc&se=7&oh=5e8d40a29d32f0693ee0fb788d60874b&oe=5F4092C9&ig_cache_key=MjM1ODE5MjAyNDE5MDM2ODg0MA%3D%3D.2 87 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/114419471_1460931010756894_8399311359018369844_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=s6c7W0mUf-AAX-vAhTI&oh=ee1af661b63642184f63bdf2fe4b722a&oe=5F3FD297&ig_cache_key=MjM1ODE5MjAwNDkzMTc1MjQwMQ%3D%3D.2 88 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/110334067_840315019708876_4600103570391874179_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=5Xj6i8J7kDYAX-TAD-8&oh=8cb9c9b0b9c6137da9f7463a925a2fa1&oe=5F3F4A48&ig_cache_key=MjM1ODE4NzU3ODgxNzQ4MTY0NA%3D%3D.2 89 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109902529_151766129821172_6375794575183271702_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=pOBgAMfNJGcAX8tLT0I&oh=3657784c8c0909d456531b700100eb3c&oe=5F422288&ig_cache_key=MjM1ODE5MTAzMzkzMjU2MzM2Mw%3D%3D.2 90 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109703376_2649636335254210_8331203640798085732_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=110&_nc_ohc=RhvwesXIJ50AX-3Vcmj&se=8&oh=a6a1e3666493db447eea5744a6b95cde&oe=5F3E82D8&ig_cache_key=MjM1ODE5MTk4OTAxNjg5NTc4OQ%3D%3D.2 91 | 
https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/fr/e15/s1080x1080/109470145_153997162992116_3385186563840264747_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=THk22SD3ANAAX9UD53w&oh=3cfd21a7f30d27745507652426caf9c5&oe=5F3EE7BB&ig_cache_key=MjM1ODE5MTk3Mzk3NzczNjI4Ng%3D%3D.2 92 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/110338417_333199687695414_681262827962096897_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=106&_nc_ohc=sbGjno5dzLMAX8UCar_&se=7&oh=539a46b3955ff13dbeba59c794fc88a2&oe=5F3EFC63&ig_cache_key=MjM1ODE5MTk2MDEzNTAzMTg4Nw%3D%3D.2 93 | https://instagram.ffjr1-2.fna.fbcdn.net/v/t51.2885-15/e35/109777128_118933339643668_3826391321913608625_n.jpg?_nc_ht=instagram.ffjr1-2.fna.fbcdn.net&_nc_cat=104&_nc_ohc=ZQCZi73fp9AAX80x0fF&se=7&oh=067f2faaab8f9e29d0d5cc58efbbe17b&oe=5F3EB8FE&ig_cache_key=MjM1ODE5MTk2MDExODIzMjk2MQ%3D%3D.2 94 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/113448607_2931553153637292_3668290273986032704_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=100&_nc_ohc=MPrvbY0ePlAAX_gVAXQ&se=7&oh=0ac64477c3ce8f45255f0bba3f985901&oe=5F3F3A34&ig_cache_key=MjM1ODE5MTk2MDE1MTYxNTIzMQ%3D%3D.2 95 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/109551791_2356283614676942_3587463730842654671_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=102&_nc_ohc=fQvFlk-JHAgAX-XrvYp&se=7&oh=de83dcd06c4aeedbb466018d4874ee0d&oe=5F41119A&ig_cache_key=MjM1ODE5MTk1NTEyOTA4MjU2MQ%3D%3D.2 96 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109784662_3132992713456079_2626530753204889845_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=I5g0Dmfw960AX_vO3Zg&se=7&oh=a0f02548e5cc5b2e7b7aa355cfa6ccf1&oe=5F3EF47B&ig_cache_key=MjM1ODE5MTk1NTE1NDE4NTcyMQ%3D%3D.2 97 | https://instagram.ffjr1-1.fna.fbcdn.net/v/t51.2885-15/e35/110314044_1019616668463077_4514593920022620249_n.jpg?_nc_ht=instagram.ffjr1-1.fna.fbcdn.net&_nc_cat=105&_nc_ohc=7xpjYF0DLC8AX8ULu38&se=7&oh=c8946d9decc59b4a390f88f3dbf765a4&oe=5F41A056&ig_cache_key=MjM1ODE5MDg1NTUyMjYzMDczOQ%3D%3D.2 98 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109736257_183782669833287_2855974071210549461_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=103&_nc_ohc=AJS_yQ-ZhQ0AX8zQQ_q&se=7&oh=14c36d45f1e55f957f066620c8417294&oe=5F420DC9&ig_cache_key=MjM1ODE5MTg0MTM4NzMxMDIyNw%3D%3D.2 99 | https://instagram.ffjr1-3.fna.fbcdn.net/v/t51.2885-15/e35/109513308_287359325865452_3885889812404839630_n.jpg?_nc_ht=instagram.ffjr1-3.fna.fbcdn.net&_nc_cat=108&_nc_ohc=S6536ApKxRMAX9UYbfa&se=7&oh=2b1158832c657ddc4e0fcd422f95b86e&oe=5F412496&ig_cache_key=MjM1ODE5MTgzOTU3NjAwNDEyMw%3D%3D.2 100 | https://instagram.ffjr1-4.fna.fbcdn.net/v/t51.2885-15/e35/109722974_700687400494691_3485269402481264947_n.jpg?_nc_ht=instagram.ffjr1-4.fna.fbcdn.net&_nc_cat=111&_nc_ohc=ci-rRE7uHWsAX9cE_4g&oh=3fff8bf446b402afc71746b0b9c36d45&oe=5F3F7197&ig_cache_key=MjM1ODE4OTgxMTgwNTY4MTAzOQ%3D%3D.2 101 | --------------------------------------------------------------------------------