├── .gitignore
├── InterFaceGAN++.ipynb
├── LICENSE
├── README.md
├── README_old.md
├── boundaries
│   ├── stylegan2_ffhq_z
│   │   ├── boundary_5_o_Clock_Shadow.npy
│   │   ├── boundary_Arched_Eyebrows.npy
│   │   ├── boundary_Attractive.npy
│   │   ├── boundary_Bags_Under_Eyes.npy
│   │   ├── boundary_Bald.npy
│   │   ├── boundary_Bangs.npy
│   │   ├── boundary_Big_Lips.npy
│   │   ├── boundary_Big_Nose.npy
│   │   ├── boundary_Black_Hair.npy
│   │   ├── boundary_Blond_Hair.npy
│   │   ├── boundary_Brown_Hair.npy
│   │   ├── boundary_Bushy_Eyebrows.npy
│   │   ├── boundary_Chubby.npy
│   │   ├── boundary_Double_Chin.npy
│   │   ├── boundary_Eyeglasses.npy
│   │   ├── boundary_Gender.npy
│   │   ├── boundary_Goatee.npy
│   │   ├── boundary_Gray_Hair.npy
│   │   ├── boundary_Heavy_Makeup.npy
│   │   ├── boundary_High_Cheekbones.npy
│   │   ├── boundary_Mouth_Slightly_Open.npy
│   │   ├── boundary_Mustache.npy
│   │   ├── boundary_Narrow_Eyes.npy
│   │   ├── boundary_No_Beard.npy
│   │   ├── boundary_Oval_Face.npy
│   │   ├── boundary_Pale_Skin.npy
│   │   ├── boundary_Pointy_Nose.npy
│   │   ├── boundary_Receding_Hairline.npy
│   │   ├── boundary_Rosy_Cheeks.npy
│   │   ├── boundary_Sideburns.npy
│   │   ├── boundary_Smiling.npy
│   │   ├── boundary_Straight_Hair.npy
│   │   ├── boundary_Wavy_Hair.npy
│   │   ├── boundary_Wearing_Earrings.npy
│   │   ├── boundary_Wearing_Hat.npy
│   │   ├── boundary_Wearing_Lipstick.npy
│   │   ├── boundary_Wearing_Necklace.npy
│   │   ├── boundary_Wearing_Necktie.npy
│   │   └── boundary_Young.npy
│   ├── stylegan3_ffhq_z
│   │   ├── boundary_5_o_Clock_Shadow.npy
│   │   ├── boundary_Arched_Eyebrows.npy
│   │   ├── boundary_Attractive.npy
│   │   ├── boundary_Bags_Under_Eyes.npy
│   │   ├── boundary_Bald.npy
│   │   ├── boundary_Bangs.npy
│   │   ├── boundary_Big_Lips.npy
│   │   ├── boundary_Big_Nose.npy
│   │   ├── boundary_Black_Hair.npy
│   │   ├── boundary_Blond_Hair.npy
│   │   ├── boundary_Brown_Hair.npy
│   │   ├── boundary_Bushy_Eyebrows.npy
│   │   ├── boundary_Chubby.npy
│   │   ├── boundary_Double_Chin.npy
│   │   ├── boundary_Eyeglasses.npy
│   │   ├── boundary_Gender.npy
│   │   ├── boundary_Goatee.npy
│   │   ├── boundary_Gray_Hair.npy
│   │   ├── boundary_Heavy_Makeup.npy
│   │   ├── boundary_High_Cheekbones.npy
│   │   ├── boundary_Mouth_Slightly_Open.npy
│   │   ├── boundary_Mustache.npy
│   │   ├── boundary_Narrow_Eyes.npy
│   │   ├── boundary_No_Beard.npy
│   │   ├── boundary_Oval_Face.npy
│   │   ├── boundary_Pale_Skin.npy
│   │   ├── boundary_Pointy_Nose.npy
│   │   ├── boundary_Receding_Hairline.npy
│   │   ├── boundary_Rosy_Cheeks.npy
│   │   ├── boundary_Sideburns.npy
│   │   ├── boundary_Smiling.npy
│   │   ├── boundary_Straight_Hair.npy
│   │   ├── boundary_Wavy_Hair.npy
│   │   ├── boundary_Wearing_Earrings.npy
│   │   ├── boundary_Wearing_Hat.npy
│   │   ├── boundary_Wearing_Lipstick.npy
│   │   ├── boundary_Wearing_Necklace.npy
│   │   ├── boundary_Wearing_Necktie.npy
│   │   └── boundary_Young.npy
│   ├── stylegan_ffhq_w
│   │   ├── boundary_5_o_Clock_Shadow.npy
│   │   ├── boundary_Arched_Eyebrows.npy
│   │   ├── boundary_Attractive.npy
│   │   ├── boundary_Bags_Under_Eyes.npy
│   │   ├── boundary_Bald.npy
│   │   ├── boundary_Bangs.npy
│   │   ├── boundary_Big_Lips.npy
│   │   ├── boundary_Big_Nose.npy
│   │   ├── boundary_Black_Hair.npy
│   │   ├── boundary_Blond_Hair.npy
│   │   ├── boundary_Brown_Hair.npy
│   │   ├── boundary_Bushy_Eyebrows.npy
│   │   ├── boundary_Chubby.npy
│   │   ├── boundary_Double_Chin.npy
│   │   ├── boundary_Eyeglasses.npy
│   │   ├── boundary_Gender.npy
│   │   ├── boundary_Goatee.npy
│   │   ├── boundary_Gray_Hair.npy
│   │   ├── boundary_Heavy_Makeup.npy
│   │   ├── boundary_High_Cheekbones.npy
│   │   ├── boundary_Mouth_Slightly_Open.npy
│   │   ├── boundary_Mustache.npy
│   │   ├── boundary_Narrow_Eyes.npy
│   │   ├── boundary_No_Beard.npy
│   │   ├── boundary_Oval_Face.npy
│   │   ├── boundary_Pale_Skin.npy
│   │   ├── boundary_Pointy_Nose.npy
│   │   ├── boundary_Receding_Hairline.npy
│   │   ├── boundary_Rosy_Cheeks.npy
│   │   ├── boundary_Sideburns.npy
│   │   ├── boundary_Smiling.npy
│   │   ├── boundary_Straight_Hair.npy
│   │   ├── boundary_Wavy_Hair.npy
│   │   ├── boundary_Wearing_Earrings.npy
│   │   ├── boundary_Wearing_Hat.npy
│   │   ├── boundary_Wearing_Lipstick.npy
│   │   ├── boundary_Wearing_Necklace.npy
│   │   ├── boundary_Wearing_Necktie.npy
│   │   └── boundary_Young.npy
│   └── stylegan_ffhq_z
│       ├── boundary_5_o_Clock_Shadow.npy
│       ├── boundary_Arched_Eyebrows.npy
│       ├── boundary_Attractive.npy
│       ├── boundary_Bags_Under_Eyes.npy
│       ├── boundary_Bald.npy
│       ├── boundary_Bangs.npy
│       ├── boundary_Big_Lips.npy
│       ├── boundary_Big_Nose.npy
│       ├── boundary_Black_Hair.npy
│       ├── boundary_Blond_Hair.npy
│       ├── boundary_Brown_Hair.npy
│       ├── boundary_Bushy_Eyebrows.npy
│       ├── boundary_Chubby.npy
│       ├── boundary_Double_Chin.npy
│       ├── boundary_Eyeglasses.npy
│       ├── boundary_Gender.npy
│       ├── boundary_Goatee.npy
│       ├── boundary_Gray_Hair.npy
│       ├── boundary_Heavy_Makeup.npy
│       ├── boundary_High_Cheekbones.npy
│       ├── boundary_Mouth_Slightly_Open.npy
│       ├── boundary_Mustache.npy
│       ├── boundary_Narrow_Eyes.npy
│       ├── boundary_No_Beard.npy
│       ├── boundary_Oval_Face.npy
│       ├── boundary_Pale_Skin.npy
│       ├── boundary_Pointy_Nose.npy
│       ├── boundary_Receding_Hairline.npy
│       ├── boundary_Rosy_Cheeks.npy
│       ├── boundary_Sideburns.npy
│       ├── boundary_Smiling.npy
│       ├── boundary_Straight_Hair.npy
│       ├── boundary_Wavy_Hair.npy
│       ├── boundary_Wearing_Earrings.npy
│       ├── boundary_Wearing_Hat.npy
│       ├── boundary_Wearing_Lipstick.npy
│       ├── boundary_Wearing_Necklace.npy
│       ├── boundary_Wearing_Necktie.npy
│       └── boundary_Young.npy
├── dnnlib
│   ├── __init__.py
│   └── util.py
├── docs
│   ├── InterFaceGAN.ipynb
│   ├── assets
│   │   ├── age.gif
│   │   ├── artifact.gif
│   │   ├── bootstrap.min.css
│   │   ├── expression.gif
│   │   ├── eyeglasses.gif
│   │   ├── font.css
│   │   ├── ganalyze.jpg
│   │   ├── gandissection.jpg
│   │   ├── gender.gif
│   │   ├── genforce.png
│   │   ├── higan.jpg
│   │   ├── pose.gif
│   │   ├── steerability.jpg
│   │   ├── style.css
│   │   └── teaser.jpg
│   └── index.html
├── edit.py
├── generate_data.py
├── images
│   ├── bald1.gif
│   ├── bald2.gif
│   ├── blond.gif
│   ├── gray_hair.gif
│   ├── high_cheekbones.gif
│   ├── makeup.gif
│   ├── sg2.jpeg
│   ├── sg2_not_young.jpeg
│   ├── sg3_beard.jpeg
│   ├── sg3_before.jpeg
│   ├── sg_before.jpeg
│   └── sg_grey_hair.jpeg
├── models
│   ├── __init__.py
│   ├── base_generator.py
│   ├── model_settings.py
│   ├── pggan_generator.py
│   ├── pggan_generator_model.py
│   ├── pggan_tf_official
│   │   ├── LICENSE.txt
│   │   ├── README.md
│   │   ├── config.py
│   │   ├── dataset.py
│   │   ├── dataset_tool.py
│   │   ├── legacy.py
│   │   ├── loss.py
│   │   ├── metrics
│   │   │   ├── __init__.py
│   │   │   ├── frechet_inception_distance.py
│   │   │   ├── inception_score.py
│   │   │   ├── ms_ssim.py
│   │   │   └── sliced_wasserstein.py
│   │   ├── misc.py
│   │   ├── networks.py
│   │   ├── requirements-pip.txt
│   │   ├── tfutil.py
│   │   ├── train.py
│   │   └── util_scripts.py
│   ├── pretrain
│   │   └── Pretrained_Models_Should_Be_Placed_Here
│   ├── stylegan2_generator.py
│   ├── stylegan3_generator.py
│   ├── stylegan3_official_network.py
│   ├── stylegan_generator.py
│   ├── stylegan_generator_model.py
│   └── stylegan_tf_official
│       ├── LICENSE.txt
│       ├── README.md
│       ├── config.py
│       ├── dataset_tool.py
│       ├── dnnlib
│       │   ├── __init__.py
│       │   ├── submission
│       │   │   ├── __init__.py
│       │   │   ├── _internal
│       │   │   │   └── run.py
│       │   │   ├── run_context.py
│       │   │   └── submit.py
│       │   ├── tflib
│       │   │   ├── __init__.py
│       │   │   ├── autosummary.py
│       │   │   ├── network.py
│       │   │   ├── optimizer.py
│       │   │   └── tfutil.py
│       │   └── util.py
│       ├── generate_figures.py
│       ├── metrics
│       │   ├── __init__.py
│       │   ├── frechet_inception_distance.py
│       │   ├── linear_separability.py
│       │   ├── metric_base.py
│       │   └── perceptual_path_length.py
│       ├── pretrained_example.py
│       ├── run_metrics.py
│       ├── train.py
│       └── training
│           ├── __init__.py
│           ├── dataset.py
│           ├── loss.py
│           ├── misc.py
│           ├── networks_progan.py
│           ├── networks_stylegan.py
│           └── training_loop.py
├── torch_utils
│   ├── __init__.py
│   ├── custom_ops.py
│   ├── misc.py
│   ├── ops
│   │   ├── __init__.py
│   │   ├── bias_act.cpp
│   │   ├── bias_act.cu
│   │   ├── bias_act.h
│   │   ├── bias_act.py
│   │   ├── conv2d_gradfix.py
│   │   ├── conv2d_resample.py
│   │   ├── filtered_lrelu.cpp
│   │   ├── filtered_lrelu.cu
│   │   ├── filtered_lrelu.h
│   │   ├── filtered_lrelu.py
│   │   ├── filtered_lrelu_ns.cu
│   │   ├── filtered_lrelu_rd.cu
│   │   ├── filtered_lrelu_wr.cu
│   │   ├── fma.py
│   │   ├── grid_sample_gradfix.py
│   │   ├── upfirdn2d.cpp
│   │   ├── upfirdn2d.cu
│   │   ├── upfirdn2d.h
│   │   └── upfirdn2d.py
│   ├── persistence.py
│   └── training_stats.py
├── train_boundary.py
└── utils
    ├── __init__.py
    ├── logger.py
    └── manipulator.py
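
Note: in this layout, train_boundary.py fits a linear separating hyperplane for
each CelebA attribute, and edit.py moves latent codes along that hyperplane's
normal; the boundaries/* folders above hold the resulting normals per generator
and latent space (z or w). As a minimal sketch of that edit step, assuming (as
in the original InterFaceGAN code) each .npy stores an L2-normalized normal of
shape (1, 512):

    # Sketch only: linear attribute editing, z' = z + alpha * n.
    # Assumes the boundary file holds a unit normal of shape (1, 512).
    import numpy as np

    boundary = np.load('boundaries/stylegan2_ffhq_z/boundary_Smiling.npy')
    z = np.random.RandomState(0).randn(1, 512)  # a latent code in Z space

    # Larger |alpha| pushes the code further across the boundary,
    # strengthening or removing the attribute in the generated face.
    for alpha in np.linspace(-3.0, 3.0, 7):
        z_edit = z + alpha * boundary
        # z_edit would then be fed to a generator wrapper,
        # e.g. the one in models/stylegan2_generator.py.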
/.gitignore:
--------------------------------------------------------------------------------
__pycache__/
*.py[cod]

*.jpg
*.png
*.jpeg
*.npy
log.txt

/results/
/models/pretrain/
/data/
!/boundaries/*.npy

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 Younes Belkada

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_5_o_Clock_Shadow.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_5_o_Clock_Shadow.npy
--------------------------------------------------------------------------------
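
Note: throughout this dump, binary .npy payloads are replaced by the raw GitHub
URL of the file at a pinned commit, as above. A small sketch for fetching and
inspecting one of them (the URL is copied verbatim from this section; the
expected (1, 512) shape is an assumption, not verified here):

    # Sketch only: download a boundary placeholder and load it with NumPy.
    import io
    import urllib.request

    import numpy as np

    url = ('https://raw.githubusercontent.com/younesbelkada/interfacegan/'
           '9d8df611af795490abddfa6ddd0452c6ef309a41/'
           'boundaries/stylegan2_ffhq_z/boundary_5_o_Clock_Shadow.npy')

    with urllib.request.urlopen(url) as resp:
        boundary = np.load(io.BytesIO(resp.read()))

    print(boundary.shape)  # expected: (1, 512)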
/boundaries/stylegan2_ffhq_z/boundary_Arched_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Arched_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Attractive.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Attractive.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Bags_Under_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Bags_Under_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Bald.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Bald.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Bangs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Bangs.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Big_Lips.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Big_Lips.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Big_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Big_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Black_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Black_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Blond_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Blond_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Brown_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Brown_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Bushy_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Bushy_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Chubby.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Chubby.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Double_Chin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Double_Chin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Eyeglasses.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Eyeglasses.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Gender.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Goatee.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Goatee.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Gray_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Gray_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Heavy_Makeup.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Heavy_Makeup.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_High_Cheekbones.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_High_Cheekbones.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Mouth_Slightly_Open.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Mouth_Slightly_Open.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Mustache.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Mustache.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Narrow_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Narrow_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_No_Beard.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_No_Beard.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Oval_Face.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Oval_Face.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Pale_Skin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Pale_Skin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Pointy_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Pointy_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Receding_Hairline.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Receding_Hairline.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Rosy_Cheeks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Rosy_Cheeks.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Sideburns.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Sideburns.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Smiling.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Smiling.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Straight_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Straight_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wavy_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wavy_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wearing_Earrings.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wearing_Earrings.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wearing_Hat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wearing_Hat.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wearing_Lipstick.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wearing_Lipstick.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wearing_Necklace.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wearing_Necklace.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Wearing_Necktie.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Wearing_Necktie.npy
--------------------------------------------------------------------------------
/boundaries/stylegan2_ffhq_z/boundary_Young.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan2_ffhq_z/boundary_Young.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_5_o_Clock_Shadow.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_5_o_Clock_Shadow.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Arched_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Arched_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Attractive.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Attractive.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Bags_Under_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Bags_Under_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Bald.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Bald.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Bangs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Bangs.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Big_Lips.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Big_Lips.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Big_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Big_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Black_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Black_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Blond_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Blond_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Brown_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Brown_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Bushy_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Bushy_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Chubby.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Chubby.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Double_Chin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Double_Chin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Eyeglasses.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Eyeglasses.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Gender.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Goatee.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Goatee.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Gray_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Gray_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Heavy_Makeup.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Heavy_Makeup.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_High_Cheekbones.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_High_Cheekbones.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Mouth_Slightly_Open.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Mouth_Slightly_Open.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Mustache.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Mustache.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Narrow_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Narrow_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_No_Beard.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_No_Beard.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Oval_Face.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Oval_Face.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Pale_Skin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Pale_Skin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Pointy_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Pointy_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Receding_Hairline.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Receding_Hairline.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Rosy_Cheeks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Rosy_Cheeks.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Sideburns.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Sideburns.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Smiling.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Smiling.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Straight_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Straight_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wavy_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wavy_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wearing_Earrings.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wearing_Earrings.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wearing_Hat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wearing_Hat.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wearing_Lipstick.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wearing_Lipstick.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wearing_Necklace.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wearing_Necklace.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Wearing_Necktie.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Wearing_Necktie.npy
--------------------------------------------------------------------------------
/boundaries/stylegan3_ffhq_z/boundary_Young.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan3_ffhq_z/boundary_Young.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_5_o_Clock_Shadow.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_5_o_Clock_Shadow.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Arched_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Arched_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Attractive.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Attractive.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Bags_Under_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Bags_Under_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Bald.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Bald.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Bangs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Bangs.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Big_Lips.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Big_Lips.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Big_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Big_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Black_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Black_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Blond_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Blond_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Brown_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Brown_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Bushy_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Bushy_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Chubby.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Chubby.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Double_Chin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Double_Chin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Eyeglasses.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Eyeglasses.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Gender.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Goatee.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Goatee.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Gray_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Gray_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Heavy_Makeup.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Heavy_Makeup.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_High_Cheekbones.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_High_Cheekbones.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Mouth_Slightly_Open.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Mouth_Slightly_Open.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Mustache.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Mustache.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Narrow_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Narrow_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_No_Beard.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_No_Beard.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Oval_Face.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Oval_Face.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Pale_Skin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Pale_Skin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Pointy_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Pointy_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Receding_Hairline.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Receding_Hairline.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Rosy_Cheeks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Rosy_Cheeks.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Sideburns.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Sideburns.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Smiling.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Smiling.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Straight_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Straight_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wavy_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wavy_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wearing_Earrings.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wearing_Earrings.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wearing_Hat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wearing_Hat.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wearing_Lipstick.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wearing_Lipstick.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wearing_Necklace.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wearing_Necklace.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Wearing_Necktie.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Wearing_Necktie.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_w/boundary_Young.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_w/boundary_Young.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_5_o_Clock_Shadow.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_5_o_Clock_Shadow.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Arched_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Arched_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Attractive.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Attractive.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Bags_Under_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Bags_Under_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Bald.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Bald.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Bangs.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Bangs.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Big_Lips.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Big_Lips.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Big_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Big_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Black_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Black_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Blond_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Blond_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Brown_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Brown_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Bushy_Eyebrows.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Bushy_Eyebrows.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Chubby.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Chubby.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Double_Chin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Double_Chin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Eyeglasses.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Eyeglasses.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Gender.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Goatee.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Goatee.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Gray_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Gray_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Heavy_Makeup.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Heavy_Makeup.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_High_Cheekbones.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_High_Cheekbones.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Mouth_Slightly_Open.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Mouth_Slightly_Open.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Mustache.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Mustache.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Narrow_Eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Narrow_Eyes.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_No_Beard.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_No_Beard.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Oval_Face.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Oval_Face.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Pale_Skin.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Pale_Skin.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Pointy_Nose.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Pointy_Nose.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Receding_Hairline.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Receding_Hairline.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Rosy_Cheeks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Rosy_Cheeks.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Sideburns.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Sideburns.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Smiling.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Smiling.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Straight_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Straight_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wavy_Hair.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wavy_Hair.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wearing_Earrings.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wearing_Earrings.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wearing_Hat.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wearing_Hat.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wearing_Lipstick.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wearing_Lipstick.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wearing_Necklace.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wearing_Necklace.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Wearing_Necktie.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Wearing_Necktie.npy
--------------------------------------------------------------------------------
/boundaries/stylegan_ffhq_z/boundary_Young.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/boundaries/stylegan_ffhq_z/boundary_Young.npy
--------------------------------------------------------------------------------
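Each `boundary_*.npy` above stores the learned separating hyperplane for one
CelebA attribute in the Z space of StyleGAN (FFHQ). A minimal inspection
sketch, assuming the InterFaceGAN convention of a single normal vector of
shape [1, latent_space_dim] stored with unit length (verify both before
relying on them):

import numpy as np

normal = np.load('boundaries/stylegan_ffhq_z/boundary_Young.npy')
print(normal.shape)            # expected (1, 512) under the assumption above
print(np.linalg.norm(normal))  # ~1.0 if the normal is stored normalized

--------------------------------------------------------------------------------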
/dnnlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | from .util import EasyDict, make_cache_dir_path
10 |
--------------------------------------------------------------------------------
/docs/assets/age.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/age.gif
--------------------------------------------------------------------------------
/docs/assets/artifact.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/artifact.gif
--------------------------------------------------------------------------------
/docs/assets/expression.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/expression.gif
--------------------------------------------------------------------------------
/docs/assets/eyeglasses.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/eyeglasses.gif
--------------------------------------------------------------------------------
/docs/assets/font.css:
--------------------------------------------------------------------------------
1 | /* Homepage Font */
2 |
3 | /* latin-ext */
4 | @font-face {
5 | font-family: 'Lato';
6 | font-style: normal;
7 | font-weight: 400;
8 | src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v16/S6uyw4BMUTPHjxAwXjeu.woff2) format('woff2');
9 | unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF;
10 | }
11 |
12 | /* latin */
13 | @font-face {
14 | font-family: 'Lato';
15 | font-style: normal;
16 | font-weight: 400;
17 | src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v16/S6uyw4BMUTPHjx4wXg.woff2) format('woff2');
18 | unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD;
19 | }
20 |
21 | /* latin-ext */
22 | @font-face {
23 | font-family: 'Lato';
24 | font-style: normal;
25 | font-weight: 700;
26 | src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v16/S6u9w4BMUTPHh6UVSwaPGR_p.woff2) format('woff2');
27 | unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF;
28 | }
29 |
30 | /* latin */
31 | @font-face {
32 | font-family: 'Lato';
33 | font-style: normal;
34 | font-weight: 700;
35 | src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v16/S6u9w4BMUTPHh6UVSwiPGQ.woff2) format('woff2');
36 | unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD;
37 | }
38 |
--------------------------------------------------------------------------------
/docs/assets/ganalyze.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/ganalyze.jpg
--------------------------------------------------------------------------------
/docs/assets/gandissection.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/gandissection.jpg
--------------------------------------------------------------------------------
/docs/assets/gender.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/gender.gif
--------------------------------------------------------------------------------
/docs/assets/genforce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/genforce.png
--------------------------------------------------------------------------------
/docs/assets/higan.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/higan.jpg
--------------------------------------------------------------------------------
/docs/assets/pose.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/pose.gif
--------------------------------------------------------------------------------
/docs/assets/steerability.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/steerability.jpg
--------------------------------------------------------------------------------
/docs/assets/style.css:
--------------------------------------------------------------------------------
1 | /* Body */
2 | body {
3 | background: #e3e5e8;
4 | color: #ffffff;
5 | font-family: 'Lato', Verdana, Helvetica, sans-serif;
6 | font-weight: 300;
7 | font-size: 14pt;
8 | }
9 |
10 | /* Hyperlinks */
11 | a {text-decoration: none;}
12 | a:link {color: #1772d0;}
13 | a:visited {color: #1772d0;}
14 | a:active {color: red;}
15 | a:hover {color: #f09228;}
16 |
17 | /* Pre-formatted Text */
18 | pre {
19 | margin: 5pt 0;
20 | border: 0;
21 | font-size: 12pt;
22 | background: #fcfcfc;
23 | }
24 |
25 | /* Project Page Style */
26 | /* Section */
27 | .section {
28 | width: 768pt;
29 | min-height: 100pt;
30 | margin: 15pt auto;
31 | padding: 20pt 30pt;
32 | border: 1pt hidden #000;
33 | text-align: justify;
34 | color: #000000;
35 | background: #ffffff;
36 | }
37 |
38 | /* Header (Title and Logo) */
39 | .section .header {
40 | min-height: 80pt;
41 | margin-top: 30pt;
42 | }
43 | .section .header .logo {
44 | width: 80pt;
45 | margin-left: 10pt;
46 | float: left;
47 | }
48 | .section .header .logo img {
49 | width: 80pt;
50 | object-fit: cover;
51 | }
52 | .section .header .title {
53 | margin: 0 120pt;
54 | text-align: center;
55 | font-size: 22pt;
56 | }
57 |
58 | /* Author */
59 | .section .author {
60 | margin: 5pt 0;
61 | text-align: center;
62 | font-size: 16pt;
63 | }
64 |
65 | /* Institution */
66 | .section .institution {
67 | margin: 5pt 0;
68 | text-align: center;
69 | font-size: 16pt;
70 | }
71 |
72 | /* Hyperlink (such as Paper and Code) */
73 | .section .link {
74 | margin: 5pt 0;
75 | text-align: center;
76 | font-size: 16pt;
77 | }
78 |
79 | /* Teaser */
80 | .section .teaser {
81 | margin: 20pt 0;
82 | text-align: center;
83 | }
84 | .section .teaser img {
85 | width: 95%;
86 | }
87 |
88 | /* Section Title */
89 | .section .title {
90 | text-align: center;
91 | font-size: 22pt;
92 | margin: 5pt 0 15pt 0; /* top right bottom left */
93 | }
94 |
95 | /* Section Body */
96 | .section .body {
97 | margin-bottom: 15pt;
98 | text-align: justify;
99 | font-size: 14pt;
100 | }
101 |
102 | /* BibTeX */
103 | .section .bibtex {
104 | margin: 5pt 0;
105 | text-align: left;
106 | font-size: 22pt;
107 | }
108 |
109 | /* Related Work */
110 | .section .ref {
111 | margin: 20pt 0 10pt 0; /* top right bottom left */
112 | text-align: left;
113 | font-size: 18pt;
114 | font-weight: bold;
115 | }
116 |
117 | /* Citation */
118 | .section .citation {
119 | min-height: 60pt;
120 | margin: 10pt 0;
121 | }
122 | .section .citation .image {
123 | width: 120pt;
124 | float: left;
125 | }
126 | .section .citation .image img {
127 | max-height: 60pt;
128 | width: 120pt;
129 | object-fit: cover;
130 | }
131 | .section .citation .comment{
132 | margin-left: 130pt;
133 | text-align: left;
134 | font-size: 14pt;
135 | }
136 |
--------------------------------------------------------------------------------
/docs/assets/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/docs/assets/teaser.jpg
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
InterFaceGAN

1 The Chinese University of Hong Kong
2 The Chinese University of Hong Kong, Shenzhen

Overview

We find that the latent code of well-trained generative models, such as PGGAN
and StyleGAN, actually learns a disentangled representation after some linear
transformations. Based on our analysis, we propose a simple and general
technique, called InterFaceGAN, for semantic face editing in latent space. We
manage to control the pose as well as other facial attributes, such as gender,
age, and eyeglasses. More importantly, we are able to correct the artifacts
made by GANs.

Results

We manipulate the following attributes with PGGAN: Pose, Age, Gender,
Expression, Eyeglasses, Artifacts.

Check more results in the following video. [VIDEO]

BibTeX

@inproceedings{shen2020interpreting,
  title     = {Interpreting the Latent Space of GANs for Semantic Face Editing},
  author    = {Shen, Yujun and Gu, Jinjin and Tang, Xiaoou and Zhou, Bolei},
  booktitle = {CVPR},
  year      = {2020}
}

@article{shen2020interfacegan,
  title   = {InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs},
  author  = {Shen, Yujun and Yang, Ceyuan and Tang, Xiaoou and Zhou, Bolei},
  journal = {TPAMI},
  year    = {2020}
}

Related Work
--------------------------------------------------------------------------------
/edit.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Edits latent codes with respect to given boundary.
3 |
4 | Basically, this file takes latent codes and a semantic boundary as inputs, and
5 | then shows how the image synthesis will change if the latent codes are moved
6 | towards the given boundary.
7 |
8 | NOTE: If you want to use W or W+ space of StyleGAN, please do not randomly
9 | sample the latent code, since neither the W nor the W+ space follows a Gaussian
10 | distribution. Instead, please use `generate_data.py` to get the latent vectors
11 | from W or W+ space first, and then use `--input_latent_codes_path` option to
12 | pass in the latent vectors.
13 | """
14 |
15 | import os.path
16 | import argparse
17 | import cv2
18 | import numpy as np
19 | from tqdm import tqdm
20 |
21 | from models.model_settings import MODEL_POOL
22 | from models.pggan_generator import PGGANGenerator
23 | from models.stylegan_generator import StyleGANGenerator
24 | from models.stylegan3_generator import StyleGAN3Generator
25 | from models.stylegan2_generator import StyleGAN2Generator
26 | from utils.logger import setup_logger
27 | from utils.manipulator import linear_interpolate
28 |
29 |
30 | def parse_args():
31 | """Parses arguments."""
32 | parser = argparse.ArgumentParser(
33 | description='Edit image synthesis with given semantic boundary.')
34 | parser.add_argument('-m', '--model_name', type=str, required=True,
35 | choices=list(MODEL_POOL),
36 | help='Name of the model for generation. (required)')
37 | parser.add_argument('-o', '--output_dir', type=str, required=True,
38 | help='Directory to save the output results. (required)')
39 | parser.add_argument('-b', '--boundary_path', type=str, required=True,
40 | help='Path to the semantic boundary. (required)')
41 | parser.add_argument('-i', '--input_latent_codes_path', type=str, default='',
42 | help='If specified, will load latent codes from given '
43 | 'path instead of randomly sampling. (optional)')
44 | parser.add_argument('-n', '--num', type=int, default=1,
45 | help='Number of images for editing. This field will be '
46 | 'ignored if `input_latent_codes_path` is specified. '
47 | '(default: 1)')
48 | parser.add_argument('-s', '--latent_space_type', type=str, default='z',
49 | choices=['z', 'Z', 'w', 'W', 'wp', 'wP', 'Wp', 'WP'],
50 |                       help='Latent space used in StyleGAN. (default: `z`)')
51 | parser.add_argument('--start_distance', type=float, default=-3.0,
52 | help='Start point for manipulation in latent space. '
53 | '(default: -3.0)')
54 | parser.add_argument('--end_distance', type=float, default=3.0,
55 | help='End point for manipulation in latent space. '
56 | '(default: 3.0)')
57 | parser.add_argument('--steps', type=int, default=10,
58 | help='Number of steps for image editing. (default: 10)')
59 |
60 | return parser.parse_args()
61 |
62 |
63 | def main():
64 | """Main function."""
65 | args = parse_args()
66 | logger = setup_logger(args.output_dir, logger_name='edit')
67 |
68 | logger.info(f'Initializing generator.')
69 | gan_type = MODEL_POOL[args.model_name]['gan_type']
70 | if gan_type == 'pggan':
71 | model = PGGANGenerator(args.model_name, logger)
72 | kwargs = {}
73 | elif gan_type == 'stylegan':
74 | model = StyleGANGenerator(args.model_name, logger)
75 | kwargs = {'latent_space_type': args.latent_space_type}
76 | elif gan_type == 'stylegan3':
77 | model = StyleGAN3Generator(args.model_name, logger)
78 | kwargs = {'latent_space_type': args.latent_space_type}
79 | elif gan_type == 'stylegan2':
80 | model = StyleGAN2Generator(args.model_name, logger)
81 | kwargs = {'latent_space_type': args.latent_space_type}
82 | else:
83 | raise NotImplementedError(f'Not implemented GAN type `{gan_type}`!')
84 |
85 | logger.info(f'Preparing boundary.')
86 | if not os.path.isfile(args.boundary_path):
87 | raise ValueError(f'Boundary `{args.boundary_path}` does not exist!')
88 | boundary = np.load(args.boundary_path)
89 | np.save(os.path.join(args.output_dir, 'boundary.npy'), boundary)
90 |
91 | logger.info(f'Preparing latent codes.')
92 | if os.path.isfile(args.input_latent_codes_path):
93 | logger.info(f' Load latent codes from `{args.input_latent_codes_path}`.')
94 | latent_codes = np.load(args.input_latent_codes_path)
95 | latent_codes = model.preprocess(latent_codes, **kwargs)
96 | else:
97 | logger.info(f' Sample latent codes randomly.')
98 | latent_codes = model.easy_sample(args.num, **kwargs)
99 | np.save(os.path.join(args.output_dir, 'latent_codes.npy'), latent_codes)
100 | total_num = latent_codes.shape[0]
101 |
102 | logger.info(f'Editing {total_num} samples.')
103 | for sample_id in tqdm(range(total_num), leave=False):
104 | interpolations = linear_interpolate(latent_codes[sample_id:sample_id + 1],
105 | boundary,
106 | start_distance=args.start_distance,
107 | end_distance=args.end_distance,
108 | steps=args.steps)
109 | interpolation_id = 0
110 | for interpolations_batch in model.get_batch_inputs(interpolations):
111 | if gan_type == 'pggan':
112 | outputs = model.easy_synthesize(interpolations_batch)
113 | elif gan_type == 'stylegan':
114 | outputs = model.easy_synthesize(interpolations_batch, **kwargs)
115 | elif gan_type in ['stylegan3', 'stylegan2']:
116 | outputs = model.easy_synthesize(interpolations_batch, **kwargs)
117 | for image in outputs['image']:
118 | save_path = os.path.join(args.output_dir,
119 | f'{sample_id:03d}_{interpolation_id:03d}.jpg')
120 | cv2.imwrite(save_path, image[:, :, ::-1])
121 | interpolation_id += 1
122 | assert interpolation_id == args.steps
123 | logger.debug(f' Finished sample {sample_id:3d}.')
124 | logger.info(f'Successfully edited {total_num} samples.')
125 |
126 |
127 | if __name__ == '__main__':
128 | main()
129 |
--------------------------------------------------------------------------------
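`edit.py` delegates the actual manipulation to
`utils.manipulator.linear_interpolate`, which is not reproduced in this dump.
A minimal numpy sketch of that operation, with the signature inferred from the
call site above and the boundary assumed to be a [1, latent_dim] normal
vector:

import numpy as np

def linear_interpolate(latent_code, boundary,
                       start_distance=-3.0, end_distance=3.0, steps=10):
    """Moves one latent code along the boundary normal in `steps` increments."""
    assert latent_code.shape[0] == 1 and boundary.shape[0] == 1
    distances = np.linspace(start_distance, end_distance, steps)
    distances = distances.reshape(-1, 1).astype(np.float32)
    # Each output row is the input code shifted by a different signed distance.
    return latent_code + distances * boundary  # shape: [steps, latent_dim]

z = np.random.randn(1, 512).astype(np.float32)
boundary = np.load('boundaries/stylegan_ffhq_z/boundary_Smiling.npy')
print(linear_interpolate(z, boundary).shape)  # (10, 512)

--------------------------------------------------------------------------------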
/generate_data.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Generates a collection of images with specified model.
3 |
4 | Commonly, this file is used for data preparation. More specifically, before
5 | exploring the hidden semantics of the latent space, users need to prepare a
6 | collection of images. These images can be used for further attribute prediction.
7 | In this way, one can build a relationship between the input latent codes and
8 | the corresponding attribute scores.
9 | """
10 |
11 | import os.path
12 | import argparse
13 | from collections import defaultdict
14 | import cv2
15 | import numpy as np
16 | from tqdm import tqdm
17 |
18 | from models.model_settings import MODEL_POOL
19 | from models.pggan_generator import PGGANGenerator
20 | from models.stylegan_generator import StyleGANGenerator
21 | from models.stylegan3_generator import StyleGAN3Generator
22 | from models.stylegan2_generator import StyleGAN2Generator
23 | from utils.logger import setup_logger
24 |
25 |
26 | def parse_args():
27 | """Parses arguments."""
28 | parser = argparse.ArgumentParser(
29 | description='Generate images with given model.')
30 | parser.add_argument('-m', '--model_name', type=str, required=True,
31 | choices=list(MODEL_POOL),
32 | help='Name of the model for generation. (required)')
33 | parser.add_argument('-o', '--output_dir', type=str, required=True,
34 | help='Directory to save the output results. (required)')
35 | parser.add_argument('-i', '--latent_codes_path', type=str, default='',
36 | help='If specified, will load latent codes from given '
37 | 'path instead of randomly sampling. (optional)')
38 | parser.add_argument('-n', '--num', type=int, default=1,
39 | help='Number of images to generate. This field will be '
40 | 'ignored if `latent_codes_path` is specified. '
41 | '(default: 1)')
42 | parser.add_argument('-s', '--latent_space_type', type=str, default='z',
43 | choices=['z', 'Z', 'w', 'W', 'wp', 'wP', 'Wp', 'WP'],
44 |                       help='Latent space used in StyleGAN. (default: `z`)')
45 |   parser.add_argument('-S', '--generate_style', action='store_true',
46 |                       help='If specified, will generate layer-wise style codes '
47 |                            'in StyleGAN. (default: do not generate styles)')
48 |   parser.add_argument('-I', '--generate_image', action='store_false',
49 |                       help='If specified, will skip generating images in '
50 |                            'StyleGAN. (default: generate images)')
51 |
52 | parser.add_argument('-r', '--resize', type=int, default=224,
53 |                       help='Resize the output images to the given size. (default: 224)')
54 |
55 | return parser.parse_args()
56 |
57 |
58 | def main():
59 | """Main function."""
60 | args = parse_args()
61 | logger = setup_logger(args.output_dir, logger_name='generate_data')
62 |
63 | logger.info(f'Initializing generator.')
64 | gan_type = MODEL_POOL[args.model_name]['gan_type']
65 | if gan_type == 'pggan':
66 | model = PGGANGenerator(args.model_name, logger)
67 | kwargs = {}
68 | elif gan_type == 'stylegan':
69 | model = StyleGANGenerator(args.model_name, logger)
70 | kwargs = {'latent_space_type': args.latent_space_type}
71 | elif gan_type == 'stylegan3':
72 | model = StyleGAN3Generator(args.model_name, logger)
73 | kwargs = {'latent_space_type': args.latent_space_type}
74 | elif gan_type == 'stylegan2':
75 | model = StyleGAN2Generator(args.model_name, logger)
76 | kwargs = {'latent_space_type': args.latent_space_type}
77 | else:
78 | raise NotImplementedError(f'Not implemented GAN type `{gan_type}`!')
79 |
80 | logger.info(f'Preparing latent codes.')
81 | if os.path.isfile(args.latent_codes_path):
82 | logger.info(f' Load latent codes from `{args.latent_codes_path}`.')
83 | latent_codes = np.load(args.latent_codes_path)
84 | latent_codes = model.preprocess(latent_codes, **kwargs)
85 | else:
86 | logger.info(f' Sample latent codes randomly.')
87 | latent_codes = model.easy_sample(args.num, **kwargs)
88 | total_num = latent_codes.shape[0]
89 |
90 | logger.info(f'Generating {total_num} samples.')
91 | results = defaultdict(list)
92 | pbar = tqdm(total=total_num, leave=False)
93 | for latent_codes_batch in model.get_batch_inputs(latent_codes):
94 | if gan_type == 'pggan':
95 | outputs = model.easy_synthesize(latent_codes_batch)
96 | elif gan_type == 'stylegan':
97 | outputs = model.easy_synthesize(latent_codes_batch,
98 | **kwargs,
99 | generate_style=args.generate_style,
100 | generate_image=args.generate_image)
101 | elif gan_type == 'stylegan3' or gan_type == 'stylegan2':
102 | outputs = model.easy_synthesize(latent_codes_batch,
103 | **kwargs,
104 | generate_style=args.generate_style,
105 | generate_image=args.generate_image)
106 | for key, val in outputs.items():
107 | if key == 'image':
108 | for image in val:
109 | image = image[:, :, ::-1]
110 | save_path = os.path.join(args.output_dir, f'{pbar.n:06d}.jpg')
111 | if args.resize:
112 | resize_dim = (args.resize, args.resize)
113 |           image = cv2.resize(image, resize_dim, interpolation=cv2.INTER_CUBIC)
114 | cv2.imwrite(save_path, image)
115 | pbar.update(1)
116 | else:
117 | results[key].append(val)
118 | if 'image' not in outputs:
119 | pbar.update(latent_codes_batch.shape[0])
120 | if pbar.n % 1000 == 0 or pbar.n == total_num:
121 | logger.debug(f' Finish {pbar.n:6d} samples.')
122 | pbar.close()
123 |
124 | logger.info(f'Saving results.')
125 | for key, val in results.items():
126 | save_path = os.path.join(args.output_dir, f'{key}.npy')
127 | np.save(save_path, np.concatenate(val, axis=0))
128 |
129 |
130 | if __name__ == '__main__':
131 | main()
132 |
--------------------------------------------------------------------------------
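Taken together with the NOTE in `edit.py`, the intended W-space workflow is to
sample and save latent vectors with `generate_data.py` first, then hand them
to `edit.py` via `--input_latent_codes_path`. A hypothetical session (the
model name matches `MODEL_POOL`; the output directories and the `w.npy`
filename are assumptions, since `generate_data.py` saves one `<key>.npy` per
non-image output key):

python generate_data.py -m stylegan_ffhq -o data/ffhq_w -n 10000 -s w
python edit.py -m stylegan_ffhq -o results/smiling \
    -b boundaries/stylegan_ffhq_w/boundary_Smiling.npy \
    -i data/ffhq_w/w.npy -s w --steps 10

--------------------------------------------------------------------------------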
/images/bald1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/bald1.gif
--------------------------------------------------------------------------------
/images/bald2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/bald2.gif
--------------------------------------------------------------------------------
/images/blond.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/blond.gif
--------------------------------------------------------------------------------
/images/gray_hair.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/gray_hair.gif
--------------------------------------------------------------------------------
/images/high_cheekbones.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/high_cheekbones.gif
--------------------------------------------------------------------------------
/images/makeup.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/makeup.gif
--------------------------------------------------------------------------------
/images/sg2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg2.jpeg
--------------------------------------------------------------------------------
/images/sg2_not_young.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg2_not_young.jpeg
--------------------------------------------------------------------------------
/images/sg3_beard.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg3_beard.jpeg
--------------------------------------------------------------------------------
/images/sg3_before.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg3_before.jpeg
--------------------------------------------------------------------------------
/images/sg_before.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg_before.jpeg
--------------------------------------------------------------------------------
/images/sg_grey_hair.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/images/sg_grey_hair.jpeg
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/models/__init__.py
--------------------------------------------------------------------------------
/models/model_settings.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Contains basic configurations for models used in this project.
3 |
4 | Please download the publicly released models from the following repositories
5 | OR train your own models, and then put them into the `pretrain` folder.
6 |
7 | ProgressiveGAN: https://github.com/tkarras/progressive_growing_of_gans
8 | StyleGAN: https://github.com/NVlabs/stylegan
9 | StyleGAN2 / StyleGAN3: https://github.com/NVlabs/stylegan3
10 |
11 | NOTE: Any new model should be registered in `MODEL_POOL` before using.
12 | """
13 |
14 | import os.path
15 |
16 | BASE_DIR = os.path.dirname(os.path.relpath(__file__))
17 |
18 | MODEL_DIR = BASE_DIR + '/pretrain'
19 |
20 | MODEL_POOL = {
21 | 'pggan_celebahq': {
22 | 'tf_model_path': MODEL_DIR + '/karras2018iclr-celebahq-1024x1024.pkl',
23 | 'model_path': MODEL_DIR + '/pggan_celebahq.pth',
24 | 'gan_type': 'pggan',
25 | 'dataset_name': 'celebahq',
26 | 'latent_space_dim': 512,
27 | 'resolution': 1024,
28 | 'min_val': -1.0,
29 | 'max_val': 1.0,
30 | 'output_channels': 3,
31 | 'channel_order': 'RGB',
32 | 'fused_scale': False,
33 | },
34 | 'stylegan_celebahq': {
35 | 'tf_model_path':
36 | MODEL_DIR + '/karras2019stylegan-celebahq-1024x1024.pkl',
37 | 'model_path': MODEL_DIR + '/stylegan_celebahq.pth',
38 | 'gan_type': 'stylegan',
39 | 'dataset_name': 'celebahq',
40 | 'latent_space_dim': 512,
41 | 'w_space_dim': 512,
42 | 'resolution': 1024,
43 | 'min_val': -1.0,
44 | 'max_val': 1.0,
45 | 'output_channels': 3,
46 | 'channel_order': 'RGB',
47 | 'fused_scale': 'auto',
48 | },
49 | 'stylegan_ffhq': {
50 | 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl',
51 | 'model_path': MODEL_DIR + '/stylegan_ffhq.pth',
52 | 'gan_type': 'stylegan',
53 | 'dataset_name': 'ffhq',
54 | 'latent_space_dim': 512,
55 | 'w_space_dim': 512,
56 | 'resolution': 1024,
57 | 'min_val': -1.0,
58 | 'max_val': 1.0,
59 | 'output_channels': 3,
60 | 'channel_order': 'RGB',
61 | 'fused_scale': 'auto',
62 | },
63 | 'stylegan2_ffhq': {
64 | 'tf_model_path': MODEL_DIR + '/karras2019stylegan-ffhq-1024x1024.pkl',
65 | 'model_path': MODEL_DIR + '/stylegan2-ffhq-1024x1024.pkl',
66 | 'gan_type': 'stylegan2',
67 | 'dataset_name': 'ffhq',
68 | 'latent_space_dim': 512,
69 | 'w_space_dim': 512,
70 | 'c_space_dim': 512,
71 | 'resolution': 1024,
72 | 'min_val': -1.0,
73 | 'max_val': 1.0,
74 | 'output_channels': 3,
75 | 'channel_order': 'RGB',
76 | 'fused_scale': 'auto',
77 | },
78 | 'stylegan3_ffhq': {
79 | 'model_path': MODEL_DIR + '/stylegan3-t-ffhq-1024x1024.pkl',
80 | 'gan_type': 'stylegan3',
81 | 'dataset_name': 'ffhq',
82 | 'latent_space_dim': 512,
83 | 'w_space_dim': 512,
84 | 'c_space_dim': 512,
85 | 'resolution': 1024,
86 | 'min_val': -1.0,
87 | 'max_val': 1.0,
88 | 'output_channels': 3,
89 | 'channel_order': 'RGB',
90 | 'fused_scale': 'auto',
91 | },
92 | }
93 |
94 | # Settings for StyleGAN.
95 | STYLEGAN_TRUNCATION_PSI = 0.7 # 1.0 means no truncation
96 | STYLEGAN_TRUNCATION_LAYERS = 8 # 0 means no truncation
97 | STYLEGAN_RANDOMIZE_NOISE = False
98 |
99 | # Settings for model running.
100 | USE_CUDA = True
101 |
102 | MAX_IMAGES_ON_DEVICE = 8
103 |
--------------------------------------------------------------------------------
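The docstring above requires any new model to be registered in `MODEL_POOL`
before use. A hypothetical registration mirroring the `stylegan_ffhq` entry
(the model name and weight file are made up for illustration; only the keys
the generator classes actually read are meaningful):

from models.model_settings import MODEL_DIR, MODEL_POOL

MODEL_POOL['stylegan_mymodel'] = {
    'model_path': MODEL_DIR + '/stylegan_mymodel.pth',  # hypothetical weights
    'gan_type': 'stylegan',  # must be a type the scripts dispatch on
    'dataset_name': 'mydataset',
    'latent_space_dim': 512,
    'w_space_dim': 512,
    'resolution': 1024,
    'min_val': -1.0,
    'max_val': 1.0,
    'output_channels': 3,
    'channel_order': 'RGB',
    'fused_scale': 'auto',
}

--------------------------------------------------------------------------------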
/models/pggan_generator.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Contains the generator class of ProgressiveGAN.
3 |
4 | Basically, this class is derived from the `BaseGenerator` class defined in
5 | `base_generator.py`.
6 | """
7 |
8 | import os
9 | import numpy as np
10 |
11 | import torch
12 |
13 | from . import model_settings
14 | from .pggan_generator_model import PGGANGeneratorModel
15 | from .base_generator import BaseGenerator
16 |
17 | __all__ = ['PGGANGenerator']
18 |
19 |
20 | class PGGANGenerator(BaseGenerator):
21 | """Defines the generator class of ProgressiveGAN."""
22 |
23 | def __init__(self, model_name, logger=None):
24 | super().__init__(model_name, logger)
25 | assert self.gan_type == 'pggan'
26 |
27 | def build(self):
28 | self.check_attr('fused_scale')
29 | self.model = PGGANGeneratorModel(resolution=self.resolution,
30 | fused_scale=self.fused_scale,
31 | output_channels=self.output_channels)
32 |
33 | def load(self):
34 | self.logger.info(f'Loading pytorch model from `{self.model_path}`.')
35 | self.model.load_state_dict(torch.load(self.model_path))
36 | self.logger.info(f'Successfully loaded!')
37 | self.lod = self.model.lod.to(self.cpu_device).tolist()
38 | self.logger.info(f' `lod` of the loaded model is {self.lod}.')
39 |
40 | def convert_tf_model(self, test_num=10):
41 | import sys
42 | import pickle
43 | import tensorflow as tf
44 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
45 | sys.path.append(model_settings.BASE_DIR + '/pggan_tf_official')
46 |
47 | self.logger.info(f'Loading tensorflow model from `{self.tf_model_path}`.')
48 | tf.InteractiveSession()
49 | with open(self.tf_model_path, 'rb') as f:
50 | _, _, tf_model = pickle.load(f)
51 | self.logger.info(f'Successfully loaded!')
52 |
53 | self.logger.info(f'Converting tensorflow model to pytorch version.')
54 | tf_vars = dict(tf_model.__getstate__()['variables'])
55 | state_dict = self.model.state_dict()
56 | for pth_var_name, tf_var_name in self.model.pth_to_tf_var_mapping.items():
57 | if 'ToRGB_lod' in tf_var_name:
58 | lod = int(tf_var_name[len('ToRGB_lod')])
59 | lod_shift = 10 - int(np.log2(self.resolution))
60 | tf_var_name = tf_var_name.replace(f'{lod}', f'{lod - lod_shift}')
61 | if tf_var_name not in tf_vars:
62 | self.logger.debug(f'Variable `{tf_var_name}` does not exist in '
63 | f'tensorflow model.')
64 | continue
65 | self.logger.debug(f' Converting `{tf_var_name}` to `{pth_var_name}`.')
66 | var = torch.from_numpy(np.array(tf_vars[tf_var_name]))
67 | if 'weight' in pth_var_name:
68 | if 'layer0.conv' in pth_var_name:
69 | var = var.view(var.shape[0], -1, 4, 4).permute(1, 0, 2, 3).flip(2, 3)
70 | elif 'Conv0_up' in tf_var_name:
71 | var = var.permute(0, 1, 3, 2)
72 | else:
73 | var = var.permute(3, 2, 0, 1)
74 | state_dict[pth_var_name] = var
75 | self.logger.info(f'Successfully converted!')
76 |
77 | self.logger.info(f'Saving pytorch model to `{self.model_path}`.')
78 | torch.save(state_dict, self.model_path)
79 | self.logger.info(f'Successfully saved!')
80 |
81 | self.load()
82 |
83 | # Official tensorflow model can only run on GPU.
84 | if test_num <= 0 or not tf.test.is_built_with_cuda():
85 | return
86 | self.logger.info(f'Testing conversion results.')
87 | self.model.eval().to(self.run_device)
88 | label_dim = tf_model.input_shapes[1][1]
89 | tf_fake_label = np.zeros((1, label_dim), np.float32)
90 | total_distance = 0.0
91 | for i in range(test_num):
92 | latent_code = self.easy_sample(1)
93 | tf_output = tf_model.run(latent_code, tf_fake_label)
94 | pth_output = self.synthesize(latent_code)['image']
95 | distance = np.average(np.abs(tf_output - pth_output))
96 | self.logger.debug(f' Test {i:03d}: distance {distance:.6e}.')
97 | total_distance += distance
98 | self.logger.info(f'Average distance is {total_distance / test_num:.6e}.')
99 |
100 | def sample(self, num):
101 | assert num > 0
102 | return np.random.randn(num, self.latent_space_dim).astype(np.float32)
103 |
104 | def preprocess(self, latent_codes):
105 | if not isinstance(latent_codes, np.ndarray):
106 | raise ValueError(f'Latent codes should be with type `numpy.ndarray`!')
107 |
108 | latent_codes = latent_codes.reshape(-1, self.latent_space_dim)
109 | norm = np.linalg.norm(latent_codes, axis=1, keepdims=True)
110 | latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim)
111 | return latent_codes.astype(np.float32)
112 |
113 | def synthesize(self, latent_codes):
114 | if not isinstance(latent_codes, np.ndarray):
115 | raise ValueError(f'Latent codes should be with type `numpy.ndarray`!')
116 | latent_codes_shape = latent_codes.shape
117 | if not (len(latent_codes_shape) == 2 and
118 | latent_codes_shape[0] <= self.batch_size and
119 | latent_codes_shape[1] == self.latent_space_dim):
120 | raise ValueError(f'Latent_codes should be with shape [batch_size, '
121 | f'latent_space_dim], where `batch_size` no larger than '
122 | f'{self.batch_size}, and `latent_space_dim` equal to '
123 | f'{self.latent_space_dim}!\n'
124 | f'But {latent_codes_shape} received!')
125 |
126 | zs = torch.from_numpy(latent_codes).type(torch.FloatTensor)
127 | zs = zs.to(self.run_device)
128 | images = self.model(zs)
129 | results = {
130 | 'z': latent_codes,
131 | 'image': self.get_value(images),
132 | }
133 | return results
134 |
--------------------------------------------------------------------------------
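`PGGANGenerator.preprocess` rescales every latent code onto the hypersphere of
radius sqrt(latent_space_dim), presumably to match the pixel-wise feature
normalization ProgressiveGAN applies to its input vector. A quick numeric
check of that rescaling:

import numpy as np

d = 512
z = np.random.randn(4, d).astype(np.float32)
z = z / np.linalg.norm(z, axis=1, keepdims=True) * np.sqrt(d)
print(np.linalg.norm(z, axis=1))  # all ~22.63, i.e. sqrt(512)

--------------------------------------------------------------------------------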
/models/pggan_tf_official/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/models/pggan_tf_official/README.md
--------------------------------------------------------------------------------
/models/pggan_tf_official/legacy.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | import pickle
9 | import inspect
10 | import numpy as np
11 |
12 | import tfutil
13 | import networks
14 |
15 | #----------------------------------------------------------------------------
16 | # Custom unpickler that is able to load network pickles produced by
17 | # the old Theano implementation.
18 |
19 | class LegacyUnpickler(pickle.Unpickler):
20 | def __init__(self, *args, **kwargs):
21 | super().__init__(*args, **kwargs)
22 |
23 | def find_class(self, module, name):
24 | if module == 'network' and name == 'Network':
25 | return tfutil.Network
26 | return super().find_class(module, name)
27 |
28 | #----------------------------------------------------------------------------
29 | # Import handler for tfutil.Network that silently converts networks produced
30 | # by the old Theano implementation to a suitable format.
31 |
32 | theano_gan_remap = {
33 | 'G_paper': 'G_paper',
34 | 'G_progressive_8': 'G_paper',
35 | 'D_paper': 'D_paper',
36 | 'D_progressive_8': 'D_paper'}
37 |
38 | def patch_theano_gan(state):
39 | if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap:
40 | return state
41 |
42 | spec = dict(state['build_func_spec'])
43 | func = spec.pop('func')
44 | resolution = spec.get('resolution', 32)
45 | resolution_log2 = int(np.log2(resolution))
46 | use_wscale = spec.get('use_wscale', True)
47 |
48 | assert spec.pop('label_size', 0) == 0
49 | assert spec.pop('use_batchnorm', False) == False
50 | assert spec.pop('tanh_at_end', None) is None
51 | assert spec.pop('mbstat_func', 'Tstdeps') == 'Tstdeps'
52 | assert spec.pop('mbstat_avg', 'all') == 'all'
53 | assert spec.pop('mbdisc_kernels', None) is None
54 | spec.pop( 'use_gdrop', True) # doesn't make a difference
55 | assert spec.pop('use_layernorm', False) == False
56 | spec[ 'fused_scale'] = False
57 | spec[ 'mbstd_group_size'] = 16
58 |
59 | vars = []
60 | param_iter = iter(state['param_values'])
61 | relu = np.sqrt(2); linear = 1.0
62 | def flatten2(w): return w.reshape(w.shape[0], -1)
63 | def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1]))
64 | def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w
65 | def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))]
66 |
67 | if func.startswith('G'):
68 | vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3)))
69 | vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
70 | for res in range(3, resolution_log2 + 1):
71 | vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
72 | vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
73 | for lod in range(0, resolution_log2 - 1):
74 | vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis])
75 |
76 | if func.startswith('D'):
77 | vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis])
78 | for res in range(resolution_log2, 2, -1):
79 | vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
80 | vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
81 | vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis])
82 | vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
83 | vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose())
84 | vars += layer('4x4/Dense1', linear, next(param_iter))
85 |
86 | vars += [('lod', state['toplevel_params']['cur_lod'])]
87 |
88 | return {
89 | 'version': 2,
90 | 'name': func,
91 | 'build_module_src': inspect.getsource(networks),
92 | 'build_func_name': theano_gan_remap[func],
93 | 'static_kwargs': spec,
94 | 'variables': vars}
95 |
96 | tfutil.network_import_handlers.append(patch_theano_gan)
97 |
98 | #----------------------------------------------------------------------------
99 | # Import handler for tfutil.Network that ignores unsupported/deprecated
100 | # networks produced by older versions of the code.
101 |
102 | def ignore_unknown_theano_network(state):
103 | if 'version' in state:
104 | return state
105 |
106 | print('Ignoring unknown Theano network:', state['build_func_spec']['func'])
107 | return {
108 | 'version': 2,
109 | 'name': 'Dummy',
110 | 'build_module_src': 'def dummy(input, **kwargs): input.set_shape([None, 1]); return input',
111 | 'build_func_name': 'dummy',
112 | 'static_kwargs': {},
113 | 'variables': []}
114 |
115 | tfutil.network_import_handlers.append(ignore_unknown_theano_network)
116 |
117 | #----------------------------------------------------------------------------
118 |
--------------------------------------------------------------------------------
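A hypothetical use of `LegacyUnpickler`, assuming the usual ProgressiveGAN
snapshot layout of a (G, D, Gs) triple (the filename is made up; importing
`legacy` also registers the Theano import handlers as a side effect):

import legacy

with open('network-snapshot.pkl', 'rb') as f:
    G, D, Gs = legacy.LegacyUnpickler(f).load()

--------------------------------------------------------------------------------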
/models/pggan_tf_official/loss.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | import numpy as np
9 | import tensorflow as tf
10 |
11 | import tfutil
12 |
13 | #----------------------------------------------------------------------------
14 | # Convenience func that casts all of its arguments to tf.float32.
15 |
16 | def fp32(*values):
17 | if len(values) == 1 and isinstance(values[0], tuple):
18 | values = values[0]
19 | values = tuple(tf.cast(v, tf.float32) for v in values)
20 | return values if len(values) >= 2 else values[0]
21 |
22 | #----------------------------------------------------------------------------
23 | # Generator loss function used in the paper (WGAN + AC-GAN).
24 |
25 | def G_wgan_acgan(G, D, opt, training_set, minibatch_size,
26 | cond_weight = 1.0): # Weight of the conditioning term.
27 |
28 | latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
29 | labels = training_set.get_random_labels_tf(minibatch_size)
30 | fake_images_out = G.get_output_for(latents, labels, is_training=True)
31 | fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
32 | loss = -fake_scores_out
33 |
34 | if D.output_shapes[1][1] > 0:
35 | with tf.name_scope('LabelPenalty'):
36 | label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
37 | loss += label_penalty_fakes * cond_weight
38 | return loss
39 |
40 | #----------------------------------------------------------------------------
41 | # Discriminator loss function used in the paper (WGAN-GP + AC-GAN).
42 |
43 | def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
44 | wgan_lambda = 10.0, # Weight for the gradient penalty term.
45 | wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}.
46 | wgan_target = 1.0, # Target value for gradient magnitudes.
47 | cond_weight = 1.0): # Weight of the conditioning terms.
48 |
49 | latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
50 | fake_images_out = G.get_output_for(latents, labels, is_training=True)
51 | real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True))
52 | fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
53 | real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
54 | fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
55 | loss = fake_scores_out - real_scores_out
56 |
57 | with tf.name_scope('GradientPenalty'):
58 | mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
59 | mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
60 | mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
61 | mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
62 | mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
63 | mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
64 | mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
65 | mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
66 | gradient_penalty = tf.square(mixed_norms - wgan_target)
67 | loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
68 |
69 | with tf.name_scope('EpsilonPenalty'):
70 | epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
71 | loss += epsilon_penalty * wgan_epsilon
72 |
73 | if D.output_shapes[1][1] > 0:
74 | with tf.name_scope('LabelPenalty'):
75 | label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
76 | label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
77 | label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
78 | label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
79 | loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
80 | return loss
81 |
82 | #----------------------------------------------------------------------------
83 |
--------------------------------------------------------------------------------
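In symbols, and before the optional AC-GAN label penalty, `D_wgangp_acgan`
implements

    L_D = E[D(x_fake)] - E[D(x_real)]
          + (wgan_lambda / wgan_target^2) * E[(||grad D(x_mixed)|| - wgan_target)^2]
          + wgan_epsilon * E[D(x_real)^2]

where x_mixed is a random per-sample convex combination of real and fake
images (the `mixing_factors` interpolation above), and the last term is the
epsilon-drift penalty that keeps real scores from drifting.

--------------------------------------------------------------------------------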
/models/pggan_tf_official/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
2 |
--------------------------------------------------------------------------------
/models/pggan_tf_official/metrics/inception_score.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 Wojciech Zaremba
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # Adapted from the original implementation by Wojciech Zaremba.
16 | # Source: https://github.com/openai/improved-gan/blob/master/inception_score/model.py
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import os.path
23 | import sys
24 | import tarfile
25 |
26 | import numpy as np
27 | from six.moves import urllib
28 | import tensorflow as tf
29 | import glob
30 | import scipy.misc
31 | import math
32 | import sys
33 |
34 | MODEL_DIR = '/tmp/imagenet'
35 |
36 | DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
37 | softmax = None
38 |
39 | # Call this function with a list of images. Each element should be a
40 | # numpy array with values ranging from 0 to 255.
41 | def get_inception_score(images, splits=10):
42 | assert(type(images) == list)
43 | assert(type(images[0]) == np.ndarray)
44 | assert(len(images[0].shape) == 3)
45 | #assert(np.max(images[0]) > 10) # EDIT: commented out
46 | #assert(np.min(images[0]) >= 0.0)
47 | inps = []
48 | for img in images:
49 | img = img.astype(np.float32)
50 | inps.append(np.expand_dims(img, 0))
51 | bs = 100
52 | with tf.Session() as sess:
53 | preds = []
54 | n_batches = int(math.ceil(float(len(inps)) / float(bs)))
55 | for i in range(n_batches):
56 | #sys.stdout.write(".") # EDIT: commented out
57 | #sys.stdout.flush()
58 | inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
59 | inp = np.concatenate(inp, 0)
60 | pred = sess.run(softmax, {'ExpandDims:0': inp})
61 | preds.append(pred)
62 | preds = np.concatenate(preds, 0)
63 | scores = []
64 | for i in range(splits):
65 | part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
66 | kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
67 | kl = np.mean(np.sum(kl, 1))
68 | scores.append(np.exp(kl))
69 | return np.mean(scores), np.std(scores)
70 |
71 | # This function is called automatically.
72 | def _init_inception():
73 | global softmax
74 | if not os.path.exists(MODEL_DIR):
75 | os.makedirs(MODEL_DIR)
76 | filename = DATA_URL.split('/')[-1]
77 | filepath = os.path.join(MODEL_DIR, filename)
78 | if not os.path.exists(filepath):
79 | def _progress(count, block_size, total_size):
80 | sys.stdout.write('\r>> Downloading %s %.1f%%' % (
81 | filename, float(count * block_size) / float(total_size) * 100.0))
82 | sys.stdout.flush()
83 | filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
84 | print()
85 | statinfo = os.stat(filepath)
86 | print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
87 | tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR) # EDIT: increased indent
88 | with tf.gfile.FastGFile(os.path.join(
89 | MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
90 | graph_def = tf.GraphDef()
91 | graph_def.ParseFromString(f.read())
92 | _ = tf.import_graph_def(graph_def, name='')
93 | # Works with an arbitrary minibatch size.
94 | with tf.Session() as sess:
95 | pool3 = sess.graph.get_tensor_by_name('pool_3:0')
96 | ops = pool3.graph.get_operations()
97 | for op_idx, op in enumerate(ops):
98 | for o in op.outputs:
99 | shape = o.get_shape()
100 | shape = [s.value for s in shape]
101 | new_shape = []
102 | for j, s in enumerate(shape):
103 | if s == 1 and j == 0:
104 | new_shape.append(None)
105 | else:
106 | new_shape.append(s)
107 | try:
108 | o._shape = tf.TensorShape(new_shape)
109 | except ValueError:
110 | o._shape_val = tf.TensorShape(new_shape) # EDIT: added for compatibility with tensorflow 1.6.0
111 | w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
112 | logits = tf.matmul(tf.squeeze(pool3), w)
113 | softmax = tf.nn.softmax(logits)
114 |
115 | #if softmax is None: # EDIT: commented out
116 | # _init_inception() # EDIT: commented out
117 |
118 | #----------------------------------------------------------------------------
119 | # EDIT: added
120 |
121 | class API:
122 | def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
123 | import config
124 | globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception')
125 | self.sess = tf.get_default_session()
126 | _init_inception()
127 |
128 | def get_metric_names(self):
129 | return ['IS_mean', 'IS_std']
130 |
131 | def get_metric_formatting(self):
132 | return ['%-10.4f', '%-10.4f']
133 |
134 | def begin(self, mode):
135 | assert mode in ['warmup', 'reals', 'fakes']
136 | self.images = []
137 |
138 | def feed(self, mode, minibatch):
139 | self.images.append(minibatch.transpose(0, 2, 3, 1))
140 |
141 | def end(self, mode):
142 | images = list(np.concatenate(self.images))
143 | with self.sess.as_default():
144 | mean, std = get_inception_score(images)
145 | return [mean, std]
146 |
147 | #----------------------------------------------------------------------------
148 |
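The score itself is exp(E_x[KL(p(y|x) || p(y))]), averaged over splits. Below is a standalone NumPy restatement of the arithmetic in `get_inception_score`, assuming `preds` already holds the (N, num_classes) softmax outputs:

import numpy as np

def inception_score_from_preds(preds, splits=10):
    scores = []
    for i in range(splits):
        part = preds[i * len(preds) // splits:(i + 1) * len(preds) // splits]
        # KL(p(y|x) || p(y)) per sample, then averaged and exponentiated.
        kl = part * (np.log(part) - np.log(part.mean(axis=0, keepdims=True)))
        scores.append(np.exp(kl.sum(axis=1).mean()))
    return float(np.mean(scores)), float(np.std(scores))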
--------------------------------------------------------------------------------
/models/pggan_tf_official/metrics/sliced_wasserstein.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | import numpy as np
9 | import scipy.ndimage
10 |
11 | #----------------------------------------------------------------------------
12 |
13 | def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image):
14 | S = minibatch.shape # (minibatch, channel, height, width)
15 | assert len(S) == 4 and S[1] == 3
16 | N = nhoods_per_image * S[0]
17 | H = nhood_size // 2
18 | nhood, chan, x, y = np.ogrid[0:N, 0:3, -H:H+1, -H:H+1]
19 | img = nhood // nhoods_per_image
20 | x = x + np.random.randint(H, S[3] - H, size=(N, 1, 1, 1))
21 | y = y + np.random.randint(H, S[2] - H, size=(N, 1, 1, 1))
22 | idx = ((img * S[1] + chan) * S[2] + y) * S[3] + x
23 | return minibatch.flat[idx]
24 |
25 | #----------------------------------------------------------------------------
26 |
27 | def finalize_descriptors(desc):
28 | if isinstance(desc, list):
29 | desc = np.concatenate(desc, axis=0)
30 | assert desc.ndim == 4 # (neighborhood, channel, height, width)
31 | desc -= np.mean(desc, axis=(0, 2, 3), keepdims=True)
32 | desc /= np.std(desc, axis=(0, 2, 3), keepdims=True)
33 | desc = desc.reshape(desc.shape[0], -1)
34 | return desc
35 |
36 | #----------------------------------------------------------------------------
37 |
38 | def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat):
39 | assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component)
40 | results = []
41 | for repeat in range(dir_repeats):
42 | dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (descriptor_component, direction)
43 | dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # normalize descriptor components for each direction
44 | dirs = dirs.astype(np.float32)
45 | projA = np.matmul(A, dirs) # (neighborhood, direction)
46 | projB = np.matmul(B, dirs)
47 | projA = np.sort(projA, axis=0) # sort neighborhood projections for each direction
48 | projB = np.sort(projB, axis=0)
49 | dists = np.abs(projA - projB) # pointwise wasserstein distances
50 | results.append(np.mean(dists)) # average over neighborhoods and directions
51 | return np.mean(results) # average over repeats
52 |
53 | #----------------------------------------------------------------------------
54 |
55 | def downscale_minibatch(minibatch, lod):
56 | if lod == 0:
57 | return minibatch
58 | t = minibatch.astype(np.float32)
59 | for i in range(lod):
60 | t = (t[:, :, 0::2, 0::2] + t[:, :, 0::2, 1::2] + t[:, :, 1::2, 0::2] + t[:, :, 1::2, 1::2]) * 0.25
61 | return np.round(t).clip(0, 255).astype(np.uint8)
62 |
63 | #----------------------------------------------------------------------------
64 |
65 | gaussian_filter = np.float32([
66 | [1, 4, 6, 4, 1],
67 | [4, 16, 24, 16, 4],
68 | [6, 24, 36, 24, 6],
69 | [4, 16, 24, 16, 4],
70 | [1, 4, 6, 4, 1]]) / 256.0
71 |
72 | def pyr_down(minibatch): # matches cv2.pyrDown()
73 | assert minibatch.ndim == 4
74 | return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2]
75 |
76 | def pyr_up(minibatch): # matches cv2.pyrUp()
77 | assert minibatch.ndim == 4
78 | S = minibatch.shape
79 | res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype)
80 | res[:, :, ::2, ::2] = minibatch
81 | return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror')
82 |
83 | def generate_laplacian_pyramid(minibatch, num_levels):
84 | pyramid = [np.float32(minibatch)]
85 | for i in range(1, num_levels):
86 | pyramid.append(pyr_down(pyramid[-1]))
87 | pyramid[-2] -= pyr_up(pyramid[-1])
88 | return pyramid
89 |
90 | def reconstruct_laplacian_pyramid(pyramid):
91 | minibatch = pyramid[-1]
92 | for level in pyramid[-2::-1]:
93 | minibatch = pyr_up(minibatch) + level
94 | return minibatch
95 |
96 | #----------------------------------------------------------------------------
97 |
98 | class API:
99 | def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
100 | self.nhood_size = 7
101 | self.nhoods_per_image = 128
102 | self.dir_repeats = 4
103 | self.dirs_per_repeat = 128
104 | self.resolutions = []
105 | res = image_shape[1]
106 | while res >= 16:
107 | self.resolutions.append(res)
108 | res //= 2
109 |
110 | def get_metric_names(self):
111 | return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg']
112 |
113 | def get_metric_formatting(self):
114 | return ['%-13.4f'] * len(self.get_metric_names())
115 |
116 | def begin(self, mode):
117 | assert mode in ['warmup', 'reals', 'fakes']
118 | self.descriptors = [[] for res in self.resolutions]
119 |
120 | def feed(self, mode, minibatch):
121 | for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))):
122 | desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image)
123 | self.descriptors[lod].append(desc)
124 |
125 | def end(self, mode):
126 | desc = [finalize_descriptors(d) for d in self.descriptors]
127 | del self.descriptors
128 | if mode in ['warmup', 'reals']:
129 | self.desc_real = desc
130 | dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)]
131 | del desc
132 | dist = [d * 1e3 for d in dist] # multiply by 10^3
133 | return dist + [np.mean(dist)]
134 |
135 | #----------------------------------------------------------------------------
136 |
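Putting the helpers above together, the distance between two image batches can be computed roughly as follows. This is a sketch with illustrative shapes and float32 inputs; the real pipeline feeds Laplacian-pyramid levels through the `API` class.

import numpy as np

reals = np.random.rand(8, 3, 64, 64).astype(np.float32)
fakes = np.random.rand(8, 3, 64, 64).astype(np.float32)

desc_real = finalize_descriptors(get_descriptors_for_minibatch(reals, nhood_size=7, nhoods_per_image=128))
desc_fake = finalize_descriptors(get_descriptors_for_minibatch(fakes, nhood_size=7, nhoods_per_image=128))
swd = sliced_wasserstein(desc_real, desc_fake, dir_repeats=4, dirs_per_repeat=128)
print('SWDx1e3: %.4f' % (swd * 1e3))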
--------------------------------------------------------------------------------
/models/pggan_tf_official/requirements-pip.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.13.3
2 | scipy>=1.0.0
3 | tensorflow-gpu>=1.6.0
4 | moviepy>=0.2.3.2
5 | Pillow>=3.1.1
6 | lmdb>=0.93
7 | opencv-python>=3.4.0.12
8 | cryptography>=2.1.4
9 | h5py>=2.7.1
10 | six>=1.11.0
11 |
--------------------------------------------------------------------------------
/models/pretrain/Pretrained_Models_Should_Be_Placed_Here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/models/pretrain/Pretrained_Models_Should_Be_Placed_Here
--------------------------------------------------------------------------------
/models/stylegan2_generator.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Contains the generator class of StyleGAN.
3 |
4 | Basically, this class is derived from the `BaseGenerator` class defined in
5 | `base_generator.py`.
6 | """
7 |
8 | import os
9 | import numpy as np
10 | import pickle
11 | from PIL import Image
12 |
13 | from typing import List, Optional, Tuple, Union
14 |
15 | import torch
16 |
17 | from . import model_settings
18 | from .stylegan3_official_network import StyleGAN3GeneratorModel
19 | from .base_generator import BaseGenerator
20 |
21 | __all__ = ['StyleGAN2Generator']
22 |
23 | def make_transform(translate: Tuple[float,float], angle: float):
24 | m = np.eye(3)
25 | s = np.sin(angle/360.0*np.pi*2)
26 | c = np.cos(angle/360.0*np.pi*2)
27 | m[0][0] = c
28 | m[0][1] = s
29 | m[0][2] = translate[0]
30 | m[1][0] = -s
31 | m[1][1] = c
32 | m[1][2] = translate[1]
33 | return m
34 |
35 | class StyleGAN2Generator(BaseGenerator):
36 | """Defines the generator class of StyleGAN.
37 |
38 | Different from conventional GAN, StyleGAN introduces a disentangled latent
39 | space (i.e., W space) besides the normal latent space (i.e., Z space). Then,
40 | the disentangled latent code, w, is fed into each convolutional layer to
41 | modulate the `style` of the synthesis through AdaIN (Adaptive Instance
42 | Normalization) layer. Normally, the w's fed into all layers are the same. But,
43 | they can actually be different to make different layers get different styles.
44 | Accordingly, an extended space (i.e. W+ space) is used to gather all w's
45 | together. Taking the official StyleGAN model trained on FF-HQ dataset as an
46 | instance, there are
47 | (1) Z space, with dimension (512,)
48 | (2) W space, with dimension (512,)
49 | (3) W+ space, with dimension (18, 512)
50 | """
51 |
52 | def __init__(self, model_name, logger=None):
53 | self.truncation_psi = model_settings.STYLEGAN_TRUNCATION_PSI
54 | self.truncation_layers = model_settings.STYLEGAN_TRUNCATION_LAYERS
55 | self.randomize_noise = model_settings.STYLEGAN_RANDOMIZE_NOISE
56 | self.model_specific_vars = ['truncation.truncation']
57 | super().__init__(model_name, logger)
58 | self.num_layers = (int(np.log2(self.resolution)) - 1) * 2
59 | assert self.gan_type in ['stylegan3', 'stylegan2']
60 |
61 | def build(self):
62 | self.check_attr('w_space_dim')
63 | self.check_attr('fused_scale')
64 | self.model = StyleGAN3GeneratorModel(
65 | img_resolution=self.resolution,
66 | w_dim=self.w_space_dim,
67 | z_dim=self.latent_space_dim,
68 | c_dim=self.c_space_dim,
69 | img_channels=3
70 | )
71 |
72 |
73 | def load(self):
74 | self.logger.info(f'Loading pytorch model from `{self.model_path}`.')
75 | with open(self.model_path, 'rb') as f:
76 | self.model = pickle.load(f)['G_ema']
77 | self.logger.info(f'Successfully loaded!')
78 | # self.lod = self.model.synthesis.lod.to(self.cpu_device).tolist()
79 | # self.logger.info(f' `lod` of the loaded model is {self.lod}.')
80 |
81 |
82 | def sample(self, num, latent_space_type='Z'):
83 | """Samples latent codes randomly.
84 |
85 | Args:
86 | num: Number of latent codes to sample. Should be positive.
87 | latent_space_type: Type of latent space from which to sample latent code.
88 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
89 |
90 | Returns:
91 | A `numpy.ndarray` of the sampled latent codes.
92 |
93 | Raises:
94 | ValueError: If the given `latent_space_type` is not supported.
95 | """
96 | latent_space_type = latent_space_type.upper()
97 | if latent_space_type == 'Z':
98 | latent_codes = np.random.randn(num, self.latent_space_dim)
99 | elif latent_space_type == 'W':
100 | latent_codes = np.random.randn(num, self.w_space_dim)
101 | elif latent_space_type == 'WP':
102 | latent_codes = np.random.randn(num, self.num_layers, self.w_space_dim)
103 | else:
104 | raise ValueError(f'Latent space type `{latent_space_type}` is invalid!')
105 |
106 | return latent_codes.astype(np.float32)
107 |
108 | def preprocess(self, latent_codes, latent_space_type='Z'):
109 | """Preprocesses the input latent code if needed.
110 |
111 | Args:
112 | latent_codes: The input latent codes for preprocessing.
113 | latent_space_type: Type of latent space to which the latent codes belong.
114 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
115 |
116 | Returns:
117 | The preprocessed latent codes which can be used as final input for the
118 | generator.
119 |
120 | Raises:
121 | ValueError: If the given `latent_space_type` is not supported.
122 | """
123 | if not isinstance(latent_codes, np.ndarray):
124 | raise ValueError('Latent codes should be of type `numpy.ndarray`!')
125 |
126 | latent_space_type = latent_space_type.upper()
127 | if latent_space_type == 'Z':
128 | latent_codes = latent_codes.reshape(-1, self.latent_space_dim)
129 | norm = np.linalg.norm(latent_codes, axis=1, keepdims=True)
130 | latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim)
131 | elif latent_space_type == 'W':
132 | latent_codes = latent_codes.reshape(-1, self.w_space_dim)
133 | elif latent_space_type == 'WP':
134 | latent_codes = latent_codes.reshape(-1, self.num_layers, self.w_space_dim)
135 | else:
136 | raise ValueError(f'Latent space type `{latent_space_type}` is invalid!')
137 |
138 | return latent_codes.astype(np.float32)
139 |
140 | def easy_sample(self, num, latent_space_type='Z'):
141 | return self.sample(num, latent_space_type)
142 |
143 | def synthesize(self,
144 | latent_codes,
145 | latent_space_type='Z',
146 | generate_style=False,
147 | generate_image=True):
148 | """Synthesizes images with given latent codes.
149 |
150 | One can choose whether to generate the layer-wise style codes.
151 |
152 | Args:
153 | latent_codes: Input latent codes for image synthesis.
154 | latent_space_type: Type of latent space to which the latent codes belong.
155 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
156 | generate_style: Whether to generate the layer-wise style codes. (default:
157 | False)
158 | generate_image: Whether to generate the final image synthesis. (default:
159 | True)
160 |
161 | Returns:
162 | A dictionary whose values are raw outputs from the generator.
163 | """
164 | if not isinstance(latent_codes, np.ndarray):
165 | raise ValueError('Latent codes should be of type `numpy.ndarray`!')
166 |
167 | results = {}
168 | translate = (0, 0)
169 | rotate = 0.0
170 | z = torch.from_numpy(latent_codes).to(self.run_device)
171 | label = torch.zeros([1, self.c_space_dim]).to(self.run_device)
172 |
173 | if hasattr(self.model.synthesis, 'input'):
174 | m = make_transform(translate, rotate)
175 | m = np.linalg.inv(m)
176 | self.model.synthesis.input.transform.copy_(torch.from_numpy(m))
177 |
178 | ws = self.model.mapping(z, label)
179 | #wps = self.model.truncation(w)
180 | img = self.model(z, label)
181 | img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
182 | img = img.cpu().numpy()
183 |
184 | results['image'] = img
185 | results['z'] = latent_codes
186 | results['w'] = ws.detach().cpu().numpy()
187 | #results['wp'] = wps.detach().cpu().numpy()
188 |
189 | return results
190 |
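A hypothetical driver for the class above; the model name `stylegan2_ffhq` and its registration in `model_settings` are assumptions:

from PIL import Image
from models.stylegan2_generator import StyleGAN2Generator

generator = StyleGAN2Generator('stylegan2_ffhq')              # assumed registered model name
codes = generator.easy_sample(num=4, latent_space_type='Z')   # (4, latent_space_dim) float32
outputs = generator.synthesize(codes, latent_space_type='Z')
Image.fromarray(outputs['image'][0]).save('sample.png')       # 'image' is (N, H, W, 3) uint8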
--------------------------------------------------------------------------------
/models/stylegan3_generator.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Contains the generator class of StyleGAN.
3 |
4 | Basically, this class is derived from the `BaseGenerator` class defined in
5 | `base_generator.py`.
6 | """
7 |
8 | import os
9 | import numpy as np
10 | import pickle
11 | from PIL import Image
12 |
13 | from typing import List, Optional, Tuple, Union
14 |
15 | import torch
16 |
17 | from . import model_settings
18 | from .stylegan3_official_network import StyleGAN3GeneratorModel
19 | from .base_generator import BaseGenerator
20 |
21 | __all__ = ['StyleGAN3Generator']
22 |
23 | def make_transform(translate: Tuple[float,float], angle: float):
24 | m = np.eye(3)
25 | s = np.sin(angle/360.0*np.pi*2)
26 | c = np.cos(angle/360.0*np.pi*2)
27 | m[0][0] = c
28 | m[0][1] = s
29 | m[0][2] = translate[0]
30 | m[1][0] = -s
31 | m[1][1] = c
32 | m[1][2] = translate[1]
33 | return m
34 |
35 | class StyleGAN3Generator(BaseGenerator):
36 | """Defines the generator class of StyleGAN.
37 |
38 | Different from conventional GAN, StyleGAN introduces a disentangled latent
39 | space (i.e., W space) besides the normal latent space (i.e., Z space). Then,
40 | the disentangled latent code, w, is fed into each convolutional layer to
41 | modulate the `style` of the synthesis through AdaIN (Adaptive Instance
42 | Normalization) layer. Normally, the w's fed into all layers are the same. But,
43 | they can actually be different to make different layers get different styles.
44 | Accordingly, an extended space (i.e. W+ space) is used to gather all w's
45 | together. Taking the official StyleGAN model trained on FF-HQ dataset as an
46 | instance, there are
47 | (1) Z space, with dimension (512,)
48 | (2) W space, with dimension (512,)
49 | (3) W+ space, with dimension (18, 512)
50 | """
51 |
52 | def __init__(self, model_name, logger=None):
53 | self.truncation_psi = model_settings.STYLEGAN_TRUNCATION_PSI
54 | self.truncation_layers = model_settings.STYLEGAN_TRUNCATION_LAYERS
55 | self.randomize_noise = model_settings.STYLEGAN_RANDOMIZE_NOISE
56 | self.model_specific_vars = ['truncation.truncation']
57 | super().__init__(model_name, logger)
58 | self.num_layers = (int(np.log2(self.resolution)) - 1) * 2
59 | assert self.gan_type in ['stylegan3', 'stylegan2']
60 |
61 | def build(self):
62 | self.check_attr('w_space_dim')
63 | self.check_attr('fused_scale')
64 | self.model = StyleGAN3GeneratorModel(
65 | img_resolution=self.resolution,
66 | w_dim=self.w_space_dim,
67 | z_dim=self.latent_space_dim,
68 | c_dim=self.c_space_dim,
69 | img_channels=3
70 | )
71 |
72 |
73 | def load(self):
74 | self.logger.info(f'Loading pytorch model from `{self.model_path}`.')
75 | with open(self.model_path, 'rb') as f:
76 | self.model = pickle.load(f)['G_ema']
77 | self.logger.info(f'Successfully loaded!')
78 | # self.lod = self.model.synthesis.lod.to(self.cpu_device).tolist()
79 | # self.logger.info(f' `lod` of the loaded model is {self.lod}.')
80 |
81 |
82 | def sample(self, num, latent_space_type='Z'):
83 | """Samples latent codes randomly.
84 |
85 | Args:
86 | num: Number of latent codes to sample. Should be positive.
87 | latent_space_type: Type of latent space from which to sample latent code.
88 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
89 |
90 | Returns:
91 | A `numpy.ndarray` of the sampled latent codes.
92 |
93 | Raises:
94 | ValueError: If the given `latent_space_type` is not supported.
95 | """
96 | latent_space_type = latent_space_type.upper()
97 | if latent_space_type == 'Z':
98 | latent_codes = np.random.randn(num, self.latent_space_dim)
99 | elif latent_space_type == 'W':
100 | latent_codes = np.random.randn(num, self.w_space_dim)
101 | elif latent_space_type == 'WP':
102 | latent_codes = np.random.randn(num, self.num_layers, self.w_space_dim)
103 | else:
104 | raise ValueError(f'Latent space type `{latent_space_type}` is invalid!')
105 |
106 | return latent_codes.astype(np.float32)
107 |
108 | def preprocess(self, latent_codes, latent_space_type='Z'):
109 | """Preprocesses the input latent code if needed.
110 |
111 | Args:
112 | latent_codes: The input latent codes for preprocessing.
113 | latent_space_type: Type of latent space to which the latent codes belong.
114 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
115 |
116 | Returns:
117 | The preprocessed latent codes which can be used as final input for the
118 | generator.
119 |
120 | Raises:
121 | ValueError: If the given `latent_space_type` is not supported.
122 | """
123 | if not isinstance(latent_codes, np.ndarray):
124 | raise ValueError('Latent codes should be of type `numpy.ndarray`!')
125 |
126 | latent_space_type = latent_space_type.upper()
127 | if latent_space_type == 'Z':
128 | latent_codes = latent_codes.reshape(-1, self.latent_space_dim)
129 | norm = np.linalg.norm(latent_codes, axis=1, keepdims=True)
130 | latent_codes = latent_codes / norm * np.sqrt(self.latent_space_dim)
131 | elif latent_space_type == 'W':
132 | latent_codes = latent_codes.reshape(-1, self.w_space_dim)
133 | elif latent_space_type == 'WP':
134 | latent_codes = latent_codes.reshape(-1, self.num_layers, self.w_space_dim)
135 | else:
136 | raise ValueError(f'Latent space type `{latent_space_type}` is invalid!')
137 |
138 | return latent_codes.astype(np.float32)
139 |
140 | def easy_sample(self, num, latent_space_type='Z'):
141 | return self.sample(num, latent_space_type)
142 |
143 | def synthesize(self,
144 | latent_codes,
145 | latent_space_type='Z',
146 | generate_style=False,
147 | generate_image=True):
148 | """Synthesizes images with given latent codes.
149 |
150 | One can choose whether to generate the layer-wise style codes.
151 |
152 | Args:
153 | latent_codes: Input latent codes for image synthesis.
154 | latent_space_type: Type of latent space to which the latent codes belong.
155 | Only [`Z`, `W`, `WP`] are supported. Case insensitive. (default: `Z`)
156 | generate_style: Whether to generate the layer-wise style codes. (default:
157 | False)
158 | generate_image: Whether to generate the final image synthesis. (default:
159 | True)
160 |
161 | Returns:
162 | A dictionary whose values are raw outputs from the generator.
163 | """
164 | if not isinstance(latent_codes, np.ndarray):
165 | raise ValueError('Latent codes should be of type `numpy.ndarray`!')
166 |
167 | results = {}
168 | translate = (0, 0)
169 | rotate = 0.0
170 | z = torch.from_numpy(latent_codes).to(self.run_device)
171 | label = torch.zeros([1, self.c_space_dim]).to(self.run_device)
172 |
173 | if hasattr(self.model.synthesis, 'input'):
174 | m = make_transform(translate, rotate)
175 | m = np.linalg.inv(m)
176 | self.model.synthesis.input.transform.copy_(torch.from_numpy(m))
177 |
178 | ws = self.model.mapping(z, label)
179 | #wps = self.model.truncation(w)
180 | img = self.model(z, label)
181 | img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
182 | img = img.cpu().numpy()
183 |
184 | results['image'] = img
185 | results['z'] = latent_codes
186 | results['w'] = ws.detach().cpu().numpy()
187 | #results['wp'] = wps.detach().cpu().numpy()
188 |
189 | return results
190 |
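Since these generator wrappers exist to support InterFaceGAN-style editing, the typical downstream use is to shift a sampled z along a trained boundary normal before synthesis. A minimal sketch, assuming a boundary file of shape (1, latent_space_dim); the path, model name, and step sizes are placeholders:

import numpy as np
from models.stylegan3_generator import StyleGAN3Generator

generator = StyleGAN3Generator('stylegan3_ffhq')   # assumed registered model name
boundary = np.load('path/to/boundary.npy')         # assumed (1, latent_space_dim) unit normal
z = generator.easy_sample(num=1, latent_space_type='Z')

for alpha in np.linspace(-3.0, 3.0, 7):            # walk across the decision boundary
    edited = (z + alpha * boundary).astype(np.float32)
    image = generator.synthesize(edited, latent_space_type='Z')['image'][0]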
--------------------------------------------------------------------------------
/models/stylegan_tf_official/config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Global configuration."""
9 |
10 | #----------------------------------------------------------------------------
11 | # Paths.
12 |
13 | result_dir = 'results'
14 | data_dir = 'datasets'
15 | cache_dir = 'cache'
16 | run_dir_ignore = ['results', 'datasets', 'cache']
17 |
18 | #----------------------------------------------------------------------------
19 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/dnnlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import submission
9 |
10 | from .submission.run_context import RunContext
11 |
12 | from .submission.submit import SubmitTarget
13 | from .submission.submit import PathType
14 | from .submission.submit import SubmitConfig
15 | from .submission.submit import get_path_from_template
16 | from .submission.submit import submit_run
17 |
18 | from .util import EasyDict
19 |
20 | submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
21 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/dnnlib/submission/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import run_context
9 | from . import submit
10 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/dnnlib/submission/_internal/run.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Helper for launching run functions in computing clusters.
9 |
10 | During the submit process, this file is copied to the appropriate run dir.
11 | When the job is launched in the cluster, this module is the first thing that
12 | is run inside the docker container.
13 | """
14 |
15 | import os
16 | import pickle
17 | import sys
18 |
19 | # PYTHONPATH should have been set so that the run_dir/src is in it
20 | import dnnlib
21 |
22 | def main():
23 | if len(sys.argv) < 4:
24 | raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
25 |
26 | run_dir = str(sys.argv[1])
27 | task_name = str(sys.argv[2])
28 | host_name = str(sys.argv[3])
29 |
30 | submit_config_path = os.path.join(run_dir, "submit_config.pkl")
31 |
32 | # SubmitConfig should have been pickled to the run dir
33 | if not os.path.exists(submit_config_path):
34 | raise RuntimeError("SubmitConfig pickle file does not exist!")
35 |
36 | submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb"))
37 | dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
38 |
39 | submit_config.task_name = task_name
40 | submit_config.host_name = host_name
41 |
42 | dnnlib.submission.submit.run_wrapper(submit_config)
43 |
44 | if __name__ == "__main__":
45 | main()
46 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/dnnlib/submission/run_context.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Helpers for managing the run/training loop."""
9 |
10 | import datetime
11 | import json
12 | import os
13 | import pprint
14 | import time
15 | import types
16 |
17 | from typing import Any
18 |
19 | from . import submit
20 |
21 |
22 | class RunContext(object):
23 | """Helper class for managing the run/training loop.
24 |
25 | The context hides the implementation details of a basic run/training loop.
26 | It sets things up properly, tells whether the run should be stopped, and cleans up afterwards.
27 | The user should call update periodically and use should_stop to determine whether the run should be stopped.
28 |
29 | Args:
30 | submit_config: The SubmitConfig that is used for the current run.
31 | config_module: The whole config module that is used for the current run.
32 | max_epoch: Optional cached value for the max_epoch variable used in update.
33 | """
34 |
35 | def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None):
36 | self.submit_config = submit_config
37 | self.should_stop_flag = False
38 | self.has_closed = False
39 | self.start_time = time.time()
40 | self.last_update_time = time.time()
41 | self.last_update_interval = 0.0
42 | self.max_epoch = max_epoch
43 |
44 | # pretty print all the relevant content of the config module to a text file
45 | if config_module is not None:
46 | with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f:
47 | filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))}
48 | pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False)
49 |
50 | # write out details about the run to a text file
51 | self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")}
52 | with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f:
53 | pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
54 |
55 | def __enter__(self) -> "RunContext":
56 | return self
57 |
58 | def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
59 | self.close()
60 |
61 | def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None:
62 | """Do general housekeeping and keep the state of the context up-to-date.
63 | Should be called often enough but not in a tight loop."""
64 | assert not self.has_closed
65 |
66 | self.last_update_interval = time.time() - self.last_update_time
67 | self.last_update_time = time.time()
68 |
69 | if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")):
70 | self.should_stop_flag = True
71 |
72 | max_epoch_val = self.max_epoch if max_epoch is None else max_epoch
73 |
74 | def should_stop(self) -> bool:
75 | """Tell whether a stopping condition has been triggered one way or another."""
76 | return self.should_stop_flag
77 |
78 | def get_time_since_start(self) -> float:
79 | """How much time has passed since the creation of the context."""
80 | return time.time() - self.start_time
81 |
82 | def get_time_since_last_update(self) -> float:
83 | """How much time has passed since the last call to update."""
84 | return time.time() - self.last_update_time
85 |
86 | def get_last_update_interval(self) -> float:
87 | """How much time passed between the previous two calls to update."""
88 | return self.last_update_interval
89 |
90 | def close(self) -> None:
91 | """Close the context and clean up.
92 | Should only be called once."""
93 | if not self.has_closed:
94 | # update the run.txt with stopping time
95 | self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ")
96 | with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f:
97 | pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
98 |
99 | self.has_closed = True
100 |
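For reference, the intended usage is a context-managed loop along these lines; `submit_config`, `num_epochs`, and the loop body are placeholders in this sketch:

with RunContext(submit_config) as ctx:
    for epoch in range(num_epochs):
        train_one_epoch()                          # placeholder for the real work
        ctx.update(loss=0.0, cur_epoch=epoch, max_epoch=num_epochs)
        if ctx.should_stop():                      # set once abort.txt appears in run_dir
            break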
--------------------------------------------------------------------------------
/models/stylegan_tf_official/dnnlib/tflib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import autosummary
9 | from . import network
10 | from . import optimizer
11 | from . import tfutil
12 |
13 | from .tfutil import *
14 | from .network import Network
15 |
16 | from .optimizer import Optimizer
17 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | # empty
9 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/metrics/frechet_inception_distance.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Frechet Inception Distance (FID)."""
9 |
10 | import os
11 | import numpy as np
12 | import scipy
13 | import tensorflow as tf
14 | import dnnlib.tflib as tflib
15 |
16 | from metrics import metric_base
17 | from training import misc
18 |
19 | #----------------------------------------------------------------------------
20 |
21 | class FID(metric_base.MetricBase):
22 | def __init__(self, num_images, minibatch_per_gpu, **kwargs):
23 | super().__init__(**kwargs)
24 | self.num_images = num_images
25 | self.minibatch_per_gpu = minibatch_per_gpu
26 |
27 | def _evaluate(self, Gs, num_gpus):
28 | minibatch_size = num_gpus * self.minibatch_per_gpu
29 | inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl
30 | activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
31 |
32 | # Calculate statistics for reals.
33 | cache_file = self._get_cache_file_for_reals(num_images=self.num_images)
34 | os.makedirs(os.path.dirname(cache_file), exist_ok=True)
35 | if os.path.isfile(cache_file):
36 | mu_real, sigma_real = misc.load_pkl(cache_file)
37 | else:
38 | for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):
39 | begin = idx * minibatch_size
40 | end = min(begin + minibatch_size, self.num_images)
41 | activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)
42 | if end == self.num_images:
43 | break
44 | mu_real = np.mean(activations, axis=0)
45 | sigma_real = np.cov(activations, rowvar=False)
46 | misc.save_pkl((mu_real, sigma_real), cache_file)
47 |
48 | # Construct TensorFlow graph.
49 | result_expr = []
50 | for gpu_idx in range(num_gpus):
51 | with tf.device('/gpu:%d' % gpu_idx):
52 | Gs_clone = Gs.clone()
53 | inception_clone = inception.clone()
54 | latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
55 | images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
56 | images = tflib.convert_images_to_uint8(images)
57 | result_expr.append(inception_clone.get_output_for(images))
58 |
59 | # Calculate statistics for fakes.
60 | for begin in range(0, self.num_images, minibatch_size):
61 | end = min(begin + minibatch_size, self.num_images)
62 | activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
63 | mu_fake = np.mean(activations, axis=0)
64 | sigma_fake = np.cov(activations, rowvar=False)
65 |
66 | # Calculate FID.
67 | m = np.square(mu_fake - mu_real).sum()
68 | s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member
69 | dist = m + np.trace(sigma_fake + sigma_real - 2*s)
70 | self._report_result(np.real(dist))
71 |
72 | #----------------------------------------------------------------------------
73 |
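The final distance is the Frechet distance between two Gaussians fitted to the Inception activations. Isolated from the graph plumbing above, the arithmetic is the following NumPy restatement of the last lines of `_evaluate`:

import numpy as np
import scipy.linalg

def fid_from_stats(mu_real, sigma_real, mu_fake, sigma_fake):
    # ||mu_f - mu_r||^2 + Tr(S_f + S_r - 2 * sqrtm(S_f S_r))
    m = np.square(mu_fake - mu_real).sum()
    s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False)
    return float(np.real(m + np.trace(sigma_fake + sigma_real - 2 * s)))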
--------------------------------------------------------------------------------
/models/stylegan_tf_official/metrics/metric_base.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Common definitions for GAN metrics."""
9 |
10 | import os
11 | import time
12 | import hashlib
13 | import numpy as np
14 | import tensorflow as tf
15 | import dnnlib
16 | import dnnlib.tflib as tflib
17 |
18 | import config
19 | from training import misc
20 | from training import dataset
21 |
22 | #----------------------------------------------------------------------------
23 | # Standard metrics.
24 |
25 | fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
26 | ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
27 | ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
28 | ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
29 | ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
30 | ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
31 | dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
32 |
33 | #----------------------------------------------------------------------------
34 | # Base class for metrics.
35 |
36 | class MetricBase:
37 | def __init__(self, name):
38 | self.name = name
39 | self._network_pkl = None
40 | self._dataset_args = None
41 | self._mirror_augment = None
42 | self._results = []
43 | self._eval_time = None
44 |
45 | def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
46 | self._network_pkl = network_pkl
47 | self._dataset_args = dataset_args
48 | self._mirror_augment = mirror_augment
49 | self._results = []
50 |
51 | if (dataset_args is None or mirror_augment is None) and run_dir is not None:
52 | run_config = misc.parse_config_for_previous_run(run_dir)
53 | self._dataset_args = dict(run_config['dataset'])
54 | self._dataset_args['shuffle_mb'] = 0
55 | self._mirror_augment = run_config['train'].get('mirror_augment', False)
56 |
57 | time_begin = time.time()
58 | with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
59 | _G, _D, Gs = misc.load_pkl(self._network_pkl)
60 | self._evaluate(Gs, num_gpus=num_gpus)
61 | self._eval_time = time.time() - time_begin
62 |
63 | if log_results:
64 | result_str = self.get_result_str()
65 | if run_dir is not None:
66 | log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
67 | with dnnlib.util.Logger(log, 'a'):
68 | print(result_str)
69 | else:
70 | print(result_str)
71 |
72 | def get_result_str(self):
73 | network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
74 | if len(network_name) > 29:
75 | network_name = '...' + network_name[-26:]
76 | result_str = '%-30s' % network_name
77 | result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
78 | for res in self._results:
79 | result_str += ' ' + self.name + res.suffix + ' '
80 | result_str += res.fmt % res.value
81 | return result_str
82 |
83 | def update_autosummaries(self):
84 | for res in self._results:
85 | tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
86 |
87 | def _evaluate(self, Gs, num_gpus):
88 | raise NotImplementedError # to be overridden by subclasses
89 |
90 | def _report_result(self, value, suffix='', fmt='%-10.4f'):
91 | self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
92 |
93 | def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
94 | all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
95 | all_args.update(self._dataset_args)
96 | all_args.update(kwargs)
97 | md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
98 | dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
99 | return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
100 |
101 | def _iterate_reals(self, minibatch_size):
102 | dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
103 | while True:
104 | images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
105 | if self._mirror_augment:
106 | images = misc.apply_mirror_augment(images)
107 | yield images
108 |
109 | def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
110 | while True:
111 | latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
112 | fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
113 | images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
114 | yield images
115 |
116 | #----------------------------------------------------------------------------
117 | # Group of multiple metrics.
118 |
119 | class MetricGroup:
120 | def __init__(self, metric_kwarg_list):
121 | self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
122 |
123 | def run(self, *args, **kwargs):
124 | for metric in self.metrics:
125 | metric.run(*args, **kwargs)
126 |
127 | def get_result_str(self):
128 | return ' '.join(metric.get_result_str() for metric in self.metrics)
129 |
130 | def update_autosummaries(self):
131 | for metric in self.metrics:
132 | metric.update_autosummaries()
133 |
134 | #----------------------------------------------------------------------------
135 | # Dummy metric for debugging purposes.
136 |
137 | class DummyMetric(MetricBase):
138 | def _evaluate(self, Gs, num_gpus):
139 | _ = Gs, num_gpus
140 | self._report_result(0.0)
141 |
142 | #----------------------------------------------------------------------------
143 |
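Outside the submit framework, one of the metric presets above can also be invoked directly, the same way `MetricGroup` instantiates them; a sketch with a placeholder network pickle path (it still assumes the repo's dataset and cache directories are configured):

import dnnlib
from metrics.metric_base import fid50k

metric = dnnlib.util.call_func_by_name(**fid50k)   # instantiates metrics.frechet_inception_distance.FID
metric.run('path/to/network.pkl',
           dataset_args=dict(tfrecord_dir='ffhq', shuffle_mb=0),
           mirror_augment=True, num_gpus=1)
print(metric.get_result_str())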
--------------------------------------------------------------------------------
/models/stylegan_tf_official/metrics/perceptual_path_length.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Perceptual Path Length (PPL)."""
9 |
10 | import numpy as np
11 | import tensorflow as tf
12 | import dnnlib.tflib as tflib
13 |
14 | from metrics import metric_base
15 | from training import misc
16 |
17 | #----------------------------------------------------------------------------
18 |
19 | # Normalize batch of vectors.
20 | def normalize(v):
21 | return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True))
22 |
23 | # Spherical interpolation of a batch of vectors.
24 | def slerp(a, b, t):
25 | a = normalize(a)
26 | b = normalize(b)
27 | d = tf.reduce_sum(a * b, axis=-1, keepdims=True)
28 | p = t * tf.math.acos(d)
29 | c = normalize(b - d * a)
30 | d = a * tf.math.cos(p) + c * tf.math.sin(p)
31 | return normalize(d)
32 |
33 | #----------------------------------------------------------------------------
34 |
35 | class PPL(metric_base.MetricBase):
36 | def __init__(self, num_samples, epsilon, space, sampling, minibatch_per_gpu, **kwargs):
37 | assert space in ['z', 'w']
38 | assert sampling in ['full', 'end']
39 | super().__init__(**kwargs)
40 | self.num_samples = num_samples
41 | self.epsilon = epsilon
42 | self.space = space
43 | self.sampling = sampling
44 | self.minibatch_per_gpu = minibatch_per_gpu
45 |
46 | def _evaluate(self, Gs, num_gpus):
47 | minibatch_size = num_gpus * self.minibatch_per_gpu
48 |
49 | # Construct TensorFlow graph.
50 | distance_expr = []
51 | for gpu_idx in range(num_gpus):
52 | with tf.device('/gpu:%d' % gpu_idx):
53 | Gs_clone = Gs.clone()
54 | noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]
55 |
56 | # Generate random latents and interpolation t-values.
57 | lat_t01 = tf.random_normal([self.minibatch_per_gpu * 2] + Gs_clone.input_shape[1:])
58 | lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0)
59 |
60 | # Interpolate in W or Z.
61 | if self.space == 'w':
62 | dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, None, is_validation=True)
63 | dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2]
64 | dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis])
65 | dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon)
66 | dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape)
67 | else: # space == 'z'
68 | lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2]
69 | lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis])
70 | lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon)
71 | lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape)
72 | dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, None, is_validation=True)
73 |
74 | # Synthesize images.
75 | with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch
76 | images = Gs_clone.components.synthesis.get_output_for(dlat_e01, is_validation=True, randomize_noise=False)
77 |
78 | # Crop only the face region.
79 | c = int(images.shape[2] // 8)
80 | images = images[:, :, c*3 : c*7, c*2 : c*6]
81 |
82 | # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
83 | if images.shape[2] > 256:
84 | factor = images.shape[2] // 256
85 | images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
86 | images = tf.reduce_mean(images, axis=[3,5])
87 |
88 | # Scale dynamic range from [-1,1] to [0,255] for VGG.
89 | images = (images + 1) * (255 / 2)
90 |
91 | # Evaluate perceptual distance.
92 | img_e0, img_e1 = images[0::2], images[1::2]
93 | distance_measure = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2') # vgg16_zhang_perceptual.pkl
94 | distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2))
95 |
96 | # Sampling loop.
97 | all_distances = []
98 | for _ in range(0, self.num_samples, minibatch_size):
99 | all_distances += tflib.run(distance_expr)
100 | all_distances = np.concatenate(all_distances, axis=0)
101 |
102 | # Reject outliers.
103 | lo = np.percentile(all_distances, 1, interpolation='lower')
104 | hi = np.percentile(all_distances, 99, interpolation='higher')
105 | filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances)
106 | self._report_result(np.mean(filtered_distances))
107 |
108 | #----------------------------------------------------------------------------
109 |
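In formula form, PPL estimates E[d(G(slerp(z1, z2; t)), G(slerp(z1, z2; t + eps))) / eps^2], where d is the VGG-based perceptual distance. For experimenting outside the graph, here is a NumPy restatement of the slerp used above:

import numpy as np

def slerp_np(a, b, t):
    # Spherical interpolation of a batch of vectors, mirroring the TF slerp above.
    a = a / np.linalg.norm(a, axis=-1, keepdims=True)
    b = b / np.linalg.norm(b, axis=-1, keepdims=True)
    d = np.sum(a * b, axis=-1, keepdims=True)
    p = t * np.arccos(d)
    c = b - d * a
    c = c / np.linalg.norm(c, axis=-1, keepdims=True)
    out = a * np.cos(p) + c * np.sin(p)
    return out / np.linalg.norm(out, axis=-1, keepdims=True)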
--------------------------------------------------------------------------------
/models/stylegan_tf_official/pretrained_example.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Minimal script for generating an image using pre-trained StyleGAN generator."""
9 |
10 | import os
11 | import pickle
12 | import numpy as np
13 | import PIL.Image
14 | import dnnlib
15 | import dnnlib.tflib as tflib
16 | import config
17 |
18 | def main():
19 | # Initialize TensorFlow.
20 | tflib.init_tf()
21 |
22 | # Load pre-trained network.
23 | url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
24 | with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
25 | _G, _D, Gs = pickle.load(f)
26 | # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
27 | # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
28 | # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
29 |
30 | # Print network details.
31 | Gs.print_layers()
32 |
33 | # Pick latent vector.
34 | rnd = np.random.RandomState(5)
35 | latents = rnd.randn(1, Gs.input_shape[1])
36 |
37 | # Generate image.
38 | fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
39 | images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
40 |
41 | # Save image.
42 | os.makedirs(config.result_dir, exist_ok=True)
43 | png_filename = os.path.join(config.result_dir, 'example.png')
44 | PIL.Image.fromarray(images[0], 'RGB').save(png_filename)
45 |
46 | if __name__ == "__main__":
47 | main()
48 |
--------------------------------------------------------------------------------
/models/stylegan_tf_official/run_metrics.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Main entry point for training StyleGAN and ProGAN networks."""
9 |
10 | import dnnlib
11 | from dnnlib import EasyDict
12 | import dnnlib.tflib as tflib
13 |
14 | import config
15 | from metrics import metric_base
16 | from training import misc
17 |
18 | #----------------------------------------------------------------------------
19 |
20 | def run_pickle(submit_config, metric_args, network_pkl, dataset_args, mirror_augment):
21 | ctx = dnnlib.RunContext(submit_config)
22 | tflib.init_tf()
23 | print('Evaluating %s metric on network_pkl "%s"...' % (metric_args.name, network_pkl))
24 | metric = dnnlib.util.call_func_by_name(**metric_args)
25 | print()
26 | metric.run(network_pkl, dataset_args=dataset_args, mirror_augment=mirror_augment, num_gpus=submit_config.num_gpus)
27 | print()
28 | ctx.close()
29 |
30 | #----------------------------------------------------------------------------
31 |
32 | def run_snapshot(submit_config, metric_args, run_id, snapshot):
33 | ctx = dnnlib.RunContext(submit_config)
34 | tflib.init_tf()
35 | print('Evaluating %s metric on run_id %s, snapshot %s...' % (metric_args.name, run_id, snapshot))
36 | run_dir = misc.locate_run_dir(run_id)
37 | network_pkl = misc.locate_network_pkl(run_dir, snapshot)
38 | metric = dnnlib.util.call_func_by_name(**metric_args)
39 | print()
40 | metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
41 | print()
42 | ctx.close()
43 |
44 | #----------------------------------------------------------------------------
45 |
46 | def run_all_snapshots(submit_config, metric_args, run_id):
47 | ctx = dnnlib.RunContext(submit_config)
48 | tflib.init_tf()
49 | print('Evaluating %s metric on all snapshots of run_id %s...' % (metric_args.name, run_id))
50 | run_dir = misc.locate_run_dir(run_id)
51 | network_pkls = misc.list_network_pkls(run_dir)
52 | metric = dnnlib.util.call_func_by_name(**metric_args)
53 | print()
54 | for idx, network_pkl in enumerate(network_pkls):
55 | ctx.update('', idx, len(network_pkls))
56 | metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
57 | print()
58 | ctx.close()
59 |
60 | #----------------------------------------------------------------------------
61 |
62 | def main():
63 | submit_config = dnnlib.SubmitConfig()
64 |
65 | # Which metrics to evaluate?
66 | metrics = []
67 | metrics += [metric_base.fid50k]
68 | #metrics += [metric_base.ppl_zfull]
69 | #metrics += [metric_base.ppl_wfull]
70 | #metrics += [metric_base.ppl_zend]
71 | #metrics += [metric_base.ppl_wend]
72 | #metrics += [metric_base.ls]
73 | #metrics += [metric_base.dummy]
74 |
75 | # Which networks to evaluate them on?
76 | tasks = []
77 | tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)] # karras2019stylegan-ffhq-1024x1024.pkl
78 | #tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)]
79 | #tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)]
80 |
81 | # How many GPUs to use?
82 | submit_config.num_gpus = 1
83 | #submit_config.num_gpus = 2
84 | #submit_config.num_gpus = 4
85 | #submit_config.num_gpus = 8
86 |
87 | # Execute.
88 | submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
89 | submit_config.run_dir_ignore += config.run_dir_ignore
90 | for task in tasks:
91 | for metric in metrics:
92 | submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name)
93 | if task.run_func_name.endswith('run_snapshot'):
94 | submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot)
95 | if task.run_func_name.endswith('run_all_snapshots'):
96 | submit_config.run_desc += '-%s' % task.run_id
97 | submit_config.run_desc += '-%dgpu' % submit_config.num_gpus
98 | dnnlib.submit_run(submit_config, metric_args=metric, **task)
99 |
100 | #----------------------------------------------------------------------------
101 |
102 | if __name__ == "__main__":
103 | main()
104 |
105 | #----------------------------------------------------------------------------
106 |
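To evaluate a local training run instead of the public FFHQ pickle, the commented-out task lines in `main()` are switched on; a configuration sketch (the `run_id`/`snapshot` values below are placeholders, not values from this repository):

```python
metrics = [metric_base.fid50k]
tasks = [EasyDict(run_func_name='run_metrics.run_snapshot',
                  run_id=100, snapshot=25000)]   # placeholder run/snapshot
submit_config.num_gpus = 2                       # metrics parallelize over GPUs
```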
--------------------------------------------------------------------------------
/models/stylegan_tf_official/training/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | # empty
9 |
--------------------------------------------------------------------------------
/torch_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | # empty
10 |
--------------------------------------------------------------------------------
/torch_utils/custom_ops.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import glob
10 | import hashlib
11 | import importlib
12 | import os
13 | import re
14 | import shutil
15 | import uuid
16 |
17 | import torch
18 | import torch.utils.cpp_extension
19 | from torch.utils.file_baton import FileBaton
20 |
21 | #----------------------------------------------------------------------------
22 | # Global options.
23 |
24 | verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
25 |
26 | #----------------------------------------------------------------------------
27 | # Internal helper funcs.
28 |
29 | def _find_compiler_bindir():
30 | patterns = [
31 | 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
32 | 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
33 | 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
34 | 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
35 | ]
36 | for pattern in patterns:
37 | matches = sorted(glob.glob(pattern))
38 | if len(matches):
39 | return matches[-1]
40 | return None
41 |
42 | #----------------------------------------------------------------------------
43 |
44 | def _get_mangled_gpu_name():
45 | name = torch.cuda.get_device_name().lower()
46 | out = []
47 | for c in name:
48 | if re.match('[a-z0-9_-]+', c):
49 | out.append(c)
50 | else:
51 | out.append('-')
52 | return ''.join(out)
53 |
54 | #----------------------------------------------------------------------------
55 | # Main entry point for compiling and loading C++/CUDA plugins.
56 |
57 | _cached_plugins = dict()
58 |
59 | def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
60 | assert verbosity in ['none', 'brief', 'full']
61 | if headers is None:
62 | headers = []
63 | if source_dir is not None:
64 | sources = [os.path.join(source_dir, fname) for fname in sources]
65 | headers = [os.path.join(source_dir, fname) for fname in headers]
66 |
67 | # Already cached?
68 | if module_name in _cached_plugins:
69 | return _cached_plugins[module_name]
70 |
71 | # Print status.
72 | if verbosity == 'full':
73 | print(f'Setting up PyTorch plugin "{module_name}"...')
74 | elif verbosity == 'brief':
75 | print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
76 | verbose_build = (verbosity == 'full')
77 |
78 | # Compile and load.
79 | try: # pylint: disable=too-many-nested-blocks
80 | # Make sure we can find the necessary compiler binaries.
81 | if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
82 | compiler_bindir = _find_compiler_bindir()
83 | if compiler_bindir is None:
84 | raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
85 | os.environ['PATH'] += ';' + compiler_bindir
86 |
87 | # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
88 | # break the build or unnecessarily restrict what's available to nvcc.
89 | # Unset it to let nvcc decide based on what's available on the
90 | # machine.
91 | os.environ['TORCH_CUDA_ARCH_LIST'] = ''
92 |
93 | # Incremental build md5sum trickery. Copies all the input source files
94 | # into a cached build directory under a combined md5 digest of the input
95 | # source files. Copying is done only if the combined digest has changed.
96 | # This keeps input file timestamps and filenames the same as in previous
97 | # extension builds, allowing for fast incremental rebuilds.
98 | #
99 | # This optimization is done only in case all the source files reside in
100 | # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
101 | # environment variable is set (we take this as a signal that the user
102 | # actually cares about this.)
103 | #
104 | # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
105 | # around the *.cu dependency bug in ninja config.
106 | #
107 | all_source_files = sorted(sources + headers)
108 | all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
109 | if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):
110 |
111 | # Compute combined hash digest for all source files.
112 | hash_md5 = hashlib.md5()
113 | for src in all_source_files:
114 | with open(src, 'rb') as f:
115 | hash_md5.update(f.read())
116 |
117 | # Select cached build directory name.
118 | source_digest = hash_md5.hexdigest()
119 | build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
120 | cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
121 |
122 | if not os.path.isdir(cached_build_dir):
123 | tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
124 | os.makedirs(tmpdir)
125 | for src in all_source_files:
126 | shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
127 | try:
128 | os.replace(tmpdir, cached_build_dir) # atomic
129 | except OSError:
130 | # source directory already exists, delete tmpdir and its contents.
131 | shutil.rmtree(tmpdir)
132 | if not os.path.isdir(cached_build_dir): raise
133 |
134 | # Compile.
135 | cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
136 | torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
137 | verbose=verbose_build, sources=cached_sources, **build_kwargs)
138 | else:
139 | torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
140 |
141 | # Load.
142 | module = importlib.import_module(module_name)
143 |
144 | except:
145 | if verbosity == 'brief':
146 | print('Failed!')
147 | raise
148 |
149 | # Print status and add to cache dict.
150 | if verbosity == 'full':
151 | print(f'Done setting up PyTorch plugin "{module_name}".')
152 | elif verbosity == 'brief':
153 | print('Done.')
154 | _cached_plugins[module_name] = module
155 | return module
156 |
157 | #----------------------------------------------------------------------------
158 |
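For reference, a usage sketch of `get_plugin()` in the style of the op modules under `torch_utils/ops` (the exact kwargs below are illustrative):

```python
import os
from torch_utils import custom_ops

# Compile and load the bias_act extension; repeated calls hit _cached_plugins.
_plugin = custom_ops.get_plugin(
    module_name='bias_act_plugin',
    sources=['bias_act.cpp', 'bias_act.cu'],
    headers=['bias_act.h'],
    source_dir='torch_utils/ops',                # illustrative path
    extra_cuda_cflags=['--use_fast_math'],
)
```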
--------------------------------------------------------------------------------
/torch_utils/ops/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | # empty
10 |
--------------------------------------------------------------------------------
/torch_utils/ops/bias_act.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <torch/extension.h>
10 | #include <ATen/cuda/CUDAContext.h>
11 | #include <c10/cuda/CUDAGuard.h>
12 | #include "bias_act.h"
13 |
14 | //------------------------------------------------------------------------
15 |
16 | static bool has_same_layout(torch::Tensor x, torch::Tensor y)
17 | {
18 | if (x.dim() != y.dim())
19 | return false;
20 | for (int64_t i = 0; i < x.dim(); i++)
21 | {
22 | if (x.size(i) != y.size(i))
23 | return false;
24 | if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
25 | return false;
26 | }
27 | return true;
28 | }
29 |
30 | //------------------------------------------------------------------------
31 |
32 | static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
33 | {
34 | // Validate arguments.
35 | TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
36 | TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
37 | TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
38 | TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
39 | TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same shape, dtype, and device as x");
40 | TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
41 | TORCH_CHECK(b.dim() == 1, "b must have rank 1");
42 | TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
43 | TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
44 | TORCH_CHECK(grad >= 0, "grad must be non-negative");
45 |
46 | // Validate layout.
47 | TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
48 | TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
49 | TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
50 | TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
51 | TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
52 |
53 | // Create output tensor.
54 | const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
55 | torch::Tensor y = torch::empty_like(x);
56 | TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
57 |
58 | // Initialize CUDA kernel parameters.
59 | bias_act_kernel_params p;
60 | p.x = x.data_ptr();
61 | p.b = (b.numel()) ? b.data_ptr() : NULL;
62 | p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
63 | p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
64 | p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
65 | p.y = y.data_ptr();
66 | p.grad = grad;
67 | p.act = act;
68 | p.alpha = alpha;
69 | p.gain = gain;
70 | p.clamp = clamp;
71 | p.sizeX = (int)x.numel();
72 | p.sizeB = (int)b.numel();
73 | p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
74 |
75 | // Choose CUDA kernel.
76 | void* kernel;
77 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "bias_act_cuda", [&]
78 | {
79 | kernel = choose_bias_act_kernel(p);
80 | });
81 | TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
82 |
83 | // Launch CUDA kernel.
84 | p.loopX = 4;
85 | int blockSize = 4 * 32;
86 | int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
87 | void* args[] = {&p};
88 | AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
89 | return y;
90 | }
91 |
92 | //------------------------------------------------------------------------
93 |
94 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
95 | {
96 | m.def("bias_act", &bias_act);
97 | }
98 |
99 | //------------------------------------------------------------------------
100 |
--------------------------------------------------------------------------------
/torch_utils/ops/bias_act.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <c10/util/Half.h>
10 | #include "bias_act.h"
11 |
12 | //------------------------------------------------------------------------
13 | // Helpers.
14 |
15 | template <class T> struct InternalType;
16 | template <> struct InternalType<double> { typedef double scalar_t; };
17 | template <> struct InternalType<float> { typedef float scalar_t; };
18 | template <> struct InternalType<c10::Half> { typedef float scalar_t; };
19 |
20 | //------------------------------------------------------------------------
21 | // CUDA kernel.
22 |
23 | template <class T, int A>
24 | __global__ void bias_act_kernel(bias_act_kernel_params p)
25 | {
26 | typedef typename InternalType::scalar_t scalar_t;
27 | int G = p.grad;
28 | scalar_t alpha = (scalar_t)p.alpha;
29 | scalar_t gain = (scalar_t)p.gain;
30 | scalar_t clamp = (scalar_t)p.clamp;
31 | scalar_t one = (scalar_t)1;
32 | scalar_t two = (scalar_t)2;
33 | scalar_t expRange = (scalar_t)80;
34 | scalar_t halfExpRange = (scalar_t)40;
35 | scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946;
36 | scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717;
37 |
38 | // Loop over elements.
39 | int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
40 | for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
41 | {
42 | // Load.
43 | scalar_t x = (scalar_t)((const T*)p.x)[xi];
44 | scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
45 | scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
46 | scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
47 | scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
48 | scalar_t yy = (gain != 0) ? yref / gain : 0;
49 | scalar_t y = 0;
50 |
51 | // Apply bias.
52 | ((G == 0) ? x : xref) += b;
53 |
54 | // linear
55 | if (A == 1)
56 | {
57 | if (G == 0) y = x;
58 | if (G == 1) y = x;
59 | }
60 |
61 | // relu
62 | if (A == 2)
63 | {
64 | if (G == 0) y = (x > 0) ? x : 0;
65 | if (G == 1) y = (yy > 0) ? x : 0;
66 | }
67 |
68 | // lrelu
69 | if (A == 3)
70 | {
71 | if (G == 0) y = (x > 0) ? x : x * alpha;
72 | if (G == 1) y = (yy > 0) ? x : x * alpha;
73 | }
74 |
75 | // tanh
76 | if (A == 4)
77 | {
78 | if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
79 | if (G == 1) y = x * (one - yy * yy);
80 | if (G == 2) y = x * (one - yy * yy) * (-two * yy);
81 | }
82 |
83 | // sigmoid
84 | if (A == 5)
85 | {
86 | if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
87 | if (G == 1) y = x * yy * (one - yy);
88 | if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
89 | }
90 |
91 | // elu
92 | if (A == 6)
93 | {
94 | if (G == 0) y = (x >= 0) ? x : exp(x) - one;
95 | if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
96 | if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
97 | }
98 |
99 | // selu
100 | if (A == 7)
101 | {
102 | if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
103 | if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
104 | if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
105 | }
106 |
107 | // softplus
108 | if (A == 8)
109 | {
110 | if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
111 | if (G == 1) y = x * (one - exp(-yy));
112 | if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
113 | }
114 |
115 | // swish
116 | if (A == 9)
117 | {
118 | if (G == 0)
119 | y = (x < -expRange) ? 0 : x / (exp(-x) + one);
120 | else
121 | {
122 | scalar_t c = exp(xref);
123 | scalar_t d = c + one;
124 | if (G == 1)
125 | y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
126 | else
127 | y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
128 | yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
129 | }
130 | }
131 |
132 | // Apply gain.
133 | y *= gain * dy;
134 |
135 | // Clamp.
136 | if (clamp >= 0)
137 | {
138 | if (G == 0)
139 | y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
140 | else
141 | y = (yref > -clamp & yref < clamp) ? y : 0;
142 | }
143 |
144 | // Store.
145 | ((T*)p.y)[xi] = (T)y;
146 | }
147 | }
148 |
149 | //------------------------------------------------------------------------
150 | // CUDA kernel selection.
151 |
152 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
153 | {
154 | if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
155 | if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
156 | if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
157 | if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
158 | if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
159 | if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
160 | if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
161 | if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
162 | if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
163 | return NULL;
164 | }
165 |
166 | //------------------------------------------------------------------------
167 | // Template specializations.
168 |
169 | template void* choose_bias_act_kernel<double> (const bias_act_kernel_params& p);
170 | template void* choose_bias_act_kernel<float> (const bias_act_kernel_params& p);
171 | template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);
172 |
173 | //------------------------------------------------------------------------
174 |
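A Python reference sketch for what the `G` (grad) modes compute in the leaky-ReLU case (`A == 3`); parameter defaults are illustrative, not taken from this file:

```python
import torch

def bias_act_lrelu_ref(x, b, gain=1.0, alpha=0.2, clamp=-1.0):
    # G == 0 path for act=3: add bias, apply leaky ReLU, scale, clamp.
    y = torch.nn.functional.leaky_relu(x + b, negative_slope=alpha) * gain
    if clamp >= 0:
        y = y.clamp(-clamp, clamp)
    return y

# G == 1 evaluates the first derivative of this function (scaled by dy),
# reconstructed from yref (the saved forward output divided by gain) so the
# pre-activation input need not be kept around. Activations with curvature
# (tanh, sigmoid, elu, selu, softplus) also define a G == 2 second-derivative
# branch, as seen in the kernel above.
```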
--------------------------------------------------------------------------------
/torch_utils/ops/bias_act.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | //------------------------------------------------------------------------
10 | // CUDA kernel parameters.
11 |
12 | struct bias_act_kernel_params
13 | {
14 | const void* x; // [sizeX]
15 | const void* b; // [sizeB] or NULL
16 | const void* xref; // [sizeX] or NULL
17 | const void* yref; // [sizeX] or NULL
18 | const void* dy; // [sizeX] or NULL
19 | void* y; // [sizeX]
20 |
21 | int grad;
22 | int act;
23 | float alpha;
24 | float gain;
25 | float clamp;
26 |
27 | int sizeX;
28 | int sizeB;
29 | int stepB;
30 | int loopX;
31 | };
32 |
33 | //------------------------------------------------------------------------
34 | // CUDA kernel selection.
35 |
36 | template void* choose_bias_act_kernel(const bias_act_kernel_params& p);
37 |
38 | //------------------------------------------------------------------------
39 |
--------------------------------------------------------------------------------
/torch_utils/ops/conv2d_resample.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """2D convolution with optional up/downsampling."""
10 |
11 | import torch
12 |
13 | from .. import misc
14 | from . import conv2d_gradfix
15 | from . import upfirdn2d
16 | from .upfirdn2d import _parse_padding
17 | from .upfirdn2d import _get_filter_size
18 |
19 | #----------------------------------------------------------------------------
20 |
21 | def _get_weight_shape(w):
22 | with misc.suppress_tracer_warnings(): # this value will be treated as a constant
23 | shape = [int(sz) for sz in w.shape]
24 | misc.assert_shape(w, shape)
25 | return shape
26 |
27 | #----------------------------------------------------------------------------
28 |
29 | def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
30 | """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
31 | """
32 | _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)
33 |
34 | # Flip weight if requested.
35 | # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
36 | if not flip_weight and (kw > 1 or kh > 1):
37 | w = w.flip([2, 3])
38 |
39 | # Execute using conv2d_gradfix.
40 | op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
41 | return op(x, w, stride=stride, padding=padding, groups=groups)
42 |
43 | #----------------------------------------------------------------------------
44 |
45 | @misc.profiled_function
46 | def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
47 | r"""2D convolution with optional up/downsampling.
48 |
49 | Padding is performed only once at the beginning, not between the operations.
50 |
51 | Args:
52 | x: Input tensor of shape
53 | `[batch_size, in_channels, in_height, in_width]`.
54 | w: Weight tensor of shape
55 | `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
56 | f: Low-pass filter for up/downsampling. Must be prepared beforehand by
57 | calling upfirdn2d.setup_filter(). None = identity (default).
58 | up: Integer upsampling factor (default: 1).
59 | down: Integer downsampling factor (default: 1).
60 | padding: Padding with respect to the upsampled image. Can be a single number
61 | or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
62 | (default: 0).
63 | groups: Split input channels into N groups (default: 1).
64 | flip_weight: False = convolution, True = correlation (default: True).
65 | flip_filter: False = convolution, True = correlation (default: False).
66 |
67 | Returns:
68 | Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
69 | """
70 | # Validate arguments.
71 | assert isinstance(x, torch.Tensor) and (x.ndim == 4)
72 | assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
73 | assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
74 | assert isinstance(up, int) and (up >= 1)
75 | assert isinstance(down, int) and (down >= 1)
76 | assert isinstance(groups, int) and (groups >= 1)
77 | out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
78 | fw, fh = _get_filter_size(f)
79 | px0, px1, py0, py1 = _parse_padding(padding)
80 |
81 | # Adjust padding to account for up/downsampling.
82 | if up > 1:
83 | px0 += (fw + up - 1) // 2
84 | px1 += (fw - up) // 2
85 | py0 += (fh + up - 1) // 2
86 | py1 += (fh - up) // 2
87 | if down > 1:
88 | px0 += (fw - down + 1) // 2
89 | px1 += (fw - down) // 2
90 | py0 += (fh - down + 1) // 2
91 | py1 += (fh - down) // 2
92 |
93 | # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
94 | if kw == 1 and kh == 1 and (down > 1 and up == 1):
95 | x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
96 | x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
97 | return x
98 |
99 | # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
100 | if kw == 1 and kh == 1 and (up > 1 and down == 1):
101 | x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
102 | x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
103 | return x
104 |
105 | # Fast path: downsampling only => use strided convolution.
106 | if down > 1 and up == 1:
107 | x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
108 | x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
109 | return x
110 |
111 | # Fast path: upsampling with optional downsampling => use transpose strided convolution.
112 | if up > 1:
113 | if groups == 1:
114 | w = w.transpose(0, 1)
115 | else:
116 | w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
117 | w = w.transpose(1, 2)
118 | w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
119 | px0 -= kw - 1
120 | px1 -= kw - up
121 | py0 -= kh - 1
122 | py1 -= kh - up
123 | pxt = max(min(-px0, -px1), 0)
124 | pyt = max(min(-py0, -py1), 0)
125 | x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
126 | x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
127 | if down > 1:
128 | x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
129 | return x
130 |
131 | # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
132 | if up == 1 and down == 1:
133 | if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
134 | return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
135 |
136 | # Fallback: Generic reference implementation.
137 | x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
138 | x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
139 | if down > 1:
140 | x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
141 | return x
142 |
143 | #----------------------------------------------------------------------------
144 |
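A brief usage sketch (weights, filter taps, and shapes are illustrative, and a CUDA device is assumed); `setup_filter()` prepares the low-pass filter `f` as required by the docstring above:

```python
import torch
from torch_utils.ops import upfirdn2d, conv2d_resample

x = torch.randn(1, 3, 64, 64, device='cuda')
w = torch.randn(8, 3, 3, 3, device='cuda')               # 3x3 conv weights
f = upfirdn2d.setup_filter([1, 3, 3, 1], device='cuda')  # 4-tap low-pass filter
# Fused 2x upsampling + convolution; padding is w.r.t. the upsampled image.
y = conv2d_resample.conv2d_resample(x=x, w=w, f=f, up=2, padding=1)
print(y.shape)  # torch.Size([1, 8, 128, 128])
```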
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <cuda_runtime.h>
10 |
11 | //------------------------------------------------------------------------
12 | // CUDA kernel parameters.
13 |
14 | struct filtered_lrelu_kernel_params
15 | {
16 | // These parameters decide which kernel to use.
17 | int up; // upsampling ratio (1, 2, 4)
18 | int down; // downsampling ratio (1, 2, 4)
19 | int2 fuShape; // [size, 1] | [size, size]
20 | int2 fdShape; // [size, 1] | [size, size]
21 |
22 | int _dummy; // Alignment.
23 |
24 | // Rest of the parameters.
25 | const void* x; // Input tensor.
26 | void* y; // Output tensor.
27 | const void* b; // Bias tensor.
28 | unsigned char* s; // Sign tensor in/out. NULL if unused.
29 | const float* fu; // Upsampling filter.
30 | const float* fd; // Downsampling filter.
31 |
32 | int2 pad0; // Left/top padding.
33 | float gain; // Additional gain factor.
34 | float slope; // Leaky ReLU slope on negative side.
35 | float clamp; // Clamp after nonlinearity.
36 | int flip; // Filter kernel flip for gradient computation.
37 |
38 | int tilesXdim; // Original number of horizontal output tiles.
39 | int tilesXrep; // Number of horizontal tiles per CTA.
40 | int blockZofs; // Block z offset to support large minibatch, channel dimensions.
41 |
42 | int4 xShape; // [width, height, channel, batch]
43 | int4 yShape; // [width, height, channel, batch]
44 | int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused.
45 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
46 | int swLimit; // Active width of sign tensor in bytes.
47 |
48 | longlong4 xStride; // Strides of all tensors except signs, same component order as shapes.
49 | longlong4 yStride; //
50 | int64_t bStride; //
51 | longlong3 fuStride; //
52 | longlong3 fdStride; //
53 | };
54 |
55 | struct filtered_lrelu_act_kernel_params
56 | {
57 | void* x; // Input/output, modified in-place.
58 | unsigned char* s; // Sign tensor in/out. NULL if unused.
59 |
60 | float gain; // Additional gain factor.
61 | float slope; // Leaky ReLU slope on negative side.
62 | float clamp; // Clamp after nonlinearity.
63 |
64 | int4 xShape; // [width, height, channel, batch]
65 | longlong4 xStride; // Input/output tensor strides, same order as in shape.
66 | int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused.
67 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
68 | };
69 |
70 | //------------------------------------------------------------------------
71 | // CUDA kernel specialization.
72 |
73 | struct filtered_lrelu_kernel_spec
74 | {
75 | void* setup; // Function for filter kernel setup.
76 | void* exec; // Function for main operation.
77 | int2 tileOut; // Width/height of launch tile.
78 | int numWarps; // Number of warps per thread block, determines launch block size.
79 | int xrep; // For processing multiple horizontal tiles per thread block.
80 | int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants.
81 | };
82 |
83 | //------------------------------------------------------------------------
84 | // CUDA kernel selection.
85 |
86 | template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB);
87 | template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void);
88 | template <bool signWrite, bool signRead> cudaError_t copy_filters(cudaStream_t stream);
89 |
90 | //------------------------------------------------------------------------
91 |
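A hedged Python sketch of the operation these kernels fuse, following the repo's reference path: upsample with `fu`, apply bias + leaky ReLU + clamp, downsample with `fd` (argument handling in the real `filtered_lrelu.py` wrapper may differ):

```python
import torch
from torch_utils.ops import upfirdn2d

def filtered_lrelu_ref(x, fu, fd, b, up=2, down=2, padding=0,
                       gain=2.0, slope=0.2, clamp=None):
    # Upsample with the low-pass filter fu (gain compensates zero-insertion).
    x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=padding, gain=up**2)
    # Pointwise nonlinearity: bias, leaky ReLU, extra gain, optional clamp.
    x = torch.nn.functional.leaky_relu(x + b.reshape(1, -1, 1, 1), slope) * gain
    if clamp is not None:
        x = x.clamp(-clamp, clamp)
    # Downsample with the low-pass filter fd.
    x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down)
    return x
```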
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_ns.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "filtered_lrelu.cu"
10 |
11 | // Template/kernel specializations for no signs mode (no gradients required).
12 |
13 | // Full op, 32-bit indexing.
14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
16 |
17 | // Full op, 64-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Activation/signs only for generic variant. 64-bit indexing.
22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, false>(void);
23 | template void* choose_filtered_lrelu_act_kernel<float, false, false>(void);
24 | template void* choose_filtered_lrelu_act_kernel<double, false, false>(void);
25 |
26 | // Copy filters to constant memory.
27 | template cudaError_t copy_filters<false, false>(cudaStream_t stream);
28 |
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_rd.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "filtered_lrelu.cu"
10 |
11 | // Template/kernel specializations for sign read mode.
12 |
13 | // Full op, 32-bit indexing.
14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
16 |
17 | // Full op, 64-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Activation/signs only for generic variant. 64-bit indexing.
22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, true>(void);
23 | template void* choose_filtered_lrelu_act_kernel<float, false, true>(void);
24 | template void* choose_filtered_lrelu_act_kernel<double, false, true>(void);
25 |
26 | // Copy filters to constant memory.
27 | template cudaError_t copy_filters<false, true>(cudaStream_t stream);
28 |
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_wr.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "filtered_lrelu.cu"
10 |
11 | // Template/kernel specializations for sign write mode.
12 |
13 | // Full op, 32-bit indexing.
14 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
15 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
16 |
17 | // Full op, 64-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Activation/signs only for generic variant. 64-bit indexing.
22 | template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void);
23 | template void* choose_filtered_lrelu_act_kernel<float, true, false>(void);
24 | template void* choose_filtered_lrelu_act_kernel<double, true, false>(void);
25 |
26 | // Copy filters to constant memory.
27 | template cudaError_t copy_filters<true, false>(cudaStream_t stream);
28 |
--------------------------------------------------------------------------------
/torch_utils/ops/fma.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
10 |
11 | import torch
12 |
13 | #----------------------------------------------------------------------------
14 |
15 | def fma(a, b, c): # => a * b + c
16 | return _FusedMultiplyAdd.apply(a, b, c)
17 |
18 | #----------------------------------------------------------------------------
19 |
20 | class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
21 | @staticmethod
22 | def forward(ctx, a, b, c): # pylint: disable=arguments-differ
23 | out = torch.addcmul(c, a, b)
24 | ctx.save_for_backward(a, b)
25 | ctx.c_shape = c.shape
26 | return out
27 |
28 | @staticmethod
29 | def backward(ctx, dout): # pylint: disable=arguments-differ
30 | a, b = ctx.saved_tensors
31 | c_shape = ctx.c_shape
32 | da = None
33 | db = None
34 | dc = None
35 |
36 | if ctx.needs_input_grad[0]:
37 | da = _unbroadcast(dout * b, a.shape)
38 |
39 | if ctx.needs_input_grad[1]:
40 | db = _unbroadcast(dout * a, b.shape)
41 |
42 | if ctx.needs_input_grad[2]:
43 | dc = _unbroadcast(dout, c_shape)
44 |
45 | return da, db, dc
46 |
47 | #----------------------------------------------------------------------------
48 |
49 | def _unbroadcast(x, shape):
50 | extra_dims = x.ndim - len(shape)
51 | assert extra_dims >= 0
52 | dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
53 | if len(dim):
54 | x = x.sum(dim=dim, keepdim=True)
55 | if extra_dims:
56 | x = x.reshape(-1, *x.shape[extra_dims+1:])
57 | assert x.shape == shape
58 | return x
59 |
60 | #----------------------------------------------------------------------------
61 |
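A quick usage sketch showing the broadcasting that `_unbroadcast()` undoes in the backward pass:

```python
import torch
from torch_utils.ops.fma import fma

a = torch.randn(4, 1, requires_grad=True)
b = torch.randn(1, 3, requires_grad=True)
c = torch.randn(4, 3, requires_grad=True)
out = fma(a, b, c)             # broadcasts to shape [4, 3]
out.sum().backward()
assert a.grad.shape == (4, 1)  # gradients reduced back to each input's shape
assert b.grad.shape == (1, 3)
```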
--------------------------------------------------------------------------------
/torch_utils/ops/grid_sample_gradfix.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Custom replacement for `torch.nn.functional.grid_sample` that
10 | supports arbitrarily high order gradients between the input and output.
11 | Only works on 2D images and assumes
12 | `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
13 |
14 | import torch
15 |
16 | # pylint: disable=redefined-builtin
17 | # pylint: disable=arguments-differ
18 | # pylint: disable=protected-access
19 |
20 | #----------------------------------------------------------------------------
21 |
22 | enabled = False # Enable the custom op by setting this to true.
23 |
24 | #----------------------------------------------------------------------------
25 |
26 | def grid_sample(input, grid):
27 | if _should_use_custom_op():
28 | return _GridSample2dForward.apply(input, grid)
29 | return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
30 |
31 | #----------------------------------------------------------------------------
32 |
33 | def _should_use_custom_op():
34 | return enabled
35 |
36 | #----------------------------------------------------------------------------
37 |
38 | class _GridSample2dForward(torch.autograd.Function):
39 | @staticmethod
40 | def forward(ctx, input, grid):
41 | assert input.ndim == 4
42 | assert grid.ndim == 4
43 | output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
44 | ctx.save_for_backward(input, grid)
45 | return output
46 |
47 | @staticmethod
48 | def backward(ctx, grad_output):
49 | input, grid = ctx.saved_tensors
50 | grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
51 | return grad_input, grad_grid
52 |
53 | #----------------------------------------------------------------------------
54 |
55 | class _GridSample2dBackward(torch.autograd.Function):
56 | @staticmethod
57 | def forward(ctx, grad_output, input, grid):
58 | op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
59 | grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
60 | ctx.save_for_backward(grid)
61 | return grad_input, grad_grid
62 |
63 | @staticmethod
64 | def backward(ctx, grad2_grad_input, grad2_grad_grid):
65 | _ = grad2_grad_grid # unused
66 | grid, = ctx.saved_tensors
67 | grad2_grad_output = None
68 | grad2_input = None
69 | grad2_grid = None
70 |
71 | if ctx.needs_input_grad[0]:
72 | grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
73 |
74 | assert not ctx.needs_input_grad[2]
75 | return grad2_grad_output, grad2_input, grad2_grid
76 |
77 | #----------------------------------------------------------------------------
78 |
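A minimal usage sketch; the custom op is opt-in via the module-level `enabled` flag, and its point is that the first-order gradient below stays differentiable:

```python
import torch
from torch_utils.ops import grid_sample_gradfix

grid_sample_gradfix.enabled = True      # opt in to the custom op
image = torch.randn(1, 3, 8, 8, requires_grad=True)
grid = torch.rand(1, 4, 4, 2) * 2 - 1   # sampling locations in [-1, 1]
out = grid_sample_gradfix.grid_sample(image, grid)
# create_graph keeps the gradient differentiable, so a loss defined on it
# (as in R1-style regularizers) can be backpropagated again.
g, = torch.autograd.grad(out.sum(), image, create_graph=True)
```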
--------------------------------------------------------------------------------
/torch_utils/ops/upfirdn2d.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <torch/extension.h>
10 | #include <ATen/cuda/CUDAContext.h>
11 | #include <c10/cuda/CUDAGuard.h>
12 | #include "upfirdn2d.h"
13 |
14 | //------------------------------------------------------------------------
15 |
16 | static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain)
17 | {
18 | // Validate arguments.
19 | TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
20 | TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x");
21 | TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
22 | TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
23 | TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
24 | TORCH_CHECK(x.numel() > 0, "x has zero size");
25 | TORCH_CHECK(f.numel() > 0, "f has zero size");
26 | TORCH_CHECK(x.dim() == 4, "x must be rank 4");
27 | TORCH_CHECK(f.dim() == 2, "f must be rank 2");
28 | TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large");
29 | TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
30 | TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
31 | TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1");
32 |
33 | // Create output tensor.
34 | const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
35 | int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
36 | int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
37 | TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
38 | torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format());
39 | TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
40 | TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large");
41 |
42 | // Initialize CUDA kernel parameters.
43 | upfirdn2d_kernel_params p;
44 | p.x = x.data_ptr();
45 | p.f = f.data_ptr();
46 | p.y = y.data_ptr();
47 | p.up = make_int2(upx, upy);
48 | p.down = make_int2(downx, downy);
49 | p.pad0 = make_int2(padx0, pady0);
50 | p.flip = (flip) ? 1 : 0;
51 | p.gain = gain;
52 | p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
53 | p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0));
54 | p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
55 | p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
56 | p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
57 | p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0));
58 | p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
59 | p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;
60 |
61 | // Choose CUDA kernel.
62 | upfirdn2d_kernel_spec spec;
63 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
64 | {
65 | spec = choose_upfirdn2d_kernel(p);
66 | });
67 |
68 | // Set looping options.
69 | p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
70 | p.loopMinor = spec.loopMinor;
71 | p.loopX = spec.loopX;
72 | p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
73 | p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;
74 |
75 | // Compute grid size.
76 | dim3 blockSize, gridSize;
77 | if (spec.tileOutW < 0) // large
78 | {
79 | blockSize = dim3(4, 32, 1);
80 | gridSize = dim3(
81 | ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
82 | (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1,
83 | p.launchMajor);
84 | }
85 | else // small
86 | {
87 | blockSize = dim3(256, 1, 1);
88 | gridSize = dim3(
89 | ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
90 | (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1,
91 | p.launchMajor);
92 | }
93 |
94 | // Launch CUDA kernel.
95 | void* args[] = {&p};
96 | AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
97 | return y;
98 | }
99 |
100 | //------------------------------------------------------------------------
101 |
102 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
103 | {
104 | m.def("upfirdn2d", &upfirdn2d);
105 | }
106 |
107 | //------------------------------------------------------------------------
108 |
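The `outW`/`outH` computation above reduces to a simple closed form; a small sketch mirroring those two lines:

```python
def upfirdn2d_output_size(in_w, in_h, up, down, pad, fw, fh):
    # Mirrors: outW = (inW*upx + padx0 + padx1 - fw + downx) / downx (and outH).
    px0, px1, py0, py1 = pad
    out_w = (in_w * up + px0 + px1 - fw + down) // down
    out_h = (in_h * up + py0 + py1 - fh + down) // down
    return out_w, out_h

# 2x upsampling with a 4-tap filter and padding (2, 1) per axis doubles the size:
print(upfirdn2d_output_size(64, 64, up=2, down=1, pad=(2, 1, 2, 1), fw=4, fh=4))
# -> (128, 128)
```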
--------------------------------------------------------------------------------
/torch_utils/ops/upfirdn2d.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <cuda_runtime.h>
10 |
11 | //------------------------------------------------------------------------
12 | // CUDA kernel parameters.
13 |
14 | struct upfirdn2d_kernel_params
15 | {
16 | const void* x;
17 | const float* f;
18 | void* y;
19 |
20 | int2 up;
21 | int2 down;
22 | int2 pad0;
23 | int flip;
24 | float gain;
25 |
26 | int4 inSize; // [width, height, channel, batch]
27 | int4 inStride;
28 | int2 filterSize; // [width, height]
29 | int2 filterStride;
30 | int4 outSize; // [width, height, channel, batch]
31 | int4 outStride;
32 | int sizeMinor;
33 | int sizeMajor;
34 |
35 | int loopMinor;
36 | int loopMajor;
37 | int loopX;
38 | int launchMinor;
39 | int launchMajor;
40 | };
41 |
42 | //------------------------------------------------------------------------
43 | // CUDA kernel specialization.
44 |
45 | struct upfirdn2d_kernel_spec
46 | {
47 | void* kernel;
48 | int tileOutW;
49 | int tileOutH;
50 | int loopMinor;
51 | int loopX;
52 | };
53 |
54 | //------------------------------------------------------------------------
55 | // CUDA kernel selection.
56 |
57 | template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
58 |
59 | //------------------------------------------------------------------------
60 |
--------------------------------------------------------------------------------
/train_boundary.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Trains semantic boundary from latent space.
3 |
4 | Basically, this file takes a collection of `latent code - attribute score`
5 | pairs and finds the separation boundary by treating the task as a binary
6 | classification problem and training a linear SVM classifier. The trained
7 | decision boundary of the SVM classifier is saved as the boundary for a
8 | particular semantic in the latent space. The normal direction of the boundary
9 | can be used to manipulate the corresponding attribute of the synthesis.
10 | """
11 |
12 | import os.path
13 | import argparse
14 | import numpy as np
15 |
16 | from utils.logger import setup_logger
17 | from utils.manipulator import train_boundary
18 |
19 | def parse_args():
20 | """Parses arguments."""
21 | parser = argparse.ArgumentParser(
22 | description='Train semantic boundary with given latent codes and '
23 | 'attribute scores.')
24 | parser.add_argument('-o', '--output_dir', type=str, required=True,
25 | help='Directory to save the output results. (required)')
26 | parser.add_argument('-c', '--latent_codes_path', type=str, required=True,
27 | help='Path to the input latent codes. (required)')
28 | parser.add_argument('-s', '--scores_path', type=str, required=True,
29 | help='Path to the input attribute scores. (required)')
30 | parser.add_argument('-n', '--chosen_num_or_ratio', type=float, default=0.02,
31 | help='How many samples to choose for training. '
32 | '(default: 0.02)')
33 | parser.add_argument('-r', '--split_ratio', type=float, default=0.7,
34 | help='Ratio with which to split training and validation '
35 | 'sets. (default: 0.7)')
36 | parser.add_argument('-V', '--invalid_value', type=float, default=None,
37 | help='Samples whose attribute score equals this '
38 | 'value will be ignored. (default: None)')
39 |
40 | return parser.parse_args()
41 |
42 |
43 | def main():
44 | """Main function."""
45 | args = parse_args()
46 | logger = setup_logger(args.output_dir, logger_name='train_boundary')
47 |
48 | logger.info('Loading latent codes.')
49 | if not os.path.isfile(args.latent_codes_path):
50 | raise ValueError(f'Latent codes `{args.latent_codes_path}` does not exist!')
51 | latent_codes = np.load(args.latent_codes_path)
52 |
53 | logger.info('Loading attribute scores.')
54 | if not os.path.isfile(args.scores_path):
55 | raise ValueError(f'Attribute scores `{args.scores_path}` does not exist!')
56 | scores = np.load(args.scores_path)
57 |
58 | boundary = train_boundary(latent_codes=latent_codes,
59 | scores=scores,
60 | chosen_num_or_ratio=args.chosen_num_or_ratio,
61 | split_ratio=args.split_ratio,
62 | invalid_value=args.invalid_value,
63 | logger=logger)
64 | np.save(os.path.join(args.output_dir, 'boundary.npy'), boundary)
65 |
66 |
67 | if __name__ == '__main__':
68 | main()
69 |
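A programmatic sketch of the same pipeline (paths and array shapes are placeholders; `train_boundary()` returns a unit-norm boundary of shape `[1, latent_dim]`):

```python
import numpy as np
from utils.logger import setup_logger
from utils.manipulator import train_boundary

latent_codes = np.load('data/z_codes.npy')      # placeholder, e.g. [N, 512]
scores = np.load('data/smiling_scores.npy')     # placeholder, e.g. [N, 1]
logger = setup_logger('outputs/smiling', logger_name='train_boundary_demo')
boundary = train_boundary(latent_codes=latent_codes, scores=scores,
                          chosen_num_or_ratio=0.02, split_ratio=0.7,
                          invalid_value=None, logger=logger)
np.save('outputs/smiling/boundary.npy', boundary)
```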
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/younesbelkada/interfacegan/9d8df611af795490abddfa6ddd0452c6ef309a41/utils/__init__.py
--------------------------------------------------------------------------------
/utils/logger.py:
--------------------------------------------------------------------------------
1 | # python3.7
2 | """Utility functions for logging."""
3 |
4 | import os
5 | import sys
6 | import logging
7 |
8 | __all__ = ['setup_logger']
9 |
10 |
11 | def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='logger'):
12 | """Sets up logger from target work directory.
13 |
14 | The function sets up a logger with `DEBUG` log level. Two handlers will
15 | be added to the logger automatically. One is the `sys.stdout` stream, with
16 | `INFO` log level, which will print important messages on the screen. The other
17 | saves all messages to the file `$WORK_DIR/$LOGFILE_NAME`. Each message is
18 | prefixed with a timestamp and log level before being logged.
19 |
20 | NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be
21 | skipped.
22 |
23 | Args:
24 | work_dir: The work directory. All intermediate files will be saved here.
25 | (default: None)
26 | logfile_name: Name of the file to save log message. (default: `log.txt`)
27 | logger_name: Unique name for the logger. (default: `logger`)
28 |
29 | Returns:
30 | A `logging.Logger` object.
31 |
32 | Raises:
33 | SystemExit: If the work directory already exists, or a logger with the
34 | specified name `logger_name` has already been set up.
35 | """
36 |
37 | logger = logging.getLogger(logger_name)
38 | # if logger.hasHandlers(): # Already existed
39 | # raise SystemExit(f'Logger name `{logger_name}` has already been set up!\n'
40 | # f'Please use another name, or otherwise the messages '
41 | # f'may be mixed between these two loggers.')
42 |
43 | logger.setLevel(logging.DEBUG)
44 | formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s")
45 |
46 | # Print log message with `INFO` level or above onto the screen.
47 | sh = logging.StreamHandler(stream=sys.stdout)
48 | sh.setLevel(logging.INFO)
49 | sh.setFormatter(formatter)
50 | logger.addHandler(sh)
51 |
52 | if not work_dir or not logfile_name:
53 | return logger
54 |
55 | if os.path.exists(work_dir):
56 | logger.warning('WARNING: Directory already exists, overwriting images')
57 | #raise SystemExit(f'Work directory `{work_dir}` has already existed!\n'
58 | # f'Please specify another one.')
59 | else:
60 | os.makedirs(work_dir)
61 | # Save log message with all levels in log file.
62 | fh = logging.FileHandler(os.path.join(work_dir, logfile_name))
63 | fh.setLevel(logging.DEBUG)
64 | fh.setFormatter(formatter)
65 | logger.addHandler(fh)
66 |
67 | return logger
68 |
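Usage follows directly from the handler setup above:

```python
from utils.logger import setup_logger

logger = setup_logger('outputs/demo', logfile_name='log.txt', logger_name='demo')
logger.info('printed to stdout and written to outputs/demo/log.txt')
logger.debug('written to the log file only (below the INFO screen threshold)')
```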
--------------------------------------------------------------------------------
141 | Comment:
142 | Dissects neurons in GANs from the perspective of spatial feature maps.
143 |