├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── Facial-Beauty-Prediction.iml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── README.md
├── beautify_image-old.py
├── beautify_image.py
├── beauty_prediction
│   ├── =0.2.3.2
│   ├── =0.93
│   ├── =3.1.1
│   ├── =3.4.0.12
│   ├── beautyrater.py
│   ├── execute_beauty_prediction.py
│   ├── execute_beauty_prediction_single.py
│   ├── faces_dataset.py
│   ├── sample.png
│   ├── sample.png.html
│   ├── temp
│   │   └── img
│   │       ├── sample copy.png
│   │       └── sample.png
│   └── train_beauty_prediction.py
├── config.py
├── dataset.py
├── dataset_tool-old.py
├── dataset_tool.py
├── dnnlib
│   ├── __init__.py
│   ├── submission
│   │   ├── __init__.py
│   │   ├── _internal
│   │   │   └── run.py
│   │   ├── run_context.py
│   │   └── submit.py
│   ├── tflib
│   │   ├── __init__.py
│   │   ├── autosummary.py
│   │   ├── network.py
│   │   ├── optimizer.py
│   │   └── tfutil.py
│   └── util.py
├── docs
│   ├── 00001.png
│   ├── beautification_samples.png
│   └── samples.png
├── encoder
│   ├── __init__.py
│   ├── generator_model.py
│   └── perceptual_model.py
├── eyeglasses.npy
├── feature_extract.py
├── feature_visualization.py
├── ffhq_dataset
│   ├── __init__.py
│   ├── face_alignment.py
│   ├── landmarks_detector.py
│   ├── latent_directions
│   │   ├── age.npy
│   │   ├── gender.npy
│   │   └── smile.npy
│   └── latent_representations
│       ├── donald_trump_01.npy
│       └── hillary_clinton_01.npy
├── gender.npy
├── id_features.npy
├── identity_prediction
│   ├── __init__.py
│   ├── facenet.py
│   ├── identity_predict.py
│   └── test
│       └── img
│           └── test.png
├── inference_cond.py
├── label_ffhq.py
├── legacy.py
├── loss.py
├── metrics
│   ├── __init__.py
│   ├── frechet_inception_distance.py
│   ├── inception_score.py
│   ├── ms_ssim.py
│   └── sliced_wasserstein.py
├── metrics_evaluation
│   ├── __init__.py
│   ├── allmodel
│   ├── brisque.py
│   ├── frechet_inception_distance.py
│   ├── id-0-combined-brisque.csv
│   ├── id-0-combined-brisque.txt
│   ├── id-0-combined-id-preserving.csv
│   ├── id-0-combined-id-preserving.txt
│   ├── id-1-combined-brisque.csv
│   ├── id-1-combined-brisque.txt
│   ├── id-1-combined-id-preserving.csv
│   ├── id-1-combined-id-preserving.txt
│   ├── id-1-combined.csv
│   ├── id-1-combined.txt
│   ├── id_preserving.py
│   ├── image_quality_metric
│   │   ├── Python
│   │   │   ├── __init__.py
│   │   │   ├── allmodel
│   │   │   ├── brisquequality.py
│   │   │   └── libsvm
│   │   │       ├── COPYRIGHT
│   │   │       ├── FAQ.html
│   │   │       ├── Makefile
│   │   │       ├── Makefile.win
│   │   │       ├── README
│   │   │       ├── __init__.py
│   │   │       ├── heart_scale
│   │   │       ├── java
│   │   │       │   ├── Makefile
│   │   │       │   ├── libsvm.jar
│   │   │       │   ├── libsvm
│   │   │       │   │   ├── svm.java
│   │   │       │   │   ├── svm.m4
│   │   │       │   │   ├── svm_model.java
│   │   │       │   │   ├── svm_node.java
│   │   │       │   │   ├── svm_parameter.java
│   │   │       │   │   ├── svm_print_interface.java
│   │   │       │   │   └── svm_problem.java
│   │   │       │   ├── svm_predict.java
│   │   │       │   ├── svm_scale.java
│   │   │       │   ├── svm_toy.java
│   │   │       │   ├── svm_train.java
│   │   │       │   └── test_applet.html
│   │   │       ├── libsvm.so.2
│   │   │       ├── matlab
│   │   │       │   ├── Makefile
│   │   │       │   ├── README
│   │   │       │   ├── libsvmread.c
│   │   │       │   ├── libsvmwrite.c
│   │   │       │   ├── make.m
│   │   │       │   ├── svm_model_matlab.c
│   │   │       │   ├── svm_model_matlab.h
│   │   │       │   ├── svmpredict.c
│   │   │       │   └── svmtrain.c
│   │   │       ├── python
│   │   │       │   ├── Makefile
│   │   │       │   ├── README
│   │   │       │   ├── __init__.py
│   │   │       │   ├── allmodel
│   │   │       │   ├── brisquequality.py
│   │   │       │   ├── svm.py
│   │   │       │   └── svmutil.py
│   │   │       ├── svm-predict
│   │   │       ├── svm-predict.c
│   │   │       ├── svm-scale
│   │   │       ├── svm-scale.c
│   │   │       ├── svm-toy
│   │   │       │   ├── gtk
│   │   │       │   │   ├── Makefile
│   │   │       │   │   ├── callbacks.cpp
│   │   │       │   │   ├── callbacks.h
│   │   │       │   │   ├── interface.c
│   │   │       │   │   ├── interface.h
│   │   │       │   │   ├── main.c
│   │   │       │   │   └── svm-toy.glade
│   │   │       │   ├── qt
│   │   │       │   │   ├── Makefile
│   │   │       │   │   └── svm-toy.cpp
│   │   │       │   └── windows
│   │   │       │       └── svm-toy.cpp
│   │   │       ├── svm-train
│   │   │       ├── svm-train.c
│   │   │       ├── svm.cpp
│   │   │       ├── svm.def
│   │   │       ├── svm.h
│   │   │       ├── svm.o
│   │   │       ├── tools
│   │   │       │   ├── README
│   │   │       │   ├── checkdata.py
│   │   │       │   ├── easy.py
│   │   │       │   ├── grid.py
│   │   │       │   └── subset.py
│   │   │       └── windows
│   │   │           ├── libsvm.dll
│   │   │           ├── libsvmread.mexw64
│   │   │           ├── libsvmwrite.mexw64
│   │   │           ├── svm-predict.exe
│   │   │           ├── svm-scale.exe
│   │   │           ├── svm-toy.exe
│   │   │           ├── svm-train.exe
│   │   │           ├── svmpredict.mexw64
│   │   │           └── svmtrain.mexw64
│   │   ├── README.md
│   │   └── __init__.py
│   ├── interface-stylegan-0-combined-brisque.csv
│   ├── interface-stylegan-0-combined-brisque.txt
│   ├── interface-stylegan-0-combined-id-preserving.csv
│   ├── interface-stylegan-0-combined-id-preserving.txt
│   ├── interface-stylegan-128x128-combined-brisque.csv
│   ├── interface-stylegan-128x128-combined-brisque.txt
│   ├── interface-stylegan-128x128-combined-id-preserving.csv
│   ├── interface-stylegan-128x128-combined-id-preserving.txt
│   ├── metric_base.py
│   ├── openface
│   │   ├── .github
│   │   │   ├── ISSUE_TEMPLATE.md
│   │   │   ├── PULL_REQUEST_TEMPLATE.md
│   │   │   └── stale.yml
│   │   ├── .gitignore
│   │   ├── .gitmodules
│   │   ├── .travis.yml
│   │   ├── CONTRIBUTING.md
│   │   ├── Dockerfile
│   │   ├── LICENSE
│   │   ├── README.md
│   │   ├── api-docs
│   │   │   ├── Makefile
│   │   │   ├── README.md
│   │   │   ├── _static
│   │   │   │   ├── sp.js
│   │   │   │   └── track.js
│   │   │   ├── conf.py
│   │   │   ├── index.rst
│   │   │   ├── openface.rst
│   │   │   └── requirements.txt
│   │   ├── batch-represent
│   │   │   ├── batch-represent.lua
│   │   │   ├── dataset.lua
│   │   │   ├── main.lua
│   │   │   └── opts.lua
│   │   ├── cloc.sh
│   │   ├── demos
│   │   │   ├── classifier.py
│   │   │   ├── classifier_webcam.py
│   │   │   ├── compare.py
│   │   │   ├── sphere.py
│   │   │   ├── vis-outputs.lua
│   │   │   └── web
│   │   │       ├── bower.json
│   │   │       ├── create-cert.sh
│   │   │       ├── create-unknown-vectors.py
│   │   │       ├── css
│   │   │       │   └── main.css
│   │   │       ├── examples.md
│   │   │       ├── favicon.ico
│   │   │       ├── images
│   │   │       │   └── FacerecWebDemo.ai
│   │   │       ├── index.html
│   │   │       ├── install-deps.sh
│   │   │       ├── js
│   │   │       │   ├── openface-demo.js
│   │   │       │   └── utils.js
│   │   │       ├── requirements.txt
│   │   │       ├── simpleSSLServer.py
│   │   │       ├── start-servers.sh
│   │   │       ├── tls
│   │   │       │   ├── server.crt
│   │   │       │   ├── server.key
│   │   │       │   └── server.pem
│   │   │       ├── update-vendor-deps.sh
│   │   │       ├── vendor
│   │   │       │   ├── bower.json
│   │   │       │   ├── css
│   │   │       │   │   ├── bootstrap-dialog.css
│   │   │       │   │   ├── bootstrap-dialog.min.css
│   │   │       │   │   ├── bootstrap-theme.css
│   │   │       │   │   ├── bootstrap-theme.css.map
│   │   │       │   │   ├── bootstrap-theme.min.css
│   │   │       │   │   ├── bootstrap-toggle.min.css
│   │   │       │   │   ├── bootstrap.css
│   │   │       │   │   ├── bootstrap.css.map
│   │   │       │   │   ├── bootstrap.min.css
│   │   │       │   │   ├── bootstrap2-toggle.min.css
│   │   │       │   │   ├── font-awesome.css
│   │   │       │   │   ├── font-awesome.css.map
│   │   │       │   │   └── font-awesome.min.css
│   │   │       │   ├── fonts
│   │   │       │   │   ├── FontAwesome.otf
│   │   │       │   │   ├── fontawesome-webfont.eot
│   │   │       │   │   ├── fontawesome-webfont.svg
│   │   │       │   │   ├── fontawesome-webfont.ttf
│   │   │       │   │   ├── fontawesome-webfont.woff
│   │   │       │   │   ├── fontawesome-webfont.woff2
│   │   │       │   │   ├── glyphicons-halflings-regular.eot
│   │   │       │   │   ├── glyphicons-halflings-regular.svg
│   │   │       │   │   ├── glyphicons-halflings-regular.ttf
│   │   │       │   │   ├── glyphicons-halflings-regular.woff
│   │   │       │   │   └── glyphicons-halflings-regular.woff2
│   │   │       │   ├── js
│   │   │       │   │   ├── bootstrap-dialog.js
│   │   │       │   │   ├── bootstrap-dialog.min.js
│   │   │       │   │   ├── bootstrap-toggle.min.js
│   │   │       │   │   ├── bootstrap-toggle.min.js.map
│   │   │       │   │   ├── bootstrap.js
│   │   │       │   │   ├── bootstrap.min.js
│   │   │       │   │   ├── bootstrap2-toggle.min.js
│   │   │       │   │   ├── bootstrap2-toggle.min.js.map
│   │   │       │   │   ├── handlebars.min.js
│   │   │       │   │   ├── jquery.js
│   │   │       │   │   ├── jquery.min.js
│   │   │       │   │   ├── jquery.min.map
│   │   │       │   │   ├── jstat.min.js
│   │   │       │   │   ├── npm.js
│   │   │       │   │   └── ping.min.js
│   │   │       │   ├── less
│   │   │       │   │   ├── animated.less
│   │   │       │   │   ├── bootstrap-dialog.less
│   │   │       │   │   ├── bordered-pulled.less
│   │   │       │   │   ├── core.less
│   │   │       │   │   ├── fixed-width.less
│   │   │       │   │   ├── font-awesome.less
│   │   │       │   │   ├── icons.less
│   │   │       │   │   ├── larger.less
│   │   │       │   │   ├── list.less
│   │   │       │   │   ├── mixins.less
│   │   │       │   │   ├── path.less
│   │   │       │   │   ├── rotated-flipped.less
│   │   │       │   │   ├── stacked.less
│   │   │       │   │   └── variables.less
│   │   │       │   └── scss
│   │   │       │       ├── _animated.scss
│   │   │       │       ├── _bordered-pulled.scss
│   │   │       │       ├── _core.scss
│   │   │       │       ├── _fixed-width.scss
│   │   │       │       ├── _icons.scss
│   │   │       │       ├── _larger.scss
│   │   │       │       ├── _list.scss
│   │   │       │       ├── _mixins.scss
│   │   │       │       ├── _path.scss
│   │   │       │       ├── _rotated-flipped.scss
│   │   │       │       ├── _stacked.scss
│   │   │       │       ├── _variables.scss
│   │   │       │       └── font-awesome.scss
│   │   │       └── websocket-server.py
│   │   ├── docs
│   │   │   ├── css
│   │   │   │   └── extra.css
│   │   │   ├── demo-1-web.md
│   │   │   ├── demo-2-comparison.md
│   │   │   ├── demo-3-classifier.md
│   │   │   ├── demo-4-sphere.md
│   │   │   ├── faq.md
│   │   │   ├── index.md
│   │   │   ├── js
│   │   │   │   ├── extra.js
│   │   │   │   └── sp.js
│   │   │   ├── models-and-accuracies.md
│   │   │   ├── release-notes.md
│   │   │   ├── setup.md
│   │   │   ├── training-new-models.md
│   │   │   ├── usage.md
│   │   │   └── visualizations.md
│   │   ├── evaluation
│   │   │   ├── lfw-classification-unknown.py
│   │   │   ├── lfw-classification.py
│   │   │   └── lfw.py
│   │   ├── images
│   │   │   ├── dlib-landmark-mean.png
│   │   │   ├── examples
│   │   │   │   ├── adams.jpg
│   │   │   │   ├── carell.jpg
│   │   │   │   ├── clapton-1.jpg
│   │   │   │   ├── clapton-2.jpg
│   │   │   │   ├── lennon-1.jpg
│   │   │   │   ├── lennon-2.jpg
│   │   │   │   └── longoria-cooper.jpg
│   │   │   ├── nn4.v1.conv1.lennon-1.png
│   │   │   ├── nn4.v1.conv1.lennon-2.png
│   │   │   ├── nn4.v1.loss.png
│   │   │   ├── performance.png
│   │   │   ├── sphere-demo
│   │   │   │   ├── data-afterlives-poster.jpg
│   │   │   │   ├── demo.gif
│   │   │   │   ├── exhibit-amos.png
│   │   │   │   ├── exhibits-all.png
│   │   │   │   ├── exhibits-nosenzo.png
│   │   │   │   └── screenshot.png
│   │   │   ├── summary.ai
│   │   │   ├── summary.jpg
│   │   │   ├── train-tsne.png
│   │   │   ├── val-tsne.png
│   │   │   └── youtube-web.gif
│   │   ├── mkdocs.yml
│   │   ├── models
│   │   │   ├── dlib
│   │   │   │   ├── mean.csv
│   │   │   │   └── std.csv
│   │   │   ├── get-models.sh
│   │   │   └── openface
│   │   │       ├── nn2.def.lua
│   │   │       ├── nn4.def.lua
│   │   │       ├── nn4.small1.def.lua
│   │   │       ├── nn4.small2.def.lua
│   │   │       ├── resnet1.def.lua
│   │   │       ├── vgg-face.def.lua
│   │   │       └── vgg-face.small1.def.lua
│   │   ├── opencv-dlib-torch.Dockerfile
│   │   ├── openface
│   │   │   ├── __init__.py
│   │   │   ├── align_dlib.py
│   │   │   ├── data.py
│   │   │   ├── helper.py
│   │   │   ├── openface_server.lua
│   │   │   ├── torch_neural_net.lutorpy.py
│   │   │   └── torch_neural_net.py
│   │   ├── requirements.txt
│   │   ├── run-tests.sh
│   │   ├── setup.py
│   │   ├── tests
│   │   │   ├── __init__.py
│   │   │   ├── openface_api_tests.py
│   │   │   ├── openface_batch_represent_tests.py
│   │   │   ├── openface_demo_tests.py
│   │   │   └── openface_neural_net_training_tests.py
│   │   ├── training
│   │   │   ├── OpenFaceOptim.lua
│   │   │   ├── attic
│   │   │   │   ├── OpenFaceOptim.lua
│   │   │   │   ├── model.lua
│   │   │   │   ├── sanitize.lua
│   │   │   │   ├── test-hardNeg.lua
│   │   │   │   ├── test.lua
│   │   │   │   └── train.lua
│   │   │   ├── data.lua
│   │   │   ├── dataset-issue-132.lua
│   │   │   ├── dataset.lua
│   │   │   ├── donkey.lua
│   │   │   ├── lfw-latest.sh
│   │   │   ├── main.lua
│   │   │   ├── model.lua
│   │   │   ├── opts.lua
│   │   │   ├── plot-loss.py
│   │   │   ├── requirements.txt
│   │   │   ├── test.lua
│   │   │   ├── train.lua
│   │   │   └── util.lua
│   │   └── util
│   │       ├── align-dlib.py
│   │       ├── annotate-image.py
│   │       ├── check-links.py
│   │       ├── create-train-val-split.py
│   │       ├── detect-outliers.py
│   │       ├── email-broken-links.sh
│   │       ├── print-network-table.lua
│   │       ├── profile-network.lua
│   │       ├── profile-pipeline.py
│   │       ├── prune-dataset.py
│   │       └── tsne.py
│   ├── ori-0-combined-brisque.csv
│   ├── ori-0-combined-brisque.txt
│   ├── ori-0-combined-id-presearving.txt
│   ├── ori-0-combined-id-preserving.csv
│   ├── ori-0-combined-recloss-brisque.csv
│   ├── ori-0-combined-recloss-brisque.txt
│   ├── ori-0-combined-recloss-id-preserving.csv
│   ├── ori-0-combined-recloss-id-preserving.txt
│   ├── ori-0-combined-recloss-nolabelloss-brisque.csv
│   ├── ori-0-combined-recloss-nolabelloss-brisque.txt
│   ├── ori-0-combined-recloss-nolabelloss-id-preserving.csv
│   ├── ori-0-combined-recloss-nolabelloss-id-preserving.txt
│   ├── ori-6000k-id-preserving.csv
│   ├── ori-6000k-id-preserving.txt
│   ├── ori-6000k-id-recloss-nolabel-preserving.csv
│   ├── ori-6000k-id-recloss-nolabel-preserving.txt
│   ├── ori-6000k-id-recloss-preserving.csv
│   ├── ori-6000k-id-recloss-preserving.txt
│   ├── test-id.csv
│   ├── test-id.txt
│   ├── test.csv
│   └── test.txt
├── misc.py
├── networks.py
├── requirements-pip.txt
├── run_metrics.py
├── selectimages.py
├── test.py
├── tfutil.py
├── train-old.py
├── train.py
├── tsne.py
├── util_scripts.py
└── utils
    ├── plot_beauty_distribution.py
    └── transform_images.py
/.gitignore:
--------------------------------------------------------------------------------
1 | */.DS_Store
2 | .DS_Store
3 | beauty_prediction/trained_model/
4 | models/
5 | datasets/
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | .hypothesis/
53 | .pytest_cache/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | data/
111 | *.png
112 | *.out
113 | results/
114 | cache/
115 |
116 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Default ignored files
3 | /workspace.xml
--------------------------------------------------------------------------------
/.idea/Facial-Beauty-Prediction.iml:
--------------------------------------------------------------------------------
(XML markup stripped from this dump)
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
(XML markup stripped from this dump)
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
(XML markup stripped from this dump)
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
(XML markup stripped from this dump)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GAN-Based Facial Attractiveness Enhancement
2 |
3 | This is the code repository for our manuscript: [https://arxiv.org/abs/2006.02766](https://arxiv.org/abs/2006.02766)
4 |
5 | ## README, cleaned-up code, and models are coming soon.
6 |
7 | #### InterFaceGAN- and StyleGAN-Based Beautification
8 | We use the [stylegan-ffhq-1024x1024.pkl](https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ) provided by the StyleGAN repo.
9 |
10 | Here is the beauty boundary we trained. [beauty boundary](https://drive.google.com/drive/folders/1VD1aKG9SgQ8GhyISdsScLB9QSKYzn4Nb?usp=sharing)
11 |
12 | For the editing part, please refer to the InterFaceGAN repo; a short latent-editing sketch is also included at the end of this README. :)
13 |
14 | This repo contains our modified Beholder-GAN, the evaluation code, and some files that still need to be cleaned up.
15 |
16 | #### Enhanced Version of Beholder-GAN
17 |
18 | Here is our pretrained model for modified Beholder-GAN. [beholder-id.pkl](https://drive.google.com/file/d/1rUZ2bmXl0Re952l4QwO1s4cJjn3kKJ4C/view?usp=sharing)
19 |
20 | We trained Beholder-GAN on FFHQ instead of CelebA-HQ. Because the Beholder-GAN paper provides limited training details, we changed the minibatch size and set the resolution to 128x128 to fit the machine we had at the time, leaving everything else untouched. Unfortunately, we could not reproduce the performance reported in the Beholder-GAN paper.
21 |
22 | ## Datasets
23 |
24 | The datasets we worked on can be found in these links:
25 | * FFHQ: [FFHQ](https://github.com/NVlabs/ffhq-dataset)
26 |
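27 | #### Latent Editing Sketch
28 | 
29 | A minimal sketch of the linear latent-space edit that InterFaceGAN-style beautification performs. The file names and the step sizes `alpha` below are illustrative assumptions, not fixed by this repo:
30 | 
31 | ```python
32 | import numpy as np
33 | 
34 | # hypothetical inputs: a saved latent code and the trained beauty boundary
35 | w = np.load('latent.npy')                  # latent code, shape (1, 512)
36 | boundary = np.load('beauty_boundary.npy')  # unit boundary normal, shape (1, 512)
37 | 
38 | # moving the latent along the boundary normal strengthens or weakens the attribute
39 | for alpha in [-3.0, -1.5, 0.0, 1.5, 3.0]:
40 |     w_edited = w + alpha * boundary
41 |     # feed w_edited to the StyleGAN synthesis network to render the edited face
42 | ```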
--------------------------------------------------------------------------------
/beauty_prediction/=0.2.3.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/beauty_prediction/=0.2.3.2
--------------------------------------------------------------------------------
/beauty_prediction/=0.93:
--------------------------------------------------------------------------------
1 | Requirement already satisfied: lmdb in /usr/local/lib/python3.7/site-packages (0.97)
2 |
--------------------------------------------------------------------------------
/beauty_prediction/=3.1.1:
--------------------------------------------------------------------------------
1 | Requirement already satisfied: Pillow in /usr/local/lib/python3.7/site-packages (6.1.0)
2 |
--------------------------------------------------------------------------------
/beauty_prediction/=3.4.0.12:
--------------------------------------------------------------------------------
1 | Collecting opencv-python
2 | Downloading https://files.pythonhosted.org/packages/6a/a8/f051a1ec9a08312d76a5b8b663d831c91de24ec80a073a3303a1617aaef1/opencv_python-4.1.1.26-cp37-cp37m-macosx_10_8_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (51.6MB)
3 | Requirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/site-packages (from opencv-python) (1.16.4)
4 | Installing collected packages: opencv-python
5 | Successfully installed opencv-python-4.1.1.26
6 |
--------------------------------------------------------------------------------
/beauty_prediction/execute_beauty_prediction_single.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | import argparse
3 | import torch
4 | import torch.nn as nn
5 | import torch.backends.cudnn as cudnn
6 | from torchvision import transforms, models
7 | from torch.autograd import Variable
8 | import os
9 | import numpy as np
10 | from PIL import Image
11 | import csv
12 |
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('--model', type=str, default='experiments/train_beauty_vgg/VGG16_beauty_rates-new.pt', help='path to the trained VGG16 model')
15 | parser.add_argument('--image', type=str, default='sample.png', help='path to the input image')
16 | parser.add_argument('--beauty_rates', type=int, default=60, help='number of beauty rates/output neurons for the last layer')
17 | parser.add_argument('--pad_x', type=int, default=0, help='pixels to pad the given images from left and right')
18 | parser.add_argument('--pad_y', type=int, default=0, help='pixels to pad the given images from up and down')
19 | opt = parser.parse_args()
20 | print(opt)
21 |
22 | # define cuda as device if available
23 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24 | cudnn.benchmark = True
25 |
26 | # VGG-16 Takes 224x224 images as input
27 | transform=transforms.Compose([
28 | transforms.Pad((opt.pad_x,opt.pad_y)),
29 | transforms.Resize(224),
30 | transforms.CenterCrop(224),
31 | transforms.ToTensor(),
32 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
33 | ])
34 |
35 | # Load the pretrained model from pytorch
36 | vgg16 = models.vgg16_bn(pretrained=True)
37 | #print(vgg16.classifier[6].out_features) # 1000
38 |
39 | # Freeze training for all layers
40 | for param in vgg16.features.parameters():
41 |     param.requires_grad = False
42 | # Newly created modules have requires_grad=True by default
43 | num_features = vgg16.classifier[6].in_features
44 | features = list(vgg16.classifier.children())[:-1] # Remove last layer
45 | features.extend([nn.Linear(num_features, opt.beauty_rates)]) # Add our layer with opt.beauty_rates outputs
46 | vgg16.classifier = nn.Sequential(*features) # Replace the model classifier
47 |
48 | # check if several GPUs exist and move model to gpu if available
49 | if torch.cuda.device_count() > 1:
50 | print("Running on", torch.cuda.device_count(), "GPUs.")
51 | vgg16 = nn.DataParallel(vgg16)
52 | else:
53 | print("Running on single GPU.")
54 | vgg16.to(device)
55 |
56 | # load weights pretrained on the beauty-labeled dataset
57 | vgg16.load_state_dict(torch.load(opt.model))
58 | vgg16.eval()
59 |
60 | # open image, transform and upload to gpu
61 | img = Image.open(opt.image)
62 | img = transform(img)
63 | img = torch.from_numpy(np.asarray(img))
64 | if torch.cuda.is_available():
65 | with torch.no_grad():
66 | img = Variable(img.cuda())
67 | else:
68 | with torch.no_grad():
69 | img = Variable(img)
70 | img = torch.unsqueeze(img,0)
71 |
72 | # infer image to receive beauty rates
73 | output = vgg16(img)
74 |
75 | print("beauty_rates:")
76 | print(output)
77 | print("mean:")
78 | print(output.mean())
79 |
80 |
81 |
82 |
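83 | # Example invocation (uses the argparse defaults above; paths are illustrative):
84 | #   python execute_beauty_prediction_single.py \
85 | #       --model experiments/train_beauty_vgg/VGG16_beauty_rates-new.pt \
86 | #       --image sample.png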
--------------------------------------------------------------------------------
/beauty_prediction/faces_dataset.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import numpy as np
3 | import torch
4 | from torch.utils.data.dataset import Dataset
5 | from PIL import Image
6 | import matplotlib.pyplot as plt
7 |
8 | ##### Dataset for Face images with beauty rates #####
9 | # Each entry will contain: #
10 | # Face image #
11 | # List of 60 beauty grades in the range of [1,5] #
12 |
13 | raters_number = 60
14 |
15 | class FacesDataset(Dataset):
16 |
17 |     def __init__(self, folder_dataset, transform=None):
18 |         self.transform = transform
19 | 
20 |         # lists to store the dataset (instance attributes, so instances don't share state):
21 |         self.images = []        # each entry is a string with an image path
22 |         self.beauty_rates = []  # each entry is a 60-element numpy array of floats in [0,1]
23 |
24 | # Dictionary to load dataset
25 | # key: image name
26 | # value: list of 60 beauty rates from raters
27 | dataset_dict = {}
28 |
29 | # read raters csv file
30 | with open(folder_dataset + '/All_Ratings.csv', 'r') as csvfile:
31 |
32 | raw_dataset = csv.reader(csvfile, delimiter=',', quotechar='|')
33 | for i, row in enumerate(raw_dataset):
34 | row = ','.join(row)
35 | row = row.split(',')
36 |
37 | # create list of rates for each image
38 | if row[1] in dataset_dict:
39 | dataset_dict[row[1]][0].append(float(row[2]))
40 | else:
41 | dataset_dict[row[1]] = [[float(row[2])]]
42 |
43 | # move dict to lists, convert beauty rates to numpy ranged in [0,1]
44 | for key, value in dataset_dict.items():
45 | self.images.append(folder_dataset + '/img/' + key)
46 | self.beauty_rates.append((np.asarray(value, dtype=np.float32) / 5.0))
47 |
48 | # Override to give PyTorch access to any image on the dataset
49 | def __getitem__(self, index):
50 |
51 | img = Image.open(self.images[index])
52 | #img = img.convert('RGB') #TODO: check if necessary
53 |
54 | # perform transform only on the image (!!)
55 | if self.transform is not None:
56 | img = self.transform(img)
57 |
58 | # Convert image and beauty rates to torch tensors
59 | img = torch.from_numpy(np.asarray(img))
60 | features = torch.from_numpy(np.asarray(self.beauty_rates[index]).reshape([1,raters_number]))
61 |
62 | # compute class for beauty rates in [1,10]
63 | features_class = (torch.mean(features)* 10.0).int()
64 |
65 | #return img, features, Is_Beauty
66 | return img, features, features_class
67 |
68 | # Override to give PyTorch size of dataset
69 | def __len__(self):
70 | return len(self.images)
71 |
72 | if __name__ == "__main__":
73 |
74 | train_dataset = FacesDataset('../datasets/beauty_dataset')
75 |
76 | # sample one image and beauty rates to test correlation
77 | image, features, features_class = train_dataset.__getitem__(5)
78 |
79 | print("beauty rates: "+ str(features))
80 | print("beauty rate mean: "+ str(features.mean()))
81 | print("beauty rate class: "+ str(features_class))
82 |
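83 |     # Sketch: wrapping the dataset in a PyTorch DataLoader for training
84 |     # (batch size is illustrative; a transform producing fixed-size tensors is assumed):
85 |     #   from torch.utils.data import DataLoader
86 |     #   loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
87 |     #   for imgs, rates, classes in loader:
88 |     #       ...  # training step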
--------------------------------------------------------------------------------
/beauty_prediction/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/beauty_prediction/sample.png
--------------------------------------------------------------------------------
/beauty_prediction/temp/img/sample copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/beauty_prediction/temp/img/sample copy.png
--------------------------------------------------------------------------------
/beauty_prediction/temp/img/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/beauty_prediction/temp/img/sample.png
--------------------------------------------------------------------------------
/dnnlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import submission
9 |
10 | from .submission.run_context import RunContext
11 |
12 | from .submission.submit import SubmitTarget
13 | from .submission.submit import PathType
14 | from .submission.submit import SubmitConfig
15 | from .submission.submit import get_path_from_template
16 | from .submission.submit import submit_run
17 |
18 | from .util import EasyDict
19 |
20 | submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
21 |
--------------------------------------------------------------------------------
/dnnlib/submission/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import run_context
9 | from . import submit
10 |
--------------------------------------------------------------------------------
/dnnlib/submission/_internal/run.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | """Helper for launching run functions in computing clusters.
9 |
10 | During the submit process, this file is copied to the appropriate run dir.
11 | When the job is launched in the cluster, this module is the first thing that
12 | is run inside the docker container.
13 | """
14 |
15 | import os
16 | import pickle
17 | import sys
18 |
19 | # PYTHONPATH should have been set so that the run_dir/src is in it
20 | import dnnlib
21 |
22 | def main():
23 |     if len(sys.argv) < 4:
24 | raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
25 |
26 | run_dir = str(sys.argv[1])
27 | task_name = str(sys.argv[2])
28 | host_name = str(sys.argv[3])
29 |
30 | submit_config_path = os.path.join(run_dir, "submit_config.pkl")
31 |
32 | # SubmitConfig should have been pickled to the run dir
33 | if not os.path.exists(submit_config_path):
34 | raise RuntimeError("SubmitConfig pickle file does not exist!")
35 |
36 | submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb"))
37 | dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
38 |
39 | submit_config.task_name = task_name
40 | submit_config.host_name = host_name
41 |
42 | dnnlib.submission.submit.run_wrapper(submit_config)
43 |
44 | if __name__ == "__main__":
45 | main()
46 |
--------------------------------------------------------------------------------
/dnnlib/tflib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | from . import autosummary
9 | from . import network
10 | from . import optimizer
11 | from . import tfutil
12 |
13 | from .tfutil import *
14 | from .network import Network
15 |
16 | from .optimizer import Optimizer
17 |
--------------------------------------------------------------------------------
/docs/00001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/docs/00001.png
--------------------------------------------------------------------------------
/docs/beautification_samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/docs/beautification_samples.png
--------------------------------------------------------------------------------
/docs/samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/docs/samples.png
--------------------------------------------------------------------------------
/encoder/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/encoder/__init__.py
--------------------------------------------------------------------------------
/eyeglasses.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/eyeglasses.npy
--------------------------------------------------------------------------------
/feature_extract.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import argparse
4 | import os
5 | import sys
6 | import math
7 | import pickle
8 | from sklearn.svm import SVC
9 | from scipy import misc
10 | from six.moves import xrange
11 | import cv2
12 | from sklearn.preprocessing import normalize
13 | import glob
14 |
15 | def load_pb(path_to_pb):
16 | with tf.io.gfile.GFile(path_to_pb, "rb") as f:
17 | graph_def = tf.compat.v1.GraphDef()
18 | graph_def.ParseFromString(f.read())
19 | with tf.compat.v1.Graph().as_default() as graph:
20 | tf.import_graph_def(graph_def, name='')
21 | return graph
22 |
23 | def preprocess_img(x):
24 | x = cv2.resize(x, (160, 160))
25 | # mean = np.mean(x)
26 | # std = np.std(x)
27 | # std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
28 | # y = np.multiply(np.subtract(x, mean), 1/std_adj)
29 | y = (np.float32(x) - 127.5) / 128.0
30 | return np.expand_dims(y, 0)
31 |
32 | parser = argparse.ArgumentParser(description='face identity prediction')
33 | parser.add_argument('--model', default='../model_results/facenet/20180402-114759/20180402-114759.pb', help='path to load model.')
34 | parser.add_argument('--dataset', type=str, default='../datasets/ffhq_128x128/img', help='path to the dataset we want to label')
35 | args = parser.parse_args()
36 |
37 | graph = load_pb(args.model)
38 | input = graph.get_tensor_by_name('input:0')
39 | output = graph.get_tensor_by_name('embeddings:0')
40 | phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
41 |
42 | id_features=[]
43 | config = tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=True)
44 | config.gpu_options.allow_growth = True
45 |
46 | with tf.compat.v1.Session(graph=graph, config=config) as sess:  # matches the compat.v1 usage above
47 | images=sorted(glob.glob(os.path.join(args.dataset,"*.png")))
48 | for i, file in enumerate(images):
49 | if i==2000:
50 | break
51 | img = cv2.imread(file)
52 | print(file)
53 | embed = sess.run(output, feed_dict={input: preprocess_img(img), phase_train_placeholder: False})
54 | embed=normalize(embed)
55 | embed=embed.reshape((512,))
56 | id_features.append(embed)
57 |
58 | id_features = np.array(id_features, dtype=np.float32)
59 | print("shape of id_feature is")
60 | print(id_features.shape)
61 | np.save('id_features.npy', id_features)
62 | # pickle.dump(id_features, open(os.path.join(args.dataset, "id_features.p"), "wb"))
63 |
64 |
65 |
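66 | # Example invocation (paths follow the argparse defaults above):
67 | #   python feature_extract.py \
68 | #       --model ../model_results/facenet/20180402-114759/20180402-114759.pb \
69 | #       --dataset ../datasets/ffhq_128x128/img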
--------------------------------------------------------------------------------
/feature_visualization.py:
--------------------------------------------------------------------------------
1 | from tsne import tsne
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import os
5 | import csv
6 |
7 | import sys
8 | import glob
9 | import pylab
10 |
11 |
12 | if __name__ == "__main__":
13 | print("Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset.")
14 | print("Running example on 2,500 MNIST digits...")
15 | X = np.load('id_features.npy')
16 | labels1 = (np.load("gender.npy")).reshape((-1,))
17 | labels2 = (np.load("eyeglasses.npy")).reshape((-1,))
18 | Y = tsne(X, 2, 50, 20.0)
19 | colors = ['b','r','g']
20 | plt.close('all')
21 | plt.ioff()
22 | fig = plt.figure(figsize=(10, 10))
23 | fig.clf()
24 |
25 | for x,y,label in zip(Y[:, 0], Y[:, 1],labels1):
26 | if label==1:
27 | labelname="Female"
28 | else:
29 | labelname="Male"
30 | plt.scatter(x,y,color=colors[label],label=labelname, alpha=0.5)
31 | plt.title("FaceNet-Gender")
32 | plt.savefig("FaceNet-Gender.png")
33 |
34 | plt.close('all')
35 | plt.ioff()
36 | fig = plt.figure(figsize=(10, 10))
37 | fig.clf()
38 | for x,y,label in zip(Y[:, 0], Y[:, 1],labels2):
39 | if label==1:
40 | labelname="W/O Eyeglasses"
41 | else:
42 | labelname="With Eyeglasses"
43 | plt.scatter(x,y,color=colors[label],label=labelname, alpha=0.5)
44 | plt.title("FaceNet-Eyeglasses")
45 | plt.savefig("FaceNet-Eyeglasses.png")
46 |
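47 | # Expects id_features.npy (N x 512 FaceNet embeddings from feature_extract.py),
48 | # plus gender.npy and eyeglasses.npy (N x 1 labels from label_ffhq.py) in the
49 | # working directory; writes FaceNet-Gender.png and FaceNet-Eyeglasses.png.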
--------------------------------------------------------------------------------
/ffhq_dataset/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/__init__.py
--------------------------------------------------------------------------------
/ffhq_dataset/landmarks_detector.py:
--------------------------------------------------------------------------------
1 | import dlib
2 |
3 |
4 | class LandmarksDetector:
5 | def __init__(self, predictor_model_path):
6 | """
7 | :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
8 | """
9 | self.detector = dlib.get_frontal_face_detector() # cnn_face_detection_model_v1 also can be used
10 | self.shape_predictor = dlib.shape_predictor(predictor_model_path)
11 |
12 | def get_landmarks(self, image):
13 | img = dlib.load_rgb_image(image)
14 | dets = self.detector(img, 1)
15 |
16 | for detection in dets:
17 | try:
18 | face_landmarks = [(item.x, item.y) for item in self.shape_predictor(img, detection).parts()]
19 | yield face_landmarks
20 |         except Exception as e:
21 |             print("Exception in get_landmarks():", e)
22 |
--------------------------------------------------------------------------------
/ffhq_dataset/latent_directions/age.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/latent_directions/age.npy
--------------------------------------------------------------------------------
/ffhq_dataset/latent_directions/gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/latent_directions/gender.npy
--------------------------------------------------------------------------------
/ffhq_dataset/latent_directions/smile.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/latent_directions/smile.npy
--------------------------------------------------------------------------------
/ffhq_dataset/latent_representations/donald_trump_01.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/latent_representations/donald_trump_01.npy
--------------------------------------------------------------------------------
/ffhq_dataset/latent_representations/hillary_clinton_01.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/ffhq_dataset/latent_representations/hillary_clinton_01.npy
--------------------------------------------------------------------------------
/gender.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/gender.npy
--------------------------------------------------------------------------------
/id_features.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/id_features.npy
--------------------------------------------------------------------------------
/identity_prediction/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/identity_prediction/__init__.py
--------------------------------------------------------------------------------
/identity_prediction/identity_predict.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import argparse
4 | import os
5 | import sys
6 | import math
7 | import pickle
8 | from sklearn.svm import SVC
9 | from scipy import misc
10 | from six.moves import xrange
11 | import cv2
12 | from sklearn.preprocessing import normalize
13 |
14 | def load_pb(path_to_pb):
15 | with tf.io.gfile.GFile(path_to_pb, "rb") as f:
16 | graph_def = tf.compat.v1.GraphDef()
17 | graph_def.ParseFromString(f.read())
18 | with tf.compat.v1.Graph().as_default() as graph:
19 | tf.import_graph_def(graph_def, name='')
20 | return graph
21 |
22 | def preprocess_img(x):
23 | x = cv2.resize(x, (160, 160))
24 | # mean = np.mean(x)
25 | # std = np.std(x)
26 | # std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
27 | # y = np.multiply(np.subtract(x, mean), 1/std_adj)
28 | y = (np.float32(x) - 127.5) / 128.0
29 | return np.expand_dims(y, 0)
30 |
31 | parser = argparse.ArgumentParser(description='face identity prediction')
32 | parser.add_argument('--model', default='../../model_results/facenet/20180402-114759/20180402-114759.pb', help='path to load model.')
33 | parser.add_argument('--dataset', type=str, default='../../datasets/ffhq_128x128', help='path to the dataset we want to label')
34 | args = parser.parse_args()
35 |
36 | graph = load_pb(args.model)
37 | input = graph.get_tensor_by_name('input:0')
38 | output = graph.get_tensor_by_name('embeddings:0')
39 | phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
40 |
41 | id_features=[]
42 | images_dir = "{0}/img".format(args.dataset)
43 | number_of_images = len(os.listdir(images_dir))
44 | config = tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=True)
45 | config.gpu_options.allow_growth = True
46 |
47 | with tf.compat.v1.Session(graph=graph, config=config) as sess:  # matches the compat.v1 usage above
48 | for i, file in enumerate(sorted(os.listdir(images_dir))):
49 | img = cv2.imread(os.path.join(images_dir, file))
50 | print(file)
51 | embed = sess.run(output, feed_dict={input: preprocess_img(img), phase_train_placeholder: False})
52 | embed=normalize(embed)
53 | embed=embed.reshape((512,))
54 | print(embed.shape)
55 | print(embed[0:10])
56 | id_features.append(embed)
57 |
58 | id_features = np.array(id_features, dtype=np.float32)
59 | print("shape of id_feature is")
60 | print(id_features.shape)
61 | pickle.dump(id_features, open(os.path.join(args.dataset, "id_features.p"), "wb"))
62 |
63 |
64 |
--------------------------------------------------------------------------------
/identity_prediction/test/img/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/identity_prediction/test/img/test.png
--------------------------------------------------------------------------------
/label_ffhq.py:
--------------------------------------------------------------------------------
1 | # Interactive command-line tool for hand-labeling FFHQ images.
2 | from matplotlib import pyplot as plt
3 | import glob
4 | import os
5 | import numpy as np
6 | from PIL import Image
7 | import subprocess
8 |
9 | def tell():  # delete any stale eyeglasses.npy before it is re-saved
10 | if os.path.exists('eyeglasses.npy') and os.path.isfile('eyeglasses.npy'):
11 | subprocess.check_output(['bash', '-c', "rm eyeglasses.npy"])
12 |
13 | if __name__ == '__main__':
14 |     # valid inputs: 1 or 2 = class label, 3 = undo the previous image
15 | labels=[]
16 | folderpath="./datasets/ffhq_128x128"
17 | images=sorted(glob.glob(os.path.join(folderpath,"*.png")))
18 | images_copy=images.copy()
19 | length=len(images)
20 | start=0
21 | if os.path.exists('eyeglasses.npy') and os.path.isfile('eyeglasses.npy'):
22 | tmp=np.load('eyeglasses.npy')
23 | start=tmp.shape[0]
24 | print("original stored num is "+str(start))
25 | labels=list(tmp.reshape((-1,)))
26 | plt.ion() # turn on interactive mode
27 | for i in range(start,len(images)):
28 | img=Image.open(images[i])
29 | plt.imshow(img)
30 | label=None
31 | print("input eyeglasses label to continue for image "+str(i)+" "+os.path.basename(images[i]))
32 | while True:
33 | label= int(input())
34 | if label==1 or label==2:
35 | break
36 | elif label==3:
37 | if i>0:
38 | labels.pop()
39 | img_=Image.open(images_copy[i-1])
40 | plt.imshow(img_)
41 | print("undo previous image! "+images_copy[i-1])
42 | print("input eyeglasses label to continue for image "+str(i-1)+" "+os.path.basename(images_copy[i-1]))
43 | while True:
44 | label= int(input())
45 | if label==1 or label==2:
46 | break
47 | else:
48 | print("input correct eyeglasses label to continue!")
49 | if label==1 or label==2:
50 | labels.append(label)
51 | tell()
52 | np.save('eyeglasses.npy', (np.array(labels)).reshape((-1,1)))
53 | plt.close()
54 | else:
55 | exit(1)
56 | else:
57 | print("input correct eyeglasses label to continue!")
58 | if label==1 or label==2:
59 | labels.append(label)
60 | tell()
61 | np.save('eyeglasses.npy', (np.array(labels)).reshape((-1,1)))
62 | plt.close()
63 | labels=np.array(labels)
64 | labels=labels.reshape((-1,1))
65 | tell()
66 | np.save('eyeglasses.npy', labels)
67 | #1 for female; 2 for male
68 | #1 for no eyeglasses; 2 for eyeglasses
69 |
70 |
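71 | # Labeling protocol: each image is shown in turn; type 1 or 2 to label it, or 3
72 | # to go back and relabel the previous image. Progress is checkpointed to
73 | # eyeglasses.npy after every label, so an interrupted session can resume.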
--------------------------------------------------------------------------------
/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
2 |
--------------------------------------------------------------------------------
/metrics_evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | # empty
9 |
--------------------------------------------------------------------------------
/metrics_evaluation/brisque.py:
--------------------------------------------------------------------------------
1 | from image_quality_metric.Python.libsvm.python.brisquequality import test_measure_BRISQUE
2 | import argparse
3 | import csv
4 | import os
5 | import glob
6 |
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument('--results_dir', '-results_dir', help='batch image beautification results', default='dean_cond_batch16', type=str)
9 | parser.add_argument('--src_dir', '-src_dir', help='original images path', default='dean_cond_batch16', type=str)
10 | parser.add_argument('--final_iteration', '-final_iteration', help='mark the final beautification result', default=572, type=int)
11 | parser.add_argument('--csv_name', '-csv_name', help='csv file name', default='dean_cond_batch16', type=str)
12 |
13 | args = parser.parse_args()
14 |
15 | paths=sorted(glob.glob(os.path.join(args.src_dir,"*.png")))
16 | mean_ori_qualityscore=0
17 | mean_res_qualityscore=0
18 | with open(args.csv_name+ ".csv", mode='w') as f:
19 | writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
20 | writer.writerow(['image_name', 'ori_qualityscore', 'res_qualityscore'])
21 | for path in paths:
22 | name=os.path.basename(path)
23 | # name=name[0:name.find("_")]
24 | name=name[0:name.find(".")]
25 | #These for Beholder-XXXX
26 | result_path=os.path.join(args.results_dir,str(name))
27 | result_path_image=os.path.join(result_path,str(args.final_iteration)+"_0.png")
28 | # result_path_image=os.path.join(result_path,"%04d-0.png" % args.final_iteration)
29 |
30 | #These for InterFaceGAN-XXXX
31 | # result_path_image=os.path.join(args.results_dir,name+"_0.png")
32 |
33 | print(path,name,result_path_image)
34 | # print(path,name,result_path,result_path_image)
35 | # calculate quality score
36 | ori_qualityscore = test_measure_BRISQUE(path)
37 | res_qualityscore = test_measure_BRISQUE(result_path_image)
38 | mean_ori_qualityscore+=ori_qualityscore
39 | mean_res_qualityscore+=res_qualityscore
40 | writer.writerow([name, ori_qualityscore, res_qualityscore])
41 |
42 | with open(args.csv_name+ ".txt", mode='w') as f:
43 | mean_ori_qualityscore=mean_ori_qualityscore/len(paths)
44 | mean_res_qualityscore=mean_res_qualityscore/len(paths)
45 | f.writelines("image num: {};\n".format(len(paths)))
46 | f.writelines("mean_ori_qualityscore: {};\n".format(mean_ori_qualityscore))
47 | f.writelines("mean_res_qualityscore: {};\n".format(mean_res_qualityscore))
--------------------------------------------------------------------------------
/metrics_evaluation/id-0-combined-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.246608031;
3 | mean_res_qualityscore: 12.8986287868;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/id-0-combined-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 0.891222415912;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/id-1-combined-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.221051952444885;
3 | mean_res_qualityscore: 22.541161823635097;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/id-1-combined-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 0.934160476085;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/id-1-combined.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.221051952444853;
3 | mean_res_qualityscore: 12.885734190991407;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | # empty
9 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/COPYRIGHT:
--------------------------------------------------------------------------------
1 |
2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions
7 | are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright
10 | notice, this list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright
13 | notice, this list of conditions and the following disclaimer in the
14 | documentation and/or other materials provided with the distribution.
15 |
16 | 3. Neither name of copyright holders nor the names of its contributors
17 | may be used to endorse or promote products derived from this software
18 | without specific prior written permission.
19 |
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/Makefile:
--------------------------------------------------------------------------------
1 | CXX ?= g++
2 | CFLAGS = -Wall -Wconversion -O3 -fPIC
3 | SHVER = 2
4 | OS = $(shell uname)
5 |
6 | all: svm-train svm-predict svm-scale
7 |
8 | lib: svm.o
9 | if [ "$(OS)" = "Darwin" ]; then \
10 | SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)"; \
11 | else \
12 | SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \
13 | fi; \
14 | $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER)
15 |
16 | svm-predict: svm-predict.c svm.o
17 | $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm
18 | svm-train: svm-train.c svm.o
19 | $(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm
20 | svm-scale: svm-scale.c
21 | $(CXX) $(CFLAGS) svm-scale.c -o svm-scale
22 | svm.o: svm.cpp svm.h
23 | $(CXX) $(CFLAGS) -c svm.cpp
24 | clean:
25 | rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so.$(SHVER)
26 |
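27 | # Usage note: plain "make" builds the svm-train, svm-predict and svm-scale
28 | # binaries; "make lib" additionally builds the shared library libsvm.so.2.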
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/Makefile.win:
--------------------------------------------------------------------------------
1 | #You must ensure nmake.exe, cl.exe, link.exe are in system path.
2 | #VCVARS64.bat
3 | #Under dosbox prompt
4 | #nmake -f Makefile.win
5 |
6 | ##########################################
7 | CXX = cl.exe
8 | CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE
9 | TARGET = windows
10 |
11 | all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib
12 |
13 | $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj
14 | $(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe
15 |
16 | $(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj
17 | $(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe
18 |
19 | $(TARGET)\svm-scale.exe: svm.h svm-scale.c
20 | $(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe
21 |
22 | $(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp
23 | $(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe
24 |
25 | svm.obj: svm.cpp svm.h
26 | $(CXX) $(CFLAGS) -c svm.cpp
27 |
28 | lib: svm.cpp svm.h svm.def
29 | $(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def
30 |
31 | clean:
32 | -erase /Q *.obj $(TARGET)\*.exe $(TARGET)\*.dll $(TARGET)\*.exp $(TARGET)\*.lib
33 |
34 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/__init__.py
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/Makefile:
--------------------------------------------------------------------------------
1 | .SUFFIXES: .class .java
2 | FILES = libsvm/svm.class libsvm/svm_model.class libsvm/svm_node.class \
3 | libsvm/svm_parameter.class libsvm/svm_problem.class \
4 | libsvm/svm_print_interface.class \
5 | svm_train.class svm_predict.class svm_toy.class svm_scale.class
6 |
7 | #JAVAC = jikes
8 | JAVAC_FLAGS = -target 1.7 -source 1.7
9 | JAVAC = javac
10 | # JAVAC_FLAGS =
11 | export CLASSPATH := .:$(CLASSPATH)
12 |
13 | all: $(FILES)
14 | jar cvf libsvm.jar *.class libsvm/*.class
15 |
16 | .java.class:
17 | $(JAVAC) $(JAVAC_FLAGS) $<
18 |
19 | libsvm/svm.java: libsvm/svm.m4
20 | m4 libsvm/svm.m4 > libsvm/svm.java
21 |
22 | clean:
23 | rm -f libsvm/*.class *.class *.jar libsvm/*~ *~ libsvm/svm.java
24 |
25 | dist: clean all
26 | rm *.class libsvm/*.class
27 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm.jar
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm/svm_model.java:
--------------------------------------------------------------------------------
1 | //
2 | // svm_model
3 | //
4 | package libsvm;
5 | public class svm_model implements java.io.Serializable
6 | {
7 | public svm_parameter param; // parameter
8 | public int nr_class; // number of classes, = 2 in regression/one class svm
9 | public int l; // total #SV
10 | public svm_node[][] SV; // SVs (SV[l])
11 | public double[][] sv_coef; // coefficients for SVs in decision functions (sv_coef[k-1][l])
12 | public double[] rho; // constants in decision functions (rho[k*(k-1)/2])
13 | 	public double[] probA;         // pairwise probability information
14 | 	public double[] probB;
15 | 	public int[] sv_indices;       // sv_indices[0,...,nSV-1] are values in [1,...,num_training_data] to indicate SVs in the training set
16 |
17 | // for classification only
18 |
19 | public int[] label; // label of each class (label[k])
20 | public int[] nSV; // number of SVs for each class (nSV[k])
21 | // nSV[0] + nSV[1] + ... + nSV[k-1] = l
22 | };
23 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm/svm_node.java:
--------------------------------------------------------------------------------
1 | package libsvm;
2 | public class svm_node implements java.io.Serializable
3 | {
4 | public int index;
5 | public double value;
6 | }
7 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm/svm_parameter.java:
--------------------------------------------------------------------------------
1 | package libsvm;
2 | public class svm_parameter implements Cloneable,java.io.Serializable
3 | {
4 | /* svm_type */
5 | public static final int C_SVC = 0;
6 | public static final int NU_SVC = 1;
7 | public static final int ONE_CLASS = 2;
8 | public static final int EPSILON_SVR = 3;
9 | public static final int NU_SVR = 4;
10 |
11 | /* kernel_type */
12 | public static final int LINEAR = 0;
13 | public static final int POLY = 1;
14 | public static final int RBF = 2;
15 | public static final int SIGMOID = 3;
16 | public static final int PRECOMPUTED = 4;
17 |
18 | public int svm_type;
19 | public int kernel_type;
20 | public int degree; // for poly
21 | public double gamma; // for poly/rbf/sigmoid
22 | public double coef0; // for poly/sigmoid
23 |
24 | // these are for training only
25 | public double cache_size; // in MB
26 | public double eps; // stopping criteria
27 | public double C; // for C_SVC, EPSILON_SVR and NU_SVR
28 | public int nr_weight; // for C_SVC
29 | public int[] weight_label; // for C_SVC
30 | public double[] weight; // for C_SVC
31 | public double nu; // for NU_SVC, ONE_CLASS, and NU_SVR
32 | public double p; // for EPSILON_SVR
33 | public int shrinking; // use the shrinking heuristics
34 | public int probability; // do probability estimates
35 |
36 | public Object clone()
37 | {
38 | try
39 | {
40 | return super.clone();
41 | } catch (CloneNotSupportedException e)
42 | {
43 | return null;
44 | }
45 | }
46 |
47 | }
48 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm/svm_print_interface.java:
--------------------------------------------------------------------------------
1 | package libsvm;
2 | public interface svm_print_interface
3 | {
4 | public void print(String s);
5 | }
6 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/libsvm/svm_problem.java:
--------------------------------------------------------------------------------
1 | package libsvm;
2 | public class svm_problem implements java.io.Serializable
3 | {
4 | public int l;
5 | public double[] y;
6 | public svm_node[][] x;
7 | }
8 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/java/test_applet.html:
--------------------------------------------------------------------------------
1 | <applet code="svm_toy.class" width=300 height=350 archive="libsvm.jar">
2 | </applet>
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/libsvm.so.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/libsvm.so.2
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/matlab/Makefile:
--------------------------------------------------------------------------------
1 | # This Makefile is used under Linux
2 |
3 | MATLABDIR ?= /usr/local/matlab
4 | # for Mac
5 | # MATLABDIR ?= /opt/local/matlab
6 |
7 | CXX ?= g++
8 | #CXX = g++-4.1
9 | CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..
10 |
11 | MEX = $(MATLABDIR)/bin/mex
12 | MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
13 | # comment the following line if you use MATLAB on 32-bit computer
14 | MEX_OPTION += -largeArrayDims
15 | MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)
16 |
17 | all: matlab
18 |
19 | matlab: binary
20 |
21 | octave:
22 | @echo "please type make under Octave"
23 |
24 | binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)
25 |
26 | svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o
27 | $(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o
28 |
29 | svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o
30 | $(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o
31 |
32 | libsvmread.$(MEX_EXT): libsvmread.c
33 | $(MEX) $(MEX_OPTION) libsvmread.c
34 |
35 | libsvmwrite.$(MEX_EXT): libsvmwrite.c
36 | $(MEX) $(MEX_OPTION) libsvmwrite.c
37 |
38 | svm_model_matlab.o: svm_model_matlab.c ../svm.h
39 | $(CXX) $(CFLAGS) -c svm_model_matlab.c
40 |
41 | ../svm.o: ../svm.cpp ../svm.h
42 | make -C .. svm.o
43 |
44 | clean:
45 | rm -f *~ *.o *.mex* *.obj ../svm.o
46 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/matlab/libsvmwrite.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include "mex.h"
5 |
6 | #ifdef MX_API_VER
7 | #if MX_API_VER < 0x07030000
8 | typedef int mwIndex;
9 | #endif
10 | #endif
11 |
12 | void exit_with_help()
13 | {
14 | mexPrintf(
15 | "Usage: libsvmwrite('filename', label_vector, instance_matrix);\n"
16 | );
17 | }
18 |
19 | static void fake_answer(int nlhs, mxArray *plhs[])
20 | {
21 | int i;
22 | for(i=0;i<nlhs;i++)
23 | plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
87 | if(nlhs > 0)
88 | {
89 | exit_with_help();
90 | fake_answer(nlhs, plhs);
91 | return;
92 | }
93 |
94 | // Transform the input Matrix to libsvm format
95 | if(nrhs == 3)
96 | {
97 | char filename[256];
98 | if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2]))
99 | {
100 | mexPrintf("Error: label vector and instance matrix must be double\n");
101 | return;
102 | }
103 |
104 | mxGetString(prhs[0], filename, mxGetN(prhs[0])+1);
105 |
106 | if(mxIsSparse(prhs[2]))
107 | libsvmwrite(filename, prhs[1], prhs[2]);
108 | else
109 | {
110 | mexPrintf("Instance_matrix must be sparse\n");
111 | return;
112 | }
113 | }
114 | else
115 | {
116 | exit_with_help();
117 | return;
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/matlab/make.m:
--------------------------------------------------------------------------------
1 | % This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
2 | function make()
3 | try
4 | % This part is for OCTAVE
5 | if (exist ('OCTAVE_VERSION', 'builtin'))
6 | mex libsvmread.c
7 | mex libsvmwrite.c
8 | mex -I.. svmtrain.c ../svm.cpp svm_model_matlab.c
9 | mex -I.. svmpredict.c ../svm.cpp svm_model_matlab.c
10 | % This part is for MATLAB
11 | % Add -largeArrayDims on 64-bit machines of MATLAB
12 | else
13 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c
14 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c
15 | mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c
16 | mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c
17 | end
18 | catch err
19 | fprintf('Error: %s failed (line %d)\n', err.stack(1).file, err.stack(1).line);
20 | disp(err.message);
21 | fprintf('=> Please check README for detailed instructions.\n');
22 | end
23 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/matlab/svm_model_matlab.h:
--------------------------------------------------------------------------------
1 | const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model);
2 | struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message);
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/python/Makefile:
--------------------------------------------------------------------------------
1 | all = lib
2 |
3 | lib:
4 | make -C .. lib
5 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/python/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/python/__init__.py
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-predict:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/svm-predict
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-scale:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/svm-scale
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-toy/gtk/Makefile:
--------------------------------------------------------------------------------
1 | CC ?= gcc
2 | CXX ?= g++
3 | CFLAGS = -Wall -O3 -g `pkg-config --cflags gtk+-2.0`
4 | LIBS = `pkg-config --libs gtk+-2.0`
5 |
6 | svm-toy: main.o interface.o callbacks.o ../../svm.o
7 | $(CXX) $(CFLAGS) main.o interface.o callbacks.o ../../svm.o -o svm-toy $(LIBS)
8 |
9 | main.o: main.c
10 | $(CC) $(CFLAGS) -c main.c
11 |
12 | interface.o: interface.c interface.h
13 | $(CC) $(CFLAGS) -c interface.c
14 |
15 | callbacks.o: callbacks.cpp callbacks.h
16 | $(CXX) $(CFLAGS) -c callbacks.cpp
17 |
18 | ../../svm.o: ../../svm.cpp ../../svm.h
19 | make -C ../.. svm.o
20 |
21 | clean:
22 | rm -f *~ callbacks.o svm-toy main.o interface.o callbacks.o ../../svm.o
23 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-toy/gtk/callbacks.h:
--------------------------------------------------------------------------------
1 | #include <gtk/gtk.h>
2 |
3 | #ifdef __cplusplus
4 | extern "C" {
5 | #endif
6 |
7 | void
8 | on_window1_destroy (GtkObject *object,
9 | gpointer user_data);
10 |
11 | gboolean
12 | on_draw_main_button_press_event (GtkWidget *widget,
13 | GdkEventButton *event,
14 | gpointer user_data);
15 |
16 | gboolean
17 | on_draw_main_expose_event (GtkWidget *widget,
18 | GdkEventExpose *event,
19 | gpointer user_data);
20 |
21 | void
22 | on_button_change_clicked (GtkButton *button,
23 | gpointer user_data);
24 |
25 | void
26 | on_button_run_clicked (GtkButton *button,
27 | gpointer user_data);
28 |
29 | void
30 | on_button_clear_clicked (GtkButton *button,
31 | gpointer user_data);
32 |
33 | void
34 | on_button_save_clicked (GtkButton *button,
35 | gpointer user_data);
36 |
37 | void
38 | on_button_load_clicked (GtkButton *button,
39 | gpointer user_data);
40 |
41 | void
42 | on_fileselection_destroy (GtkObject *object,
43 | gpointer user_data);
44 |
45 | void
46 | on_filesel_ok_clicked (GtkButton *button,
47 | gpointer user_data);
48 |
49 | void
50 | on_filesel_cancel_clicked (GtkButton *button,
51 | gpointer user_data);
52 | #ifdef __cplusplus
53 | }
54 | #endif
55 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-toy/gtk/interface.h:
--------------------------------------------------------------------------------
1 | /*
2 | * DO NOT EDIT THIS FILE - it is generated by Glade.
3 | */
4 |
5 | #ifdef __cplusplus
6 | extern "C" {
7 | #endif
8 |
9 | GtkWidget* create_window (void);
10 | GtkWidget* create_fileselection (void);
11 |
12 | #ifdef __cplusplus
13 | }
14 | #endif
15 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-toy/gtk/main.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Initial main.c file generated by Glade. Edit as required.
3 | * Glade will not overwrite this file.
4 | */
5 |
6 | #include <gtk/gtk.h>
7 | #include "interface.h"
8 | void svm_toy_initialize();
9 |
10 | int main (int argc, char *argv[])
11 | {
12 | GtkWidget *window;
13 |
14 | gtk_set_locale ();
15 | gtk_init (&argc, &argv);
16 |
17 | window = create_window ();
18 | gtk_widget_show (window);
19 |
20 | svm_toy_initialize();
21 | gtk_main ();
22 | return 0;
23 | }
24 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-toy/qt/Makefile:
--------------------------------------------------------------------------------
1 | CXX ?= g++
2 | INCLUDE = /usr/include/qt4
3 | CFLAGS = -Wall -O3 -I$(INCLUDE) -I$(INCLUDE)/QtGui -I$(INCLUDE)/QtCore
4 | LIB = -lQtGui -lQtCore
5 | MOC = /usr/bin/moc-qt4
6 |
7 | svm-toy: svm-toy.cpp svm-toy.moc ../../svm.o
8 | $(CXX) $(CFLAGS) svm-toy.cpp ../../svm.o -o svm-toy $(LIB)
9 |
10 | svm-toy.moc: svm-toy.cpp
11 | $(MOC) svm-toy.cpp -o svm-toy.moc
12 |
13 | ../../svm.o: ../../svm.cpp ../../svm.h
14 | make -C ../.. svm.o
15 |
16 | clean:
17 | rm -f *~ svm-toy svm-toy.moc ../../svm.o
18 |
19 |
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm-train:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/svm-train
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm.def:
--------------------------------------------------------------------------------
1 | LIBRARY libsvm
2 | EXPORTS
3 | svm_train @1
4 | svm_cross_validation @2
5 | svm_save_model @3
6 | svm_load_model @4
7 | svm_get_svm_type @5
8 | svm_get_nr_class @6
9 | svm_get_labels @7
10 | svm_get_svr_probability @8
11 | svm_predict_values @9
12 | svm_predict @10
13 | svm_predict_probability @11
14 | svm_free_model_content @12
15 | svm_free_and_destroy_model @13
16 | svm_destroy_param @14
17 | svm_check_parameter @15
18 | svm_check_probability_model @16
19 | svm_set_print_string_function @17
20 | svm_get_sv_indices @18
21 | svm_get_nr_sv @19
22 |
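The exports above mirror the public C API declared in svm.h. As a hedged illustration (the library path and the `allmodel` file name are assumptions, not fixed by this repo), the same symbols can be called from Python via ctypes:

```python
import ctypes

# Assumed path: libsvm.so.2 on Linux, or windows/libsvm.dll built from this svm.def.
lib = ctypes.CDLL("./libsvm.so.2")

# Treat struct svm_model* as an opaque pointer and declare the
# signatures of the exports we use.
lib.svm_load_model.restype = ctypes.c_void_p
lib.svm_load_model.argtypes = [ctypes.c_char_p]
lib.svm_get_nr_class.restype = ctypes.c_int
lib.svm_get_nr_class.argtypes = [ctypes.c_void_p]
lib.svm_free_and_destroy_model.argtypes = [ctypes.POINTER(ctypes.c_void_p)]

model = ctypes.c_void_p(lib.svm_load_model(b"allmodel"))  # hypothetical model file
if model:
    print("classes:", lib.svm_get_nr_class(model))
    lib.svm_free_and_destroy_model(ctypes.byref(model))  # takes svm_model**
```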
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/svm.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/svm.o
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/tools/checkdata.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #
4 | # A format checker for LIBSVM
5 | #
6 |
7 | #
8 | # Copyright (c) 2007, Rong-En Fan
9 | #
10 | # All rights reserved.
11 | #
12 | # This program is distributed under the same license of the LIBSVM package.
13 | #
14 |
15 | from sys import argv, exit
16 | import os.path
17 |
18 | def err(line_no, msg):
19 | print("line {0}: {1}".format(line_no, msg))
20 |
21 | # works like float() but does not accept nan and inf
22 | def my_float(x):
23 | if x.lower().find("nan") != -1 or x.lower().find("inf") != -1:
24 | raise ValueError
25 |
26 | return float(x)
27 |
28 | def main():
29 | if len(argv) != 2:
30 | print("Usage: {0} dataset".format(argv[0]))
31 | exit(1)
32 |
33 | dataset = argv[1]
34 |
35 | if not os.path.exists(dataset):
36 | print("dataset {0} not found".format(dataset))
37 | exit(1)
38 |
39 | line_no = 1
40 | error_line_count = 0
41 | for line in open(dataset, 'r'):
42 | line_error = False
43 |
44 | # each line must end with a newline character
45 | if line[-1] != '\n':
46 | err(line_no, "missing a newline character in the end")
47 | line_error = True
48 |
49 | nodes = line.split()
50 |
51 | # check label
52 | try:
53 | label = nodes.pop(0)
54 |
55 | if label.find(',') != -1:
56 | # multi-label format
57 | try:
58 | for l in label.split(','):
59 | l = my_float(l)
60 | except:
61 | err(line_no, "label {0} is not a valid multi-label form".format(label))
62 | line_error = True
63 | else:
64 | try:
65 | label = my_float(label)
66 | except:
67 | err(line_no, "label {0} is not a number".format(label))
68 | line_error = True
69 | except:
70 | err(line_no, "missing label, perhaps an empty line?")
71 | line_error = True
72 |
73 | # check features
74 | prev_index = -1
75 | for i in range(len(nodes)):
76 | try:
77 | (index, value) = nodes[i].split(':')
78 |
79 | index = int(index)
80 | value = my_float(value)
81 |
82 | # precomputed kernel's index starts from 0 and LIBSVM
83 | # checks it. Hence, don't treat index 0 as an error.
84 | if index < 0:
85 | err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i]))
86 | line_error = True
87 | elif index <= prev_index:
88 | err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i]))
89 | line_error = True
90 | prev_index = index
91 | except:
92 | err(line_no, "feature '{0}' not an <index>:<value> pair, <index> integer, <value> real number ".format(nodes[i]))
93 | line_error = True
94 |
95 | line_no += 1
96 |
97 | if line_error:
98 | error_line_count += 1
99 |
100 | if error_line_count > 0:
101 | print("Found {0} lines with error.".format(error_line_count))
102 | return 1
103 | else:
104 | print("No error.")
105 | return 0
106 |
107 | if __name__ == "__main__":
108 | exit(main())
109 |
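A quick way to see what the checker accepts is to hand-write a tiny dataset in the sparse format it validates (a minimal sketch; `toy.txt` is a made-up file name):

```python
# Each line is: <label> <index>:<value> ... with positive, strictly
# ascending indices, which is exactly what the loop above enforces.
with open("toy.txt", "w") as f:
    f.write("+1 1:0.5 3:1.2\n")
    f.write("-1 2:0.7 4:0.1\n")

# Running `python checkdata.py toy.txt` should then print "No error."
```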
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/tools/easy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | import os
5 | from subprocess import *
6 |
7 | if len(sys.argv) <= 1:
8 | print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
9 | raise SystemExit
10 |
11 | # svm, grid, and gnuplot executable files
12 |
13 | is_win32 = (sys.platform == 'win32')
14 | if not is_win32:
15 | svmscale_exe = "../svm-scale"
16 | svmtrain_exe = "../svm-train"
17 | svmpredict_exe = "../svm-predict"
18 | grid_py = "./grid.py"
19 | gnuplot_exe = "/usr/bin/gnuplot"
20 | else:
21 | # example for windows
22 | svmscale_exe = r"..\windows\svm-scale.exe"
23 | svmtrain_exe = r"..\windows\svm-train.exe"
24 | svmpredict_exe = r"..\windows\svm-predict.exe"
25 | gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
26 | grid_py = r".\grid.py"
27 |
28 | assert os.path.exists(svmscale_exe),"svm-scale executable not found"
29 | assert os.path.exists(svmtrain_exe),"svm-train executable not found"
30 | assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
31 | assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
32 | assert os.path.exists(grid_py),"grid.py not found"
33 |
34 | train_pathname = sys.argv[1]
35 | assert os.path.exists(train_pathname),"training file not found"
36 | file_name = os.path.split(train_pathname)[1]
37 | scaled_file = file_name + ".scale"
38 | model_file = file_name + ".model"
39 | range_file = file_name + ".range"
40 |
41 | if len(sys.argv) > 2:
42 | test_pathname = sys.argv[2]
43 | file_name = os.path.split(test_pathname)[1]
44 | assert os.path.exists(test_pathname),"testing file not found"
45 | scaled_test_file = file_name + ".scale"
46 | predict_test_file = file_name + ".predict"
47 |
48 | cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
49 | print('Scaling training data...')
50 | Popen(cmd, shell = True, stdout = PIPE).communicate()
51 |
52 | cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
53 | print('Cross validation...')
54 | f = Popen(cmd, shell = True, stdout = PIPE).stdout
55 |
56 | line = ''
57 | while True:
58 | last_line = line
59 | line = f.readline()
60 | if not line: break
61 | c,g,rate = map(float,last_line.split())
62 |
63 | print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))
64 |
65 | cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
66 | print('Training...')
67 | Popen(cmd, shell = True, stdout = PIPE).communicate()
68 |
69 | print('Output model: {0}'.format(model_file))
70 | if len(sys.argv) > 2:
71 | cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
72 | print('Scaling testing data...')
73 | Popen(cmd, shell = True, stdout = PIPE).communicate()
74 |
75 | cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
76 | print('Testing...')
77 | Popen(cmd, shell = True).communicate()
78 |
79 | print('Output prediction: {0}'.format(predict_test_file))
80 |
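In short: `python easy.py train.txt [test.txt]` scales the training data, grid-searches c and g through grid.py and gnuplot, trains with the best pair, and, if a test file is given, rescales it with the saved range file and writes predictions to `test.txt.predict`.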
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvm.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvm.dll
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvmread.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvmread.mexw64
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvmwrite.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/libsvmwrite.mexw64
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-predict.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-predict.exe
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-scale.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-scale.exe
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-toy.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-toy.exe
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-train.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svm-train.exe
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svmpredict.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svmpredict.mexw64
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svmtrain.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/image_quality_metric/Python/libsvm/windows/svmtrain.mexw64
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/README.md:
--------------------------------------------------------------------------------
1 | Please see the following [blog post](https://www.learnopencv.com/image-quality-assessment-brisque/) for more details about this code
2 |
3 | ## Installation Instructions
4 | **Python 2.x LIBSVM Installation**
5 | `sudo apt-get install python-libsvm`
6 |
7 | **Python 3.x LIBSVM Installation and C++ LIBSVM Installation**
8 |
9 | For C++ :
10 |
11 | 1. `cd C++/libsvm/`
12 | 2. `cmake .`
13 | 3. `make`
14 |
15 | For Python 3.x :
16 |
17 | 1. `cd Python/libsvm/`
18 | 2. `make`
19 | 3. `cd python`
20 | 4. `make`
21 |
22 | ## Usage
23 |
24 | **Python 2.x**
25 |
26 | 1. `python2 brisquequality.py <image_path>`
27 |
28 | **Python 3.x**
29 |
30 | 1. `cd Python/libsvm/python/`
31 | 2. `python3 brisquequality.py <image_path>`
32 |
33 | **C++**
34 |
35 | 1. `cd C++/`
36 | 2. `./brisquequality <image_path>`
37 |
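For orientation, a rough sketch of what the quality scorer does once LIBSVM is built (the 36-dimensional BRISQUE feature extraction itself lives in `brisquequality.py`; the zero feature vector below is a placeholder, not real output):

```python
from svmutil import svm_load_model, svm_predict  # wrapper built in Python/libsvm/python

model = svm_load_model("allmodel")   # pretrained BRISQUE quality model shipped here
features = [0.0] * 36                # placeholder for the 36 BRISQUE features
labels, _, _ = svm_predict([0], [features], model)
print("quality score:", labels[0])   # lower BRISQUE scores indicate better quality
```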
--------------------------------------------------------------------------------
/metrics_evaluation/image_quality_metric/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # This work is licensed under the Creative Commons Attribution-NonCommercial
4 | # 4.0 International License. To view a copy of this license, visit
5 | # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
7 |
8 | # empty
9 |
--------------------------------------------------------------------------------
/metrics_evaluation/interface-stylegan-0-combined-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 9.212776697602752;
3 | mean_res_qualityscore: 8.966642318802714;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/interface-stylegan-0-combined-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 0.129920677095;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/interface-stylegan-128x128-combined-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.221051952444885;
3 | mean_res_qualityscore: 19.34272609995076;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/interface-stylegan-128x128-combined-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 0.157132699995;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
12 |
13 | ### Context of the issue.
14 | [Provide more detailed introduction to the issue itself and why it is relevant]
15 |
16 | ### Expected behavior.
17 | [Describe what you would expect to have resulted from this process.]
18 |
19 | ### Actual behavior.
20 | [Describe what you currently experience from this process, and thereby explain the bug.]
21 |
22 | ### Steps to reproduce.
23 | [Present a minimal example and steps to produce the issue.]
24 |
25 | ### OS and hardware information.
26 |
27 | + Operating system: [todo]
28 | + Torch version: [todo]
29 | + CPU architecture: [todo]
30 | + GPU type (if using): [todo]
31 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ### What does this PR do?
2 |
3 | ### Where should the reviewer start?
4 |
5 | ### How should this PR be tested?
6 |
7 | ### Any background context you want to provide?
8 |
9 | ### What are the relevant issues?
10 |
11 | [You can link directly to issues by entering # then the
12 | number of the issue, for example, #3 links to issue 3]
13 |
14 | # Screenshots (if appropriate)
15 |
16 | # Questions:
17 |
18 | + Do the docs need to be updated?
19 | + Does this PR add new (Python) dependencies?
20 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.github/stale.yml:
--------------------------------------------------------------------------------
1 | # Number of days of inactivity before an issue becomes stale
2 | daysUntilStale: 60
3 | # Number of days of inactivity before a stale issue is closed
4 | daysUntilClose: 7
5 | # Issues with these labels will never be considered stale
6 | exemptLabels:
7 | - pinned
8 | - security
9 | # Label to use when marking an issue as stale
10 | staleLabel: stale
11 | # Comment to post when marking an issue as stale. Set to `false` to disable
12 | markComment: >
13 | This issue has been automatically marked as stale because it has not had
14 | recent activity. It will be closed if no further activity occurs. Thank you
15 | for your contributions.
16 | # Comment to post when closing a stale issue. Set to `false` to disable
17 | closeComment: false
18 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | _build
3 | plots
4 | work*
5 | *reps
6 |
7 | data
8 | !data/vgg/download-and-align.py
9 | !data/download-lfw-subset.sh
10 |
11 | models/facenet/*.t7
12 | models/dlib/shape_predictor_68_face_landmarks.dat
13 |
14 | *.pyc
15 | *.mp4
16 |
17 | images/examples-aligned
18 |
19 | evaluation/*/
20 | evaluation/attic/*/*.csv
21 | evaluation/attic/*/*.pdf
22 |
23 | demos/web/bower_components
24 | demos/web/unknown*.npy
25 |
26 | models/openface/*.t7
27 | models/openface/*.pkl
28 | celeb-classifier*
29 |
30 | site
31 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "training/torch-TripletEmbedding"]
2 | path = training/torch-TripletEmbedding
3 | url = https://github.com/Atcold/torch-TripletEmbedding
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | services:
3 | - docker
4 |
5 | language: python
6 | python:
7 | - "2.7"
8 |
9 | install:
10 | # - pip install flake8 pep257
11 | - sudo apt-get update -qq
12 | # - sudo apt-get install -y luarocks wget
13 | # - sudo luarocks install luacheck
14 | - docker pull bamos/openface
15 |
16 | script:
17 | # - flake8 --ignore=E402,E501 .
18 | # - pep257 --ignore D104,D203,D400,D402 openface
19 | # - luacheck . --no-global --no-self --exclude-files '*/torch-TripletEmbedding'
20 | - |
21 | docker run -v $PWD:/root/src/openface bamos/openface \
22 | /bin/bash -l -c \
23 | "source /root/torch/install/bin/torch-activate; \
24 | cd /root/src/openface; \
25 | ./models/get-models.sh && \
26 | ./data/download-lfw-subset.sh && \
27 | wget -nv https://cmusatyalab.org/openface-models/nn4.v1.t7 \
28 | -O ./models/openface/nn4.v1.t7 && \
29 | python2 setup.py install && \
30 | ./run-tests.sh"
31 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | Thank you for your interest in contributing to OpenFace!
2 |
3 | Please direct most questions and discussions to the
4 | [cmu-openface group](https://groups.google.com/forum/#!forum/cmu-openface)
5 | or the
6 | [gitter chat](https://gitter.im/cmusatyalab/openface).
7 | The [issue tracker](https://github.com/cmusatyalab/openface/issues)
8 | is only for development discussions and specific bug reports.
9 | For archival and management purposes,
10 | please do not post the same issue in multiple places.
11 |
12 | If you have found a bug in the code, please file an issue with a
13 | [minimal, complete, and verifiable example](http://stackoverflow.com/help/mcve)
14 | as well as your hardware and operating system information
15 | to help us track and reproduce it.
16 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM bamos/ubuntu-opencv-dlib-torch:ubuntu_14.04-opencv_2.4.11-dlib_19.0-torch_2016.07.12
2 | MAINTAINER Brandon Amos
3 |
4 | # TODO: Should be added to opencv-dlib-torch image.
5 | RUN ln -s /root/torch/install/bin/* /usr/local/bin
6 |
7 | RUN apt-get update && apt-get install -y \
8 | curl \
9 | git \
10 | graphicsmagick \
11 | libssl-dev \
12 | libffi-dev \
13 | python-dev \
14 | python-pip \
15 | python-numpy \
16 | python-nose \
17 | python-scipy \
18 | python-pandas \
19 | python-protobuf \
20 | python-openssl \
21 | wget \
22 | zip \
23 | && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
24 |
25 | ADD . /root/openface
26 | RUN python -m pip install --upgrade --force pip
27 | RUN cd ~/openface && \
28 | ./models/get-models.sh && \
29 | pip2 install -r requirements.txt && \
30 | python2 setup.py install && \
31 | pip2 install --user --ignore-installed -r demos/web/requirements.txt && \
32 | pip2 install -r training/requirements.txt
33 |
34 | EXPOSE 8000 9000
35 | CMD /bin/bash -l -c '/root/openface/demos/web/start-servers.sh'
36 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/README.md:
--------------------------------------------------------------------------------
1 | # OpenFace API Docs
2 |
3 | + Create `openface.rst` with `sphinx-apidoc -o . ../openface`.
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/_static/track.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 | try {
3 | var snowplowTracker = Snowplow.getTrackerUrl('joule.isr.cs.cmu.edu:8081');
4 | snowplowTracker.enableLinkTracking();
5 | snowplowTracker.trackPageView();
6 | } catch (err) {}
7 | });
8 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | import sys
4 | import mock
5 | import os
6 |
7 | sys.path.insert(0, os.path.abspath('..'))
8 |
9 | MOCK_MODULES = ['argparse', 'cv2', 'dlib', 'numpy', 'numpy.linalg', 'pandas']
10 | for mod_name in MOCK_MODULES:
11 | sys.modules[mod_name] = mock.MagicMock()
12 |
13 | extensions = [
14 | 'sphinx.ext.autodoc',
15 | 'sphinx.ext.coverage',
16 | 'sphinx.ext.mathjax',
17 | 'sphinx.ext.viewcode',
18 | ]
19 |
20 | autoclass_content = 'both'
21 |
22 | templates_path = ['_templates']
23 |
24 | source_suffix = '.rst'
25 |
26 | # The master toctree document.
27 | master_doc = 'index'
28 |
29 | project = 'OpenFace API Docs'
30 | copyright = '2015-2016, Carnegie Mellon University'
31 | author = 'Carnegie Mellon University'
32 |
33 | version = '0.1.1'
34 | release = '0.1.1'
35 |
36 | language = None
37 |
38 | exclude_patterns = ['_build']
39 |
40 | pygments_style = 'sphinx'
41 |
42 | todo_include_todos = True
43 |
44 |
45 | def setup(app):
46 | app.add_javascript("sp.js")
47 | app.add_javascript("track.js")
48 |
49 | # html_theme = 'alabaster'
50 | html_theme = 'sphinx_rtd_theme'
51 | html_static_path = ['_static']
52 | htmlhelp_basename = 'OpenFacedoc'
53 |
54 | latex_elements = {
55 | 'papersize': 'letterpaper',
56 | 'pointsize': '12pt',
57 | }
58 |
59 | latex_documents = [
60 | (master_doc, 'OpenFace.tex', 'OpenFace Documentation',
61 | 'Carnegie Mellon University', 'manual'),
62 | ]
63 |
64 | man_pages = [
65 | (master_doc, 'openface', 'OpenFace Documentation',
66 | [author], 1)
67 | ]
68 |
69 | texinfo_documents = [
70 | (master_doc, 'OpenFace', 'OpenFace Documentation',
71 | author, 'OpenFace', 'One line description of project.',
72 | 'Miscellaneous'),
73 | ]
74 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/index.rst:
--------------------------------------------------------------------------------
1 | OpenFace API Documentation
2 | ==========================
3 |
4 | + The code is available on GitHub at
5 | `cmusatyalab/openface <https://github.com/cmusatyalab/openface>`_
6 | + The main website is available at
7 | http://cmusatyalab.github.io/openface.
8 |
9 | Contents:
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 |
14 | openface
15 |
16 |
17 | Indices and tables
18 | ==================
19 |
20 | * :ref:`genindex`
21 | * :ref:`modindex`
22 | * :ref:`search`
23 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/openface.rst:
--------------------------------------------------------------------------------
1 | openface package
2 | ================
3 |
4 | openface.AlignDlib class
5 | ------------------------
6 | .. autoclass:: openface.AlignDlib
7 | :members:
8 | :undoc-members:
9 | :show-inheritance:
10 |
11 | openface.TorchNeuralNet class
12 | -----------------------------
13 | .. autoclass:: openface.TorchNeuralNet
14 | :members:
15 | :undoc-members:
16 | :show-inheritance:
17 |
18 |
19 | openface.data module
20 | --------------------
21 |
22 | .. automodule:: openface.data
23 | :members:
24 | :undoc-members:
25 | :show-inheritance:
26 |
27 | openface.helper module
28 | ----------------------
29 |
30 | .. automodule:: openface.helper
31 | :members:
32 | :undoc-members:
33 | :show-inheritance:
34 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/api-docs/requirements.txt:
--------------------------------------------------------------------------------
1 | mock
2 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/batch-represent/batch-represent.lua:
--------------------------------------------------------------------------------
1 | local ffi = require 'ffi'
2 |
3 | local batchNumber, nImgs = 0
4 |
5 | torch.setdefaulttensortype('torch.FloatTensor')
6 |
7 | function batchRepresent()
8 | local loadSize = {3, opt.imgDim, opt.imgDim}
9 | print(opt.data)
10 | local cacheFile = paths.concat(opt.data, 'cache.t7')
11 | print('cache location: ', cacheFile)
12 | local dumpLoader
13 | if paths.filep(cacheFile) then
14 | print('Loading metadata from cache.')
15 | print('If your dataset has changed, delete the cache file.')
16 | dumpLoader = torch.load(cacheFile)
17 | else
18 | print('Creating metadata for cache.')
19 | dumpLoader = dataLoader{
20 | paths = {opt.data},
21 | loadSize = loadSize,
22 | sampleSize = loadSize,
23 | split = 0,
24 | verbose = true
25 | }
26 | torch.save(cacheFile, dumpLoader)
27 | end
28 | collectgarbage()
29 | nImgs = dumpLoader:sizeTest()
30 | print('nImgs: ', nImgs)
31 | assert(nImgs > 0, "Failed to get nImgs")
32 |
33 | batchNumber = 0
34 |
35 | for i=1,math.ceil(nImgs/opt.batchSize) do
36 | local indexStart = (i-1) * opt.batchSize + 1
37 | local indexEnd = math.min(nImgs, indexStart + opt.batchSize - 1)
38 | local batchSz = indexEnd-indexStart+1
39 | local inputs, labels = dumpLoader:get(indexStart, indexEnd)
40 | local paths = {}
41 | for j=indexStart,indexEnd do
42 | table.insert(paths,
43 | ffi.string(dumpLoader.imagePath[dumpLoader.testIndices[j]]:data()))
44 | end
45 | repBatch(paths, inputs, labels, batchSz)
46 | if i % 5 == 0 then
47 | collectgarbage()
48 | end
49 | end
50 |
51 | if opt.cuda then
52 | cutorch.synchronize()
53 | end
54 | end
55 |
56 | function repBatch(paths, inputs, labels, batchSz)
57 | batchNumber = batchNumber + batchSz
58 |
59 | if opt.cuda then
60 | inputs = inputs:cuda()
61 | end
62 | local embeddings = model:forward(inputs):float()
63 | if opt.cuda then
64 | cutorch.synchronize()
65 | end
66 |
67 | if batchSz == 1 then
68 | embeddings = embeddings:reshape(1, embeddings:size(1))
69 | end
70 |
71 | for i=1,batchSz do
72 | labelsCSV:write({labels[i], paths[i]})
73 | repsCSV:write(embeddings[i]:totable())
74 | end
75 |
76 | print(('Represent: %d/%d'):format(batchNumber, nImgs))
77 | end
78 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/batch-represent/main.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 |
3 | require 'torch'
4 | require 'optim'
5 |
6 | require 'paths'
7 |
8 | require 'xlua'
9 | require 'csvigo'
10 |
11 | require 'nn'
12 | require 'dpnn'
13 |
14 | local opts = paths.dofile('opts.lua')
15 |
16 | opt = opts.parse(arg)
17 | print(opt)
18 |
19 | torch.setdefaulttensortype('torch.FloatTensor')
20 |
21 | if opt.cuda then
22 | require 'cutorch'
23 | require 'cunn'
24 | cutorch.setDevice(opt.device)
25 | end
26 |
27 | opt.manualSeed = 2
28 | torch.manualSeed(opt.manualSeed)
29 |
30 | paths.dofile('dataset.lua')
31 | paths.dofile('batch-represent.lua')
32 |
33 | model = torch.load(opt.model)
34 | model:evaluate()
35 | if opt.cuda then
36 | model:cuda()
37 | end
38 |
39 | repsCSV = csvigo.File(paths.concat(opt.outDir, "reps.csv"), 'w')
40 | labelsCSV = csvigo.File(paths.concat(opt.outDir, "labels.csv"), 'w')
41 |
42 | batchRepresent()
43 |
44 | repsCSV:close()
45 | labelsCSV:close()
46 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/batch-represent/opts.lua:
--------------------------------------------------------------------------------
1 | local M = { }
2 |
3 | -- http://stackoverflow.com/questions/6380820/get-containing-path-of-lua-file
4 | function script_path()
5 | local str = debug.getinfo(2, "S").source:sub(2)
6 | return str:match("(.*/)")
7 | end
8 |
9 | function M.parse(arg)
10 | local cmd = torch.CmdLine()
11 | cmd:text()
12 | cmd:text('OpenFace')
13 | cmd:text()
14 | cmd:text('Options:')
15 |
16 | ------------ General options --------------------
17 | cmd:option('-outDir', './reps/', 'Subdirectory to output the representations')
18 | cmd:option('-data',
19 | paths.concat(script_path(), '..', 'data', 'lfw', 'dlib-affine-sz:96'),
20 | 'Home of dataset')
21 | cmd:option('-model',
22 | paths.concat(script_path(), '..', 'models', 'openface', 'nn4.small2.v1.t7'),
23 | 'Path to model to use.')
24 | cmd:option('-imgDim', 96, 'Image dimension. nn1=224, nn4=96')
25 | cmd:option('-batchSize', 50, 'mini-batch size')
26 | cmd:option('-cuda', false, 'Use cuda')
27 | cmd:option('-device', 1, 'Cuda device to use')
28 | cmd:option('-cache', false, 'Cache loaded data.')
29 | cmd:text()
30 |
31 | local opt = cmd:parse(arg or {})
32 | os.execute('mkdir -p ' .. opt.outDir)
33 |
34 | return opt
35 | end
36 |
37 | return M
38 |
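Taken together with main.lua above, these defaults mean a bare `th batch-represent/main.lua` reads aligned 96x96 images from `data/lfw/dlib-affine-sz:96`, embeds them with `models/openface/nn4.small2.v1.t7` in batches of 50 on the CPU, and writes `reps.csv` and `labels.csv` into `./reps/`; pass `-cuda` (and optionally `-device`) to run on a GPU.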
--------------------------------------------------------------------------------
/metrics_evaluation/openface/cloc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cloc batch-represent evaluation openface models training util \
4 | demos/*.py \
5 | demos/web/{*.{py,html,sh},js,css}
6 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/vis-outputs.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 | --
3 | -- Copyright 2015-2016 Carnegie Mellon University
4 | --
5 | -- Licensed under the Apache License, Version 2.0 (the "License");
6 | -- you may not use this file except in compliance with the License.
7 | -- You may obtain a copy of the License at
8 | --
9 | -- http://www.apache.org/licenses/LICENSE-2.0
10 | --
11 | -- Unless required by applicable law or agreed to in writing, software
12 | -- distributed under the License is distributed on an "AS IS" BASIS,
13 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | -- See the License for the specific language governing permissions and
15 | -- limitations under the License.
16 |
17 | require 'torch'
18 | require 'nn'
19 | require 'dpnn'
20 | require 'image'
21 | require 'paths'
22 |
23 | torch.setdefaulttensortype('torch.FloatTensor')
24 |
25 | local cmd = torch.CmdLine()
26 | cmd:text()
27 | cmd:text('Visualize OpenFace outputs.')
28 | cmd:text()
29 | cmd:text('Options:')
30 |
31 | cmd:option('-imgPath', 'images/examples-aligned/lennon-1.png',
32 | 'Path to aligned image.')
33 | cmd:option('-filterOutput',
34 | 'images/examples-aligned/lennon-1',
35 | 'Output directory.')
36 | cmd:option('-model', './models/openface/nn4.small2.v1.t7', 'Path to model.')
37 | cmd:option('-imgDim', 96, 'Image dimension. nn1=224, nn4=96')
38 | cmd:option('-numPreview', 39, 'Number of images to preview')
39 | cmd:text()
40 |
41 | opt = cmd:parse(arg or {})
42 | -- print(opt)
43 |
44 | os.execute('mkdir -p ' .. opt.filterOutput)
45 |
46 | if not paths.filep(opt.imgPath) then
47 | print("Unable to find: " .. opt.imgPath)
48 | os.exit(-1)
49 | end
50 |
51 | net = torch.load(opt.model)
52 | net:evaluate()
53 | print(net)
54 |
55 | local img = torch.Tensor(1, 3, opt.imgDim, opt.imgDim)
56 | local img_orig = image.load(opt.imgPath, 3)
57 | img[1] = image.scale(img_orig, opt.imgDim, opt.imgDim)
58 | net:forward(img)
59 |
60 | local fName = opt.filterOutput .. '/preview.html'
61 | print("Outputting filter preview to '" .. fName .. "'")
62 | f, err = io.open(fName, 'w')
63 | if err then
64 | print("Error: Unable to open preview.html");
65 | os.exit(-1)
66 | end
67 |
68 | torch.IntTensor({3, 7, 10, 11, 12, 13, 14, 15, 16,
69 | 17, 18, 19, 20, 21}):apply(function (i)
70 | os.execute(string.format('mkdir -p %s/%s',
71 | opt.filterOutput, i))
72 | out = net.modules[i].output[1]
73 | f:write(string.format("Layer %s
\n", i))
74 | for j = 1,out:size(1) do
75 | imgName = string.format('%s/%d/%d.png',
76 | opt.filterOutput, i, j)
77 | image.save(imgName, out[j])
78 | if j <= opt.numPreview then
79 | f:write(string.format("
\n",
80 | i, j))
81 | end
82 | end
83 | end)
84 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openface-web-demo",
3 | "version": "0.0.0",
4 | "homepage": "https://github.com/cmusatyalab/openface",
5 | "authors": [
6 | "Brandon Amos "
7 | ],
8 | "license": "Apache",
9 | "ignore": [
10 | "**/.*",
11 | "bower_components"
12 | ],
13 | "dependencies": {
14 | "bootstrap": "3.3.5",
15 | "bootstrap-toggle": "2.2.0",
16 | "font-awesome": "~4.3.0",
17 | "handlebars": "~3.0.0",
18 | "bootstrap3-dialog": "1.35.*",
19 | "underscore": "1.8.3"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/create-cert.sh:
--------------------------------------------------------------------------------
1 | # generate self-signed certs with no password for the web and socket servers
2 | # this script requires that openssl is installed: e.g. sudo apt-get install openssl
3 | mkdir tls
4 | openssl genrsa -des3 -out tls/server.key 1024
5 | openssl req -new -key tls/server.key -out tls/server.csr
6 | cp tls/server.key tls/server.key.org
7 | openssl rsa -in tls/server.key.org -out tls/server.key
8 | openssl x509 -req -days 3650 -in tls/server.csr -signkey tls/server.key -out tls/server.crt
9 | echo 'converting to pem'
10 | cat tls/server.crt tls/server.key > tls/server.pem
11 | echo 'cert complete'
12 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/create-unknown-vectors.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | #
3 | # Copyright 2015-2016 Carnegie Mellon University
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import sys
18 | sys.path.append(".")
19 |
20 | import argparse
21 | import numpy as np
22 | import os
23 | import random
24 |
25 | import cv2
26 |
27 | import openface
28 | from openface.data import iterImgs
29 |
30 | fileDir = os.path.dirname(os.path.realpath(__file__))
31 | modelDir = os.path.join(fileDir, '..', 'models')
32 | dlibModelDir = os.path.join(modelDir, 'dlib')
33 | openfaceModelDir = os.path.join(modelDir, 'openface')
34 |
35 | parser = argparse.ArgumentParser()
36 | parser.add_argument('imgDir', type=str, help="Input image directory.")
37 | parser.add_argument('--numImages', type=int, default=1000)
38 | parser.add_argument('--model', type=str, help="TODO",
39 | default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
40 | parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
41 | default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
42 | parser.add_argument('--outputFile', type=str,
43 | help="Output file, stored in numpy serialized format.",
44 | default="./unknown.npy")
45 | parser.add_argument('--imgDim', type=int, help="Default image size.",
46 | default=96)
47 | args = parser.parse_args()
48 |
49 | align = openface.AlignDlib(args.dlibFacePredictor)
50 | net = openface.TorchNeuralNet(args.model, imgDim=args.imgDim, cuda=False)
51 |
52 |
53 | def getRep(imgPath):
54 | bgrImg = cv2.imread(imgPath)
55 | if bgrImg is None:
56 | return None
57 | rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
58 |
59 | bb = align.getLargestFaceBoundingBox(rgbImg)
60 | if bb is None:
61 | return None
62 |
63 | alignedFace = align.align(args.imgDim, rgbImg, bb)
64 | if alignedFace is None:
65 | return None
66 |
67 | rep = net.forward(alignedFace)
68 | return rep
69 |
70 | if __name__ == '__main__':
71 | allImgs = list(iterImgs(args.imgDir))
72 | imgObjs = random.sample(allImgs, args.numImages)
73 |
74 | reps = []
75 | for imgObj in imgObjs:
76 | rep = getRep(imgObj.path)
77 |
78 | if rep is not None:
79 | reps.append(rep)
80 |
81 | np.save(args.outputFile, np.row_stack(reps))
82 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/css/main.css:
--------------------------------------------------------------------------------
1 | body {
2 | padding: 10px;
3 | height: 100%;
4 | font-family: Georgia, "Times New Roman", Times, serif;
5 | color: #555;
6 | }
7 |
8 | #videoel {
9 | -o-transform : scaleX(-1);
10 | -webkit-transform : scaleX(-1);
11 | transform : scaleX(-1);
12 | -ms-filter : fliph; /*IE*/
13 | filter : fliph; /*IE*/
14 | }
15 |
16 | h1, .h1, h2, .h2, h3, .h3 {
17 | margin-top: 0;
18 | font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
19 | font-weight: normal;
20 | color: #333;
21 | }
22 |
23 | @media (min-width: 1200px) {
24 | .container {
25 | width: 970px;
26 | }
27 | }
28 |
29 | .addPersonDiv{
30 | max-width: 330px;
31 | }
32 |
33 | img {
34 | padding: 0;
35 | margin: 0;
36 | }
37 |
38 | .container .header {
39 | text-align: center;
40 | top: 0;
41 | bottom: 330px;
42 | position: absolute;
43 | right: 0;
44 | left: 0;
45 | margin-left: auto;
46 | margin-right: auto;
47 | margin-bottom: 10px;
48 | width: 80%;
49 | height: 310px;
50 | /* overflow-y: scroll; */
51 | }
52 |
53 | .container .content {
54 | top: 350px;
55 | bottom: 0;
56 | position: absolute;
57 | right: 0;
58 | left: 0;
59 | margin: auto;
60 | width: 80%;
61 | height: auto;
62 | overflow-y: scroll;
63 | }
64 |
65 | a.remove {
66 | color: red;
67 | font-size: 2em;
68 | }
69 |
70 | ul.tabs{
71 | margin: 0px;
72 | padding: 0px;
73 | list-style: none;
74 | }
75 | ul.tabs li{
76 | background: none;
77 | display: inline-block;
78 | padding: 10px 15px;
79 | cursor: pointer;
80 | }
81 |
82 | ul.tabs li.current{
83 | background: #cceeff;
84 | color: #222;
85 | }
86 |
87 | .tab-content{
88 | display: none;
89 | /* background: #ededed; */
90 | /* padding: 15px; */
91 | }
92 |
93 | .tab-content.current{
94 | display: inherit;
95 | }
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/examples.md:
--------------------------------------------------------------------------------
1 | + https://github.com/auduno/clmtrackr
2 | + http://inspirit.github.io/jsfeat/sample_bbf_face.html
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/favicon.ico:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/images/FacerecWebDemo.ai:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/images/FacerecWebDemo.ai
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/install-deps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -x -e
4 |
5 | sudo apt-get update
6 | sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev \
7 | libopencv-dev libhdf5-serial-dev libboost-all-dev libgflags-dev \
8 | libgoogle-glog-dev liblmdb-dev protobuf-compiler libboost-all-dev \
9 | libatlas-dev libatlas-base-dev liblapack-dev libblas-dev \
10 | libssl-dev libffi-dev python-pip python-numpy python-imaging \
11 | python-openssl python-opencv \
12 | git wget cmake gfortran
13 |
14 | mkdir -p ~/src
15 | cd ~/src
16 | git clone https://github.com/bvlc/caffe.git
17 | wget https://github.com/davisking/dlib/releases/download/v18.16/dlib-18.16.tar.bz2
18 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/js/utils.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Provides requestAnimationFrame in a cross browser way.
3 | */
4 | window.requestAnimFrame = (function() {
5 | return window.requestAnimationFrame ||
6 | window.webkitRequestAnimationFrame ||
7 | window.mozRequestAnimationFrame ||
8 | window.oRequestAnimationFrame ||
9 | window.msRequestAnimationFrame ||
10 | function(/* function FrameRequestCallback */ callback, /* DOMElement Element */ element) {
11 | return window.setTimeout(callback, 1000/60);
12 | };
13 | })();
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/requirements.txt:
--------------------------------------------------------------------------------
1 | autobahn == 0.10.4
2 | imagehash == 1.0
3 | twisted == 15.2.1
4 | scipy >= 0.13, < 0.17
5 | scikit-learn >= 0.17, < 0.18
6 | protobuf >= 2.5, < 2.7
7 | appdirs >= 1.4.3
8 | pyOpenSSL >= 17.0.0
9 | cryptography >= 1.8.1
10 | service-identity >= 16.0.0
11 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/simpleSSLServer.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import BaseHTTPServer
3 | import SimpleHTTPServer
4 | import ssl
5 | import sys
6 |
7 |
8 | '''Adapted from https://www.piware.de/2011/01/creating-an-https-server-in-python/'''
9 |
10 |
11 | def main(port):
12 | httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', port), SimpleHTTPServer.SimpleHTTPRequestHandler)
13 | httpd.socket = ssl.wrap_socket(httpd.socket, certfile='tls/server.pem', server_side=True)
14 | print('now serving tls http on port:', port)
15 | httpd.serve_forever()
16 |
17 | if __name__ == '__main__':
18 | main(int(sys.argv[1]))
19 |
--------------------------------------------------------------------------------
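`BaseHTTPServer` and `SimpleHTTPServer` above are Python 2 modules, and `ssl.wrap_socket` has since been deprecated. A minimal Python 3 sketch of the same TLS static-file server, assuming the same `tls/server.pem` layout (not part of the repository):

```python
# Python 3 sketch of simpleSSLServer.py; assumes tls/server.pem holds
# both the certificate and the private key, as in the repository.
import ssl
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler


def main(port):
    httpd = HTTPServer(('0.0.0.0', port), SimpleHTTPRequestHandler)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain('tls/server.pem')
    # Wrap the listening socket so every accepted connection is served over TLS.
    httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
    print('now serving tls http on port:', port)
    httpd.serve_forever()


if __name__ == '__main__':
    main(int(sys.argv[1]))
```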
/metrics_evaluation/openface/demos/web/start-servers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -u
4 |
5 | function die { echo "$1"; exit 42; }
6 |
7 | HTTP_PORT=8000
8 | WEBSOCKET_PORT=9000
9 |
10 | case $# in
11 | 0) ;;
12 | 1) HTTP_PORT=$1
13 | ;;
14 | 2) HTTP_PORT=$1; WEBSOCKET_PORT=$2
15 | ;;
16 | *) die "Usage: $0 <http-port> <websocket-port>"
17 | ;;
18 | esac
19 |
20 | cd $(dirname $0)
21 | trap 'kill $(jobs -p)' EXIT
22 |
23 | cat <<EOF
24 |
25 | If you're running on the same computer as the servers,
26 | access the demo at https://localhost:$HTTP_PORT.
32 | If you're running on a remote computer, find the IP address
33 | and use https://<remote-ip>:$HTTP_PORT.
34 |
35 | WARNING: Chromium will warn on self-signed certificates. Please accept the certificate
36 | and reload the app.
37 |
38 | EOF
39 |
40 | WEBSOCKET_LOG='/tmp/openface.websocket.log'
41 | printf "WebSocket Server: Logging to '%s'\n\n" $WEBSOCKET_LOG
42 |
43 | python2 simpleSSLServer.py $HTTP_PORT &> /dev/null &
44 |
45 | cd ../../ # Root OpenFace directory.
46 | ./demos/web/websocket-server.py --port $WEBSOCKET_PORT 2>&1 | tee $WEBSOCKET_LOG &
47 |
48 | wait
49 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/tls/server.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICATCCAWoCCQC0Yl1TUb3gjzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
3 | VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
4 | cyBQdHkgTHRkMB4XDTE3MDQzMDE2MDIxNFoXDTI3MDQyODE2MDIxNFowRTELMAkG
5 | A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
6 | IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEApnSL
7 | fpgnKHKCNypUxedbBMik02B40zlK5jQUqAt8ityNYM4DxZV2pOwS4RLfgDrWfLKV
8 | kOiBL+2iJmNWtc8fcU/4MnhAUCgYXvl+o3yFu8EVOLU+FXhlqJRAJOpqESMVa+II
9 | haXDSuLLnSA0e/UrxhDmWEiTGAkteWPLyEP7G6kCAwEAATANBgkqhkiG9w0BAQsF
10 | AAOBgQAwYgs2CrrCoknDs2p2bS/sEBc/cAWxlB3VA0yQXTAxh+6rLOYLwoF+z92w
11 | IbUhUkZss1r0k7zZDBZ32ZEB6Hc0+q4r599UVV3gF/2Ongc6rvtzJtRAv5EZza0d
12 | l3aaZ0aPu09XuDqv9cb/g+i/L7RgQgoEiEpK60WoTm9FeJ4Fpw==
13 | -----END CERTIFICATE-----
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/tls/server.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICWwIBAAKBgQCmdIt+mCcocoI3KlTF51sEyKTTYHjTOUrmNBSoC3yK3I1gzgPF
3 | lXak7BLhEt+AOtZ8spWQ6IEv7aImY1a1zx9xT/gyeEBQKBhe+X6jfIW7wRU4tT4V
4 | eGWolEAk6moRIxVr4giFpcNK4sudIDR79SvGEOZYSJMYCS15Y8vIQ/sbqQIDAQAB
5 | AoGAAM7D9oNKfVnA4/+ilas/t9A5bIUlUPEQOfm6t+4GVq4nSXb2cbj98GLs3Ia4
6 | 6uheLhC3xRI7vj3K8aC9xPgSUPpvdqEfef+SlfC7/lcHdtIfz1Fm2qtGdUERw2TC
7 | Iy1ttU58sDLK5dy1Igx9SeIPGMHCWemDw4CA0HVaplCIrPkCQQDRWl0HouUquzNd
8 | 7i6kk2uNKvj0Hdft5tGNdSk9diJU2d5kLravwXKxq9cFkoZ5g8bgxjGrdnguNO4y
9 | bcv/fN0LAkEAy4tED+0Etg0PLIXuYpHUjy5SGYpykaNx+Rfktv2lF5Uf2aDnh6Pv
10 | DObQEYF1NAZVcT8BsLGKta9RGFL7UJOSmwJAS3fgu2T8abgMH1tCUy+VgNEx54Zu
11 | laM0fWLz1+UjISVc5w5z6s24k9XXcHnOojVf1x17QE03q6iHCYTNGi+f2wJAXgfk
12 | VYclmgTGcccdraO5ErxPaUUwUF+1k2GaY38h+ZcGs79Ftr/g+5DVpoCr6HDUoBB/
13 | c2VRs0VerWIIf9zs6QJAI0M7qCsyLw9z3wfMt8uZjGLokeSet9+LarJyRFkDVFow
14 | PBHMPvgU1+no5L+4A61cB9azn9zkIvchI2bSG0Ubgg==
15 | -----END RSA PRIVATE KEY-----
16 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/tls/server.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICATCCAWoCCQC0Yl1TUb3gjzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJB
3 | VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
4 | cyBQdHkgTHRkMB4XDTE3MDQzMDE2MDIxNFoXDTI3MDQyODE2MDIxNFowRTELMAkG
5 | A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
6 | IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEApnSL
7 | fpgnKHKCNypUxedbBMik02B40zlK5jQUqAt8ityNYM4DxZV2pOwS4RLfgDrWfLKV
8 | kOiBL+2iJmNWtc8fcU/4MnhAUCgYXvl+o3yFu8EVOLU+FXhlqJRAJOpqESMVa+II
9 | haXDSuLLnSA0e/UrxhDmWEiTGAkteWPLyEP7G6kCAwEAATANBgkqhkiG9w0BAQsF
10 | AAOBgQAwYgs2CrrCoknDs2p2bS/sEBc/cAWxlB3VA0yQXTAxh+6rLOYLwoF+z92w
11 | IbUhUkZss1r0k7zZDBZ32ZEB6Hc0+q4r599UVV3gF/2Ongc6rvtzJtRAv5EZza0d
12 | l3aaZ0aPu09XuDqv9cb/g+i/L7RgQgoEiEpK60WoTm9FeJ4Fpw==
13 | -----END CERTIFICATE-----
14 | -----BEGIN RSA PRIVATE KEY-----
15 | MIICWwIBAAKBgQCmdIt+mCcocoI3KlTF51sEyKTTYHjTOUrmNBSoC3yK3I1gzgPF
16 | lXak7BLhEt+AOtZ8spWQ6IEv7aImY1a1zx9xT/gyeEBQKBhe+X6jfIW7wRU4tT4V
17 | eGWolEAk6moRIxVr4giFpcNK4sudIDR79SvGEOZYSJMYCS15Y8vIQ/sbqQIDAQAB
18 | AoGAAM7D9oNKfVnA4/+ilas/t9A5bIUlUPEQOfm6t+4GVq4nSXb2cbj98GLs3Ia4
19 | 6uheLhC3xRI7vj3K8aC9xPgSUPpvdqEfef+SlfC7/lcHdtIfz1Fm2qtGdUERw2TC
20 | Iy1ttU58sDLK5dy1Igx9SeIPGMHCWemDw4CA0HVaplCIrPkCQQDRWl0HouUquzNd
21 | 7i6kk2uNKvj0Hdft5tGNdSk9diJU2d5kLravwXKxq9cFkoZ5g8bgxjGrdnguNO4y
22 | bcv/fN0LAkEAy4tED+0Etg0PLIXuYpHUjy5SGYpykaNx+Rfktv2lF5Uf2aDnh6Pv
23 | DObQEYF1NAZVcT8BsLGKta9RGFL7UJOSmwJAS3fgu2T8abgMH1tCUy+VgNEx54Zu
24 | laM0fWLz1+UjISVc5w5z6s24k9XXcHnOojVf1x17QE03q6iHCYTNGi+f2wJAXgfk
25 | VYclmgTGcccdraO5ErxPaUUwUF+1k2GaY38h+ZcGs79Ftr/g+5DVpoCr6HDUoBB/
26 | c2VRs0VerWIIf9zs6QJAI0M7qCsyLw9z3wfMt8uZjGLokeSet9+LarJyRFkDVFow
27 | PBHMPvgU1+no5L+4A61cB9azn9zkIvchI2bSG0Ubgg==
28 | -----END RSA PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/update-vendor-deps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -x -e
4 |
5 | bower install
6 |
7 | rm -rf vendor
8 | mkdir -p vendor/{css,fonts,js}
9 |
10 | cp -r bower_components/bootstrap/dist/* vendor
11 | cp -r bower_components/bootstrap-toggle/css/*.min.css* vendor/css
12 | cp -r bower_components/bootstrap-toggle/js/*.min.js* vendor/js
13 | cp -r bower_components/bootstrap3-dialog/dist/* vendor
14 | cp -r bower_components/font-awesome/* vendor
15 |
16 | cp bower_components/jquery/dist/* vendor/js
17 | cp bower_components/handlebars/handlebars.min.js vendor/js
18 | cp bower_components/underscore/underscore-min.js vendor/js
19 |
20 | wget https://raw.githubusercontent.com/jstat/jstat/1.3.0/dist/jstat.min.js -P vendor/js
21 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "font-awesome",
3 | "description": "Font Awesome",
4 | "version": "4.3.0",
5 | "keywords": [],
6 | "homepage": "http://fontawesome.io",
7 | "dependencies": {},
8 | "devDependencies": {},
9 | "license": ["OFL-1.1", "MIT", "CC-BY-3.0"],
10 | "main": [
11 | "./css/font-awesome.css",
12 | "./fonts/*"
13 | ],
14 | "ignore": [
15 | "*/.*",
16 | "*.json",
17 | "src",
18 | "*.yml",
19 | "Gemfile",
20 | "Gemfile.lock",
21 | "*.md"
22 | ]
23 | }
24 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/css/bootstrap-dialog.css:
--------------------------------------------------------------------------------
1 | .bootstrap-dialog {
2 | /* dialog types */
3 | /**
4 | * Icon animation
5 | * Copied from font-awesome: http://fontawesome.io/
6 | **/
7 | /** End of icon animation **/
8 | }
9 | .bootstrap-dialog .modal-header {
10 | border-top-left-radius: 4px;
11 | border-top-right-radius: 4px;
12 | }
13 | .bootstrap-dialog .bootstrap-dialog-title {
14 | color: #fff;
15 | display: inline-block;
16 | font-size: 16px;
17 | }
18 | .bootstrap-dialog .bootstrap-dialog-message {
19 | font-size: 14px;
20 | }
21 | .bootstrap-dialog .bootstrap-dialog-button-icon {
22 | margin-right: 3px;
23 | }
24 | .bootstrap-dialog .bootstrap-dialog-close-button {
25 | font-size: 20px;
26 | float: right;
27 | filter: alpha(opacity=90);
28 | -moz-opacity: 0.9;
29 | -khtml-opacity: 0.9;
30 | opacity: 0.9;
31 | }
32 | .bootstrap-dialog .bootstrap-dialog-close-button:hover {
33 | cursor: pointer;
34 | filter: alpha(opacity=100);
35 | -moz-opacity: 1;
36 | -khtml-opacity: 1;
37 | opacity: 1;
38 | }
39 | .bootstrap-dialog.type-default .modal-header {
40 | background-color: #fff;
41 | }
42 | .bootstrap-dialog.type-default .bootstrap-dialog-title {
43 | color: #333;
44 | }
45 | .bootstrap-dialog.type-info .modal-header {
46 | background-color: #5bc0de;
47 | }
48 | .bootstrap-dialog.type-primary .modal-header {
49 | background-color: #428bca;
50 | }
51 | .bootstrap-dialog.type-success .modal-header {
52 | background-color: #5cb85c;
53 | }
54 | .bootstrap-dialog.type-warning .modal-header {
55 | background-color: #f0ad4e;
56 | }
57 | .bootstrap-dialog.type-danger .modal-header {
58 | background-color: #d9534f;
59 | }
60 | .bootstrap-dialog.size-large .bootstrap-dialog-title {
61 | font-size: 24px;
62 | }
63 | .bootstrap-dialog.size-large .bootstrap-dialog-close-button {
64 | font-size: 30px;
65 | }
66 | .bootstrap-dialog.size-large .bootstrap-dialog-message {
67 | font-size: 18px;
68 | }
69 | .bootstrap-dialog .icon-spin {
70 | display: inline-block;
71 | -moz-animation: spin 2s infinite linear;
72 | -o-animation: spin 2s infinite linear;
73 | -webkit-animation: spin 2s infinite linear;
74 | animation: spin 2s infinite linear;
75 | }
76 | @-moz-keyframes spin {
77 | 0% {
78 | -moz-transform: rotate(0deg);
79 | }
80 | 100% {
81 | -moz-transform: rotate(359deg);
82 | }
83 | }
84 | @-webkit-keyframes spin {
85 | 0% {
86 | -webkit-transform: rotate(0deg);
87 | }
88 | 100% {
89 | -webkit-transform: rotate(359deg);
90 | }
91 | }
92 | @-o-keyframes spin {
93 | 0% {
94 | -o-transform: rotate(0deg);
95 | }
96 | 100% {
97 | -o-transform: rotate(359deg);
98 | }
99 | }
100 | @-ms-keyframes spin {
101 | 0% {
102 | -ms-transform: rotate(0deg);
103 | }
104 | 100% {
105 | -ms-transform: rotate(359deg);
106 | }
107 | }
108 | @keyframes spin {
109 | 0% {
110 | transform: rotate(0deg);
111 | }
112 | 100% {
113 | transform: rotate(359deg);
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/css/bootstrap-dialog.min.css:
--------------------------------------------------------------------------------
1 | .bootstrap-dialog .modal-header{border-top-left-radius:4px;border-top-right-radius:4px}.bootstrap-dialog .bootstrap-dialog-title{color:#fff;display:inline-block;font-size:16px}.bootstrap-dialog .bootstrap-dialog-message{font-size:14px}.bootstrap-dialog .bootstrap-dialog-button-icon{margin-right:3px}.bootstrap-dialog .bootstrap-dialog-close-button{font-size:20px;float:right;filter:alpha(opacity=90);-moz-opacity:.9;-khtml-opacity:.9;opacity:.9}.bootstrap-dialog .bootstrap-dialog-close-button:hover{cursor:pointer;filter:alpha(opacity=100);-moz-opacity:1;-khtml-opacity:1;opacity:1}.bootstrap-dialog.type-default .modal-header{background-color:#fff}.bootstrap-dialog.type-default .bootstrap-dialog-title{color:#333}.bootstrap-dialog.type-info .modal-header{background-color:#5bc0de}.bootstrap-dialog.type-primary .modal-header{background-color:#428bca}.bootstrap-dialog.type-success .modal-header{background-color:#5cb85c}.bootstrap-dialog.type-warning .modal-header{background-color:#f0ad4e}.bootstrap-dialog.type-danger .modal-header{background-color:#d9534f}.bootstrap-dialog.size-large .bootstrap-dialog-title{font-size:24px}.bootstrap-dialog.size-large .bootstrap-dialog-close-button{font-size:30px}.bootstrap-dialog.size-large .bootstrap-dialog-message{font-size:18px}.bootstrap-dialog .icon-spin{display:inline-block;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;-webkit-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@-ms-keyframes spin{0%{-ms-transform:rotate(0deg)}100%{-ms-transform:rotate(359deg)}}@keyframes spin{0%{transform:rotate(0deg)}100%{transform:rotate(359deg)}}
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/css/bootstrap-toggle.min.css:
--------------------------------------------------------------------------------
1 | /*! ========================================================================
2 | * Bootstrap Toggle: bootstrap-toggle.css v2.2.0
3 | * http://www.bootstraptoggle.com
4 | * ========================================================================
5 | * Copyright 2014 Min Hur, The New York Times Company
6 | * Licensed under MIT
7 | * ======================================================================== */
8 | .checkbox label .toggle,.checkbox-inline .toggle{margin-left:-20px;margin-right:5px}
9 | .toggle{position:relative;overflow:hidden}
10 | .toggle input[type=checkbox]{display:none}
11 | .toggle-group{position:absolute;width:200%;top:0;bottom:0;left:0;transition:left .35s;-webkit-transition:left .35s;-moz-user-select:none;-webkit-user-select:none}
12 | .toggle.off .toggle-group{left:-100%}
13 | .toggle-on{position:absolute;top:0;bottom:0;left:0;right:50%;margin:0;border:0;border-radius:0}
14 | .toggle-off{position:absolute;top:0;bottom:0;left:50%;right:0;margin:0;border:0;border-radius:0}
15 | .toggle-handle{position:relative;margin:0 auto;padding-top:0;padding-bottom:0;height:100%;width:0;border-width:0 1px}
16 | .toggle.btn{min-width:59px;min-height:34px}
17 | .toggle-on.btn{padding-right:24px}
18 | .toggle-off.btn{padding-left:24px}
19 | .toggle.btn-lg{min-width:79px;min-height:45px}
20 | .toggle-on.btn-lg{padding-right:31px}
21 | .toggle-off.btn-lg{padding-left:31px}
22 | .toggle-handle.btn-lg{width:40px}
23 | .toggle.btn-sm{min-width:50px;min-height:30px}
24 | .toggle-on.btn-sm{padding-right:20px}
25 | .toggle-off.btn-sm{padding-left:20px}
26 | .toggle.btn-xs{min-width:35px;min-height:22px}
27 | .toggle-on.btn-xs{padding-right:12px}
28 | .toggle-off.btn-xs{padding-left:12px}
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/css/bootstrap2-toggle.min.css:
--------------------------------------------------------------------------------
1 | /*! ========================================================================
2 | * Bootstrap Toggle: bootstrap2-toggle.css v2.2.0
3 | * http://www.bootstraptoggle.com
4 | * ========================================================================
5 | * Copyright 2014 Min Hur, The New York Times Company
6 | * Licensed under MIT
7 | * ======================================================================== */
8 | label.checkbox .toggle,label.checkbox.inline .toggle{margin-left:-20px;margin-right:5px}
9 | .toggle{min-width:40px;height:20px;position:relative;overflow:hidden}
10 | .toggle input[type=checkbox]{display:none}
11 | .toggle-group{position:absolute;width:200%;top:0;bottom:0;left:0;transition:left .35s;-webkit-transition:left .35s;-moz-user-select:none;-webkit-user-select:none}
12 | .toggle.off .toggle-group{left:-100%}
13 | .toggle-on{position:absolute;top:0;bottom:0;left:0;right:50%;margin:0;border:0;border-radius:0}
14 | .toggle-off{position:absolute;top:0;bottom:0;left:50%;right:0;margin:0;border:0;border-radius:0}
15 | .toggle-handle{position:relative;margin:0 auto;padding-top:0;padding-bottom:0;height:100%;width:0;border-width:0 1px}
16 | .toggle-handle.btn-mini{top:-1px}
17 | .toggle.btn{min-width:30px}
18 | .toggle-on.btn{padding-right:24px}
19 | .toggle-off.btn{padding-left:24px}
20 | .toggle.btn-large{min-width:40px}
21 | .toggle-on.btn-large{padding-right:35px}
22 | .toggle-off.btn-large{padding-left:35px}
23 | .toggle.btn-small{min-width:25px}
24 | .toggle-on.btn-small{padding-right:20px}
25 | .toggle-off.btn-small{padding-left:20px}
26 | .toggle.btn-mini{min-width:20px}
27 | .toggle-on.btn-mini{padding-right:12px}
28 | .toggle-off.btn-mini{padding-left:12px}
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/FontAwesome.otf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/FontAwesome.otf
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.eot
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.ttf
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.woff
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/fontawesome-webfont.woff2
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.eot
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.ttf
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.woff
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/demos/web/vendor/fonts/glyphicons-halflings-regular.woff2
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/js/npm.js:
--------------------------------------------------------------------------------
1 | // This file is autogenerated via the `commonjs` Grunt task. You can require() this file in a CommonJS environment.
2 | require('../../js/transition.js')
3 | require('../../js/alert.js')
4 | require('../../js/button.js')
5 | require('../../js/carousel.js')
6 | require('../../js/collapse.js')
7 | require('../../js/dropdown.js')
8 | require('../../js/modal.js')
9 | require('../../js/tooltip.js')
10 | require('../../js/popover.js')
11 | require('../../js/scrollspy.js')
12 | require('../../js/tab.js')
13 | require('../../js/affix.js')
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/js/ping.min.js:
--------------------------------------------------------------------------------
1 | /*
2 | * ping.js - v0.0.1
3 | * Ping Utilities in Javascript
4 | * http://github.com/alfg/ping.js
5 | *
6 | * Made by Alfred Gutierrez
7 | * Under MIT License
8 | */
9 | var Ping=function(){this._version="0.0.1"};Ping.prototype.ping=function(a,b){this.img=new Image;var c=new Date,d=0;this.img.onload=function(){e()},this.img.onerror=function(){e()};var e=function(){var a=new Date-c;d=a,console.log(a),"function"==typeof b&&b(a)};this.img.src="//"+a+"/?"+(new Date).getTime()};
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/animated.less:
--------------------------------------------------------------------------------
1 | // Animated Icons
2 | // --------------------------
3 |
4 | .@{fa-css-prefix}-spin {
5 | -webkit-animation: fa-spin 2s infinite linear;
6 | animation: fa-spin 2s infinite linear;
7 | }
8 |
9 | .@{fa-css-prefix}-pulse {
10 | -webkit-animation: fa-spin 1s infinite steps(8);
11 | animation: fa-spin 1s infinite steps(8);
12 | }
13 |
14 | @-webkit-keyframes fa-spin {
15 | 0% {
16 | -webkit-transform: rotate(0deg);
17 | transform: rotate(0deg);
18 | }
19 | 100% {
20 | -webkit-transform: rotate(359deg);
21 | transform: rotate(359deg);
22 | }
23 | }
24 |
25 | @keyframes fa-spin {
26 | 0% {
27 | -webkit-transform: rotate(0deg);
28 | transform: rotate(0deg);
29 | }
30 | 100% {
31 | -webkit-transform: rotate(359deg);
32 | transform: rotate(359deg);
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/bordered-pulled.less:
--------------------------------------------------------------------------------
1 | // Bordered & Pulled
2 | // -------------------------
3 |
4 | .@{fa-css-prefix}-border {
5 | padding: .2em .25em .15em;
6 | border: solid .08em @fa-border-color;
7 | border-radius: .1em;
8 | }
9 |
10 | .pull-right { float: right; }
11 | .pull-left { float: left; }
12 |
13 | .@{fa-css-prefix} {
14 | &.pull-left { margin-right: .3em; }
15 | &.pull-right { margin-left: .3em; }
16 | }
17 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/core.less:
--------------------------------------------------------------------------------
1 | // Base Class Definition
2 | // -------------------------
3 |
4 | .@{fa-css-prefix} {
5 | display: inline-block;
6 | font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration
7 | font-size: inherit; // can't have font-size inherit on line above, so need to override
8 | text-rendering: auto; // optimizelegibility throws things off #1094
9 | -webkit-font-smoothing: antialiased;
10 | -moz-osx-font-smoothing: grayscale;
11 | transform: translate(0, 0); // ensures no half-pixel rendering in firefox
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/fixed-width.less:
--------------------------------------------------------------------------------
1 | // Fixed Width Icons
2 | // -------------------------
3 | .@{fa-css-prefix}-fw {
4 | width: (18em / 14);
5 | text-align: center;
6 | }
7 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/font-awesome.less:
--------------------------------------------------------------------------------
1 | /*!
2 | * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome
3 | * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
4 | */
5 |
6 | @import "variables.less";
7 | @import "mixins.less";
8 | @import "path.less";
9 | @import "core.less";
10 | @import "larger.less";
11 | @import "fixed-width.less";
12 | @import "list.less";
13 | @import "bordered-pulled.less";
14 | @import "animated.less";
15 | @import "rotated-flipped.less";
16 | @import "stacked.less";
17 | @import "icons.less";
18 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/larger.less:
--------------------------------------------------------------------------------
1 | // Icon Sizes
2 | // -------------------------
3 |
4 | /* makes the font 33% larger relative to the icon container */
5 | .@{fa-css-prefix}-lg {
6 | font-size: (4em / 3);
7 | line-height: (3em / 4);
8 | vertical-align: -15%;
9 | }
10 | .@{fa-css-prefix}-2x { font-size: 2em; }
11 | .@{fa-css-prefix}-3x { font-size: 3em; }
12 | .@{fa-css-prefix}-4x { font-size: 4em; }
13 | .@{fa-css-prefix}-5x { font-size: 5em; }
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/list.less:
--------------------------------------------------------------------------------
1 | // List Icons
2 | // -------------------------
3 |
4 | .@{fa-css-prefix}-ul {
5 | padding-left: 0;
6 | margin-left: @fa-li-width;
7 | list-style-type: none;
8 | > li { position: relative; }
9 | }
10 | .@{fa-css-prefix}-li {
11 | position: absolute;
12 | left: -@fa-li-width;
13 | width: @fa-li-width;
14 | top: (2em / 14);
15 | text-align: center;
16 | &.@{fa-css-prefix}-lg {
17 | left: (-@fa-li-width + (4em / 14));
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/mixins.less:
--------------------------------------------------------------------------------
1 | // Mixins
2 | // --------------------------
3 |
4 | .fa-icon() {
5 | display: inline-block;
6 | font: normal normal normal @fa-font-size-base/1 FontAwesome; // shortening font declaration
7 | font-size: inherit; // can't have font-size inherit on line above, so need to override
8 | text-rendering: auto; // optimizelegibility throws things off #1094
9 | -webkit-font-smoothing: antialiased;
10 | -moz-osx-font-smoothing: grayscale;
11 | transform: translate(0, 0); // ensures no half-pixel rendering in firefox
12 |
13 | }
14 |
15 | .fa-icon-rotate(@degrees, @rotation) {
16 | filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation);
17 | -webkit-transform: rotate(@degrees);
18 | -ms-transform: rotate(@degrees);
19 | transform: rotate(@degrees);
20 | }
21 |
22 | .fa-icon-flip(@horiz, @vert, @rotation) {
23 | filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation, mirror=1);
24 | -webkit-transform: scale(@horiz, @vert);
25 | -ms-transform: scale(@horiz, @vert);
26 | transform: scale(@horiz, @vert);
27 | }
28 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/path.less:
--------------------------------------------------------------------------------
1 | /* FONT PATH
2 | * -------------------------- */
3 |
4 | @font-face {
5 | font-family: 'FontAwesome';
6 | src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}');
7 | src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'),
8 | url('@{fa-font-path}/fontawesome-webfont.woff2?v=@{fa-version}') format('woff2'),
9 | url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'),
10 | url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'),
11 | url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg');
12 | // src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
13 | font-weight: normal;
14 | font-style: normal;
15 | }
16 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/rotated-flipped.less:
--------------------------------------------------------------------------------
1 | // Rotated & Flipped Icons
2 | // -------------------------
3 |
4 | .@{fa-css-prefix}-rotate-90 { .fa-icon-rotate(90deg, 1); }
5 | .@{fa-css-prefix}-rotate-180 { .fa-icon-rotate(180deg, 2); }
6 | .@{fa-css-prefix}-rotate-270 { .fa-icon-rotate(270deg, 3); }
7 |
8 | .@{fa-css-prefix}-flip-horizontal { .fa-icon-flip(-1, 1, 0); }
9 | .@{fa-css-prefix}-flip-vertical { .fa-icon-flip(1, -1, 2); }
10 |
11 | // Hook for IE8-9
12 | // -------------------------
13 |
14 | :root .@{fa-css-prefix}-rotate-90,
15 | :root .@{fa-css-prefix}-rotate-180,
16 | :root .@{fa-css-prefix}-rotate-270,
17 | :root .@{fa-css-prefix}-flip-horizontal,
18 | :root .@{fa-css-prefix}-flip-vertical {
19 | filter: none;
20 | }
21 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/less/stacked.less:
--------------------------------------------------------------------------------
1 | // Stacked Icons
2 | // -------------------------
3 |
4 | .@{fa-css-prefix}-stack {
5 | position: relative;
6 | display: inline-block;
7 | width: 2em;
8 | height: 2em;
9 | line-height: 2em;
10 | vertical-align: middle;
11 | }
12 | .@{fa-css-prefix}-stack-1x, .@{fa-css-prefix}-stack-2x {
13 | position: absolute;
14 | left: 0;
15 | width: 100%;
16 | text-align: center;
17 | }
18 | .@{fa-css-prefix}-stack-1x { line-height: inherit; }
19 | .@{fa-css-prefix}-stack-2x { font-size: 2em; }
20 | .@{fa-css-prefix}-inverse { color: @fa-inverse; }
21 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_animated.scss:
--------------------------------------------------------------------------------
1 | // Spinning Icons
2 | // --------------------------
3 |
4 | .#{$fa-css-prefix}-spin {
5 | -webkit-animation: fa-spin 2s infinite linear;
6 | animation: fa-spin 2s infinite linear;
7 | }
8 |
9 | .#{$fa-css-prefix}-pulse {
10 | -webkit-animation: fa-spin 1s infinite steps(8);
11 | animation: fa-spin 1s infinite steps(8);
12 | }
13 |
14 | @-webkit-keyframes fa-spin {
15 | 0% {
16 | -webkit-transform: rotate(0deg);
17 | transform: rotate(0deg);
18 | }
19 | 100% {
20 | -webkit-transform: rotate(359deg);
21 | transform: rotate(359deg);
22 | }
23 | }
24 |
25 | @keyframes fa-spin {
26 | 0% {
27 | -webkit-transform: rotate(0deg);
28 | transform: rotate(0deg);
29 | }
30 | 100% {
31 | -webkit-transform: rotate(359deg);
32 | transform: rotate(359deg);
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_bordered-pulled.scss:
--------------------------------------------------------------------------------
1 | // Bordered & Pulled
2 | // -------------------------
3 |
4 | .#{$fa-css-prefix}-border {
5 | padding: .2em .25em .15em;
6 | border: solid .08em $fa-border-color;
7 | border-radius: .1em;
8 | }
9 |
10 | .pull-right { float: right; }
11 | .pull-left { float: left; }
12 |
13 | .#{$fa-css-prefix} {
14 | &.pull-left { margin-right: .3em; }
15 | &.pull-right { margin-left: .3em; }
16 | }
17 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_core.scss:
--------------------------------------------------------------------------------
1 | // Base Class Definition
2 | // -------------------------
3 |
4 | .#{$fa-css-prefix} {
5 | display: inline-block;
6 | font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration
7 | font-size: inherit; // can't have font-size inherit on line above, so need to override
8 | text-rendering: auto; // optimizelegibility throws things off #1094
9 | -webkit-font-smoothing: antialiased;
10 | -moz-osx-font-smoothing: grayscale;
11 | transform: translate(0, 0); // ensures no half-pixel rendering in firefox
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_fixed-width.scss:
--------------------------------------------------------------------------------
1 | // Fixed Width Icons
2 | // -------------------------
3 | .#{$fa-css-prefix}-fw {
4 | width: (18em / 14);
5 | text-align: center;
6 | }
7 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_larger.scss:
--------------------------------------------------------------------------------
1 | // Icon Sizes
2 | // -------------------------
3 |
4 | /* makes the font 33% larger relative to the icon container */
5 | .#{$fa-css-prefix}-lg {
6 | font-size: (4em / 3);
7 | line-height: (3em / 4);
8 | vertical-align: -15%;
9 | }
10 | .#{$fa-css-prefix}-2x { font-size: 2em; }
11 | .#{$fa-css-prefix}-3x { font-size: 3em; }
12 | .#{$fa-css-prefix}-4x { font-size: 4em; }
13 | .#{$fa-css-prefix}-5x { font-size: 5em; }
14 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_list.scss:
--------------------------------------------------------------------------------
1 | // List Icons
2 | // -------------------------
3 |
4 | .#{$fa-css-prefix}-ul {
5 | padding-left: 0;
6 | margin-left: $fa-li-width;
7 | list-style-type: none;
8 | > li { position: relative; }
9 | }
10 | .#{$fa-css-prefix}-li {
11 | position: absolute;
12 | left: -$fa-li-width;
13 | width: $fa-li-width;
14 | top: (2em / 14);
15 | text-align: center;
16 | &.#{$fa-css-prefix}-lg {
17 | left: -$fa-li-width + (4em / 14);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_mixins.scss:
--------------------------------------------------------------------------------
1 | // Mixins
2 | // --------------------------
3 |
4 | @mixin fa-icon() {
5 | display: inline-block;
6 | font: normal normal normal #{$fa-font-size-base}/1 FontAwesome; // shortening font declaration
7 | font-size: inherit; // can't have font-size inherit on line above, so need to override
8 | text-rendering: auto; // optimizelegibility throws things off #1094
9 | -webkit-font-smoothing: antialiased;
10 | -moz-osx-font-smoothing: grayscale;
11 | transform: translate(0, 0); // ensures no half-pixel rendering in firefox
12 |
13 | }
14 |
15 | @mixin fa-icon-rotate($degrees, $rotation) {
16 | filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
17 | -webkit-transform: rotate($degrees);
18 | -ms-transform: rotate($degrees);
19 | transform: rotate($degrees);
20 | }
21 |
22 | @mixin fa-icon-flip($horiz, $vert, $rotation) {
23 | filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
24 | -webkit-transform: scale($horiz, $vert);
25 | -ms-transform: scale($horiz, $vert);
26 | transform: scale($horiz, $vert);
27 | }
28 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_path.scss:
--------------------------------------------------------------------------------
1 | /* FONT PATH
2 | * -------------------------- */
3 |
4 | @font-face {
5 | font-family: 'FontAwesome';
6 | src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}');
7 | src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'),
8 | url('#{$fa-font-path}/fontawesome-webfont.woff2?v=#{$fa-version}') format('woff2'),
9 | url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'),
10 | url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'),
11 | url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg');
12 | // src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
13 | font-weight: normal;
14 | font-style: normal;
15 | }
16 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_rotated-flipped.scss:
--------------------------------------------------------------------------------
1 | // Rotated & Flipped Icons
2 | // -------------------------
3 |
4 | .#{$fa-css-prefix}-rotate-90 { @include fa-icon-rotate(90deg, 1); }
5 | .#{$fa-css-prefix}-rotate-180 { @include fa-icon-rotate(180deg, 2); }
6 | .#{$fa-css-prefix}-rotate-270 { @include fa-icon-rotate(270deg, 3); }
7 |
8 | .#{$fa-css-prefix}-flip-horizontal { @include fa-icon-flip(-1, 1, 0); }
9 | .#{$fa-css-prefix}-flip-vertical { @include fa-icon-flip(1, -1, 2); }
10 |
11 | // Hook for IE8-9
12 | // -------------------------
13 |
14 | :root .#{$fa-css-prefix}-rotate-90,
15 | :root .#{$fa-css-prefix}-rotate-180,
16 | :root .#{$fa-css-prefix}-rotate-270,
17 | :root .#{$fa-css-prefix}-flip-horizontal,
18 | :root .#{$fa-css-prefix}-flip-vertical {
19 | filter: none;
20 | }
21 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/_stacked.scss:
--------------------------------------------------------------------------------
1 | // Stacked Icons
2 | // -------------------------
3 |
4 | .#{$fa-css-prefix}-stack {
5 | position: relative;
6 | display: inline-block;
7 | width: 2em;
8 | height: 2em;
9 | line-height: 2em;
10 | vertical-align: middle;
11 | }
12 | .#{$fa-css-prefix}-stack-1x, .#{$fa-css-prefix}-stack-2x {
13 | position: absolute;
14 | left: 0;
15 | width: 100%;
16 | text-align: center;
17 | }
18 | .#{$fa-css-prefix}-stack-1x { line-height: inherit; }
19 | .#{$fa-css-prefix}-stack-2x { font-size: 2em; }
20 | .#{$fa-css-prefix}-inverse { color: $fa-inverse; }
21 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/demos/web/vendor/scss/font-awesome.scss:
--------------------------------------------------------------------------------
1 | /*!
2 | * Font Awesome 4.3.0 by @davegandy - http://fontawesome.io - @fontawesome
3 | * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
4 | */
5 |
6 | @import "variables";
7 | @import "mixins";
8 | @import "path";
9 | @import "core";
10 | @import "larger";
11 | @import "fixed-width";
12 | @import "list";
13 | @import "bordered-pulled";
14 | @import "animated";
15 | @import "rotated-flipped";
16 | @import "stacked";
17 | @import "icons";
18 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/docs/css/extra.css:
--------------------------------------------------------------------------------
1 | div.col-md-9 h1:first-of-type {
2 | text-align: center;
3 | font-size: 60px;
4 | font-weight: 300;
5 | }
6 |
7 | /* div.col-md-9 p:first-of-type { */
8 | /* text-align: center; */
9 | /* } */
10 |
11 | /* div.col-md-9 p.admonition-title:first-of-type { */
12 | /* text-align: left; */
13 | /* } */
14 |
15 | /* div.col-md-9 h1:first-of-type .headerlink { */
16 | /* display: none; */
17 | /* } */
18 |
19 | code.no-highlight {
20 | color: black;
21 | }
--------------------------------------------------------------------------------
/metrics_evaluation/openface/docs/demo-2-comparison.md:
--------------------------------------------------------------------------------
1 | ## Demo 2: Comparing two images
2 | Released by [Brandon Amos](http://bamos.github.io) on 2015-10-13.
3 |
4 | ---
5 |
6 | The [comparison demo](https://github.com/cmusatyalab/openface/blob/master/demos/compare.py) outputs the predicted similarity
7 | score of two faces by computing the squared L2 distance between
8 | their representations.
9 | A lower score indicates two faces are more likely of the same person.
10 | Since the representations are on the unit hypersphere, the
11 | scores range from 0 (the same picture) to 4.0.
12 | The following distances between images of John Lennon and
13 | Eric Clapton were generated with
14 | `./demos/compare.py images/examples/{lennon*,clapton*}`.
15 |
16 | | Lennon 1 | Lennon 2 | Clapton 1 | Clapton 2 |
17 | |---|---|---|---|
18 | | ![Lennon 1](../images/examples/lennon-1.jpg) | ![Lennon 2](../images/examples/lennon-2.jpg) | ![Clapton 1](../images/examples/clapton-1.jpg) | ![Clapton 2](../images/examples/clapton-2.jpg) |
19 |
20 | The following table shows that a distance threshold of `0.99` would
21 | distinguish these two people.
22 | In practice, further experimentation should be done on the distance threshold.
23 | On our LFW experiments, the mean threshold across multiple
24 | experiments is `0.99`,
25 | see [accuracies.txt](https://github.com/cmusatyalab/openface/blob/master/evaluation/lfw.nn4.small2.v1/accuracies.txt).
26 |
27 | | Image 1 | Image 2 | Distance |
28 | |---|---|---|
29 | | Lennon 1 | Lennon 2 | 0.763 |
30 | | Lennon 1 | Clapton 1 | 1.132 |
31 | | Lennon 1 | Clapton 2 | 1.145 |
32 | | Lennon 2 | Clapton 1 | 1.447 |
33 | | Lennon 2 | Clapton 2 | 1.521 |
34 | | Clapton 1 | Clapton 2 | 0.318 |
35 |
--------------------------------------------------------------------------------
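Since the representations are unit vectors, the squared distance expands to ||a − b||² = 2 − 2·a·b, which is why the scores fall in [0, 4]. A minimal sketch of the comparison step, with random unit vectors standing in for the 128-D representations returned by `net.forward()`:

```python
# Sketch of the compare.py distance computation; random unit vectors
# stand in for OpenFace's 128-D face representations.
import numpy as np

rep1 = np.random.randn(128)
rep1 /= np.linalg.norm(rep1)    # project onto the unit hypersphere
rep2 = np.random.randn(128)
rep2 /= np.linalg.norm(rep2)

d = rep1 - rep2
distance = np.dot(d, d)         # squared L2, in [0, 4] for unit vectors
same_person = distance < 0.99   # mean LFW threshold cited above
print(distance, same_person)
```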
/metrics_evaluation/openface/docs/js/extra.js:
--------------------------------------------------------------------------------
1 | window.onload = function() {
2 | var snowplowTracker = Snowplow.getTrackerUrl('joule.isr.cs.cmu.edu:8081');
3 | snowplowTracker.enableLinkTracking();
4 | snowplowTracker.trackPageView();
5 | }
6 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/docs/release-notes.md:
--------------------------------------------------------------------------------
1 | # Release Notes
2 |
3 | ## 0.2.1 (2016/01/25)
4 | + Minor bug fixes and improved error messages.
5 | + Add an LFW classification experiment and an outlier detection script.
6 |
7 | ## 0.2.0 (2016/01/19)
8 | + See [this blog post](http://bamos.github.io/2016/01/19/openface-0.2.0/)
9 | for an overview and
10 | [the GitHub Milestone](https://github.com/cmusatyalab/openface/milestones/v0.2.0)
11 | for a high-level issue summary.
12 | + Training improvements resulting in an accuracy increase from **76.1% to 92.9%**,
13 | which are from Bartosz Ludwiczuk's ideas and implementations in
14 | [this mailing list thread](https://groups.google.com/forum/#!topic/cmu-openface/dcPh883T1rk).
15 | These improvements also reduce the training time from a week to a day.
16 | + Nearly halved execution time thanks to [Hervé Bredin's](http://herve.niderb.fr/)
17 | suggestions and sample code for image alignment in
18 | [Issue 50](https://github.com/cmusatyalab/openface/issues/50).
19 | + Hosted
20 | [Python API Documentation](http://openface-api.readthedocs.org/en/latest/index.html).
21 | + [Docker automated build](https://hub.docker.com/r/bamos/openface) online.
22 | + Initial automatic tests written in [tests](https://github.com/cmusatyalab/openface/tree/0.2.0/tests).
23 | + [Tests successfully passing](https://travis-ci.org/cmusatyalab/openface/branches)
24 | in the Docker automated build in Travis.
25 | + Add
26 | [util/profile-pipeline.py](https://github.com/cmusatyalab/openface/tree/0.2.0/util/profile-pipeline.py)
27 | to profile the overall execution time on a single image.
28 |
29 | ## 0.1.1 (2015/10/15)
30 | + Fix debug mode of NaiveDlib alignment.
31 | + Add
32 | [util/prune-dataset.py](https://github.com/cmusatyalab/openface/tree/0.1.1/util/prune-dataset.py)
33 | for dataset processing.
34 | + Correct Docker dependencies.
35 |
36 | ## 0.1.0 (2015/10/13)
37 | + Initial release.
38 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/docs/usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | ## [API Documentation](http://openface-api.readthedocs.org/en/latest/index.html)
4 |
5 | ## Example
6 |
7 | See [the image comparison demo](https://github.com/cmusatyalab/openface/blob/master/demos/compare.py) for a complete example
8 | written in Python using a naive Torch subprocess to process the faces.
9 |
10 | ```Python
11 | import openface
12 |
13 | # `args` are parsed command-line arguments.
14 |
15 | align = openface.AlignDlib(args.dlibFacePredictor)
16 | net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
17 |
18 | # `img` is a numpy matrix containing the RGB pixels of the image.
19 | bb = align.getLargestFaceBoundingBox(img)
20 | alignedFace = align.align(args.imgDim, img, bb,
21 | landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
22 | rep1 = net.forward(alignedFace)
23 |
24 | # `rep2` obtained similarly.
25 | d = rep1 - rep2
26 | distance = np.dot(d, d)
27 | ```
28 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/docs/visualizations.md:
--------------------------------------------------------------------------------
1 | # Visualizing representations with t-SNE
2 | [t-SNE](http://lvdmaaten.github.io/tsne/) is a dimensionality
3 | reduction technique that can be used to visualize the
4 | 128-dimensional features OpenFace produces.
5 | The following shows the visualization of the three people
6 | in the training and testing dataset with the most images.
7 |
8 | **Training**
9 |
10 | 
11 |
12 | **Testing**
13 |
14 | 
15 |
16 | These can be generated with the following commands from the root
17 | `openface` directory.
18 |
19 | ## 1. Create raw image directory.
20 | Create a directory for a subset of raw images that you want to visualize
21 | with TSNE.
22 | Make sure images from different
23 | people are in different subdirectories. The names of the labels or
24 | images do not matter, and each person can have a different number of images.
25 | The images should be formatted as `jpg` or `png` and have
26 | a lowercase extension.
27 |
28 | ```
29 | $ tree data/mydataset-subset/raw
30 | person-1
31 | ├── image-1.jpg
32 | ├── image-2.png
33 | ...
34 | └── image-p.png
35 |
36 | ...
37 |
38 | person-m
39 | ├── image-1.png
40 | ├── image-2.jpg
41 | ...
42 | └── image-q.png
43 | ```
44 |
45 |
46 | ## 2. Preprocess the raw images
47 | Change `8` to however many
48 | separate processes you want to run:
49 | `for N in {1..8}; do ./util/align-dlib.py <path-to-raw-data> align outerEyesAndNose <path-to-aligned-data> --size 96 & done`.
50 |
51 | If failed alignment attempts cause your directory to have too few images,
52 | you can use our utility script
53 | [./util/prune-dataset.py](https://github.com/cmusatyalab/openface/blob/master/util/prune-dataset.py)
54 | to delete directories with fewer than a specified number of images.
55 |
56 | ## 3. Generate Representations
57 | `./batch-represent/main.lua -outDir <feature-directory> -data <aligned-data-directory>`
58 | creates `reps.csv` and `labels.csv` in `<feature-directory>`.
59 |
60 | ## 4. Generate TSNE visualization
61 | Generate the t-SNE visualization with
62 | `./util/tsne.py <feature-directory> --names <name-1> ... <name-n>`,
63 | where `name i` corresponds to label `i` from the
64 | left-most column in `labels.csv`.
65 | This creates `tsne.pdf` in `<feature-directory>`.
66 |
67 | # Visualizing layer outputs
68 | Visualizing the output feature maps of each layer
69 | is sometimes helpful to understand what features
70 | the network has learned to extract.
71 | With faces, the locations of the eyes, nose, and
72 | mouth should play an important role.
73 |
74 | [demos/vis-outputs.lua](https://github.com/cmusatyalab/openface/blob/master/demos/vis-outputs.lua)
75 | outputs the feature maps from an aligned image.
76 | The following shows the first 39 filters of the
77 | first convolutional layer on two images
78 | of John Lennon.
79 |
80 | 
81 | 
82 |
--------------------------------------------------------------------------------
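For reference, a sketch of step 4 using scikit-learn's t-SNE in place of `./util/tsne.py`; the file paths and the `labels.csv` layout (integer label in the first column) are assumptions based on the description above:

```python
# Sketch: 2-D t-SNE plot of batch-represent output with scikit-learn.
import numpy as np
import matplotlib
matplotlib.use('Agg')  # render straight to file; no display needed
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# Assumed layout: one 128-D representation per row in reps.csv, and the
# label index in the first column of labels.csv.
reps = np.loadtxt('features/reps.csv', delimiter=',')
labels = np.loadtxt('features/labels.csv', delimiter=',', usecols=0, dtype=int)

embedded = TSNE(n_components=2, random_state=0).fit_transform(reps)
for label in np.unique(labels):
    mask = labels == label
    plt.scatter(embedded[mask, 0], embedded[mask, 1], label=str(label))
plt.legend()
plt.savefig('tsne.pdf')
```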
/metrics_evaluation/openface/images/dlib-landmark-mean.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/dlib-landmark-mean.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/adams.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/adams.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/carell.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/carell.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/clapton-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/clapton-1.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/clapton-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/clapton-2.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/lennon-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/lennon-1.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/lennon-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/lennon-2.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/examples/longoria-cooper.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/examples/longoria-cooper.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/nn4.v1.conv1.lennon-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/nn4.v1.conv1.lennon-1.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/nn4.v1.conv1.lennon-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/nn4.v1.conv1.lennon-2.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/nn4.v1.loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/nn4.v1.loss.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/performance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/performance.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/data-afterlives-poster.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/data-afterlives-poster.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/demo.gif
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/exhibit-amos.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/exhibit-amos.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/exhibits-all.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/exhibits-all.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/exhibits-nosenzo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/exhibits-nosenzo.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/sphere-demo/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/sphere-demo/screenshot.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/summary.ai:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/summary.ai
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/summary.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/summary.jpg
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/train-tsne.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/train-tsne.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/val-tsne.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/val-tsne.png
--------------------------------------------------------------------------------
/metrics_evaluation/openface/images/youtube-web.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/images/youtube-web.gif
--------------------------------------------------------------------------------
/metrics_evaluation/openface/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: OpenFace
2 | repo_url: https://github.com/cmusatyalab/openface
3 | #theme: readthedocs
4 | copyright: 'Crafted by Brandon Amos at Carnegie Mellon University.'
5 | theme: mkdocs
6 | extra_css:
7 | - css/extra.css
8 | extra_javascript:
9 | - js/sp.js
10 | - js/extra.js
11 | pages:
12 | - Home: index.md
13 | - Demos:
14 | - Demo 1 - Real-time Web: demo-1-web.md
15 | - Demo 2 - Comparison: demo-2-comparison.md
16 | - Demo 3 - Training a Classifier: demo-3-classifier.md
17 | - Demo 4 - Real-time Sphere Visualization: demo-4-sphere.md
18 | - User Guide:
19 | - Usage and API Docs: usage.md
20 | - Setup: setup.md
21 | - FAQ: faq.md
22 | - DNN Models:
23 | - Models and Accuracies: models-and-accuracies.md
24 | - Training a DNN Model: training-new-models.md
25 | - Visualizations: visualizations.md
26 | - Release Notes: release-notes.md
27 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/models/dlib/mean.csv:
--------------------------------------------------------------------------------
1 | 0.0792396913815,0.339223741112
2 | 0.0829219487236,0.456955367943
3 | 0.0967927109165,0.575648016728
4 | 0.122141515615,0.691921601066
5 | 0.168687863544,0.800341263616
6 | 0.239789390707,0.895732504778
7 | 0.325662452515,0.977068762493
8 | 0.422318282013,1.04329000149
9 | 0.531777802068,1.06080371126
10 | 0.641296298053,1.03981924107
11 | 0.738105872266,0.972268833998
12 | 0.824444363295,0.889624082279
13 | 0.894792677532,0.792494155836
14 | 0.939395486253,0.681546643421
15 | 0.96111933829,0.562238253072
16 | 0.970579841181,0.441758925744
17 | 0.971193274221,0.322118743967
18 | 0.163846223133,0.249151738053
19 | 0.21780354657,0.204255863861
20 | 0.291299351124,0.192367318323
21 | 0.367460241458,0.203582210627
22 | 0.4392945113,0.233135599851
23 | 0.586445962425,0.228141644834
24 | 0.660152671635,0.195923841854
25 | 0.737466449096,0.182360984545
26 | 0.813236546239,0.192828009114
27 | 0.8707571886,0.235293377042
28 | 0.51534533827,0.31863546193
29 | 0.516221448289,0.396200446263
30 | 0.517118861835,0.473797687758
31 | 0.51816430343,0.553157797772
32 | 0.433701156035,0.604054457668
33 | 0.475501237769,0.62076344024
34 | 0.520712933176,0.634268222208
35 | 0.565874114041,0.618796581487
36 | 0.607054002672,0.60157671656
37 | 0.252418718401,0.331052263829
38 | 0.298663015648,0.302646354002
39 | 0.355749724218,0.303020650651
40 | 0.403718978315,0.33867711083
41 | 0.352507175597,0.349987615384
42 | 0.296791759886,0.350478978225
43 | 0.631326076346,0.334136672344
44 | 0.679073381078,0.29645404267
45 | 0.73597236153,0.294721285802
46 | 0.782865376271,0.321305281656
47 | 0.740312274764,0.341849376713
48 | 0.68499850091,0.343734332172
49 | 0.353167761422,0.746189164237
50 | 0.414587777921,0.719053835073
51 | 0.477677654595,0.706835892494
52 | 0.522732900812,0.717092275768
53 | 0.569832064287,0.705414478982
54 | 0.635195811927,0.71565572516
55 | 0.69951672331,0.739419187253
56 | 0.639447159575,0.805236879972
57 | 0.576410514055,0.835436670169
58 | 0.525398405766,0.841706377792
59 | 0.47641545769,0.837505914975
60 | 0.41379548902,0.810045601727
61 | 0.380084785646,0.749979603086
62 | 0.477955996282,0.74513234612
63 | 0.523389793327,0.748924302636
64 | 0.571057789237,0.74332894691
65 | 0.672409137852,0.744177032192
66 | 0.572539621444,0.776609286626
67 | 0.5240106503,0.783370783245
68 | 0.477561227414,0.778476346951
69 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/models/dlib/std.csv:
--------------------------------------------------------------------------------
1 | 0.112516272083,0.0966494270875
2 | 0.106931056524,0.0905330955364
3 | 0.107088874308,0.0869577503945
4 | 0.108441760151,0.0853784218469
5 | 0.102666562345,0.0828098630378
6 | 0.0931066068649,0.0781438813598
7 | 0.0862195403558,0.073607785192
8 | 0.0866718169754,0.0716659844416
9 | 0.0887307938159,0.0713501697149
10 | 0.0879516731961,0.0718835713222
11 | 0.0885481894261,0.074610592141
12 | 0.0965382295197,0.0797108371891
13 | 0.107150533642,0.0848365666684
14 | 0.113814897009,0.0872701144436
15 | 0.113250427279,0.0886737755813
16 | 0.112886966543,0.0921730470649
17 | 0.118067943064,0.0981814085873
18 | 0.0634303741581,0.0757569410385
19 | 0.0649729619891,0.0691687008157
20 | 0.0664154905063,0.0619954216813
21 | 0.0685098559834,0.0560473055645
22 | 0.0704879349606,0.050720304179
23 | 0.0726429145037,0.0499952839052
24 | 0.0722418947111,0.0549833852792
25 | 0.0717292304623,0.0610921077411
26 | 0.0713816417095,0.0687054225814
27 | 0.0700553731897,0.0761075895411
28 | 0.0621672499764,0.0447546505805
29 | 0.0650848569336,0.0450294720133
30 | 0.0726856657946,0.0491824459587
31 | 0.0828916552599,0.0557005786862
32 | 0.0524570448913,0.0496627915275
33 | 0.0575094803185,0.0501830904089
34 | 0.0604964569741,0.050652857225
35 | 0.0579067043158,0.049592654737
36 | 0.0533211203675,0.0490343160913
37 | 0.0485486252069,0.0619168011914
38 | 0.0487033558758,0.0576308899473
39 | 0.0469052199165,0.051654192586
40 | 0.0412176324241,0.0461083273343
41 | 0.0425714419973,0.0506936741285
42 | 0.0448132755197,0.0559942438001
43 | 0.0441859012174,0.0455362522792
44 | 0.0509251139579,0.0510215936658
45 | 0.0538077250474,0.0569728270175
46 | 0.0543541563728,0.0612991695378
47 | 0.0502871615944,0.0554205433039
48 | 0.0468101360149,0.050198047634
49 | 0.0565486921015,0.0641600106223
50 | 0.0512134179212,0.0572722850894
51 | 0.0548204403799,0.0545298937778
52 | 0.0562494620852,0.0546160201278
53 | 0.0556065655952,0.0542806062847
54 | 0.0529801595698,0.0568390905343
55 | 0.0592921393119,0.0637878874753
56 | 0.0616377521119,0.0589464007714
57 | 0.06479137104,0.0612782924056
58 | 0.0648496833714,0.0615071879615
59 | 0.0640379519101,0.0613747050009
60 | 0.0599906728879,0.0592181620108
61 | 0.0561131831257,0.0606149250546
62 | 0.0560425487024,0.056085203945
63 | 0.0572358955235,0.0554251594804
64 | 0.0568509695242,0.055765821017
65 | 0.0585893823961,0.0602861158378
66 | 0.059328626624,0.0563893824624
67 | 0.0594441666238,0.0563232987951
68 | 0.0584465247132,0.0562904593331
69 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/models/openface/vgg-face.def.lua:
--------------------------------------------------------------------------------
1 | -- Model: vgg-face.def.lua
2 | -- Description: VGG Face's network:
3 | -- http://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf
4 | --
5 | -- Input size: 3x224x224
6 | -- Number of Parameters from net:getParameters() with embSize=128: 118003648
7 | -- Components: Mostly `nn`
8 | -- Devices: CPU and CUDA
9 | --
10 | -- Brandon Amos
11 | -- 2016-06-08
12 | --
13 | -- Copyright 2016 Carnegie Mellon University
14 | --
15 | -- Licensed under the Apache License, Version 2.0 (the "License");
16 | -- you may not use this file except in compliance with the License.
17 | -- You may obtain a copy of the License at
18 | --
19 | -- http://www.apache.org/licenses/LICENSE-2.0
20 | --
21 | -- Unless required by applicable law or agreed to in writing, software
22 | -- distributed under the License is distributed on an "AS IS" BASIS,
23 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 | -- See the License for the specific language governing permissions and
25 | -- limitations under the License.
26 |
27 | imgDim = 224
28 |
29 | local conv = nn.SpatialConvolutionMM
30 | local relu = nn.ReLU
31 | local mp = nn.SpatialMaxPooling
32 |
33 | function createModel()
34 | local net = nn.Sequential()
35 |
36 | net:add(conv(3, 64, 3,3, 1,1, 1,1))
37 | net:add(relu(true))
38 | net:add(conv(64, 64, 3,3, 1,1, 1,1))
39 | net:add(relu(true))
40 | net:add(mp(2,2, 2,2))
41 | net:add(conv(64, 128, 3,3, 1,1, 1,1))
42 | net:add(relu(true))
43 | net:add(conv(128, 128, 3,3, 1,1, 1,1))
44 | net:add(relu(true))
45 | net:add(mp(2,2, 2,2))
46 | net:add(conv(128, 256, 3,3, 1,1, 1,1))
47 | net:add(relu(true))
48 | net:add(conv(256, 256, 3,3, 1,1, 1,1))
49 | net:add(relu(true))
50 | net:add(conv(256, 256, 3,3, 1,1, 1,1))
51 | net:add(relu(true))
52 | net:add(mp(2,2, 2,2))
53 | net:add(conv(256, 512, 3,3, 1,1, 1,1))
54 | net:add(relu(true))
55 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
56 | net:add(relu(true))
57 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
58 | net:add(relu(true))
59 | net:add(mp(2,2, 2,2))
60 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
61 | net:add(relu(true))
62 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
63 | net:add(relu(true))
64 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
65 | net:add(relu(true))
66 | net:add(mp(2,2, 2,2))
67 |
68 | -- Validate shape with:
69 | -- net:add(nn.Reshape(25088))
70 |
71 | net:add(nn.View(25088))
72 | net:add(nn.Linear(25088, 4096))
73 | net:add(relu(true))
74 |
75 | net:add(nn.Linear(4096, opt.embSize))
76 | net:add(nn.Normalize(2))
77 |
78 | return net
79 | end
80 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/models/openface/vgg-face.small1.def.lua:
--------------------------------------------------------------------------------
1 | -- Model: vgg-face.small1.def.lua
2 | -- Description: Modified VGG Face network. Smaller and with batch normalization.
3 | -- !! In progress, may change.
4 | -- Input size: 3x96x96
5 | -- Number of Parameters from net:getParameters() with embSize=128: TODO
6 | -- Components: Mostly `nn`
7 | -- Devices: CPU and CUDA
8 | --
9 | -- Brandon Amos
10 | -- 2016-06-08
11 | --
12 | -- Copyright 2016 Carnegie Mellon University
13 | --
14 | -- Licensed under the Apache License, Version 2.0 (the "License");
15 | -- you may not use this file except in compliance with the License.
16 | -- You may obtain a copy of the License at
17 | --
18 | -- http://www.apache.org/licenses/LICENSE-2.0
19 | --
20 | -- Unless required by applicable law or agreed to in writing, software
21 | -- distributed under the License is distributed on an "AS IS" BASIS,
22 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
23 | -- See the License for the specific language governing permissions and
24 | -- limitations under the License.
25 |
26 | imgDim = 96
27 |
28 | local conv = nn.SpatialConvolutionMM
29 | local sbn = nn.SpatialBatchNormalization
30 | local relu = nn.ReLU
31 | local mp = nn.SpatialMaxPooling
32 |
33 | function createModel()
34 | local net = nn.Sequential()
35 |
36 | net:add(conv(3, 64, 3,3, 1,1, 1,1))
37 | net:add(sbn(64))
38 | net:add(relu(true))
39 | net:add(conv(64, 64, 3,3, 1,1, 1,1))
40 | net:add(sbn(64))
41 | net:add(relu(true))
42 | net:add(mp(2,2, 2,2))
43 | net:add(conv(64, 128, 3,3, 1,1, 1,1))
44 | net:add(sbn(128))
45 | net:add(relu(true))
46 | net:add(conv(128, 128, 3,3, 1,1, 1,1))
47 | net:add(sbn(128))
48 | net:add(relu(true))
49 | net:add(mp(2,2, 2,2))
50 | net:add(conv(128, 256, 3,3, 1,1, 1,1))
51 | net:add(sbn(256))
52 | net:add(relu(true))
53 | net:add(conv(256, 256, 3,3, 1,1, 1,1))
54 | net:add(sbn(256))
55 | net:add(relu(true))
56 | net:add(conv(256, 256, 3,3, 1,1, 1,1))
57 | net:add(sbn(256))
58 | net:add(relu(true))
59 | net:add(mp(2,2, 2,2))
60 | net:add(conv(256, 512, 3,3, 1,1, 1,1))
61 | net:add(sbn(512))
62 | net:add(relu(true))
63 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
64 | net:add(sbn(512))
65 | net:add(relu(true))
66 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
67 | net:add(sbn(512))
68 | net:add(relu(true))
69 | net:add(mp(2,2, 2,2))
70 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
71 | net:add(sbn(512))
72 | net:add(relu(true))
73 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
74 | net:add(sbn(512))
75 | net:add(relu(true))
76 | net:add(conv(512, 512, 3,3, 1,1, 1,1))
77 | net:add(sbn(512))
78 | net:add(relu(true))
79 | net:add(mp(2,2, 2,2))
80 |
81 | -- Validate shape with:
82 |   -- net:add(nn.Reshape(4608))
83 |
84 | net:add(nn.View(4608))
85 | net:add(nn.Linear(4608, 1024))
86 | net:add(relu(true))
87 |
88 | net:add(nn.Linear(1024, opt.embSize))
89 | net:add(nn.Normalize(2))
90 |
91 | return net
92 | end
93 |
--------------------------------------------------------------------------------
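The flatten sizes hard-coded in the two model definitions above follow from the layer structure: 3x3 stride-1 pad-1 convolutions preserve height and width, and each of the five 2x2 stride-2 max-poolings halves them, so an HxW input reaches the linear layers as 512 x (H/32) x (W/32) features. A minimal sketch (plain Python, no Torch needed) checking the 25088 and 4608 constants:

# Sketch: verify the nn.View sizes in vgg-face.def.lua (224 -> 25088)
# and vgg-face.small1.def.lua (96 -> 4608).

def flatten_size(img_dim, channels=512, n_pools=5):
    """Flattened feature count after n_pools 2x2 stride-2 max-poolings."""
    side = img_dim
    for _ in range(n_pools):
        side //= 2  # each nn.SpatialMaxPooling(2,2, 2,2) halves H and W
    return channels * side * side

assert flatten_size(224) == 25088  # 512 * 7 * 7
assert flatten_size(96) == 4608    # 512 * 3 * 3

--------------------------------------------------------------------------------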
/metrics_evaluation/openface/opencv-dlib-torch.Dockerfile:
--------------------------------------------------------------------------------
1 | # Note from Brandon on 2015-01-13:
2 | #
3 | # Always push this from an OSX Docker machine.
4 | #
5 | # If I build this on my Arch Linux desktop it works fine locally,
6 | # but dlib gives an `Illegal Instruction (core dumped)` error in
7 | # dlib.get_frontal_face_detector() when running on OSX in a Docker machine.
8 | # Building in a Docker machine on OSX fixes this issue and the built
9 | # container successfully deploys on my Arch Linux desktop.
10 | #
11 | # Building and pushing:
12 | # docker build -f opencv-dlib-torch.Dockerfile -t opencv-dlib-torch .
13 | #   docker tag opencv-dlib-torch bamos/ubuntu-opencv-dlib-torch:ubuntu_14.04-opencv_2.4.11-dlib_18.16-torch_2016.03.19
14 | # docker push bamos/ubuntu-opencv-dlib-torch:ubuntu_14.04-opencv_2.4.11-dlib_18.16-torch_2016.03.19
15 |
16 | FROM ubuntu:14.04
17 | MAINTAINER Brandon Amos
18 |
19 | RUN apt-get update && apt-get install -y \
20 | build-essential \
21 | cmake \
22 | curl \
23 | gfortran \
24 | git \
25 | graphicsmagick \
26 | libgraphicsmagick1-dev \
27 | libatlas-dev \
28 | libavcodec-dev \
29 | libavformat-dev \
30 | libboost-all-dev \
31 | libgtk2.0-dev \
32 | libjpeg-dev \
33 | liblapack-dev \
34 | libswscale-dev \
35 | pkg-config \
36 | python-dev \
37 | python-numpy \
38 | python-protobuf\
39 | software-properties-common \
40 | zip \
41 | && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
42 |
43 | RUN curl -s https://raw.githubusercontent.com/torch/ezinstall/master/install-deps | bash -e
44 | RUN git clone https://github.com/torch/distro.git ~/torch --recursive
45 | RUN cd ~/torch && ./install.sh && \
46 | cd install/bin && \
47 | ./luarocks install nn && \
48 | ./luarocks install dpnn && \
49 | ./luarocks install image && \
50 | ./luarocks install optim && \
51 | ./luarocks install csvigo && \
52 | ./luarocks install torchx && \
53 | ./luarocks install tds
54 |
55 | RUN cd ~ && \
56 | mkdir -p ocv-tmp && \
57 | cd ocv-tmp && \
58 | curl -L https://github.com/Itseez/opencv/archive/2.4.11.zip -o ocv.zip && \
59 | unzip ocv.zip && \
60 | cd opencv-2.4.11 && \
61 | mkdir release && \
62 | cd release && \
63 | cmake -D CMAKE_BUILD_TYPE=RELEASE \
64 | -D CMAKE_INSTALL_PREFIX=/usr/local \
65 | -D BUILD_PYTHON_SUPPORT=ON \
66 | .. && \
67 | make -j8 && \
68 | make install && \
69 | rm -rf ~/ocv-tmp
70 |
71 | RUN cd ~ && \
72 | mkdir -p dlib-tmp && \
73 | cd dlib-tmp && \
74 | curl -L \
75 | https://github.com/davisking/dlib/archive/v19.0.tar.gz \
76 |          -o dlib.tar.gz && \
77 |     tar xf dlib.tar.gz && \
78 | cd dlib-19.0/python_examples && \
79 | mkdir build && \
80 | cd build && \
81 | cmake ../../tools/python && \
82 | cmake --build . --config Release && \
83 | cp dlib.so /usr/local/lib/python2.7/dist-packages && \
84 | rm -rf ~/dlib-tmp
85 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/openface/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from __future__ import absolute_import
4 |
5 | from .align_dlib import AlignDlib
6 | from .torch_neural_net import TorchNeuralNet
7 |
8 | from . import data
9 | from . import helper
10 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/openface/helper.py:
--------------------------------------------------------------------------------
1 | """OpenFace helper functions."""
2 |
3 | import errno
4 | import os
5 |
6 |
7 | def mkdirP(path):
8 | """
9 |     Create a directory, like `mkdir -p`.
10 |
11 |     If the path already exists, do nothing instead of raising an error.
12 |
13 | :param path: The directory to create.
14 | :type path: str
15 | """
16 | assert path is not None
17 |
18 | try:
19 | os.makedirs(path)
20 | except OSError as exc: # Python >2.5
21 | if exc.errno == errno.EEXIST and os.path.isdir(path):
22 | pass
23 | else:
24 | raise
25 |
--------------------------------------------------------------------------------
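mkdirP above is the pre-Python-3.2 equivalent of os.makedirs(path, exist_ok=True). A minimal usage sketch (the path is made up for illustration):

from openface.helper import mkdirP

mkdirP('/tmp/openface-demo/aligned')  # hypothetical path; created if missing
mkdirP('/tmp/openface-demo/aligned')  # second call is a no-op, not an OSError

--------------------------------------------------------------------------------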
/metrics_evaluation/openface/openface/openface_server.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 | --
3 | -- Copyright 2015-2016 Carnegie Mellon University
4 | --
5 | -- Licensed under the Apache License, Version 2.0 (the "License");
6 | -- you may not use this file except in compliance with the License.
7 | -- You may obtain a copy of the License at
8 | --
9 | -- http://www.apache.org/licenses/LICENSE-2.0
10 | --
11 | -- Unless required by applicable law or agreed to in writing, software
12 | -- distributed under the License is distributed on an "AS IS" BASIS,
13 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | -- See the License for the specific language governing permissions and
15 | -- limitations under the License.
16 |
17 |
18 | require 'torch'
19 | require 'nn'
20 | require 'dpnn'
21 | require 'image'
22 |
23 | io.stdout:setvbuf 'no'
24 | torch.setdefaulttensortype('torch.FloatTensor')
25 |
26 | -- OpenMP-acceleration causes slower performance. Related issues:
27 | -- https://groups.google.com/forum/#!topic/cmu-openface/vqkkDlbfWZw
28 | -- https://github.com/torch/torch7/issues/691
29 | -- https://github.com/torch/image/issues/7
30 | torch.setnumthreads(1)
31 |
32 | local cmd = torch.CmdLine()
33 | cmd:text()
34 | cmd:text('Face recognition server.')
35 | cmd:text()
36 | cmd:text('Options:')
37 |
38 | cmd:option('-model', './models/openface/nn4.v1.t7', 'Path to model.')
39 | cmd:option('-imgDim', 96, 'Image dimension. nn1=224, nn4=96')
40 | cmd:option('-cuda', false)
41 | cmd:text()
42 |
43 | opt = cmd:parse(arg or {})
44 | -- print(opt)
45 |
46 | net = torch.load(opt.model)
47 | net:evaluate()
48 | -- print(net)
49 |
50 | local imgCuda = nil
51 | if opt.cuda then
52 | require 'cutorch'
53 | require 'cunn'
54 | net = net:cuda()
55 | imgCuda = torch.CudaTensor(1, 3, opt.imgDim, opt.imgDim)
56 | end
57 |
58 | local img = torch.Tensor(1, 3, opt.imgDim, opt.imgDim)
59 | while true do
60 | -- Read a path to an image on stdin and output the representation
61 | -- as a CSV.
62 | local imgPath = io.read("*line")
63 | if imgPath and imgPath:len() ~= 0 then
64 | img[1] = image.load(imgPath, 3, 'float')
65 | img[1] = image.scale(img[1], opt.imgDim, opt.imgDim)
66 | local rep
67 | if opt.cuda then
68 | imgCuda:copy(img)
69 | rep = net:forward(imgCuda):float()
70 | else
71 | rep = net:forward(img)
72 | end
73 | local sz = rep:size(1)
74 | for i = 1,sz do
75 | io.write(rep[i])
76 | if i < sz then
77 | io.write(',')
78 | end
79 | end
80 | io.write('\n')
81 | io.stdout:flush()
82 | end
83 | end
84 |
--------------------------------------------------------------------------------
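openface_server.lua implements a one-line-in, one-line-out protocol: each image path written to its stdin yields one comma-separated embedding on its stdout. This is the protocol that openface.TorchNeuralNet wraps; a minimal hand-rolled client sketch (the model and image paths are illustrative):

import subprocess

# Spawn the Torch server; paths assume you run from the openface checkout.
proc = subprocess.Popen(
    ['th', 'openface/openface_server.lua',
     '-model', './models/openface/nn4.v1.t7'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)

proc.stdin.write('/path/to/aligned-face.png\n')  # hypothetical aligned image
proc.stdin.flush()
rep = [float(x) for x in proc.stdout.readline().split(',')]
print(len(rep))  # embSize floats (128 for the stock models)

--------------------------------------------------------------------------------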
/metrics_evaluation/openface/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy >= 1.1, < 2.0
2 | scipy >= 0.13, < 0.17
3 | pandas >= 0.13, < 0.18
4 | scikit-learn >= 0.17, < 0.18
5 | nose >= 1.3.1, < 1.4
6 | nolearn == 0.5b1
7 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/run-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | cd $(dirname $0)
6 |
7 | nosetests-2.7 -v
8 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 |
3 | setup(
4 | name='openface',
5 | version='0.2.1',
6 | description="Face recognition with Google's FaceNet deep neural network.",
7 | url='https://github.com/cmusatyalab/openface',
8 | packages=['openface'],
9 | package_data={'openface': ['*.lua']},
10 | )
11 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zoezhou1999/BeautifyBasedOnGAN/68f210525748636088aa9defce0e0d0c50f7fb10/metrics_evaluation/openface/tests/__init__.py
--------------------------------------------------------------------------------
/metrics_evaluation/openface/tests/openface_api_tests.py:
--------------------------------------------------------------------------------
1 | # OpenFace API tests.
2 | #
3 | # Copyright 2015-2016 Carnegie Mellon University
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 |
18 | import cv2
19 | import os
20 |
21 | import numpy as np
22 | np.set_printoptions(precision=2)
23 |
24 | import scipy
25 | import scipy.spatial
26 |
27 | import openface
28 |
29 | openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
30 | modelDir = os.path.join(openfaceDir, 'models')
31 | dlibModelDir = os.path.join(modelDir, 'dlib')
32 | openfaceModelDir = os.path.join(modelDir, 'openface')
33 |
34 | exampleImages = os.path.join(openfaceDir, 'images', 'examples')
35 | lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
36 |
37 | dlibFacePredictor = os.path.join(dlibModelDir,
38 | "shape_predictor_68_face_landmarks.dat")
39 | model = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
40 | imgDim = 96
41 |
42 | align = openface.AlignDlib(dlibFacePredictor)
43 | net = openface.TorchNeuralNet(model, imgDim=imgDim)
44 |
45 |
46 | def test_pipeline():
47 | imgPath = os.path.join(exampleImages, 'lennon-1.jpg')
48 | bgrImg = cv2.imread(imgPath)
49 | if bgrImg is None:
50 | raise Exception("Unable to load image: {}".format(imgPath))
51 | rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
52 | # assert np.isclose(norm(rgbImg), 11.1355)
53 |
54 | bb = align.getLargestFaceBoundingBox(rgbImg)
55 |     print("Bounding box found was:")
56 |     print(bb)
57 | assert bb.left() == 341
58 | assert bb.right() == 1006
59 | assert bb.top() == 193
60 | assert bb.bottom() == 859
61 |
62 | alignedFace = align.align(imgDim, rgbImg, bb,
63 | landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
64 | # assert np.isclose(norm(alignedFace), 7.61577)
65 |
66 | rep = net.forward(alignedFace)
67 | cosDist = scipy.spatial.distance.cosine(rep, np.ones(128))
68 | print(cosDist)
69 | assert np.isclose(cosDist, 0.938840385931)
70 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/attic/model.lua:
--------------------------------------------------------------------------------
1 | require 'nn'
2 |
3 | require 'cunn'
4 | require 'dpnn'
5 |
6 | require 'fbnn'
7 | require 'fbcunn'
8 |
9 | require 'optim'
10 |
11 | paths.dofile('torch-TripletEmbedding/TripletEmbedding.lua')
12 |
13 | if opt.retrain ~= 'none' then
14 | assert(paths.filep(opt.retrain), 'File not found: ' .. opt.retrain)
15 | print('Loading model from file: ' .. opt.retrain);
16 | modelAnchor = torch.load(opt.retrain)
17 | else
18 | paths.dofile(opt.modelDef)
19 | modelAnchor = createModel(opt.nGPU)
20 | end
21 |
22 | modelPos = modelAnchor:clone('weight', 'bias', 'gradWeight', 'gradBias')
23 | modelNeg = modelAnchor:clone('weight', 'bias', 'gradWeight', 'gradBias')
24 |
25 | model = nn.ParallelTable()
26 | model:add(modelAnchor)
27 | model:add(modelPos)
28 | model:add(modelNeg)
29 |
30 | alpha = 0.2
31 | criterion = nn.TripletEmbeddingCriterion(alpha)
32 |
33 | model = model:cuda()
34 | criterion:cuda()
35 |
36 | print('=> Model')
37 | print(model)
38 |
39 | print('=> Criterion')
40 | print(criterion)
41 |
42 | collectgarbage()
43 |
--------------------------------------------------------------------------------
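model.lua above clones the anchor network into weight-sharing positive and negative branches and scores the three embedding batches with nn.TripletEmbeddingCriterion, i.e. the FaceNet-style margin loss max(0, ||f(a)-f(p)||^2 - ||f(a)-f(n)||^2 + alpha) with alpha = 0.2. A plain-NumPy sketch of that loss, for reference:

import numpy as np

def triplet_loss(anchor, pos, neg, alpha=0.2):
    """Mean triplet margin loss over batches of embeddings (N x embSize)."""
    d_pos = np.sum((anchor - pos) ** 2, axis=1)  # squared anchor-positive distances
    d_neg = np.sum((anchor - neg) ** 2, axis=1)  # squared anchor-negative distances
    return np.mean(np.maximum(0.0, d_pos - d_neg + alpha))

--------------------------------------------------------------------------------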
/metrics_evaluation/openface/training/attic/sanitize.lua:
--------------------------------------------------------------------------------
1 | -- From https://github.com/e-lab/torch-toolbox/blob/master/Sanitize/sanitize.lua
2 |
3 | require('torch')
4 | require('nn')
5 |
6 |
7 | -- common obj name to be freed
8 | local common = {'output', 'gradInput'}
9 |
10 | -- temporary buffer name other than output/gradInput
11 | local t = {
12 | -- convolution
13 | ['nn.SpatialConvolution'] = {'finput', 'fgradInput'},
14 | ['nn.SpatialConvolutionMM'] = {'finput', 'fgradInput'},
15 |
16 | -- pooling
17 | ['nn.SpatialMaxPooling'] = {'indices'},
18 | ['nn.TemporalMaxPooling'] = {'indices'},
19 | ['nn.VolumetricMaxPooling'] = {'indices'},
20 | ['nn.SpatialFractionalMaxPooling'] = {'indices'},
21 |
22 | -- regularizer
23 | ['nn.BatchNormalization'] = {'buffer', 'buffer2', 'centered', 'normalized'},
24 | ['nn.SpatialBatchNormalization'] = {'buffer', 'buffer2','centered', 'normalized'},
25 | ['nn.Dropout'] = {'noise'},
26 | ['nn.SpatialDropout'] = {'noise'},
27 |
28 | -- transfer
29 | ['nn.PReLU'] = {'gradWeightBuf', 'gradWeightBuf2'},
30 | ['nn.LogSigmoid'] = {'buffer'},
31 |
32 | -- etc
33 | ['nn.Mean'] = {'_gradInput'},
34 | ['nn.Normalize'] = {'_output', 'norm', 'normp'},
35 | ['nn.PairwiseDistance'] = {'diff'},
36 | ['nn.Reshape'] = {'_input', '_gradOutput'},
37 |
38 | -- fbcunn
39 | ['nn.AbstractParallel'] = {'homeGradBuffers', 'input_gpu', 'gradOutput_gpu', 'gradInput_gpu'},
40 | ['nn.DataParallel'] = {'homeGradBuffers', 'input_gpu', 'gradOutput_gpu', 'gradInput_gpu'},
41 | ['nn.ModelParallel'] = {'homeGradBuffers', 'input_gpu', 'gradOutput_gpu', 'gradInput_gpu'},
42 | }
43 |
44 |
45 | local function free_table_or_tensor(val, name, field)
46 | if type(val[name]) == 'table' then
47 | val[name] = {}
48 | elseif type(val[name]) == 'userdata' then
49 | val[name] = field.new()
50 | end
51 | end
52 |
53 |
54 | local function is_member(name, f)
55 | if f == nil then
56 | return false
57 | end
58 |
59 | for _, value in pairs(f) do
60 | if name == value then
61 | return true
62 | end
63 | end
64 | return false
65 | end
66 |
67 |
68 | -- Taken and modified from Soumith's imagenet-multiGPU.torch code
69 | -- https://github.com/soumith/imagenet-multiGPU.torch/blob/master/train.lua
70 | local function sanitize(model)
71 | local list = model:listModules()
72 | for _,val in ipairs(list) do
73 | for name,field in pairs(val) do
74 |
75 | -- remove ffi obj
76 | if torch.type(field) == 'cdata' then
77 | val[name] = nil
78 |
79 | -- remove common obj
80 | elseif is_member(name, common) then
81 | free_table_or_tensor(val, name, field)
82 |
83 | -- remove specific obj
84 | elseif is_member(name, t[val.__typename]) then
85 | free_table_or_tensor(val, name, field)
86 | end
87 | end
88 | end
89 | return model
90 | end
91 |
92 |
93 | return sanitize
94 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/attic/test.lua:
--------------------------------------------------------------------------------
1 | -- Copyright 2015-2016 Carnegie Mellon University
2 | --
3 | -- Licensed under the Apache License, Version 2.0 (the "License");
4 | -- you may not use this file except in compliance with the License.
5 | -- You may obtain a copy of the License at
6 | --
7 | -- http://www.apache.org/licenses/LICENSE-2.0
8 | --
9 | -- Unless required by applicable law or agreed to in writing, software
10 | -- distributed under the License is distributed on an "AS IS" BASIS,
11 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | -- See the License for the specific language governing permissions and
13 | -- limitations under the License.
14 |
15 | testLogger = optim.Logger(paths.concat(opt.save, 'test.log'))
16 |
17 | local batchNumber
18 | local triplet_loss
19 | local timer = torch.Timer()
20 |
21 | function test()
22 | print('==> doing epoch on validation data:')
23 | print("==> online epoch # " .. epoch)
24 |
25 | batchNumber = 0
26 | if opt.cuda then
27 | cutorch.synchronize()
28 | end
29 | timer:reset()
30 |
31 | model:evaluate()
32 | if opt.cuda then
33 | model:cuda()
34 | end
35 |
36 | triplet_loss = 0
37 | for i=1,opt.testEpochSize do
38 | donkeys:addjob(
39 | function()
40 | local inputs, _ = testLoader:sampleTriplet(opt.batchSize)
41 | inputs = inputs:float()
42 | return sendTensor(inputs)
43 | end,
44 | testBatch
45 | )
46 | if i % 5 == 0 then
47 | donkeys:synchronize()
48 | collectgarbage()
49 | end
50 | end
51 |
52 | donkeys:synchronize()
53 | if opt.cuda then
54 | cutorch.synchronize()
55 | end
56 |
57 | triplet_loss = triplet_loss / opt.testEpochSize
58 | testLogger:add{
59 | ['avg triplet loss (test set)'] = triplet_loss
60 | }
61 | print(string.format('Epoch: [%d][TESTING SUMMARY] Total Time(s): %.2f \t'
62 | .. 'average triplet loss (per batch): %.2f',
63 | epoch, timer:time().real, triplet_loss))
64 | print('\n')
65 |
66 |
67 | end
68 |
69 | local inputsCPU = torch.FloatTensor()
70 | local inputs
71 | if opt.cuda then
72 | inputs = torch.CudaTensor()
73 | else
74 | inputs = torch.FloatTensor()
75 | end
76 |
77 | function testBatch(inputsThread)
78 | receiveTensor(inputsThread, inputsCPU)
79 | inputs:resize(inputsCPU:size()):copy(inputsCPU)
80 |
81 | local embeddings = model:forward({
82 | inputs:sub(1,opt.batchSize),
83 | inputs:sub(opt.batchSize+1, 2*opt.batchSize),
84 | inputs:sub(2*opt.batchSize+1, 3*opt.batchSize)})
85 | local err = criterion:forward(embeddings)
86 | if opt.cuda then
87 | cutorch.synchronize()
88 | end
89 |
90 | triplet_loss = triplet_loss + err
91 | print(('Epoch: Testing [%d][%d/%d] Triplet Loss: %.2f'):format(epoch, batchNumber,
92 | opt.testEpochSize, err))
93 | batchNumber = batchNumber + 1
94 | end
95 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/data.lua:
--------------------------------------------------------------------------------
1 | -- Source: https://github.com/soumith/imagenet-multiGPU.torch/blob/master/data.lua
2 | --
3 | -- Copyright (c) 2014, Facebook, Inc.
4 | -- All rights reserved.
5 | --
6 | -- This source code is licensed under the BSD-style license found in the
7 | -- LICENSE file in the root directory of this source tree. An additional grant
8 | -- of patent rights can be found in the PATENTS file in the same directory.
9 | --
10 | local Threads = require 'threads'
11 |
12 | -- This script contains the logic to create K threads for parallel data-loading.
13 | -- For the data-loading details, look at donkey.lua
14 | -------------------------------------------------------------------------------
15 | do -- start K datathreads (donkeys)
16 | if opt.nDonkeys > 0 then
17 | local options = opt -- make an upvalue to serialize over to donkey threads
18 | donkeys = Threads(
19 | opt.nDonkeys,
20 | function()
21 | require 'torch'
22 | end,
23 | function(idx)
24 | opt = options -- pass to all donkeys via upvalue
25 | tid = idx
26 | local seed = opt.manualSeed + idx
27 | torch.manualSeed(seed)
28 | print(string.format('Starting donkey with id: %d seed: %d', tid, seed))
29 | paths.dofile('donkey.lua')
30 | end
31 | );
32 | else -- single threaded data loading. useful for debugging
33 | paths.dofile('donkey.lua')
34 | donkeys = {}
35 | function donkeys:addjob(f1, f2) f2(f1()) end
36 | function donkeys:synchronize() end
37 | end
38 | end
39 |
40 | nClasses = nil
41 | classes = nil
42 | donkeys:addjob(
43 | function() return trainLoader.classes end,
44 | function(c) classes = c end)
45 | donkeys:synchronize()
46 | nClasses = #classes
47 | assert(nClasses, "Failed to get nClasses")
48 | print('nClasses: ', nClasses)
49 | torch.save(paths.concat(opt.save, 'classes.t7'), classes)
50 |
--------------------------------------------------------------------------------
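The "donkeys" object above is a small thread pool with an addjob(producer, consumer) interface, plus a synchronous stub when opt.nDonkeys == 0 so the same call sites work single-threaded. A rough Python analogue of the pattern, assuming nothing beyond the standard library:

from concurrent.futures import ThreadPoolExecutor

class Donkeys:
    """addjob(f1, f2): run producer f1 on a worker, feed its result to f2."""

    def __init__(self, n_workers):
        self.pool = ThreadPoolExecutor(n_workers) if n_workers > 0 else None
        self.pending = []

    def addjob(self, f1, f2):
        if self.pool is None:  # single-threaded fallback, as in data.lua
            f2(f1())
        else:
            self.pending.append((self.pool.submit(f1), f2))

    def synchronize(self):
        for future, f2 in self.pending:
            f2(future.result())  # consumers run here, in submission order
        self.pending = []

--------------------------------------------------------------------------------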
/metrics_evaluation/openface/training/lfw-latest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright 2015-2016 Carnegie Mellon University
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | cd $(dirname $0)/..
18 |
19 | TAG=lfw.nn4.v2
20 | WORK_DIR=2
21 |
22 | LATEST_MODEL=$(ls -t training/work/$WORK_DIR/model_* | \
23 | head -1 | sed 's/.*model_\(.*\)\.t7/\1/')
24 |
25 | printf "\n=== TAG: $TAG\n"
26 | printf "=== WORK_DIR: $WORK_DIR\n"
27 | printf "=== Model: $LATEST_MODEL\n\n"
28 |
29 | set -x -e -u
30 |
31 | ./batch-represent/main.lua \
32 | -outDir evaluation/$TAG.e$LATEST_MODEL \
33 | -model ./training/work/$WORK_DIR/model_$LATEST_MODEL.t7 \
34 | -data data/lfw/dlib.affine.sz:96.OuterEyesAndNose \
35 | -batchSize 100 \
36 | -cuda
37 |
38 | cd evaluation
39 | ./lfw.py --workDir $TAG.e$LATEST_MODEL
40 |
41 | tail $TAG.*/accuracies.txt -n 1
42 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/main.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 |
3 | require 'torch'
4 | require 'optim'
5 |
6 | require 'paths'
7 |
8 | require 'xlua'
9 |
10 | local opts = paths.dofile('opts.lua')
11 |
12 | opt = opts.parse(arg)
13 | print(opt)
14 |
15 | if opt.cuda then
16 | require 'cutorch'
17 | cutorch.setDevice(opt.device)
18 | end
19 |
20 | torch.save(paths.concat(opt.save, 'opts.t7'), opt, 'ascii')
21 | print('Saving everything to: ' .. opt.save)
22 |
23 | torch.setdefaulttensortype('torch.FloatTensor')
24 |
25 | torch.manualSeed(opt.manualSeed)
26 |
27 | paths.dofile('data.lua')
28 | paths.dofile('util.lua')
29 | model = nil
30 | criterion = nil
31 | paths.dofile('train.lua')
32 | paths.dofile('test.lua')
33 |
34 | if opt.peoplePerBatch > nClasses then
35 | print('\n\nError: opt.peoplePerBatch > number of classes. Please decrease this value.')
36 | print(' + opt.peoplePerBatch: ', opt.peoplePerBatch)
37 | print(' + number of classes: ', nClasses)
38 | os.exit(-1)
39 | end
40 |
41 | epoch = opt.epochNumber
42 |
43 | for _=1,opt.nEpochs do
44 | train()
45 | model = saveModel(model)
46 | if opt.testing then
47 | test()
48 | end
49 | epoch = epoch + 1
50 | end
51 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/model.lua:
--------------------------------------------------------------------------------
1 | require 'nn'
2 |
3 | require 'dpnn'
4 |
5 | require 'optim'
6 |
7 | if opt.cuda then
8 | require 'cunn'
9 | if opt.cudnn then
10 | require 'cudnn'
11 | cudnn.benchmark = opt.cudnn_bench
12 | cudnn.fastest = true
13 | cudnn.verbose = false
14 | end
15 | end
16 |
17 | paths.dofile('torch-TripletEmbedding/TripletEmbedding.lua')
18 |
19 |
20 | local M = {}
21 |
22 | function M.modelSetup(continue)
23 | if continue then
24 | model = continue
25 | elseif opt.retrain ~= 'none' then
26 | assert(paths.filep(opt.retrain), 'File not found: ' .. opt.retrain)
27 | print('Loading model from file: ' .. opt.retrain);
28 | model = torch.load(opt.retrain)
29 | print("Using imgDim = ", opt.imgDim)
30 | else
31 | paths.dofile(opt.modelDef)
32 | assert(imgDim, "Model definition must set global variable 'imgDim'")
33 |     assert(imgDim == opt.imgDim, "Model definition's imgDim must match imgDim option.")
34 | model = createModel()
35 | end
36 |
37 | -- First remove any DataParallelTable
38 | if torch.type(model) == 'nn.DataParallelTable' then
39 | model = model:get(1)
40 | end
41 |
42 | criterion = nn.TripletEmbeddingCriterion(opt.alpha)
43 |
44 | if opt.cuda then
45 | model = model:cuda()
46 | if opt.cudnn then
47 | cudnn.convert(model,cudnn)
48 | end
49 | criterion:cuda()
50 | else
51 | model:float()
52 | criterion:float()
53 | end
54 |
55 | optimizeNet(model, opt.imgDim)
56 |
57 | if opt.cuda and opt.nGPU > 1 then
58 | model = makeDataParallel(model, opt.nGPU)
59 | end
60 |
61 | collectgarbage()
62 | return model, criterion
63 | end
64 |
65 | return M
66 |
67 |
68 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/opts.lua:
--------------------------------------------------------------------------------
1 | local M = { }
2 |
3 | -- http://stackoverflow.com/questions/6380820/get-containing-path-of-lua-file
4 | function script_path()
5 | local str = debug.getinfo(2, "S").source:sub(2)
6 | return str:match("(.*/)")
7 | end
8 |
9 | function M.parse(arg)
10 |
11 | local cmd = torch.CmdLine()
12 | cmd:text()
13 | cmd:text('OpenFace')
14 | cmd:text()
15 | cmd:text('Options:')
16 |
17 | ------------ General options --------------------
18 | cmd:option('-cache',
19 | paths.concat(script_path(), 'work'),
20 | 'Directory to cache experiments and data.')
21 | cmd:option('-save', '', 'Directory to save experiment.')
22 | cmd:option('-data',
23 | paths.concat(os.getenv('HOME'), 'openface', 'data',
24 | 'casia-facescrub',
25 | 'dlib-affine-sz:96'),
26 | -- 'dlib-affine-224-split'),
27 | 'Home of dataset. Images separated by identity.')
28 | cmd:option('-manualSeed', 2, 'Manually set RNG seed')
29 | cmd:option('-cuda', true, 'Use cuda.')
30 | cmd:option('-device', 1, 'Cuda device to use.')
31 | cmd:option('-nGPU', 1, 'Number of GPUs to use by default')
32 | cmd:option('-cudnn', true, 'Convert the model to cudnn.')
33 | cmd:option('-cudnn_bench', false, 'Run cudnn to choose fastest option. Increase memory usage')
34 |
35 | ------------- Data options ------------------------
36 | cmd:option('-nDonkeys', 2, 'number of donkeys to initialize (data loading threads)')
37 |
38 | ------------- Training options --------------------
39 | cmd:option('-nEpochs', 1000, 'Number of total epochs to run')
40 | cmd:option('-epochSize', 250, 'Number of batches per epoch')
41 | cmd:option('-epochNumber', 1, 'Manual epoch number (useful on restarts)')
42 | -- GPU memory usage depends on peoplePerBatch and imagesPerPerson.
43 | cmd:option('-peoplePerBatch', 15, 'Number of people to sample in each mini-batch.')
44 | cmd:option('-imagesPerPerson', 20, 'Number of images to sample per person in each mini-batch.')
45 | cmd:option('-testing', true, 'Test with the LFW.')
46 | cmd:option('-testBatchSize', 800, 'Batch size for testing.')
47 | cmd:option('-lfwDir', '../data/lfw/aligned', 'LFW aligned image directory for testing.')
48 |
49 | ---------- Model options ----------------------------------
50 | cmd:option('-retrain', 'none', 'provide path to model to retrain with')
51 |    cmd:option('-modelDef', '../models/openface/nn4.def.lua', 'path to model definition')
52 | cmd:option('-imgDim', 96, 'Image dimension. nn2=224, nn4=96')
53 | cmd:option('-embSize', 128, 'size of embedding from model')
54 | cmd:option('-alpha', 0.2, 'margin in TripletLoss')
55 | cmd:text()
56 |
57 | local opt = cmd:parse(arg or {})
58 | os.execute('mkdir -p ' .. opt.cache)
59 |
60 | if opt.save == '' then
61 | opt.save = paths.concat(opt.cache, os.date("%Y-%m-%d_%H-%M-%S"))
62 | end
63 | os.execute('mkdir -p ' .. opt.save)
64 |
65 | return opt
66 | end
67 |
68 | return M
69 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib >= 1.4, < 1.5.0
2 | numpy >= 1.1, < 2.0
3 | pandas >= 0.13, < 0.18
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/training/test.lua:
--------------------------------------------------------------------------------
1 | -- Copyright 2016 Carnegie Mellon University
2 | --
3 | -- Licensed under the Apache License, Version 2.0 (the "License");
4 | -- you may not use this file except in compliance with the License.
5 | -- You may obtain a copy of the License at
6 | --
7 | -- http://www.apache.org/licenses/LICENSE-2.0
8 | --
9 | -- Unless required by applicable law or agreed to in writing, software
10 | -- distributed under the License is distributed on an "AS IS" BASIS,
11 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | -- See the License for the specific language governing permissions and
13 | -- limitations under the License.
14 |
15 | require 'io'
16 | require 'string'
17 | require 'sys'
18 |
19 | local batchRepresent = "../batch-represent/main.lua"
20 | local lfwEval = "../evaluation/lfw.py"
21 |
22 | local testLogger = optim.Logger(paths.concat(opt.save, 'test.log'))
23 |
24 | local function getLfwAcc(fName)
25 | local f = io.open(fName, 'r')
26 | io.input(f)
27 | local lastLine = nil
28 | while true do
29 | local line = io.read("*line")
30 | if line == nil then break end
31 | lastLine = line
32 | end
33 | io.close()
34 | return tonumber(string.sub(lastLine, 6, 11))
35 | end
36 |
37 | function test()
38 | if opt.cuda then
39 | model = model:float()
40 | end
41 | local latestModelFile = paths.concat(opt.save, 'model_' .. epoch .. '.t7')
42 | local outDir = paths.concat(opt.save, 'lfw-' .. epoch)
43 | print(latestModelFile)
44 | print(outDir)
45 | local cmd = batchRepresent
46 | if opt.cuda then
47 | assert(opt.device ~= nil)
48 | cmd = cmd .. ' -cuda -device ' .. opt.device .. ' '
49 | end
50 | cmd = cmd .. ' -batchSize ' .. opt.testBatchSize ..
51 | ' -model ' .. latestModelFile ..
52 | ' -data ' .. opt.lfwDir ..
53 | ' -outDir ' .. outDir ..
54 | ' -imgDim ' .. opt.imgDim
55 | os.execute(cmd)
56 |
57 | cmd = lfwEval .. ' Epoch' .. epoch .. ' ' .. outDir
58 | os.execute(cmd)
59 |
60 | lfwAcc = getLfwAcc(paths.concat(outDir, "accuracies.txt"))
61 | testLogger:add{
62 | ['lfwAcc'] = lfwAcc
63 | }
64 | end
65 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/annotate-image.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | #
3 | # Copyright 2015-2016 Carnegie Mellon University
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 |
18 | # Example usage: ./util/annotate-image.py /data/path_to_your_image.jpg outerEyesAndNose
19 |
20 | import os
21 | import sys
22 | fileDir = os.path.dirname(os.path.realpath(__file__))
23 | sys.path.append(os.path.join(fileDir, ".."))
24 |
25 | import argparse
26 | import cv2
27 |
28 | from openface.align_dlib import AlignDlib
29 |
30 | modelDir = os.path.join(fileDir, '..', 'models')
31 | dlibModelDir = os.path.join(modelDir, 'dlib')
32 | openfaceModelDir = os.path.join(modelDir, 'openface')
33 |
34 |
35 | def main(args):
36 | align = AlignDlib(args.dlibFacePredictor)
37 |
38 | bgrImg = cv2.imread(args.img)
39 | if bgrImg is None:
40 | raise Exception("Unable to load image: {}".format(args.img))
41 | rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
42 |
43 | bb = align.getLargestFaceBoundingBox(rgbImg)
44 | if bb is None:
45 | raise Exception("Unable to find a face: {}".format(args.img))
46 |
47 | landmarks = align.findLandmarks(rgbImg, bb)
48 | if landmarks is None:
49 | raise Exception("Unable to find landmarks within image: {}".format(args.img))
50 |
51 | bl = (bb.left(), bb.bottom())
52 | tr = (bb.right(), bb.top())
53 | cv2.rectangle(bgrImg, bl, tr, color=(153, 255, 204), thickness=3)
54 | for landmark in landmarks:
55 | cv2.circle(bgrImg, center=landmark, radius=3, color=(102, 204, 255), thickness=-1)
56 | print("Saving image to 'annotated.png'")
57 | cv2.imwrite("annotated.png", bgrImg)
58 |
59 | if __name__ == '__main__':
60 | parser = argparse.ArgumentParser()
61 |
62 | parser.add_argument('img', type=str, help="Input image.")
63 | parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
64 | default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
65 | parser.add_argument('landmarks', type=str,
66 | choices=['outerEyesAndNose', 'innerEyesAndBottomLip'],
67 | help='The landmarks to align to.')
68 | parser.add_argument('--size', type=int, help="Default image size.",
69 | default=96)
70 | args = parser.parse_args()
71 |
72 | main(args)
73 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/check-links.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | from subprocess import Popen, PIPE
4 |
5 | import os
6 | import urllib2
7 | import sys
8 |
9 | utilDir = os.path.dirname(os.path.realpath(__file__))
10 |
11 | ignores = ['localhost', '127.0.0.1', 'your-server', 'docker-ip',
12 | 'ghbtns', 'sphinx-doc']
13 |
14 |
15 | def ignoreURL(url):
16 | for ignore in ignores:
17 | if ignore in url:
18 | return True
19 | return False
20 |
21 | hdr = {
22 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
23 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
24 | 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
25 | 'Accept-Encoding': 'none',
26 | 'Accept-Language': 'en-US,en;q=0.8',
27 | 'Connection': 'keep-alive'
28 | }
29 |
30 | dirs = ['api-docs', 'batch-represent', 'docs', 'evaluation',
31 | 'openface', 'training', 'util']
32 | dirs = [os.path.join(utilDir, '..', d) for d in dirs]
33 | cmd = ['grep', '-I', '--no-filename',
34 | '-o', '\(http\|https\)://[^"\')}`<> ]*',
35 | '-R'] + dirs + \
36 | ['--exclude-dir=_build']
37 |
38 | p = Popen(cmd, stdout=PIPE)
39 | out = p.communicate()[0]
40 | urls = set(out.split())
41 |
42 | badURLs = []
43 | for url in urls:
44 | if not ignoreURL(url):
45 | if url.endswith('.'):
46 | url = url[:-1]
47 | print("+ {}".format(url))
48 | try:
49 | req = urllib2.Request(url, headers=hdr)
50 | resp = urllib2.urlopen(req)
51 | except Exception as e:
52 | print(" + Error:\n\n")
53 | print(e)
54 | print("\n\n")
55 | badURLs.append(url)
56 |
57 | print('\nFound {} bad of {} URLs'.format(len(badURLs), len(urls)))
58 | if len(badURLs) > 0:
59 | print("\n\n=== Bad URLs.\n")
60 | for url in badURLs:
61 | print("+ {}".format(url))
62 | sys.exit(-1)
63 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/detect-outliers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | #
3 | # Detect outlier faces (not of the same person) in a directory
4 | # of aligned images.
5 | # Brandon Amos
6 | # 2016/02/14
7 | #
8 | # Copyright 2015-2016 Carnegie Mellon University
9 | #
10 | # Licensed under the Apache License, Version 2.0 (the "License");
11 | # you may not use this file except in compliance with the License.
12 | # You may obtain a copy of the License at
13 | #
14 | # http://www.apache.org/licenses/LICENSE-2.0
15 | #
16 | # Unless required by applicable law or agreed to in writing, software
17 | # distributed under the License is distributed on an "AS IS" BASIS,
18 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | # See the License for the specific language governing permissions and
20 | # limitations under the License.
21 |
22 | import time
23 |
24 | start = time.time()
25 |
26 | import argparse
27 | import os
28 | import glob
29 |
30 | import numpy as np
31 | np.set_printoptions(precision=2)
32 |
33 | from sklearn.metrics.pairwise import euclidean_distances
34 |
35 | import cv2
36 | import openface
37 |
38 | fileDir = os.path.dirname(os.path.realpath(__file__))
39 | modelDir = os.path.join(fileDir, '..', 'models')
40 | openfaceModelDir = os.path.join(modelDir, 'openface')
41 |
42 |
43 | def main():
44 | parser = argparse.ArgumentParser()
45 |
46 | parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
47 | default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
48 | parser.add_argument('--imgDim', type=int,
49 | help="Default image dimension.", default=96)
50 | parser.add_argument('--cuda', action='store_true')
51 | parser.add_argument('--threshold', type=float, default=0.9)
52 | parser.add_argument('--delete', action='store_true', help='Delete the outliers.')
53 | parser.add_argument('directory')
54 |
55 | args = parser.parse_args()
56 |
57 | net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
58 |
59 | reps = []
60 | paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
61 | print("=== {} ===".format(args.directory))
62 | for imgPath in paths:
63 | if cv2.imread(imgPath) is None:
64 | print("Warning: Skipping bad image file: {}".format(imgPath))
65 | if args.delete:
66 | # Remove the file if it's not a valid image.
67 | os.remove(imgPath)
68 | else:
69 | reps.append(net.forwardPath(imgPath))
70 |
71 | mean = np.mean(reps, axis=0)
72 | dists = euclidean_distances(reps, mean)
73 | outliers = []
74 | for path, dist in zip(paths, dists):
75 | dist = dist.take(0)
76 | if dist > args.threshold:
77 | outliers.append((path, dist))
78 |
79 | print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
80 | for path, dist in outliers:
81 | print(" + {} ({:0.2f})".format(path, dist))
82 | if args.delete:
83 | os.remove(path)
84 |
85 | if __name__ == '__main__':
86 | main()
87 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/email-broken-links.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd $(dirname $0)
4 | git pull --rebase
5 | OUT="$(./check-links.py)"
6 |
7 | if [[ $? != 0 ]]; then
8 | echo "$OUT" | mutt brandon.amos.cs+openface.broken@gmail.com \
9 | -s "Broken OpenFace Links"
10 | fi
11 |
12 | echo "$OUT"
13 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/print-network-table.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 |
3 | require 'torch'
4 | require 'nn'
5 | require 'dpnn'
6 |
7 | torch.setdefaulttensortype('torch.FloatTensor')
8 |
9 | local cmd = torch.CmdLine()
10 | cmd:text()
11 | cmd:text('Print network table.')
12 | cmd:text()
13 | cmd:text('Options:')
14 |
15 | cmd:option('-modelDef', '/home/bamos/repos/openface/models/openface/nn4.small2.def.lua', 'Path to model definition.')
16 | cmd:option('-imgDim', 96, 'Image dimension. nn1=224, nn4=96')
17 | cmd:option('-embSize', 128)
18 | cmd:text()
19 |
20 | opt = cmd:parse(arg or {})
21 |
22 | paths.dofile(opt.modelDef)
23 | local net = createModel()
24 |
25 | local img = torch.randn(1, 3, opt.imgDim, opt.imgDim)
26 | net:forward(img)
27 |
28 | for i=1,#net.modules do
29 | local module = net.modules[i]
30 | local out = torch.typename(module) .. ": "
31 | for _, sz in ipairs(torch.totable(module.output:size())) do
32 | out = out .. sz .. ', '
33 | end
34 | out = string.sub(out, 1, -3)
35 | print(out)
36 | end
37 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/profile-network.lua:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env th
2 | --
3 | -- Outputs the number of parameters in a network for a single image
4 | -- in evaluation mode.
5 |
6 | require 'torch'
7 | require 'nn'
8 | require 'dpnn'
9 |
10 | torch.setdefaulttensortype('torch.FloatTensor')
11 |
12 | local cmd = torch.CmdLine()
13 | cmd:text()
14 | cmd:text('Network Size.')
15 | cmd:text()
16 | cmd:text('Options:')
17 |
18 | cmd:option('-model', './models/openface/nn4.small2.v1.t7', 'Path to model.')
19 | cmd:option('-imgDim', 96, 'Image dimension. nn1=224, nn4=96')
20 | cmd:option('-numIter', 500)
21 | cmd:option('-cuda', false)
22 | cmd:text()
23 |
24 | local opt = cmd:parse(arg or {})
25 | -- print(opt)
26 |
27 | local net = torch.load(opt.model):float()
28 | net:evaluate()
29 |
30 | local img = torch.randn(opt.numIter, 1, 3, opt.imgDim, opt.imgDim)
31 |
32 | if opt.cuda then
33 | require 'cutorch'
34 | require 'cunn'
35 | net = net:cuda()
36 | img = img:cuda()
37 | end
38 |
39 | local times = torch.Tensor(opt.numIter)
40 |
41 | for i=1,opt.numIter do
42 | local timer = torch.Timer()
43 | local _ = net:forward(img[i])
44 | times[i] = 1000.0*timer:time().real
45 | end
46 |
47 | print(string.format('Single image forward pass: %.2f ms +/- %.2f ms',
48 | torch.mean(times), torch.std(times)))
49 |
--------------------------------------------------------------------------------
/metrics_evaluation/openface/util/prune-dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import os
5 | import shutil
6 |
7 | if __name__ == '__main__':
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument('inPlaceDir', type=str,
10 | help="Directory to prune in-place.")
11 | parser.add_argument('--numImagesThreshold', type=int,
12 |                         help="Delete directories with fewer than this many images.",
13 | default=10)
14 | args = parser.parse_args()
15 |
16 | exts = ["jpg", "png"]
17 |
18 | for subdir, dirs, files in os.walk(args.inPlaceDir):
19 | if subdir == args.inPlaceDir:
20 | continue
21 | nImgs = 0
22 | for fName in files:
23 | (imageClass, imageName) = (os.path.basename(subdir), fName)
24 | if any(imageName.lower().endswith("." + ext) for ext in exts):
25 | nImgs += 1
26 | if nImgs < args.numImagesThreshold:
27 | print("Removing {}".format(subdir))
28 | shutil.rmtree(subdir)
29 |
--------------------------------------------------------------------------------
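Since prune-dataset.py deletes directories irreversibly, listing the candidates first can be worthwhile. A minimal dry-run sketch using the same criterion; the ./aligned-images path and the list_prunable helper are illustrative only:

import os

def list_prunable(root, threshold=10, exts=("jpg", "png")):
    # yield the class directories that prune-dataset.py's criterion would delete
    for subdir, dirs, files in os.walk(root):
        if subdir == root:
            continue
        nImgs = sum(1 for f in files
                    if any(f.lower().endswith("." + ext) for ext in exts))
        if nImgs < threshold:
            yield subdir, nImgs

for subdir, nImgs in list_prunable("./aligned-images"):  # example path
    print("would remove {} ({} images)".format(subdir, nImgs))

--------------------------------------------------------------------------------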
/metrics_evaluation/openface/util/tsne.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | import numpy as np
4 | import pandas as pd
5 |
6 | from sklearn.decomposition import PCA
7 | from sklearn.manifold import TSNE
8 |
9 | import matplotlib as mpl
10 | mpl.use('Agg')
11 | import matplotlib.pyplot as plt
12 | import matplotlib.cm as cm
13 | plt.style.use('bmh')
14 |
15 | import argparse
16 |
17 | print("""
18 |
19 | Note: This example assumes that `name i` corresponds to `label i`
20 | in `labels.csv`.
21 |
22 | """)
23 |
24 | parser = argparse.ArgumentParser()
25 | parser.add_argument('workDir', type=str)
26 | parser.add_argument('--names', type=str, nargs='+', required=True)
27 | args = parser.parse_args()
28 |
29 | y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
30 | X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()
31 |
32 | target_names = np.array(args.names)
33 | colors = cm.Dark2(np.linspace(0, 1, len(target_names)))
34 |
35 | X_pca = PCA(n_components=50).fit_transform(X)
36 | tsne = TSNE(n_components=2, init='random', random_state=0)
37 | X_r = tsne.fit_transform(X_pca)
38 |
39 | for c, i, target_name in zip(colors,
40 |                              list(range(1, len(target_names) + 1)),
41 |                              target_names):
42 |     plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
43 |                 c=c, label=target_name)
44 | plt.legend()
45 |
46 | out = "{}/tsne.pdf".format(args.workDir)
47 | plt.savefig(out)
48 | print("Saved to: {}".format(out))
49 |
--------------------------------------------------------------------------------
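tsne.py targets the Python 2 / old-pandas era: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0. On a newer environment only the two loading lines need changing; a sketch, assuming the same workDir layout:

import pandas as pd

# equivalent of the as_matrix() calls on pandas >= 1.0
workDir = "./work"  # placeholder; the script takes this as a CLI argument
y = pd.read_csv("{}/labels.csv".format(workDir)).to_numpy()[:, 0]
X = pd.read_csv("{}/reps.csv".format(workDir)).to_numpy()

--------------------------------------------------------------------------------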
/metrics_evaluation/ori-0-combined-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.246608031;
3 | mean_res_qualityscore: 6.97911210045;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-0-combined-id-presearving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 1.3392485338;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-0-combined-recloss-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.246608031;
3 | mean_res_qualityscore: 15.1308818381;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-0-combined-recloss-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 1.21177047822;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-0-combined-recloss-nolabelloss-brisque.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_ori_qualityscore: 17.221051952444885;
3 | mean_res_qualityscore: 14.16845982231859;
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-0-combined-recloss-nolabelloss-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 1.00594174578;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-6000k-id-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 1.3456444627016644;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-6000k-id-recloss-nolabel-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 0.9900117110796017;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/ori-6000k-id-recloss-preserving.txt:
--------------------------------------------------------------------------------
1 | image num: 400;
2 | mean_dis: 1.0958210347622064;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/test-id.csv:
--------------------------------------------------------------------------------
1 | image_name,squared l2 distance
2 | 00054,0.177196354615123
3 | 00166,0.24036735137527088
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/test-id.txt:
--------------------------------------------------------------------------------
1 | image num: 2;
2 | mean_dis: 0.208781852995;
3 |
--------------------------------------------------------------------------------
/metrics_evaluation/test.csv:
--------------------------------------------------------------------------------
1 | image_name,ori_qualityscore,res_qualityscore
2 | 00054,16.369865321273636,13.901883143515278
3 | 00166,2.6981779699086417,10.05267461805326
4 |
--------------------------------------------------------------------------------
/metrics_evaluation/test.txt:
--------------------------------------------------------------------------------
1 | image num: 2;
2 | mean_ori_qualityscore: 9.53402164559;
3 | mean_res_qualityscore: 11.9772788808;
4 |
--------------------------------------------------------------------------------
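The .txt summaries in this folder are per-column means over the matching .csv files; for example, test.txt's numbers follow from test.csv. A sketch, assuming it is run from the repository root:

import pandas as pd

# reproduce test.txt from test.csv: each summary line is a column mean
df = pd.read_csv("metrics_evaluation/test.csv")
print("image num: {};".format(len(df)))
print("mean_ori_qualityscore: {};".format(df["ori_qualityscore"].mean()))
print("mean_res_qualityscore: {};".format(df["res_qualityscore"].mean()))

--------------------------------------------------------------------------------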
/requirements-pip.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.13.3
2 | scipy>=1.0.0
3 | tensorflow-gpu>=1.6.0
4 | moviepy>=0.2.3.2
5 | Pillow>=3.1.1
6 | lmdb>=0.93
7 | opencv-python>=3.4.0.12
8 | cryptography>=2.1.4
9 | h5py>=2.7.1
10 | six>=1.11.0
11 | torch==0.4.1
12 | torchvision
--------------------------------------------------------------------------------
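These pins target the TensorFlow 1.x / PyTorch 0.4 era. A quick, hypothetical sanity check that the era-sensitive packages resolved as pinned:

# not part of the repo; prints the versions the pins above should satisfy
import tensorflow as tf
import torch

print("tensorflow:", tf.__version__)  # pinned >= 1.6.0
print("torch:", torch.__version__)    # pinned == 0.4.1

--------------------------------------------------------------------------------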
/selectimages.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import shutil
4 |
5 | # sample 400 unique indices from the 70,000 FFHQ images and copy the
6 | # corresponding files, 100 apiece, into the four selection folders
7 | src_dir = "../datasets/ffhq_128x128/img"
8 | dest_dirs = ["../datasets/ffhq_selected_1/",
9 |              "../datasets/ffhq_selected_2/",
10 |              "../datasets/ffhq_selected_3/",
11 |              "../datasets/ffhq_selected_4/"]
12 |
13 | indices = random.sample(range(70000), 400)
14 | for i, index in enumerate(indices):
15 |     shutil.copy(os.path.join(src_dir, "%05d.png" % index),
16 |                 dest_dirs[i // 100])
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import config
2 | import identity_prediction.face_model
3 |
--------------------------------------------------------------------------------
/utils/plot_beauty_distribution.py:
--------------------------------------------------------------------------------
1 | import os
2 | import csv
3 | import numpy as np
4 | import argparse
5 | import matplotlib.pyplot as plt
6 |
7 | # initialize parser arguments
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument('--csv', '-csv', help='path to csv file', default='../All_Ratings.csv', type=str)
10 | parser.add_argument('--density', '-density', help='configure plot density', default=0.05, type=float)
11 | args = parser.parse_args()
12 |
13 | # initiate list of beauty rates means
14 | beauty_rates_mean = []
15 |
16 | # read raters csv file
17 | with open(args.csv, 'r') as csvfile:
18 |
19 |     # Dictionary to load images from csv
20 |     # key: image name
21 |     # value: list of 60 beauty rates from raters
22 |     csv_dict = {}
23 |
24 |     raw_dataset = csv.reader(csvfile, delimiter=',', quotechar='|')
25 |
26 |     # fill the dictionary
27 |     for i, row in enumerate(raw_dataset):
28 |         row = ','.join(row)
29 |         row = row.split(',')
30 |
31 |         # create list of rates for each image
32 |         if row[1] in csv_dict:
33 |             csv_dict[row[1]][0].append(float(row[2]))
34 |         else:
35 |             csv_dict[row[1]] = [[float(row[2])]]
36 |
37 |     # collapse the dict into a list of per-image mean beauty rates (ratings range over [1,5])
38 |     for key, value in csv_dict.items():
39 |         beauty_rates_mean.append(np.mean(np.asarray(value, dtype=np.float32)))
40 |
41 | # create a x axis with the given density and zeros as y axis to be filled next
42 | x_values = np.arange(0.0, 5.0, args.density)
43 | y_values = [0]*len(x_values)
44 |
45 | # for each mean, increment the bin it falls into (clamped so a rating of 5.0 stays in range)
46 | for val in beauty_rates_mean:
47 |     y_values[min(int(round(val / args.density)), len(y_values) - 1)] += 1
48 |
49 | # plot the results
50 | plt.plot(x_values, y_values)
51 | plt.xlabel('beauty rates')
52 | plt.ylabel('number of subjects')
53 | plt.title('Beauty Rates Distribution')
54 | plt.grid(True)
55 | plt.savefig(os.path.splitext(os.path.basename(args.csv))[0] + ".png")
56 |
57 |
--------------------------------------------------------------------------------
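For reference, numpy can compute the same distribution without manual bin bookkeeping: np.histogram with explicit edges handles the top bin inclusively, so no index clamping is needed. A minimal sketch; the uniform random ratings are placeholders for the per-image means read from All_Ratings.csv, and the 0.05 bin width matches the script's default density:

import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt

# placeholder data; the script above computes these means from the ratings csv
beauty_rates_mean = np.random.uniform(1.0, 5.0, size=500)

counts, edges = np.histogram(beauty_rates_mean, bins=np.linspace(0.0, 5.0, 101))
plt.plot(edges[:-1], counts)
plt.xlabel('beauty rates')
plt.ylabel('number of subjects')
plt.title('Beauty Rates Distribution')
plt.grid(True)
plt.savefig('beauty_distribution.png')

--------------------------------------------------------------------------------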
/utils/transform_images.py:
--------------------------------------------------------------------------------
1 | import os
2 | from PIL import Image
3 |
4 | # select dataset folder to check and destination folder to put output images in
5 | path = '../datasets/beauty_dataset/img/beauty_dataset'
6 | dest_path = '../datasets/beauty_dataset/img/beauty_dataset_scaled'
7 |
8 | # destination resolution
9 | dest_res = 2 ** 8
10 | os.makedirs(dest_path, exist_ok=True)  # create the output folder if missing
11 | for i, file in enumerate(os.listdir(path)):
12 |
13 |     # open image using PIL to detect resolution.
14 |     img = Image.open(os.path.join(path, file))
15 |     width, height = img.size
16 |
17 |     # pad image if necessary
18 |     if width != height:
19 |         # create a new black picture of size (max(height,width), max(height,width))
20 |         padded_size = (max(height, width), max(height, width))
21 |         black_img = Image.new("RGB", padded_size)
22 |         # define origin to paste the image on the newly created image
23 |         location_x = int((padded_size[0] - width) / 2)
24 |         location_y = int((padded_size[1] - height) / 2)
25 |         # paste the image
26 |         black_img.paste(img, (location_x, location_y))
27 |         img = black_img
28 |
29 |     # resize image to destination resolution and save in dest folder
30 |     img = img.resize((dest_res, dest_res), Image.ANTIALIAS)
31 |     img.save(os.path.join(dest_path, file), quality=95)
32 |
33 |     if i % 100 == 0:
34 |         print("saved {}/{} images".format(i, len(os.listdir(path))))
35 |
--------------------------------------------------------------------------------
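On recent Pillow versions, ImageOps.pad collapses the pad-to-square and resize steps above into one call. A sketch with a hypothetical input path; note that ImageOps.pad is newer than the Pillow pinned in requirements-pip.txt:

from PIL import Image, ImageOps

# pad to a black square and resize to 256x256 in one step
img = Image.open("input.png")  # hypothetical input file
out = ImageOps.pad(img, (256, 256), color="black")
out.save("output.png", quality=95)

--------------------------------------------------------------------------------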