├── facenet
│ ├── __init__.py
│ ├── contributed
│ │ ├── __init__.py
│ │ ├── real_time_face_recognition.py
│ │ ├── batch_represent.py
│ │ └── face.py
│ ├── src
│ │ ├── align
│ │ │ ├── __init__.py
│ │ │ ├── det1.npy
│ │ │ ├── det2.npy
│ │ │ └── det3.npy
│ │ ├── generative
│ │ │ ├── __init__.py
│ │ │ └── models
│ │ │   ├── __init__.py
│ │ │   ├── vae_base.py
│ │ │   ├── dfc_vae.py
│ │ │   ├── dfc_vae_large.py
│ │ │   └── dfc_vae_resnet.py
│ │ ├── __init__.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── dummy.py
│ │ │ └── squeezenet.py
│ │ ├── download_and_extract.py
│ │ ├── lfw.py
│ │ ├── decode_msceleb_dataset.py
│ │ ├── freeze_graph.py
│ │ ├── compare.py
│ │ └── calculate_filtering_metrics.py
│ ├── tmp
│ │ ├── __init__.py
│ │ ├── test1.py
│ │ ├── pilatus800.jpg
│ │ ├── select_triplets_test.py
│ │ ├── dataset_read_speed.py
│ │ ├── cacd2000_split_identities.py
│ │ ├── rename_casia_directories.py
│ │ ├── test_align.py
│ │ ├── visualize_vggface.py
│ │ ├── invariance_test.txt
│ │ ├── mtcnn.py
│ │ ├── visualize_vgg_model.py
│ │ ├── vggverydeep19.py
│ │ ├── nn4_small2_v1.py
│ │ ├── random_test.py
│ │ ├── funnel_dataset.py
│ │ ├── nn4.py
│ │ ├── nn2.py
│ │ ├── nn3.py
│ │ ├── mtcnn_test.py
│ │ ├── vggface16.py
│ │ ├── mtcnn_test_pnet_dbg.py
│ │ ├── download_vgg_face_dataset.py
│ │ ├── visualize.py
│ │ └── seed_test.py
│ ├── data
│ │ ├── images
│ │ │ ├── Anthony_Hopkins_0001.jpg
│ │ │ ├── Anthony_Hopkins_0002.jpg
│ │ │ └── test_aligned
│ │ │   └── revision_info.txt
│ │ ├── learning_rate_retrain_tripletloss.txt
│ │ ├── learning_rate_schedule_classifier_casia.txt
│ │ ├── learning_rate_schedule_classifier_msceleb.txt
│ │ └── learning_rate_schedule_classifier_vggface2.txt
│ ├── requirements.txt
│ ├── .project
│ ├── .pydevproject
│ ├── .travis.yml
│ ├── LICENSE.md
│ ├── .gitignore
│ ├── test
│ │ ├── triplet_loss_test.py
│ │ ├── batch_norm_test.py
│ │ └── center_loss_test.py
│ └── README.md
├── Results_Images
│ ├── Similarity_Matching
│ │ ├── README.md
│ │ ├── 1046.png
│ │ ├── 1057.png
│ │ ├── 1076.png
│ │ ├── 1086.png
│ │ ├── 1098.png
│ │ ├── 1108.png
│ │ ├── 844.png
│ │ ├── 844_t.png
│ │ ├── 848.png
│ │ ├── 848_t.png
│ │ ├── 849.png
│ │ ├── 849_t.png
│ │ ├── 854.png
│ │ ├── 854_t.png
│ │ ├── 856.png
│ │ ├── 856_t.png
│ │ ├── 857.png
│ │ ├── 857_t.png
│ │ ├── 940.png
│ │ ├── 940_t.png
│ │ ├── 948.png
│ │ ├── 948_t.png
│ │ ├── 952.png
│ │ ├── 952_t.png
│ │ ├── 996.png
│ │ ├── 996_t.png
│ │ ├── 1046_t.png
│ │ ├── 1057_t.png
│ │ ├── 1076_t.png
│ │ ├── 1086_t.png
│ │ ├── 1098_t.png
│ │ └── 1108_t.png
│ ├── Face_recognition_given_dataset
│ │ ├── README.md
│ │ ├── 1.jpg
│ │ ├── 2.jpg
│ │ ├── 1a.jpg
│ │ ├── 2a.jpg
│ │ ├── 119#_7e2ebcc1-d116-4622-9b87-518fe3c57c3f1262.jpg
│ │ ├── Vakilya#_76a6356b-604f-4592-9b90-bf66f709b6871257.jpg
│ │ ├── CR NO 132 #_a1ee9974-34ae-42f8-a914-05cb2f4c08971251.jpg
│ │ └── CR NO 134 #_8fd14b88-18a4-4621-abf4-02b1a8399d871251.jpg
│ ├── Face_recognition_Web_scraped_dataset
│ │ ├── README.md
│ │ ├── bum.jpg
│ │ ├── ntr.jpg
│ │ ├── pr.jpg
│ │ ├── pr1.jpg
│ │ ├── rk.jpg
│ │ ├── rk1.jpg
│ │ ├── si.jpg
│ │ ├── si1.jpg
│ │ ├── .DS_Store
│ │ ├── bum1.jpg
│ │ ├── dhan.jpg
│ │ ├── dhan1.jpg
│ │ ├── dhoni.jpg
│ │ ├── kohli.jpg
│ │ ├── ntr1.jpg
│ │ ├── rohit.jpg
│ │ ├── dhoni1.jpg
│ │ ├── kohli1.jpg
│ │ ├── rohit,jpg.jpg
│ │ ├── rohit1.jpg.jpg
│ │ ├── mahesh-babu.jpg
│ │ └── mahesh-babu1.jpg
│ ├── Face_recognition_different_hardware
│ │ ├── README.md
│ │ ├── test
│ │ │ ├── README.md
│ │ │ ├── chirag
│ │ │ │ ├── README.md
│ │ │ │ ├── 2.jpg
│ │ │ │ ├── 2019-11-17-102234.jpg
│ │ │ │ └── Photo on 17-11-19 at 10.03 AM #3.jpg
│ │ │ └── shubham
│ │ │   ├── README.md
│ │ │   ├── 1.jpg
│ │ │   └── 2019-11-17-091132.jpg
│ │ └── train
│ │   ├── README.md
│ │   ├── chirag
│ │   │ ├── README.md
│ │   │ └── 20191117_095752.jpg
│ │   └── shubham
│ │     ├── README.md
│ │     └── 20191117_090952.jpg
│ └── README.md
├── Best_Match
│ ├── Best_match.xlsx
│ ├── 12_3#_04ffa69e-a6fd-4e66-8acc-1b6d59e94a401241.png
│ ├── 7_t01#_33e844f0-7990-4d1f-9414-bb717cb8d43f55.png
│ ├── 0_t(3)#_77fa9c14-3f4b-46ba-987b-18755f5ebf0d1392.png
│ ├── 11_t01#_c9601533-cac4-4113-9b5a-e8cbf46ae37e242.png
│ ├── 126_t103#_d8a07334-ca27-4a57-a794-c00b3777d339242.png
│ ├── 12_t01#_e6dcbdf3-9438-4cf1-b987-effa63a19f5c1107.png
│ ├── 140_t109#_0519e591-a848-420d-acca-1c38abfaadb9242.png
│ ├── 144_t11#_582ed4cb-7815-43d9-b2ec-a8b7feef79cf898.png
│ ├── 421_119#_c2a6ac0a-0d1a-4829-8918-0c5b65f7016f1262.png
│ ├── 49_160#_021c49d6-f877-4226-8438-86829ce364071247.png
│ ├── 571_CR#_6efa8103-929c-4782-9ccf-693c5beab1b11405.png
│ ├── 571_t27#_59ae09a0-5455-4bd5-8709-2f1ce260cbc1829.png
│ ├── 576_16#_99747776-1530-483f-b3ff-93bb0b8905c2311.png
│ ├── 96_Photo#_0a6dce98-dd56-4f10-85e7-c2000c92be10805.png
│ ├── 0_Bhavana#_b74a8507-a919-444a-a449-d52cb97fb6af1043.png
│ ├── 11_MISSING#_14c2d2fa-d2ab-4cbb-b71e-8209b5f86b93848.png
│ ├── 123_t10222#_7281792c-aac8-4357-a39b-2a0b79a38956242.png
│ ├── 144_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png
│ ├── 15_108-8#_51e7ba1a-e27f-4793-bc71-9377a1a053d41249.png
│ ├── 15_t01-19#_07884214-6dac-437b-b180-5a83f37be6241357.png
│ ├── 16_t01-19#_89bbb83e-11f5-45b5-9b8f-289b779e24c51279.png
│ ├── 1848_375#_91a655e9-c817-4cb7-91a9-ee008b98ec8f1445.png
│ ├── 21_t02-18#_432f5efd-8fe9-4861-8e49-3bc625507eab1658.png
│ ├── 24_Photo#_aa08bebc-fdf7-41b2-aafe-b946d0e28a2a1339.png
│ ├── 24_t02-19#_5397d5f7-1b26-4d0b-8131-750e9f5047511342.png
│ ├── 31_80-2018#_109e92ff-f3d8-4bc0-9dc4-7e9ff964991b306.png
│ ├── 445_495-1#_a9263173-9955-40e4-8ce0-9d65953306d51350.png
│ ├── 49_t05-19#_16fe71db-cbbe-453a-b7c8-f1ad92f89a8f1392.png
│ ├── 576_t27-18#_45a25d12-97e0-4435-8aa1-2b0b6398fcd746.png
│ ├── 747_t50-18#_fc3a0297-98b4-45f1-a485-e05932227b48798.png
│ ├── 94_t1-18#_1033c10c-0500-4985-939e-bd69389e3cc81430.png
│ ├── 140_SADAPPA#_d1ed4517-5d1f-48ae-9bf1-b37327ada1931255.png
│ ├── 157_t11-2019#_1c1b27b0-71f9-490e-9af9-8a5f8436853f1642.png
│ ├── 16_SHRIKANT#_ba648bb5-d4e7-4a68-a582-df7b7b18cab51243.png
│ ├── 1804_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png
│ ├── 1848_tUDR 14#_f252bd0e-9d44-4f66-93e9-0bb0a816c656901.png
│ ├── 28_t02-2019#_abef75a2-3f6a-4fec-bac2-48d29faf3b97867.png
│ ├── 31_t03-018#_87bf80d0-e382-40b1-abcd-7c789499e3b11364.png
│ ├── 414_t20-19#_d6ba591f-b129-4f7e-a37f-851be1e417e21286.png
│ ├── 627_IMG_0001#_7e175f5f-a2c6-4115-b0f8-a8f1c8594ca2347.png
│ ├── 627_t34-18#_4b24f772-172c-49d0-b151-0e0c8f2758921422.png
│ ├── 747_88-2018#_36c98580-165b-40c5-8c17-d4c5bd2a9d1a337.png
│ ├── 7_234-2018 #_5fe4b5c0-cf9a-4317-bf33-f40554ad9d7b338.png
│ ├── 96_t1-19-1#_ff85327b-8833-4413-ae98-0fff81c7ff8a1384.png
│ ├── 1028_338-2018#_a510420d-55a9-4ba9-a205-5bd5dd4b0d492144.png
│ ├── 123_Sabasteen #_fe544455-2046-4d21-93ab-e19bfc9807351411.png
│ ├── 126_CR NO 471-#_38e502b5-5815-4c0b-b7f3-bacc8721fc761914.png
│ ├── 157_Scan10005#_edbb2810-0fc8-404b-8410-984e425b481d2171.png
│ ├── 1821_Srinivasa#_f7995090-cc94-4800-a30c-c5630ac18563351.png
│ ├── 1857_IMG_0004#_b26565de-0197-45c9-b119-dfb83ed1ed791259.png
│ ├── 21_CR No 81-2#_283c6998-2878-49c8-b3be-016433e368761103.png
│ ├── 28_386-18 Mis#_50263c9f-6656-4f84-807d-da02e42165c41413.png
│ ├── 414_A7-Ramanji#_8955b910-36c3-4dd9-9f94-0124ed65cdf9356.png
│ ├── 421_t20180130_1#_857abf88-b505-4040-b91d-424c017d8f92276.png
│ ├── 94_Hanamant K#_7bf2fa69-f76b-4ef6-8f20-a8449565cb341255.png
│ ├── 1028_tIMG-201804#_b048a3f8-975b-44b7-8ac6-f68218fae7ec954.png
│ ├── 1804_tUDR 01 pho#_9126ea1a-f179-485c-b7d8-246b7be247a11089.png
│ ├── 1821_tUDR 06-201#_25bb1b73-1ad1-42d2-a4c6-02fbb39004481873.png
│ ├── 1857_tUDR 15-201#_add97930-e47d-4729-bfa1-8bf254ddb2561432.png
│ └── 445_t20180520_0#_7a450b67-7eef-4241-9043-fa24152c003b1430.png
└── IPH-KSP-Deep_Learners-Face_Recognition.pptx
/facenet/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/facenet/contributed/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/facenet/src/align/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/facenet/src/generative/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/facenet/src/generative/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/facenet/src/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 |
--------------------------------------------------------------------------------
/facenet/tmp/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 |
--------------------------------------------------------------------------------
/facenet/tmp/test1.py:
--------------------------------------------------------------------------------
1 | print('Hello world')
2 |
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/facenet/src/models/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/train/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/chirag/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/shubham/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/train/chirag/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/train/shubham/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Best_Match/Best_match.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/Best_match.xlsx
--------------------------------------------------------------------------------
/facenet/src/align/det1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/src/align/det1.npy
--------------------------------------------------------------------------------
/facenet/src/align/det2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/src/align/det2.npy
--------------------------------------------------------------------------------
/facenet/src/align/det3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/src/align/det3.npy
--------------------------------------------------------------------------------
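
A note on the det1.npy, det2.npy, and det3.npy files above: in the upstream davidsandberg/facenet project this tree mirrors, they hold the pre-trained weights for the three MTCNN face-detection stages (PNet, RNet, ONet) used by the alignment code. A minimal inspection sketch, assuming the upstream convention that each file is a Python-2 pickle of a {layer name: parameter arrays} dict (the path is hypothetical; adjust to where the repo is checked out):

    import numpy as np

    # encoding='latin1' and allow_pickle=True are needed because the
    # dicts were pickled under Python 2.
    weights = np.load('facenet/src/align/det1.npy',
                      encoding='latin1', allow_pickle=True).item()
    for layer, params in weights.items():
        # Print each layer's parameter names and array shapes.
        print(layer, {name: arr.shape for name, arr in params.items()})
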
/facenet/tmp/pilatus800.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/tmp/pilatus800.jpg
--------------------------------------------------------------------------------
/IPH-KSP-Deep_Learners-Face_Recognition.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/IPH-KSP-Deep_Learners-Face_Recognition.pptx
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1046.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1057.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1076.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1076.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1086.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1086.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1098.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1098.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1108.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1108.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/844.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/844.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/844_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/844_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/848.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/848.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/848_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/848_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/849.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/849.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/849_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/849_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/854.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/854.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/854_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/854_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/856.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/856.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/856_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/856_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/857.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/857.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/857_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/857_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/940.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/940.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/940_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/940_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/948.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/948.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/948_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/948_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/952.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/952.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/952_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/952_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/996.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/996.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/996_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/996_t.png
--------------------------------------------------------------------------------
/facenet/data/images/Anthony_Hopkins_0001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/data/images/Anthony_Hopkins_0001.jpg
--------------------------------------------------------------------------------
/facenet/data/images/Anthony_Hopkins_0002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/facenet/data/images/Anthony_Hopkins_0002.jpg
--------------------------------------------------------------------------------
/facenet/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow==1.7
2 | scipy
3 | scikit-learn
4 | opencv-python
5 | h5py
6 | matplotlib
7 | Pillow
8 | requests
9 | psutil
10 |
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1046_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1046_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1057_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1057_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1076_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1076_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1086_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1086_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1098_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1098_t.png
--------------------------------------------------------------------------------
/Results_Images/Similarity_Matching/1108_t.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Similarity_Matching/1108_t.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/2.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/1a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/1a.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/2a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/2a.jpg
--------------------------------------------------------------------------------
/facenet/data/learning_rate_retrain_tripletloss.txt:
--------------------------------------------------------------------------------
1 | # Learning rate schedule
2 | # Maps an epoch number to a learning rate
3 | 0: 0.1
4 | 300: 0.01
5 | 400: 0.001
6 | 1000: 0.0001
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/bum.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/bum.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/ntr.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/ntr.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/pr.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/pr.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/pr1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/pr1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/rk.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/rk.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/rk1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/rk1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/si.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/si.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/si1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/si1.jpg
--------------------------------------------------------------------------------
/facenet/data/learning_rate_schedule_classifier_casia.txt:
--------------------------------------------------------------------------------
1 | # Learning rate schedule
2 | # Maps an epoch number to a learning rate
3 | 0: 0.05
4 | 60: 0.005
5 | 80: 0.0005
6 | 91: -1
7 |
--------------------------------------------------------------------------------
/facenet/data/learning_rate_schedule_classifier_msceleb.txt:
--------------------------------------------------------------------------------
1 | # Learning rate schedule
2 | # Maps an epoch number to a learning rate
3 | 0: 0.1
4 | 150: 0.01
5 | 180: 0.001
6 | 251: 0.0001
--------------------------------------------------------------------------------
/facenet/data/learning_rate_schedule_classifier_vggface2.txt:
--------------------------------------------------------------------------------
1 | # Learning rate schedule
2 | # Maps an epoch number to a learning rate
3 | 0: 0.05
4 | 100: 0.005
5 | 200: 0.0005
6 | 276: -1
--------------------------------------------------------------------------------
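
The four learning-rate files above share one plain-text format: '#' starts a comment, and every other line maps a starting epoch to the learning rate that takes effect from that epoch onward (the casia and vggface2 schedules end in -1, which upstream facenet treats as a signal to stop training). A minimal parser sketch under those assumptions; the function name echoes the upstream helper, but this version is illustrative rather than the repo's actual code:

    def get_learning_rate_from_file(filename, epoch):
        # Illustrative parser for the facenet/data/learning_rate_* files.
        # Assumes entries are sorted by ascending start epoch.
        learning_rate = None
        with open(filename) as f:
            for line in f:
                line = line.split('#', 1)[0].strip()  # drop comments/blanks
                if not line:
                    continue
                start_epoch, rate = line.split(':', 1)
                if int(start_epoch) <= epoch:
                    learning_rate = float(rate)  # latest threshold wins
                else:
                    break
        return learning_rate  # a value <= 0 (e.g. -1) means "stop training"

For learning_rate_schedule_classifier_vggface2.txt, for instance, this yields 0.05 for epochs 0-99, 0.005 for 100-199, 0.0005 for 200-275, and -1 from epoch 276 onward.
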
/Best_Match/12_3#_04ffa69e-a6fd-4e66-8acc-1b6d59e94a401241.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/12_3#_04ffa69e-a6fd-4e66-8acc-1b6d59e94a401241.png
--------------------------------------------------------------------------------
/Best_Match/7_t01#_33e844f0-7990-4d1f-9414-bb717cb8d43f55.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/7_t01#_33e844f0-7990-4d1f-9414-bb717cb8d43f55.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/.DS_Store
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/bum1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/bum1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/dhan.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/dhan.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/dhan1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/dhan1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/dhoni.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/dhoni.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/kohli.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/kohli.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/ntr1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/ntr1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/rohit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/rohit.jpg
--------------------------------------------------------------------------------
/Best_Match/0_t(3)#_77fa9c14-3f4b-46ba-987b-18755f5ebf0d1392.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/0_t(3)#_77fa9c14-3f4b-46ba-987b-18755f5ebf0d1392.png
--------------------------------------------------------------------------------
/Best_Match/11_t01#_c9601533-cac4-4113-9b5a-e8cbf46ae37e242.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/11_t01#_c9601533-cac4-4113-9b5a-e8cbf46ae37e242.png
--------------------------------------------------------------------------------
/Best_Match/126_t103#_d8a07334-ca27-4a57-a794-c00b3777d339242.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/126_t103#_d8a07334-ca27-4a57-a794-c00b3777d339242.png
--------------------------------------------------------------------------------
/Best_Match/12_t01#_e6dcbdf3-9438-4cf1-b987-effa63a19f5c1107.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/12_t01#_e6dcbdf3-9438-4cf1-b987-effa63a19f5c1107.png
--------------------------------------------------------------------------------
/Best_Match/140_t109#_0519e591-a848-420d-acca-1c38abfaadb9242.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/140_t109#_0519e591-a848-420d-acca-1c38abfaadb9242.png
--------------------------------------------------------------------------------
/Best_Match/144_t11#_582ed4cb-7815-43d9-b2ec-a8b7feef79cf898.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/144_t11#_582ed4cb-7815-43d9-b2ec-a8b7feef79cf898.png
--------------------------------------------------------------------------------
/Best_Match/421_119#_c2a6ac0a-0d1a-4829-8918-0c5b65f7016f1262.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/421_119#_c2a6ac0a-0d1a-4829-8918-0c5b65f7016f1262.png
--------------------------------------------------------------------------------
/Best_Match/49_160#_021c49d6-f877-4226-8438-86829ce364071247.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/49_160#_021c49d6-f877-4226-8438-86829ce364071247.png
--------------------------------------------------------------------------------
/Best_Match/571_CR#_6efa8103-929c-4782-9ccf-693c5beab1b11405.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/571_CR#_6efa8103-929c-4782-9ccf-693c5beab1b11405.png
--------------------------------------------------------------------------------
/Best_Match/571_t27#_59ae09a0-5455-4bd5-8709-2f1ce260cbc1829.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/571_t27#_59ae09a0-5455-4bd5-8709-2f1ce260cbc1829.png
--------------------------------------------------------------------------------
/Best_Match/576_16#_99747776-1530-483f-b3ff-93bb0b8905c2311.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/576_16#_99747776-1530-483f-b3ff-93bb0b8905c2311.png
--------------------------------------------------------------------------------
/Best_Match/96_Photo#_0a6dce98-dd56-4f10-85e7-c2000c92be10805.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/96_Photo#_0a6dce98-dd56-4f10-85e7-c2000c92be10805.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/dhoni1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/dhoni1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/kohli1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/kohli1.jpg
--------------------------------------------------------------------------------
/Best_Match/0_Bhavana#_b74a8507-a919-444a-a449-d52cb97fb6af1043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/0_Bhavana#_b74a8507-a919-444a-a449-d52cb97fb6af1043.png
--------------------------------------------------------------------------------
/Best_Match/11_MISSING#_14c2d2fa-d2ab-4cbb-b71e-8209b5f86b93848.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/11_MISSING#_14c2d2fa-d2ab-4cbb-b71e-8209b5f86b93848.png
--------------------------------------------------------------------------------
/Best_Match/123_t10222#_7281792c-aac8-4357-a39b-2a0b79a38956242.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/123_t10222#_7281792c-aac8-4357-a39b-2a0b79a38956242.png
--------------------------------------------------------------------------------
/Best_Match/144_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/144_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png
--------------------------------------------------------------------------------
/Best_Match/15_108-8#_51e7ba1a-e27f-4793-bc71-9377a1a053d41249.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/15_108-8#_51e7ba1a-e27f-4793-bc71-9377a1a053d41249.png
--------------------------------------------------------------------------------
/Best_Match/15_t01-19#_07884214-6dac-437b-b180-5a83f37be6241357.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/15_t01-19#_07884214-6dac-437b-b180-5a83f37be6241357.png
--------------------------------------------------------------------------------
/Best_Match/16_t01-19#_89bbb83e-11f5-45b5-9b8f-289b779e24c51279.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/16_t01-19#_89bbb83e-11f5-45b5-9b8f-289b779e24c51279.png
--------------------------------------------------------------------------------
/Best_Match/1848_375#_91a655e9-c817-4cb7-91a9-ee008b98ec8f1445.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1848_375#_91a655e9-c817-4cb7-91a9-ee008b98ec8f1445.png
--------------------------------------------------------------------------------
/Best_Match/21_t02-18#_432f5efd-8fe9-4861-8e49-3bc625507eab1658.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/21_t02-18#_432f5efd-8fe9-4861-8e49-3bc625507eab1658.png
--------------------------------------------------------------------------------
/Best_Match/24_Photo#_aa08bebc-fdf7-41b2-aafe-b946d0e28a2a1339.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/24_Photo#_aa08bebc-fdf7-41b2-aafe-b946d0e28a2a1339.png
--------------------------------------------------------------------------------
/Best_Match/24_t02-19#_5397d5f7-1b26-4d0b-8131-750e9f5047511342.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/24_t02-19#_5397d5f7-1b26-4d0b-8131-750e9f5047511342.png
--------------------------------------------------------------------------------
/Best_Match/31_80-2018#_109e92ff-f3d8-4bc0-9dc4-7e9ff964991b306.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/31_80-2018#_109e92ff-f3d8-4bc0-9dc4-7e9ff964991b306.png
--------------------------------------------------------------------------------
/Best_Match/445_495-1#_a9263173-9955-40e4-8ce0-9d65953306d51350.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/445_495-1#_a9263173-9955-40e4-8ce0-9d65953306d51350.png
--------------------------------------------------------------------------------
/Best_Match/49_t05-19#_16fe71db-cbbe-453a-b7c8-f1ad92f89a8f1392.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/49_t05-19#_16fe71db-cbbe-453a-b7c8-f1ad92f89a8f1392.png
--------------------------------------------------------------------------------
/Best_Match/576_t27-18#_45a25d12-97e0-4435-8aa1-2b0b6398fcd746.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/576_t27-18#_45a25d12-97e0-4435-8aa1-2b0b6398fcd746.png
--------------------------------------------------------------------------------
/Best_Match/747_t50-18#_fc3a0297-98b4-45f1-a485-e05932227b48798.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/747_t50-18#_fc3a0297-98b4-45f1-a485-e05932227b48798.png
--------------------------------------------------------------------------------
/Best_Match/94_t1-18#_1033c10c-0500-4985-939e-bd69389e3cc81430.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/94_t1-18#_1033c10c-0500-4985-939e-bd69389e3cc81430.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/rohit,jpg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/rohit,jpg.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/rohit1.jpg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/rohit1.jpg.jpg
--------------------------------------------------------------------------------
/Best_Match/140_SADAPPA#_d1ed4517-5d1f-48ae-9bf1-b37327ada1931255.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/140_SADAPPA#_d1ed4517-5d1f-48ae-9bf1-b37327ada1931255.png
--------------------------------------------------------------------------------
/Best_Match/157_t11-2019#_1c1b27b0-71f9-490e-9af9-8a5f8436853f1642.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/157_t11-2019#_1c1b27b0-71f9-490e-9af9-8a5f8436853f1642.png
--------------------------------------------------------------------------------
/Best_Match/16_SHRIKANT#_ba648bb5-d4e7-4a68-a582-df7b7b18cab51243.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/16_SHRIKANT#_ba648bb5-d4e7-4a68-a582-df7b7b18cab51243.png
--------------------------------------------------------------------------------
/Best_Match/1804_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1804_108-6#_e715567b-5db2-4439-aba8-5252df12b2661249.png
--------------------------------------------------------------------------------
/Best_Match/1848_tUDR 14#_f252bd0e-9d44-4f66-93e9-0bb0a816c656901.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1848_tUDR 14#_f252bd0e-9d44-4f66-93e9-0bb0a816c656901.png
--------------------------------------------------------------------------------
/Best_Match/28_t02-2019#_abef75a2-3f6a-4fec-bac2-48d29faf3b97867.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/28_t02-2019#_abef75a2-3f6a-4fec-bac2-48d29faf3b97867.png
--------------------------------------------------------------------------------
/Best_Match/31_t03-018#_87bf80d0-e382-40b1-abcd-7c789499e3b11364.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/31_t03-018#_87bf80d0-e382-40b1-abcd-7c789499e3b11364.png
--------------------------------------------------------------------------------
/Best_Match/414_t20-19#_d6ba591f-b129-4f7e-a37f-851be1e417e21286.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/414_t20-19#_d6ba591f-b129-4f7e-a37f-851be1e417e21286.png
--------------------------------------------------------------------------------
/Best_Match/627_IMG_0001#_7e175f5f-a2c6-4115-b0f8-a8f1c8594ca2347.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/627_IMG_0001#_7e175f5f-a2c6-4115-b0f8-a8f1c8594ca2347.png
--------------------------------------------------------------------------------
/Best_Match/627_t34-18#_4b24f772-172c-49d0-b151-0e0c8f2758921422.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/627_t34-18#_4b24f772-172c-49d0-b151-0e0c8f2758921422.png
--------------------------------------------------------------------------------
/Best_Match/747_88-2018#_36c98580-165b-40c5-8c17-d4c5bd2a9d1a337.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/747_88-2018#_36c98580-165b-40c5-8c17-d4c5bd2a9d1a337.png
--------------------------------------------------------------------------------
/Best_Match/7_234-2018 #_5fe4b5c0-cf9a-4317-bf33-f40554ad9d7b338.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/7_234-2018 #_5fe4b5c0-cf9a-4317-bf33-f40554ad9d7b338.png
--------------------------------------------------------------------------------
/Best_Match/96_t1-19-1#_ff85327b-8833-4413-ae98-0fff81c7ff8a1384.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/96_t1-19-1#_ff85327b-8833-4413-ae98-0fff81c7ff8a1384.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/mahesh-babu.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/mahesh-babu.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_Web_scraped_dataset/mahesh-babu1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_Web_scraped_dataset/mahesh-babu1.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/chirag/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/test/chirag/2.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/shubham/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/test/shubham/1.jpg
--------------------------------------------------------------------------------
/Best_Match/1028_338-2018#_a510420d-55a9-4ba9-a205-5bd5dd4b0d492144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1028_338-2018#_a510420d-55a9-4ba9-a205-5bd5dd4b0d492144.png
--------------------------------------------------------------------------------
/Best_Match/123_Sabasteen #_fe544455-2046-4d21-93ab-e19bfc9807351411.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/123_Sabasteen #_fe544455-2046-4d21-93ab-e19bfc9807351411.png
--------------------------------------------------------------------------------
/Best_Match/126_CR NO 471-#_38e502b5-5815-4c0b-b7f3-bacc8721fc761914.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/126_CR NO 471-#_38e502b5-5815-4c0b-b7f3-bacc8721fc761914.png
--------------------------------------------------------------------------------
/Best_Match/157_Scan10005#_edbb2810-0fc8-404b-8410-984e425b481d2171.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/157_Scan10005#_edbb2810-0fc8-404b-8410-984e425b481d2171.png
--------------------------------------------------------------------------------
/Best_Match/1821_Srinivasa#_f7995090-cc94-4800-a30c-c5630ac18563351.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1821_Srinivasa#_f7995090-cc94-4800-a30c-c5630ac18563351.png
--------------------------------------------------------------------------------
/Best_Match/1857_IMG_0004#_b26565de-0197-45c9-b119-dfb83ed1ed791259.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1857_IMG_0004#_b26565de-0197-45c9-b119-dfb83ed1ed791259.png
--------------------------------------------------------------------------------
/Best_Match/21_CR No 81-2#_283c6998-2878-49c8-b3be-016433e368761103.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/21_CR No 81-2#_283c6998-2878-49c8-b3be-016433e368761103.png
--------------------------------------------------------------------------------
/Best_Match/28_386-18 Mis#_50263c9f-6656-4f84-807d-da02e42165c41413.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/28_386-18 Mis#_50263c9f-6656-4f84-807d-da02e42165c41413.png
--------------------------------------------------------------------------------
/Best_Match/414_A7-Ramanji#_8955b910-36c3-4dd9-9f94-0124ed65cdf9356.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/414_A7-Ramanji#_8955b910-36c3-4dd9-9f94-0124ed65cdf9356.png
--------------------------------------------------------------------------------
/Best_Match/421_t20180130_1#_857abf88-b505-4040-b91d-424c017d8f92276.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/421_t20180130_1#_857abf88-b505-4040-b91d-424c017d8f92276.png
--------------------------------------------------------------------------------
/Best_Match/94_Hanamant K#_7bf2fa69-f76b-4ef6-8f20-a8449565cb341255.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/94_Hanamant K#_7bf2fa69-f76b-4ef6-8f20-a8449565cb341255.png
--------------------------------------------------------------------------------
/Best_Match/1028_tIMG-201804#_b048a3f8-975b-44b7-8ac6-f68218fae7ec954.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1028_tIMG-201804#_b048a3f8-975b-44b7-8ac6-f68218fae7ec954.png
--------------------------------------------------------------------------------
/Best_Match/1804_tUDR 01 pho#_9126ea1a-f179-485c-b7d8-246b7be247a11089.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1804_tUDR 01 pho#_9126ea1a-f179-485c-b7d8-246b7be247a11089.png
--------------------------------------------------------------------------------
/Best_Match/1821_tUDR 06-201#_25bb1b73-1ad1-42d2-a4c6-02fbb39004481873.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1821_tUDR 06-201#_25bb1b73-1ad1-42d2-a4c6-02fbb39004481873.png
--------------------------------------------------------------------------------
/Best_Match/1857_tUDR 15-201#_add97930-e47d-4729-bfa1-8bf254ddb2561432.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/1857_tUDR 15-201#_add97930-e47d-4729-bfa1-8bf254ddb2561432.png
--------------------------------------------------------------------------------
/Best_Match/445_t20180520_0#_7a450b67-7eef-4241-9043-fa24152c003b1430.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Best_Match/445_t20180520_0#_7a450b67-7eef-4241-9043-fa24152c003b1430.png
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/chirag/2019-11-17-102234.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/test/chirag/2019-11-17-102234.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/train/chirag/20191117_095752.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/train/chirag/20191117_095752.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/train/shubham/20191117_090952.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/train/shubham/20191117_090952.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/shubham/2019-11-17-091132.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/test/shubham/2019-11-17-091132.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/119#_7e2ebcc1-d116-4622-9b87-518fe3c57c3f1262.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/119#_7e2ebcc1-d116-4622-9b87-518fe3c57c3f1262.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_different_hardware/test/chirag/Photo on 17-11-19 at 10.03 AM #3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_different_hardware/test/chirag/Photo on 17-11-19 at 10.03 AM #3.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/Vakilya#_76a6356b-604f-4592-9b90-bf66f709b6871257.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/Vakilya#_76a6356b-604f-4592-9b90-bf66f709b6871257.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/CR NO 132 #_a1ee9974-34ae-42f8-a914-05cb2f4c08971251.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/CR NO 132 #_a1ee9974-34ae-42f8-a914-05cb2f4c08971251.jpg
--------------------------------------------------------------------------------
/Results_Images/Face_recognition_given_dataset/CR NO 134 #_8fd14b88-18a4-4621-abf4-02b1a8399d871251.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/KSP-IPH-2019-table30/master/Results_Images/Face_recognition_given_dataset/CR NO 134 #_8fd14b88-18a4-4621-abf4-02b1a8399d871251.jpg
--------------------------------------------------------------------------------
/Results_Images/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Similarity Matching
3 |
4 | This folder contains a small set of images that are similar to each other; we have not uploaded the full set because of GitHub's repository size limits.
5 |
6 | ## Face Recognition on the Given Dataset
7 |
8 | This folder contains the images that the model recognized as the same person.
9 |
--------------------------------------------------------------------------------
/facenet/.project:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <projectDescription>
3 | <name>facenet</name>
4 | <comment></comment>
5 | <projects>
6 | </projects>
7 | <buildSpec>
8 | <buildCommand>
9 | <name>org.python.pydev.PyDevBuilder</name>
10 | <arguments>
11 | </arguments>
12 | </buildCommand>
13 | </buildSpec>
14 | <natures>
15 | <nature>org.python.pydev.pythonNature</nature>
16 | </natures>
17 | </projectDescription>
18 |
--------------------------------------------------------------------------------
/facenet/.pydevproject:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
2 | <?eclipse-pydev version="1.0"?><pydev_project>
3 | <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
4 | <path>/${PROJECT_DIR_NAME}</path>
5 | <path>/${PROJECT_DIR_NAME}/src</path>
6 | </pydev_pathproperty>
7 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
8 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
9 | </pydev_project>
10 |
--------------------------------------------------------------------------------
/facenet/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | sudo: required
3 | python:
4 | - "2.7"
5 | - "3.5"
6 | # command to install dependencies
7 | install:
8 | # numpy not using wheel to avoid problem described in
9 | # https://github.com/tensorflow/tensorflow/issues/6968
10 | - pip install --no-binary numpy --upgrade numpy
11 | - pip install -r requirements.txt
12 | # command to run tests
13 | script:
14 | - export PYTHONPATH=./src:./src/models:./src/align
15 | - python -m unittest discover -s test --pattern=*.py 1>&2
16 | dist: trusty
17 |
18 |
--------------------------------------------------------------------------------
/facenet/tmp/select_triplets_test.py:
--------------------------------------------------------------------------------
1 | import facenet
2 | import numpy as np
3 | import tensorflow as tf
4 |
5 | FLAGS = tf.app.flags.FLAGS
6 |
7 | tf.app.flags.DEFINE_integer('people_per_batch', 45,
8 | """Number of people per batch.""")
9 | tf.app.flags.DEFINE_integer('alpha', 0.2,
10 | """Positive to negative triplet distance margin.""")
11 |
12 |
13 | embeddings = np.zeros((1800,128))
14 |
15 | np.random.seed(123)
16 | for ix in range(embeddings.shape[0]):
17 | for jx in range(embeddings.shape[1]):
18 | rnd = 1.0*np.random.randint(1,2**32)/2**32
19 | embeddings[ix][jx] = rnd
20 |
21 |
22 | emb_array = embeddings
23 | image_data = np.zeros((1800,96,96,3))
24 |
25 |
26 | num_per_class = [40 for i in range(45)]
27 |
28 |
29 | np.random.seed(123)
30 | apn, nrof_random_negs, nrof_triplets = facenet.select_triplets(emb_array, num_per_class, image_data)
31 |
--------------------------------------------------------------------------------
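Note: facenet.select_triplets implements the semi-hard mining rule from the FaceNet paper, which the synthetic embeddings above exercise: for each anchor-positive pair, only negatives whose squared distance to the anchor beats the positive distance by less than the margin alpha are candidates. A minimal numpy sketch of that criterion (the helper name is illustrative; the real function also masks out same-identity images and picks one qualifying negative at random):

import numpy as np

def semi_hard_negative_candidates(emb, a, p, alpha=0.2):
    # Indices n with ||emb[a]-emb[n]||^2 - ||emb[a]-emb[p]||^2 < alpha.
    # Same-identity indices (including a and p themselves) should be masked by the caller.
    pos_dist_sqr = np.sum(np.square(emb[a] - emb[p]))
    neg_dists_sqr = np.sum(np.square(emb - emb[a]), axis=1)
    return np.where(neg_dists_sqr - pos_dist_sqr < alpha)[0]

emb = np.random.RandomState(123).uniform(size=(10, 128))
print(semi_hard_negative_candidates(emb, a=0, p=1))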
/facenet/tmp/dataset_read_speed.py:
--------------------------------------------------------------------------------
1 | import facenet
2 | import argparse
3 | import sys
4 | import time
5 | import numpy as np
6 |
7 | def main(args):
8 |
9 | dataset = facenet.get_dataset(args.dir)
10 | paths, _ = facenet.get_image_paths_and_labels(dataset)
11 | t = np.zeros((len(paths)))
12 | x = time.time()
13 | for i, path in enumerate(paths):
14 | start_time = time.time()
15 | with open(path, mode='rb') as f:
16 | _ = f.read()
17 | duration = time.time() - start_time
18 | t[i] = duration
19 | if i % 1000 == 0 or i==len(paths)-1:
20 | print('File %d/%d Total time: %.2f Avg: %.3f Std: %.3f' % (i, len(paths), time.time()-x, np.mean(t[0:i+1])*1000, np.std(t[0:i+1])*1000))
21 |
22 |
23 | def parse_arguments(argv):
24 | parser = argparse.ArgumentParser()
25 | parser.add_argument('dir', type=str,
26 | help='Directory with dataset to test')
27 | return parser.parse_args(argv)
28 |
29 |
30 | if __name__ == '__main__':
31 | main(parse_arguments(sys.argv[1:]))
32 |
--------------------------------------------------------------------------------
/facenet/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 David Sandberg
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/facenet/tmp/cacd2000_split_identities.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import argparse
3 | import os
4 | import sys
5 |
6 | def main(args):
7 | src_path_exp = os.path.expanduser(args.src_path)
8 | dst_path_exp = os.path.expanduser(args.dst_path)
9 | if not os.path.exists(dst_path_exp):
10 | os.makedirs(dst_path_exp)
11 | files = os.listdir(src_path_exp)
12 | for f in files:
13 | file_name = '.'.join(f.split('.')[0:-1])
14 | x = file_name.split('_')
15 | dir_name = '_'.join(x[1:-1])
16 | class_dst_path = os.path.join(dst_path_exp, dir_name)
17 | if not os.path.exists(class_dst_path):
18 | os.makedirs(class_dst_path)
19 | src_file_path = os.path.join(src_path_exp, f)
20 | dst_file = os.path.join(class_dst_path, f)
21 | print('%s -> %s' % (src_file_path, dst_file))
22 | shutil.copyfile(src_file_path, dst_file)
23 |
24 | def parse_arguments(argv):
25 | parser = argparse.ArgumentParser()
26 |
27 | parser.add_argument('src_path', type=str, help='Path to the source directory.')
28 | parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
29 | return parser.parse_args(argv)
30 |
31 | if __name__ == '__main__':
32 | main(parse_arguments(sys.argv[1:]))
33 |
--------------------------------------------------------------------------------
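The identity directory is recovered purely from the filename, taking everything between the first and the last underscore. A worked example, assuming a CACD2000-style name of the form <age>_<identity>_<index>.jpg (the sample name is illustrative):

f = '14_Aaron_Johnson_0001.jpg'
file_name = '.'.join(f.split('.')[0:-1])         # '14_Aaron_Johnson_0001'
dir_name = '_'.join(file_name.split('_')[1:-1])  # 'Aaron_Johnson'
print(dir_name)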
/facenet/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *,cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # dotenv
80 | .env
81 |
82 | # virtualenv
83 | .venv
84 | venv/
85 | ENV/
86 |
87 | # Spyder project settings
88 | .spyderproject
89 |
90 | # Rope project settings
91 | .ropeproject
92 |
93 | # PyCharm project setting
94 | .idea
95 |
--------------------------------------------------------------------------------
/facenet/tmp/rename_casia_directories.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import argparse
3 | import os
4 | import sys
5 |
6 | def main(args):
7 |
8 | identity_map = {}
9 | with open(os.path.expanduser(args.map_file_name), "r") as f:
10 | for line in f:
11 | fields = line.split(' ')
12 | dir_name = fields[0]
13 | class_name = fields[1].replace('\n', '').replace('\r', '')
14 | if class_name not in identity_map.values():
15 | identity_map[dir_name] = class_name
16 | else:
17 | print('Duplicate class names: %s' % class_name)
18 |
19 | dataset_path_exp = os.path.expanduser(args.dataset_path)
20 | dirs = os.listdir(dataset_path_exp)
21 | for f in dirs:
22 | old_path = os.path.join(dataset_path_exp, f)
23 | if f in identity_map:
24 | new_path = os.path.join(dataset_path_exp, identity_map[f])
25 | if os.path.isdir(old_path):
26 | print('Renaming %s to %s' % (old_path, new_path))
27 | shutil.move(old_path, new_path)
28 |
29 | def parse_arguments(argv):
30 | parser = argparse.ArgumentParser()
31 |
32 | parser.add_argument('map_file_name', type=str, help='Name of the text file that contains the directory to class name mappings.')
33 | parser.add_argument('dataset_path', type=str, help='Path to the dataset directory.')
34 | return parser.parse_args(argv)
35 |
36 | if __name__ == '__main__':
37 | main(parse_arguments(sys.argv[1:]))
38 |
--------------------------------------------------------------------------------
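The map file is expected to hold one "<directory> <class name>" pair per line, whitespace-separated with no spaces inside names. A minimal sketch of preparing such a file and invoking the script, with hypothetical paths and assuming tmp/ is on the import path:

with open('casia_map.txt', 'w') as f:
    f.write('0000045 Some_Name\n')   # illustrative directory-to-class mapping
    f.write('0000099 Other_Name\n')

import rename_casia_directories
rename_casia_directories.main(
    rename_casia_directories.parse_arguments(['casia_map.txt', '/data/casia']))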
/facenet/tmp/test_align.py:
--------------------------------------------------------------------------------
1 | import facenet
2 | import os
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 |
7 | def main():
8 | image_size = 96
9 | old_dataset = '/home/david/datasets/facescrub/fs_aligned_new_oean/'
10 | new_dataset = '/home/david/datasets/facescrub/facescrub_110_96/'
11 | eq = 0
12 | num = 0
13 | l = []
14 | dataset = facenet.get_dataset(old_dataset)
15 | for cls in dataset:
16 | new_class_dir = os.path.join(new_dataset, cls.name)
17 | for image_path in cls.image_paths:
18 | try:
19 | filename = os.path.splitext(os.path.split(image_path)[1])[0]
20 | new_filename = os.path.join(new_class_dir, filename+'.png')
21 | #print(image_path)
22 | if os.path.exists(new_filename):
23 | a = facenet.load_data([image_path, new_filename], False, False, image_size, do_prewhiten=False)
24 | if np.array_equal(a[0], a[1]):
25 | eq+=1
26 | num+=1
27 | err = np.sum(np.square(np.subtract(a[0], a[1])))
28 | #print(err)
29 | l.append(err)
30 | if err>2000:
31 | fig = plt.figure(1)
32 | p1 = fig.add_subplot(121)
33 | p1.imshow(a[0])
34 | p2 = fig.add_subplot(122)
35 | p2.imshow(a[1])
36 | print('%6.1f: %s\n' % (err, new_filename))
37 | pass
38 | else:
39 | pass
40 | #print('File not found: %s' % new_filename)
41 | except:
42 | pass
43 | if __name__ == '__main__':
44 | main()
45 |
--------------------------------------------------------------------------------
/facenet/data/images/test_aligned/revision_info.txt:
--------------------------------------------------------------------------------
1 | arguments: facenet/src/align/align_dataset_mtcnn.py facenet/data/images/test_raw facenet/data/images/test_aligned --image_size 160
2 | --------------------
3 | tensorflow version: 1.7.0
4 | --------------------
5 | git hash: b'096ed770f163957c1e56efa7feeb194773920f6e'
6 | --------------------
7 | b"diff --git a/src/align/align_dataset_mtcnn.py b/src/align/align_dataset_mtcnn.py\nindex 7d5e735..887ad9f 100644\n--- a/src/align/align_dataset_mtcnn.py\n+++ b/src/align/align_dataset_mtcnn.py\n@@ -32,7 +32,7 @@ import argparse\n import tensorflow as tf\n import numpy as np\n import facenet\n-import align.detect_face\n+import detect_face\n import random\n from time import sleep\n \n@@ -52,7 +52,7 @@ def main(args):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n- pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n+ pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n \n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n@@ -93,7 +93,7 @@ def main(args):\n img = facenet.to_rgb(img)\n img = img[:,:,0:3]\n \n- bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n+ bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n nrof_faces = bounding_boxes.shape[0]\n if nrof_faces>0:\n det = bounding_boxes[:,0:4]"
--------------------------------------------------------------------------------
/facenet/tmp/visualize_vggface.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import matplotlib.pyplot as plt
4 | import tmp.vggface16
5 |
6 | def main():
7 |
8 | sess = tf.Session()
9 |
10 | t_input = tf.placeholder(np.float32, name='input') # define the input tensor
11 | image_mean = 117.0
12 | t_preprocessed = tf.expand_dims(t_input-image_mean, 0)
13 |
14 | # Build the inference graph
15 | nodes = tmp.vggface16.load('data/vgg_face.mat', t_preprocessed)
16 |
17 | img_noise = np.random.uniform(size=(224,224,3)) + 117.0
18 |
19 | # Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
20 | # to have non-zero gradients for features with negative initial activations.
21 | layer = 'conv5_3'
22 | channel = 140 # picking some feature channel to visualize
23 | img = render_naive(sess, t_input, nodes[layer][:,:,:,channel], img_noise)
24 | showarray(img)
25 |
26 | def showarray(a):
27 | a = np.uint8(np.clip(a, 0, 1)*255)
28 | plt.imshow(a)
29 | plt.show()
30 |
31 | def visstd(a, s=0.1):
32 | '''Normalize the image range for visualization'''
33 | return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
34 |
35 | def render_naive(sess, t_input, t_obj, img0, iter_n=20, step=1.0):
36 | t_score = tf.reduce_mean(t_obj) # defining the optimization objective
37 | t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
38 |
39 | img = img0.copy()
40 | for _ in range(iter_n):
41 | g, _ = sess.run([t_grad, t_score], {t_input:img})
42 | # normalizing the gradient, so the same step size should work
43 | g /= g.std()+1e-8 # for different layers and networks
44 | img += g*step
45 | return visstd(img)
46 |
47 |
48 | if __name__ == '__main__':
49 | main()
50 |
--------------------------------------------------------------------------------
/facenet/src/download_and_extract.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import zipfile
3 | import os
4 |
5 | model_dict = {
6 | 'lfw-subset': '1B5BQUZuJO-paxdN8UclxeHAR1WnR_Tzi',
7 | '20170131-234652': '0B5MzpY9kBtDVSGM0RmVET2EwVEk',
8 | '20170216-091149': '0B5MzpY9kBtDVTGZjcWkzT3pldDA',
9 | '20170512-110547': '0B5MzpY9kBtDVZ2RpVDYwWmxoSUk',
10 | '20180402-114759': '1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-'
11 | }
12 |
13 | def download_and_extract_file(model_name, data_dir):
14 | file_id = model_dict[model_name]
15 | destination = os.path.join(data_dir, model_name + '.zip')
16 | if not os.path.exists(destination):
17 | print('Downloading file to %s' % destination)
18 | download_file_from_google_drive(file_id, destination)
19 | with zipfile.ZipFile(destination, 'r') as zip_ref:
20 | print('Extracting file to %s' % data_dir)
21 | zip_ref.extractall(data_dir)
22 |
23 | def download_file_from_google_drive(file_id, destination):
24 |
25 | URL = "https://drive.google.com/uc?export=download"
26 |
27 | session = requests.Session()
28 |
29 | response = session.get(URL, params = { 'id' : file_id }, stream = True)
30 | token = get_confirm_token(response)
31 |
32 | if token:
33 | params = { 'id' : file_id, 'confirm' : token }
34 | response = session.get(URL, params = params, stream = True)
35 |
36 | save_response_content(response, destination)
37 |
38 | def get_confirm_token(response):
39 | for key, value in response.cookies.items():
40 | if key.startswith('download_warning'):
41 | return value
42 |
43 | return None
44 |
45 | def save_response_content(response, destination):
46 | CHUNK_SIZE = 32768
47 |
48 | with open(destination, "wb") as f:
49 | for chunk in response.iter_content(CHUNK_SIZE):
50 | if chunk: # filter out keep-alive new chunks
51 | f.write(chunk)
52 |
--------------------------------------------------------------------------------
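A minimal usage sketch, assuming src/ is on PYTHONPATH; '20180402-114759' is one of the pretrained-model keys in model_dict above, and 'models' is an arbitrary local target directory:

import os
import download_and_extract

if not os.path.isdir('models'):
    os.makedirs('models')
download_and_extract.download_and_extract_file('20180402-114759', 'models')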
/facenet/tmp/invariance_test.txt:
--------------------------------------------------------------------------------
1 | Accuracy: 0.860±0.009
2 | Accuracy: 0.861±0.008
3 | Accuracy: 0.870±0.011
4 | Accuracy: 0.885±0.012
5 | Accuracy: 0.896±0.013
6 | Accuracy: 0.899±0.015
7 | Accuracy: 0.887±0.011
8 | Accuracy: 0.885±0.011
9 | Accuracy: 0.890±0.011
10 | Accuracy: 0.910±0.014
11 | Accuracy: 0.918±0.012
12 | Accuracy: 0.904±0.013
13 | Accuracy: 0.895±0.012
14 | Accuracy: 0.884±0.018
15 | Accuracy: 0.891±0.012
16 | Accuracy: 0.891±0.008
17 | Accuracy: 0.889±0.009
18 | Accuracy: 0.871±0.012
19 | Accuracy: 0.844±0.012
20 | Accuracy: 0.835±0.016
21 | Accuracy: 0.823±0.015
22 | Hoffset: Accuracy:
23 | -30.0000 0.8600
24 | -27.0000 0.8607
25 | -24.0000 0.8697
26 | -21.0000 0.8848
27 | -18.0000 0.8963
28 | -15.0000 0.8992
29 | -12.0000 0.8865
30 | -9.0000 0.8853
31 | -6.0000 0.8900
32 | -3.0000 0.9097
33 | 0.0000 0.9182
34 | 3.0000 0.9040
35 | 6.0000 0.8953
36 | 9.0000 0.8843
37 | 12.0000 0.8905
38 | 15.0000 0.8913
39 | 18.0000 0.8888
40 | 21.0000 0.8708
41 | 24.0000 0.8440
42 | 27.0000 0.8348
43 | 30.0000 0.8233
44 | Accuracy: 0.823±0.014
45 | Accuracy: 0.800±0.010
46 | Accuracy: 0.800±0.015
47 | Accuracy: 0.818±0.018
48 | Accuracy: 0.852±0.012
49 | Accuracy: 0.864±0.011
50 | Accuracy: 0.844±0.016
51 | Accuracy: 0.851±0.014
52 | Accuracy: 0.875±0.012
53 | Accuracy: 0.898±0.010
54 | Accuracy: 0.918±0.012
55 | Accuracy: 0.886±0.015
56 | Accuracy: 0.849±0.012
57 | Accuracy: 0.812±0.015
58 | Accuracy: 0.780±0.012
59 | Accuracy: 0.787±0.012
60 | Accuracy: 0.755±0.016
61 | Accuracy: 0.709±0.010
62 | Accuracy: 0.676±0.017
63 | Accuracy: 0.653±0.011
64 | Accuracy: 0.648±0.016
65 | Voffset: Accuracy:
66 | -30.0000 0.8230
67 | -27.0000 0.7997
68 | -24.0000 0.7995
69 | -21.0000 0.8183
70 | -18.0000 0.8523
71 | -15.0000 0.8638
72 | -12.0000 0.8442
73 | -9.0000 0.8507
74 | -6.0000 0.8755
75 | -3.0000 0.8982
76 | 0.0000 0.9182
77 | 3.0000 0.8862
78 | 6.0000 0.8493
79 | 9.0000 0.8118
80 | 12.0000 0.7803
81 | 15.0000 0.7868
82 | 18.0000 0.7548
83 | 21.0000 0.7093
84 | 24.0000 0.6763
85 | 27.0000 0.6533
86 | 30.0000 0.6483
87 |
--------------------------------------------------------------------------------
/facenet/src/generative/models/vae_base.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2017 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | """Base class for variational autoencoders containing an encoder and a decoder
24 | """
25 |
26 | from __future__ import absolute_import
27 | from __future__ import division
28 | from __future__ import print_function
29 |
30 | import tensorflow as tf
31 |
32 | class Vae(object):
33 |
34 | def __init__(self, latent_variable_dim, image_size):
35 | self.latent_variable_dim = latent_variable_dim
36 | self.image_size = image_size
37 | self.batch_norm_params = {
38 | # Decay for the moving averages.
39 | 'decay': 0.995,
40 | # epsilon to prevent 0s in variance.
41 | 'epsilon': 0.001,
42 | # force in-place updates of mean and variance estimates
43 | 'updates_collections': None,
44 | # Moving averages ends up in the trainable variables collection
45 | 'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
46 | }
47 |
48 | def encoder(self, images, is_training):
49 | # Must be overridden in implementation classes
50 | raise NotImplementedError
51 |
52 | def decoder(self, latent_var, is_training):
53 | # Must be overridden in implementation classes
54 | raise NotImplementedError
55 |
56 | def get_image_size(self):
57 | return self.image_size
58 |
--------------------------------------------------------------------------------
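Subclasses supply the encoder and decoder graphs; the encoder is expected to return the latent mean and log-variance, as in the dfc_vae variants. A minimal, untrained sketch using tf.contrib.slim (layer sizes are illustrative; assumes vae_base is importable):

import tensorflow as tf
import tensorflow.contrib.slim as slim
from vae_base import Vae

class TinyVae(Vae):
    def encoder(self, images, is_training):
        net = slim.fully_connected(slim.flatten(images), 256, scope='enc_fc1')
        latent_mean = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, scope='enc_mean')
        latent_log_var = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, scope='enc_log_var')
        return latent_mean, latent_log_var

    def decoder(self, latent_var, is_training):
        s = self.get_image_size()
        net = slim.fully_connected(latent_var, s * s * 3, activation_fn=None, scope='dec_fc1')
        return tf.reshape(net, [-1, s, s, 3])

vae = TinyVae(latent_variable_dim=100, image_size=64)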
/facenet/test/triplet_loss_test.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | import unittest
24 | import tensorflow as tf
25 | import numpy as np
26 | import facenet
27 |
28 | class DemuxEmbeddingsTest(unittest.TestCase):
29 |
30 | def testDemuxEmbeddings(self):
31 | batch_size = 3*12
32 | embedding_size = 16
33 | alpha = 0.2
34 |
35 | with tf.Graph().as_default():
36 |
37 | embeddings = tf.placeholder(tf.float64, shape=(batch_size, embedding_size), name='embeddings')
38 | anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,embedding_size]), 3, 1)
39 | triplet_loss = facenet.triplet_loss(anchor, positive, negative, alpha)
40 |
41 | sess = tf.Session()
42 | with sess.as_default():
43 | np.random.seed(seed=666)
44 | emb = np.random.uniform(size=(batch_size, embedding_size))
45 | tf_triplet_loss = sess.run(triplet_loss, feed_dict={embeddings:emb})
46 |
47 | pos_dist_sqr = np.sum(np.square(emb[0::3,:]-emb[1::3,:]),1)
48 | neg_dist_sqr = np.sum(np.square(emb[0::3,:]-emb[2::3,:]),1)
49 | np_triplet_loss = np.mean(np.maximum(0.0, pos_dist_sqr - neg_dist_sqr + alpha))
50 |
51 | np.testing.assert_almost_equal(tf_triplet_loss, np_triplet_loss, decimal=5, err_msg='Triplet loss is incorrect')
52 |
53 | if __name__ == "__main__":
54 | unittest.main()
55 |
--------------------------------------------------------------------------------
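The test mirrors the loss definition L = mean over triplets of max(0, ||a-p||^2 - ||a-n||^2 + alpha). A worked numpy example for a single triplet with alpha = 0.2:

import numpy as np

a = np.array([0.0, 0.0])
p = np.array([0.3, 0.0])   # positive at squared distance 0.09
n = np.array([0.4, 0.0])   # negative at squared distance 0.16
alpha = 0.2
loss = max(0.0, np.sum((a - p)**2) - np.sum((a - n)**2) + alpha)
print(loss)   # 0.09 - 0.16 + 0.2 = 0.13: the negative is not yet far enough away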
/facenet/test/batch_norm_test.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | import unittest
24 | import tensorflow as tf
25 | import models
26 | import numpy as np
27 | import numpy.testing as testing
28 |
29 | class BatchNormTest(unittest.TestCase):
30 |
31 |
32 | @unittest.skip("Skip batch norm test case")
33 | def testBatchNorm(self):
34 |
35 | tf.set_random_seed(123)
36 |
37 | x = tf.placeholder(tf.float32, [None, 20, 20, 10], name='input')
38 | phase_train = tf.placeholder(tf.bool, name='phase_train')
39 |
40 | # generate random noise to pass into batch norm
41 | #x_gen = tf.random_normal([50,20,20,10])
42 |
43 | bn = models.network.batch_norm(x, phase_train)
44 |
45 | init = tf.global_variables_initializer()
46 | sess = tf.Session(config=tf.ConfigProto())
47 | sess.run(init)
48 |
49 | with sess.as_default():
50 |
51 | #generate a constant variable to pass into batch norm
52 | y = np.random.normal(0, 1, size=(50,20,20,10))
53 |
54 | feed_dict = {x: y, phase_train: True}
55 | sess.run(bn, feed_dict=feed_dict)
56 |
57 | feed_dict = {x: y, phase_train: False}
58 | y1 = sess.run(bn, feed_dict=feed_dict)
59 | y2 = sess.run(bn, feed_dict=feed_dict)
60 |
61 | testing.assert_almost_equal(y1, y2, 10, 'Output from two forward passes with phase_train==false should be equal')
62 |
63 |
64 | if __name__ == "__main__":
65 | unittest.main()
66 |
--------------------------------------------------------------------------------
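The (skipped) test encodes a useful invariant: with phase_train False, batch norm applies frozen moving statistics, so repeated forward passes on identical input must agree. A numpy sketch of the inference-time transform y = gamma * (x - moving_mean) / sqrt(moving_var + eps) + beta, with illustrative frozen statistics:

import numpy as np

x = np.random.RandomState(0).normal(size=(50, 10))
moving_mean, moving_var = np.zeros(10), np.ones(10)   # frozen at training time
gamma, beta, eps = 1.0, 0.0, 0.001

def bn_inference(x):
    return gamma * (x - moving_mean) / np.sqrt(moving_var + eps) + beta

print(np.allclose(bn_inference(x), bn_inference(x)))   # True: no batch statistics involved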
/facenet/src/models/dummy.py:
--------------------------------------------------------------------------------
1 | """Dummy model used only for testing
2 | """
3 | # MIT License
4 | #
5 | # Copyright (c) 2016 David Sandberg
6 | #
7 | # Permission is hereby granted, free of charge, to any person obtaining a copy
8 | # of this software and associated documentation files (the "Software"), to deal
9 | # in the Software without restriction, including without limitation the rights
10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | # copies of the Software, and to permit persons to whom the Software is
12 | # furnished to do so, subject to the following conditions:
13 | #
14 | # The above copyright notice and this permission notice shall be included in all
15 | # copies or substantial portions of the Software.
16 | #
17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | # SOFTWARE.
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | import tensorflow as tf
30 | import tensorflow.contrib.slim as slim
31 | import numpy as np
32 |
33 | def inference(images, keep_probability, phase_train=True, # @UnusedVariable
34 | bottleneck_layer_size=128, bottleneck_layer_activation=None, weight_decay=0.0, reuse=None): # @UnusedVariable
35 | batch_norm_params = {
36 | # Decay for the moving averages.
37 | 'decay': 0.995,
38 | # epsilon to prevent 0s in variance.
39 | 'epsilon': 0.001,
40 | # force in-place updates of mean and variance estimates
41 | 'updates_collections': None,
42 | # Moving averages ends up in the trainable variables collection
43 | 'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
44 | }
45 |
46 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
47 | weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
48 | weights_regularizer=slim.l2_regularizer(weight_decay),
49 | normalizer_fn=slim.batch_norm,
50 | normalizer_params=batch_norm_params):
51 | size = np.prod(images.get_shape()[1:].as_list())
52 | net = slim.fully_connected(tf.reshape(images, (-1,size)), bottleneck_layer_size, activation_fn=None,
53 | scope='Bottleneck', reuse=False)
54 | return net, None
55 |
--------------------------------------------------------------------------------
/facenet/tmp/mtcnn.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | from __future__ import absolute_import
24 | from __future__ import division
25 | from __future__ import print_function
26 |
27 | import tensorflow as tf
28 | import align.detect_face
29 | from scipy import misc
30 |
31 | with tf.Graph().as_default():
32 |
33 | sess = tf.Session()
34 | with sess.as_default():
35 | with tf.variable_scope('pnet'):
36 | data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
37 | pnet = align.detect_face.PNet({'data':data})
38 | pnet.load('../../data/det1.npy', sess)
39 | with tf.variable_scope('rnet'):
40 | data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
41 | rnet = align.detect_face.RNet({'data':data})
42 | rnet.load('../../data/det2.npy', sess)
43 | with tf.variable_scope('onet'):
44 | data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
45 | onet = align.detect_face.ONet({'data':data})
46 | onet.load('../../data/det3.npy', sess)
47 |
48 | pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
49 | rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
50 | onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
51 |
52 | minsize = 20 # minimum size of face
53 | threshold = [ 0.6, 0.7, 0.7 ] # thresholds for the three cascade stages
54 | factor = 0.709 # scale factor
55 |
56 | source_path = '/home/david/datasets/casia/CASIA-maxpy-clean/0000045/002.jpg'
57 | img = misc.imread(source_path)
58 |
59 | bounding_boxes, points = align.detect_face.detect_face(img, minsize, pnet_fun, rnet_fun, onet_fun, threshold, factor)
60 |
61 | print('Bounding box: %s' % bounding_boxes)
62 |
63 |
64 |
--------------------------------------------------------------------------------
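The scale factor 0.709 is roughly sqrt(1/2), so each pyramid level halves the image area. A sketch of how align.detect_face derives its scale pyramid from minsize and factor, assuming a 480x640 input frame:

minsize, factor = 20, 0.709
m = 12.0 / minsize              # P-Net operates on 12x12 windows
minl = min(480, 640) * m
scales = []
while minl >= 12:
    scales.append(m * factor ** len(scales))
    minl *= factor
print(scales)                   # one detection scale per pyramid level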
/facenet/src/models/squeezenet.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | import tensorflow.contrib.slim as slim
7 |
8 | def fire_module(inputs,
9 | squeeze_depth,
10 | expand_depth,
11 | reuse=None,
12 | scope=None,
13 | outputs_collections=None):
14 | with tf.variable_scope(scope, 'fire', [inputs], reuse=reuse):
15 | with slim.arg_scope([slim.conv2d, slim.max_pool2d],
16 | outputs_collections=None):
17 | net = squeeze(inputs, squeeze_depth)
18 | outputs = expand(net, expand_depth)
19 | return outputs
20 |
21 | def squeeze(inputs, num_outputs):
22 | return slim.conv2d(inputs, num_outputs, [1, 1], stride=1, scope='squeeze')
23 |
24 | def expand(inputs, num_outputs):
25 | with tf.variable_scope('expand'):
26 | e1x1 = slim.conv2d(inputs, num_outputs, [1, 1], stride=1, scope='1x1')
27 | e3x3 = slim.conv2d(inputs, num_outputs, [3, 3], scope='3x3')
28 | return tf.concat([e1x1, e3x3], 3)
29 |
30 | def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
31 | batch_norm_params = {
32 | # Decay for the moving averages.
33 | 'decay': 0.995,
34 | # epsilon to prevent 0s in variance.
35 | 'epsilon': 0.001,
36 | # force in-place updates of mean and variance estimates
37 | 'updates_collections': None,
38 | # Moving averages ends up in the trainable variables collection
39 | 'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
40 | }
41 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
42 | weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
43 | weights_regularizer=slim.l2_regularizer(weight_decay),
44 | normalizer_fn=slim.batch_norm,
45 | normalizer_params=batch_norm_params):
46 | with tf.variable_scope('squeezenet', [images], reuse=reuse):
47 | with slim.arg_scope([slim.batch_norm, slim.dropout],
48 | is_training=phase_train):
49 | net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
50 | net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
51 | net = fire_module(net, 16, 64, scope='fire2')
52 | net = fire_module(net, 16, 64, scope='fire3')
53 | net = fire_module(net, 32, 128, scope='fire4')
54 | net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
55 | net = fire_module(net, 32, 128, scope='fire5')
56 | net = fire_module(net, 48, 192, scope='fire6')
57 | net = fire_module(net, 48, 192, scope='fire7')
58 | net = fire_module(net, 64, 256, scope='fire8')
59 | net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
60 | net = fire_module(net, 64, 256, scope='fire9')
61 | net = slim.dropout(net, keep_probability)
62 | net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
63 | net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
64 | net = tf.squeeze(net, [1, 2], name='logits')
65 | net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
66 | scope='Bottleneck', reuse=False)
67 | return net, None
68 |
--------------------------------------------------------------------------------
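Each fire module concatenates its 1x1 and 3x3 expand branches along the channel axis, so the output depth is twice expand_depth. A minimal shape check, assuming TensorFlow 1.x with tf.contrib.slim and this file importable as squeezenet:

import tensorflow as tf
import squeezenet

with tf.Graph().as_default():
    images = tf.placeholder(tf.float32, [None, 55, 55, 96])
    net = squeezenet.fire_module(images, 16, 64, scope='fire2')
    print(net.get_shape())   # (?, 55, 55, 128): 64 channels from each expand branch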
/facenet/src/lfw.py:
--------------------------------------------------------------------------------
1 | """Helper for evaluation on the Labeled Faces in the Wild dataset
2 | """
3 |
4 | # MIT License
5 | #
6 | # Copyright (c) 2016 David Sandberg
7 | #
8 | # Permission is hereby granted, free of charge, to any person obtaining a copy
9 | # of this software and associated documentation files (the "Software"), to deal
10 | # in the Software without restriction, including without limitation the rights
11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | # copies of the Software, and to permit persons to whom the Software is
13 | # furnished to do so, subject to the following conditions:
14 | #
15 | # The above copyright notice and this permission notice shall be included in all
16 | # copies or substantial portions of the Software.
17 | #
18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | # SOFTWARE.
25 |
26 | from __future__ import absolute_import
27 | from __future__ import division
28 | from __future__ import print_function
29 |
30 | import os
31 | import numpy as np
32 | import facenet
33 |
34 | def evaluate(embeddings, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
35 | # Calculate evaluation metrics
36 | thresholds = np.arange(0, 4, 0.01)
37 | embeddings1 = embeddings[0::2]
38 | embeddings2 = embeddings[1::2]
39 | tpr, fpr, accuracy = facenet.calculate_roc(thresholds, embeddings1, embeddings2,
40 | np.asarray(actual_issame), nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
41 | thresholds = np.arange(0, 4, 0.001)
42 | val, val_std, far = facenet.calculate_val(thresholds, embeddings1, embeddings2,
43 | np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
44 | return tpr, fpr, accuracy, val, val_std, far
45 |
46 | def get_paths(lfw_dir, pairs):
47 | nrof_skipped_pairs = 0
48 | path_list = []
49 | issame_list = []
50 | for pair in pairs:
51 | if len(pair) == 3:
52 | path0 = add_extension(os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[1])))
53 | path1 = add_extension(os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[2])))
54 | issame = True
55 | elif len(pair) == 4:
56 | path0 = add_extension(os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[1])))
57 | path1 = add_extension(os.path.join(lfw_dir, pair[2], pair[2] + '_' + '%04d' % int(pair[3])))
58 | issame = False
59 | if os.path.exists(path0) and os.path.exists(path1): # Only add the pair if both paths exist
60 | path_list += (path0,path1)
61 | issame_list.append(issame)
62 | else:
63 | nrof_skipped_pairs += 1
64 | if nrof_skipped_pairs>0:
65 | print('Skipped %d image pairs' % nrof_skipped_pairs)
66 |
67 | return path_list, issame_list
68 |
69 | def add_extension(path):
70 | if os.path.exists(path+'.jpg'):
71 | return path+'.jpg'
72 | elif os.path.exists(path+'.png'):
73 | return path+'.png'
74 | else:
75 | raise RuntimeError('No file "%s" with extension png or jpg.' % path)
76 |
77 | def read_pairs(pairs_filename):
78 | pairs = []
79 | with open(pairs_filename, 'r') as f:
80 | for line in f.readlines()[1:]:
81 | pair = line.strip().split()
82 | pairs.append(pair)
83 | return np.array(pairs)
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
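A sketch of the intended evaluation flow, assuming an aligned LFW directory and the standard pairs.txt; embed(paths) is a hypothetical helper returning one embedding per image, in path order:

import numpy as np
import lfw

pairs = lfw.read_pairs('data/pairs.txt')
paths, actual_issame = lfw.get_paths('/data/lfw_aligned', pairs)
embeddings = embed(paths)   # hypothetical: shape (2 * len(actual_issame), embedding_size)
tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))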
/facenet/contributed/real_time_face_recognition.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """Performs face detection in realtime.
3 |
4 | Based on code from https://github.com/shanren7/real_time_face_recognition
5 | """
6 | # MIT License
7 | #
8 | # Copyright (c) 2017 François Gervais
9 | #
10 | # Permission is hereby granted, free of charge, to any person obtaining a copy
11 | # of this software and associated documentation files (the "Software"), to deal
12 | # in the Software without restriction, including without limitation the rights
13 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 | # copies of the Software, and to permit persons to whom the Software is
15 | # furnished to do so, subject to the following conditions:
16 | #
17 | # The above copyright notice and this permission notice shall be included in all
18 | # copies or substantial portions of the Software.
19 | #
20 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 | # SOFTWARE.
27 | import argparse
28 | import sys
29 | import time
30 |
31 | import cv2
32 |
33 | import face
34 |
35 |
36 | def add_overlays(frame, faces, frame_rate):
37 | if faces is not None:
38 | for face in faces:
39 | face_bb = face.bounding_box.astype(int)
40 | cv2.rectangle(frame,
41 | (face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),
42 | (0, 255, 0), 2)
43 | if face.name is not None:
44 | cv2.putText(frame, face.name, (face_bb[0], face_bb[3]),
45 | cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
46 | thickness=2, lineType=2)
47 |
48 | cv2.putText(frame, str(frame_rate) + " fps", (10, 30),
49 | cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
50 | thickness=2, lineType=2)
51 |
52 |
53 | def main(args):
54 | frame_interval = 3 # Number of frames after which to run face detection
55 | fps_display_interval = 5 # seconds
56 | frame_rate = 0
57 | frame_count = 0
58 |
59 | video_capture = cv2.VideoCapture(0)
60 | face_recognition = face.Recognition()
61 | start_time = time.time()
62 |
63 | if args.debug:
64 | print("Debug enabled")
65 | face.debug = True
66 |
67 | while True:
68 | # Capture frame-by-frame
69 | ret, frame = video_capture.read()
70 |
71 | if (frame_count % frame_interval) == 0:
72 | faces = face_recognition.identify(frame)
73 |
74 | # Check our current fps
75 | end_time = time.time()
76 | if (end_time - start_time) > fps_display_interval:
77 | frame_rate = int(frame_count / (end_time - start_time))
78 | start_time = time.time()
79 | frame_count = 0
80 |
81 | add_overlays(frame, faces, frame_rate)
82 |
83 | frame_count += 1
84 | cv2.imshow('Video', frame)
85 |
86 | if cv2.waitKey(1) & 0xFF == ord('q'):
87 | break
88 |
89 | # When everything is done, release the capture
90 | video_capture.release()
91 | cv2.destroyAllWindows()
92 |
93 |
94 | def parse_arguments(argv):
95 | parser = argparse.ArgumentParser()
96 |
97 | parser.add_argument('--debug', action='store_true',
98 | help='Enable some debug outputs.')
99 | return parser.parse_args(argv)
100 |
101 |
102 | if __name__ == '__main__':
103 | main(parse_arguments(sys.argv[1:]))
104 |
--------------------------------------------------------------------------------
/facenet/src/decode_msceleb_dataset.py:
--------------------------------------------------------------------------------
1 | """Decode the MsCelebV1 dataset in TSV (tab separated values) format downloaded from
2 | https://www.microsoft.com/en-us/research/project/ms-celeb-1m-challenge-recognizing-one-million-celebrities-real-world/
3 | """
4 | # MIT License
5 | #
6 | # Copyright (c) 2016 David Sandberg
7 | #
8 | # Permission is hereby granted, free of charge, to any person obtaining a copy
9 | # of this software and associated documentation files (the "Software"), to deal
10 | # in the Software without restriction, including without limitation the rights
11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | # copies of the Software, and to permit persons to whom the Software is
13 | # furnished to do so, subject to the following conditions:
14 | #
15 | # The above copyright notice and this permission notice shall be included in all
16 | # copies or substantial portions of the Software.
17 | #
18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | # SOFTWARE.
25 |
26 | from __future__ import absolute_import
27 | from __future__ import division
28 | from __future__ import print_function
29 |
30 | from scipy import misc
31 | import numpy as np
32 | import base64
33 | import sys
34 | import os
35 | import cv2
36 | import argparse
37 | import facenet
38 |
39 |
40 | # File format: text files, each line is an image record containing 6 columns, delimited by TAB.
41 | # Column1: Freebase MID
42 | # Column2: Query/Name
43 | # Column3: ImageSearchRank
44 | # Column4: ImageURL
45 | # Column5: PageURL
46 | # Column6: ImageData_Base64Encoded
47 |
48 | def main(args):
49 | output_dir = os.path.expanduser(args.output_dir)
50 |
51 | if not os.path.exists(output_dir):
52 | os.mkdir(output_dir)
53 |
54 | # Store some git revision info in a text file in the output directory
55 | src_path,_ = os.path.split(os.path.realpath(__file__))
56 | facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
57 |
58 | i = 0
59 | for f in args.tsv_files:
60 | for line in f:
61 | fields = line.split('\t')
62 | class_dir = fields[0]
63 | img_name = fields[1] + '-' + fields[4] + '.' + args.output_format
64 | img_string = fields[5]
65 | img_dec_string = base64.b64decode(img_string)
66 | img_data = np.fromstring(img_dec_string, dtype=np.uint8)
67 | img = cv2.imdecode(img_data, cv2.IMREAD_COLOR) #pylint: disable=maybe-no-member
68 | if args.size:
69 | img = misc.imresize(img, (args.size, args.size), interp='bilinear')
70 | full_class_dir = os.path.join(output_dir, class_dir)
71 | if not os.path.exists(full_class_dir):
72 | os.mkdir(full_class_dir)
73 | full_path = os.path.join(full_class_dir, img_name.replace('/','_'))
74 | cv2.imwrite(full_path, img) #pylint: disable=maybe-no-member
75 | print('%8d: %s' % (i, full_path))
76 | i += 1
77 |
78 | if __name__ == '__main__':
79 | parser = argparse.ArgumentParser()
80 |
81 | parser.add_argument('output_dir', type=str, help='Output base directory for the image dataset')
82 | parser.add_argument('tsv_files', type=argparse.FileType('r'), nargs='+', help='Input TSV file name(s)')
83 | parser.add_argument('--size', type=int, help='Images are resized to the given size')
84 | parser.add_argument('--output_format', type=str, help='Format of the output images', default='png', choices=['png', 'jpg'])
85 |
86 | main(parser.parse_args())
87 |
88 |
--------------------------------------------------------------------------------
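A sketch of decoding one record by hand, following the six-column layout documented above. The constructed line is illustrative (a real record carries a base64-encoded JPEG in column 6; sample.jpg is a stand-in), and np.frombuffer is the non-deprecated spelling of the np.fromstring call used above:

import base64
import numpy as np
import cv2

img_b64 = base64.b64encode(open('sample.jpg', 'rb').read()).decode('ascii')
line = 'm.123\tSome Name\t1\thttp://img.example\thttp://page.example\t' + img_b64
fields = line.split('\t')
img_data = np.frombuffer(base64.b64decode(fields[5]), dtype=np.uint8)
img = cv2.imdecode(img_data, cv2.IMREAD_COLOR)
print(img.shape)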
/facenet/test/center_loss_test.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | import unittest
24 | import tensorflow as tf
25 | import numpy as np
26 | import facenet
27 |
28 | class CenterLossTest(unittest.TestCase):
29 |
30 |
31 |
32 | def testCenterLoss(self):
33 | batch_size = 16
34 | nrof_features = 2
35 | nrof_classes = 16
36 | alfa = 0.5
37 |
38 | with tf.Graph().as_default():
39 |
40 | features = tf.placeholder(tf.float32, shape=(batch_size, nrof_features), name='features')
41 | labels = tf.placeholder(tf.int32, shape=(batch_size,), name='labels')
42 |
43 | # Define center loss
44 | center_loss, centers = facenet.center_loss(features, labels, alfa, nrof_classes)
45 |
46 | label_to_center = np.array( [
47 | [-3,-3], [-3,-1], [-3,1], [-3,3],
48 | [-1,-3], [-1,-1], [-1,1], [-1,3],
49 | [ 1,-3], [ 1,-1], [ 1,1], [ 1,3],
50 | [ 3,-3], [ 3,-1], [ 3,1], [ 3,3]
51 | ])
52 |
53 | sess = tf.Session()
54 | with sess.as_default():
55 | sess.run(tf.global_variables_initializer())
56 | np.random.seed(seed=666)
57 |
58 | for _ in range(0,100):
59 | # Create array of random labels
60 | lbls = np.random.randint(low=0, high=nrof_classes, size=(batch_size,))
61 | feats = create_features(label_to_center, batch_size, nrof_features, lbls)
62 |
63 | center_loss_, centers_ = sess.run([center_loss, centers], feed_dict={features:feats, labels:lbls})
64 |
65 | # After a large number of updates the estimated centers should be close to the true ones
66 | np.testing.assert_almost_equal(centers_, label_to_center, decimal=5, err_msg='Incorrect estimated centers')
67 | np.testing.assert_almost_equal(center_loss_, 0.0, decimal=5, err_msg='Incorrect center loss')
68 |
69 |
70 | def create_features(label_to_center, batch_size, nrof_features, labels):
71 | # Map label to center
72 | # label_to_center_dict = {
73 | # 0:(-3,-3), 1:(-3,-1), 2:(-3,1), 3:(-3,3),
74 | # 4:(-1,-3), 5:(-1,-1), 6:(-1,1), 7:(-1,3),
75 | # 8:( 1,-3), 9:( 1,-1), 10:( 1,1), 11:( 1,3),
76 | # 12:( 3,-3), 13:( 3,-1), 14:( 3,1), 15:( 3,3),
77 | # }
78 | # Create array of features corresponding to the labels
79 | feats = np.zeros((batch_size, nrof_features))
80 | for i in range(batch_size):
81 | cntr = label_to_center[labels[i]]
82 | for j in range(nrof_features):
83 | feats[i,j] = cntr[j]
84 | return feats
85 |
86 | if __name__ == "__main__":
87 | unittest.main()
88 |
--------------------------------------------------------------------------------
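The centers converge because every batch pulls a label's center a fixed fraction of the way toward the features assigned to it, roughly c <- c - alfa * (c - x) (the exact scaling inside facenet.center_loss may differ, but at alfa = 0.5 the common conventions coincide). A one-dimensional sketch of that update with illustrative values:

center, alfa, true_center = 0.0, 0.5, 3.0
for _ in range(20):
    feature = true_center                 # noiseless feature for this label (illustrative)
    center = center - alfa * (center - feature)
print(center)                             # approaches 3.0; the error halves every step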
/facenet/tmp/visualize_vgg_model.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import misc
3 | import tensorflow as tf
4 | from matplotlib import pyplot, image
5 | import vggverydeep19
6 |
7 | paintingStyleImage = image.imread("../data/schoolofathens.jpg")
8 | pyplot.imshow(paintingStyleImage)
9 |
10 | inputImage = image.imread("../data/grandcentral.jpg")
11 | pyplot.imshow(inputImage)
12 |
13 | outputWidth = 800
14 | outputHeight = 600
15 |
16 | # Weight of the content term in the total loss
17 | beta = 5
18 | # Weight of the style term in the total loss
19 | alpha = 100
20 | # Fraction of noise blended into the initial image
21 | noiseRatio = 0.6
22 |
23 | nodes = vggverydeep19.load('../data/imagenet-vgg-verydeep-19.mat', (600, 800))
24 |
25 | # Per-channel mean of the VGG-19 training data, subtracted from all input images
26 | meanImage19 = np.array([103.939, 116.779, 123.68]).reshape((1,1,1,3)) #pylint: disable=no-member
27 |
28 |
29 |
30 | # Squared-error loss of content between the two feature representations
31 | def sqErrorLossContent(sess, modelGraph, layer):
32 | p = sess.run(modelGraph[layer])
33 | #pylint: disable=maybe-no-member
34 | N = p.shape[3]
35 | M = p.shape[1] * p.shape[2]
36 | return (1 / (4 * N * M)) * tf.reduce_sum(tf.pow(modelGraph[layer] - p, 2))
37 |
38 | # Squared-error loss of style between the two feature representations
39 | styleLayers = [
40 | ('conv1_1', 0.2),
41 | ('conv2_1', 0.2),
42 | ('conv3_1', 0.2),
43 | ('conv4_1', 0.2),
44 | ('conv5_1', 0.2),
45 | ]
46 | def sqErrorLossStyle(sess, modelGraph):
47 | def intermediateCalc(x, y):
48 | N = x.shape[3]
49 | M = x.shape[1] * x.shape[2]
50 | A = tf.matmul(tf.transpose(tf.reshape(x, (M, N))), tf.reshape(x, (M, N)))
51 | G = tf.matmul(tf.transpose(tf.reshape(y, (M, N))), tf.reshape(y, (M, N)))
52 | return (1 / (4 * N**2 * M**2)) * tf.reduce_sum(tf.pow(G - A, 2))
53 | E = [intermediateCalc(sess.run(modelGraph[layerName]), modelGraph[layerName]) for layerName, _ in styleLayers]
54 | W = [w for _, w in styleLayers]
55 | return sum([W[layerNumber] * E[layerNumber] for layerNumber in range(len(styleLayers))])
56 |
57 | session = tf.InteractiveSession()
58 |
59 | # Addition of extra dimension to image
60 | inputImage = np.reshape(inputImage, ((1,) + inputImage.shape))
61 | inputImage = inputImage - meanImage19
62 | # Display image
63 | pyplot.imshow(inputImage[0])
64 |
65 | # Addition of extra dimension to image
66 | paintingStyleImage = np.reshape(paintingStyleImage, ((1,) + paintingStyleImage.shape))
67 | paintingStyleImage = paintingStyleImage - meanImage19
68 | # Display image
69 | pyplot.imshow(paintingStyleImage[0])
70 |
71 | imageNoise = np.random.uniform(-20, 20, (1, outputHeight, outputWidth, 3)).astype('float32')
72 | pyplot.imshow(imageNoise[0])
73 | mixedImage = imageNoise * noiseRatio + inputImage * (1 - noiseRatio)
74 | pyplot.imshow(mixedImage[0])
75 |
76 |
77 | session.run(tf.global_variables_initializer())
78 | session.run(nodes['input'].assign(inputImage))
79 | contentLoss = sqErrorLossContent(session, nodes, 'conv4_2')
80 | session.run(nodes['input'].assign(paintingStyleImage))
81 | styleLoss = sqErrorLossStyle(session, nodes)
82 | totalLoss = beta * contentLoss + alpha * styleLoss
83 |
84 | optimizer = tf.train.AdamOptimizer(2.0)
85 | trainStep = optimizer.minimize(totalLoss)
86 | # Number of iterations to run.
87 | iterations = 2000
88 | # Initialize the variables once and start the optimization from the
89 | # noise-blended image rather than the plain content image
90 | session.run(tf.global_variables_initializer())
91 | session.run(nodes['input'].assign(mixedImage))
92 |
93 | for iters in range(iterations):
94 | session.run(trainStep)
95 | if iters%50 == 0:
96 | # Save the current state of the optimized image every 50 iterations for animation
97 | filename = 'output%d.png' % (iters)
98 | im = session.run(nodes['input']) + meanImage19
99 | im = im[0]
100 | im = np.clip(im, 0, 255).astype('uint8')
101 | misc.imsave(filename, im)
102 |
103 | im = session.run(nodes['input']) + meanImage19
104 | im = im[0]
105 | im = np.clip(im, 0, 255).astype('uint8')
106 | misc.imsave('finalImage.png', im)
107 |
108 |
--------------------------------------------------------------------------------
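Note: the script above is a Gatys-style neural style transfer. sqErrorLossContent
penalizes squared differences of one layer's activations, while sqErrorLossStyle
compares Gram matrices (channel-by-channel correlations) across five layers,
weighted by W. A standalone NumPy sketch of the Gram computation performed inside
intermediateCalc (the function name gram_matrix is illustrative):

    import numpy as np

    def gram_matrix(feature_map):
        # feature_map: (1, H, W, N) activations from one VGG layer.
        # Flatten the spatial grid to M = H*W rows, then form the N x N
        # matrix of inner products between channels.
        _, h, w, n = feature_map.shape
        f = feature_map.reshape(h * w, n)
        return np.dot(f.T, f)

--------------------------------------------------------------------------------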
/facenet/tmp/vggverydeep19.py:
--------------------------------------------------------------------------------
1 | """Load the VGG imagenet model into TensorFlow.
2 | Download the model from http://www.robots.ox.ac.uk/~vgg/research/very_deep/
3 | and point to the file 'imagenet-vgg-verydeep-19.mat'
4 | """
5 | import numpy as np
6 | from scipy import io
7 | import tensorflow as tf
8 |
9 | def load(filename, image_size):
10 | vgg19 = io.loadmat(filename)
11 | vgg19Layers = vgg19['layers']
12 |
13 | # A function to get the weights of the VGG layers
14 | def vbbWeights(layerNumber):
15 | W = vgg19Layers[0][layerNumber][0][0][2][0][0]
16 | W = tf.constant(W)
17 | return W
18 |
19 | def vbbConstants(layerNumber):
20 | b = vgg19Layers[0][layerNumber][0][0][2][0][1].T
21 | b = tf.constant(np.reshape(b, (b.size)))
22 | return b
23 |
24 | modelGraph = {}
25 | modelGraph['input'] = tf.Variable(np.zeros((1, image_size[0], image_size[1], 3), dtype=np.float32))  # trainable input image; image_size = (height, width)
26 | modelGraph['conv1_1'] = tf.nn.relu(tf.nn.conv2d(modelGraph['input'], filter = vbbWeights(0), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(0))
27 | modelGraph['conv1_2'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv1_1'], filter = vbbWeights(2), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(2))
28 | modelGraph['avgpool1'] = tf.nn.avg_pool(modelGraph['conv1_2'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
29 | modelGraph['conv2_1'] = tf.nn.relu(tf.nn.conv2d(modelGraph['avgpool1'], filter = vbbWeights(5), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(5))
30 | modelGraph['conv2_2'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv2_1'], filter = vbbWeights(7), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(7))
31 | modelGraph['avgpool2'] = tf.nn.avg_pool(modelGraph['conv2_2'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
32 | modelGraph['conv3_1'] = tf.nn.relu(tf.nn.conv2d(modelGraph['avgpool2'], filter = vbbWeights(10), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(10))
33 | modelGraph['conv3_2'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv3_1'], filter = vbbWeights(12), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(12))
34 | modelGraph['conv3_3'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv3_2'], filter = vbbWeights(14), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(14))
35 | modelGraph['conv3_4'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv3_3'], filter = vbbWeights(16), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(16))
36 | modelGraph['avgpool3'] = tf.nn.avg_pool(modelGraph['conv3_4'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
37 | modelGraph['conv4_1'] = tf.nn.relu(tf.nn.conv2d(modelGraph['avgpool3'], filter = vbbWeights(19), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(19))
38 | modelGraph['conv4_2'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv4_1'], filter = vbbWeights(21), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(21))
39 | modelGraph['conv4_3'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv4_2'], filter = vbbWeights(23), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(23))
40 | modelGraph['conv4_4'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv4_3'], filter = vbbWeights(25), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(25))
41 | modelGraph['avgpool4'] = tf.nn.avg_pool(modelGraph['conv4_4'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
42 | modelGraph['conv5_1'] = tf.nn.relu(tf.nn.conv2d(modelGraph['avgpool4'], filter = vbbWeights(28), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(28))
43 | modelGraph['conv5_2'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv5_1'], filter = vbbWeights(30), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(30))
44 | modelGraph['conv5_3'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv5_2'], filter = vbbWeights(32), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(32))
45 | modelGraph['conv5_4'] = tf.nn.relu(tf.nn.conv2d(modelGraph['conv5_3'], filter = vbbWeights(34), strides = [1, 1, 1, 1], padding = 'SAME') + vbbConstants(34))
46 | modelGraph['avgpool5'] = tf.nn.avg_pool(modelGraph['conv5_4'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
47 |
48 | return modelGraph
49 |
50 |
--------------------------------------------------------------------------------
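Note: a minimal usage sketch for the loader above. The .mat path is a placeholder
and must point at the file downloaded from the URL in the docstring; the second
argument is the (height, width) of the input variable:

    import tensorflow as tf
    import vggverydeep19

    nodes = vggverydeep19.load('../data/imagenet-vgg-verydeep-19.mat', (600, 800))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # conv4_2 sits after three 2x2 poolings: (600/8) x (800/8) x 512 channels
        print(sess.run(nodes['conv4_2']).shape)   # (1, 75, 100, 512)

--------------------------------------------------------------------------------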
/facenet/tmp/nn4_small2_v1.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | # pylint: disable=missing-docstring
24 | from __future__ import absolute_import
25 | from __future__ import division
26 | from __future__ import print_function
27 |
28 | import tensorflow as tf
29 | import models.network as network
30 |
31 | def inference(images, keep_probability, phase_train=True, weight_decay=0.0):
32 | """ Define an inference network for face recognition based
33 | on inception modules using batch normalization
34 |
35 | Args:
36 | images: The images to run inference on, dimensions batch_size x height x width x channels
37 | phase_train: True if batch normalization should operate in training mode
38 | """
39 | endpoints = {}
40 | net = network.conv(images, 3, 64, 7, 7, 2, 2, 'SAME', 'conv1_7x7', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
41 | endpoints['conv1'] = net
42 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool1')
43 | endpoints['pool1'] = net
44 | net = network.conv(net, 64, 64, 1, 1, 1, 1, 'SAME', 'conv2_1x1', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
45 | endpoints['conv2_1x1'] = net
46 | net = network.conv(net, 64, 192, 3, 3, 1, 1, 'SAME', 'conv3_3x3', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
47 | endpoints['conv3_3x3'] = net
48 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool3')
49 | endpoints['pool3'] = net
50 |
51 | net = network.inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX', 'incept3a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
52 | endpoints['incept3a'] = net
53 | net = network.inception(net, 256, 1, 64, 96, 128, 32, 64, 3, 64, 1, 'MAX', 'incept3b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
54 | endpoints['incept3b'] = net
55 | net = network.inception(net, 320, 2, 0, 128, 256, 32, 64, 3, 0, 2, 'MAX', 'incept3c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
56 | endpoints['incept3c'] = net
57 |
58 | net = network.inception(net, 640, 1, 256, 96, 192, 32, 64, 3, 128, 1, 'MAX', 'incept4a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
59 | endpoints['incept4a'] = net
60 | net = network.inception(net, 640, 2, 0, 160, 256, 64, 128, 3, 0, 2, 'MAX', 'incept4e', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
61 | endpoints['incept4e'] = net
62 |
63 | net = network.inception(net, 1024, 1, 256, 96, 384, 0, 0, 3, 96, 1, 'MAX', 'incept5a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
64 | endpoints['incept5a'] = net
65 | net = network.inception(net, 736, 1, 256, 96, 384, 0, 0, 3, 96, 1, 'MAX', 'incept5b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
66 | endpoints['incept5b'] = net
67 | net = network.apool(net, 3, 3, 1, 1, 'VALID', 'pool6')
68 | endpoints['pool6'] = net
69 | net = tf.reshape(net, [-1, 736])
70 | endpoints['prelogits'] = net
71 | net = tf.nn.dropout(net, keep_probability)
72 | endpoints['dropout'] = net
73 |
74 | return net, endpoints
75 |
--------------------------------------------------------------------------------
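Note: nn4_small2_v1 and the nn2/nn3/nn4 variants below all expose the same
inference(images, keep_probability, phase_train, weight_decay) signature and
differ only in their inception stacks and prelogits width. A hedged wiring sketch
(placeholder names and the 96x96 input size are illustrative; models.network must
be importable and is assumed to accept a boolean tensor for phase_train):

    import tensorflow as tf
    import nn4_small2_v1 as model

    images = tf.placeholder(tf.float32, [None, 96, 96, 3], name='input')
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    prelogits, endpoints = model.inference(images, keep_probability=0.8,
                                           phase_train=phase_train,
                                           weight_decay=1e-4)
    # L2-normalize the 736-dimensional prelogits to get unit-length embeddings
    embeddings = tf.nn.l2_normalize(prelogits, 1, name='embeddings')

--------------------------------------------------------------------------------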
/facenet/tmp/random_test.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from six.moves import xrange
4 |
5 |
6 | with tf.Graph().as_default():
7 | tf.set_random_seed(666)
8 |
9 |
10 | # Placeholder for input images
11 | input_placeholder = tf.placeholder(tf.float32, shape=(9, 7), name='input')
12 |
13 | # Split example embeddings into anchor, positive and negative
14 | #anchor, positive, negative = tf.split(0, 3, input)
15 | resh1 = tf.reshape(input_placeholder, [3,3,7])
16 | anchor = resh1[0,:,:]
17 | positive = resh1[1,:,:]
18 | negative = resh1[2,:,:]
19 |
20 | # Build an initialization operation to run below.
21 | init = tf.global_variables_initializer()
22 |
23 | # Start running operations on the Graph.
24 | sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
25 | sess.run(init)
26 |
27 | with sess.as_default():
28 | batch = np.zeros((9,7))
29 | batch[0,:] = 1.1
30 | batch[1,:] = 2.1
31 | batch[2,:] = 3.1
32 | batch[3,:] = 1.2
33 | batch[4,:] = 2.2
34 | batch[5,:] = 3.2
35 | batch[6,:] = 1.3
36 | batch[7,:] = 2.3
37 | batch[8,:] = 3.3
38 | feed_dict = {input_placeholder: batch }
39 | print(batch)
40 | print(sess.run([anchor, positive, negative], feed_dict=feed_dict))
41 |
42 |
43 |
44 |
45 | #feed_dict = { images_placeholder: np.zeros((90,96,96,3)), phase_train_placeholder: True }
46 | #vars_eval = sess.run(tf.global_variables(), feed_dict=feed_dict)
47 | #for gt in vars_eval:
48 | #print('%.20f' % (np.sum(gt)))
49 | #for gt, gv in zip(grads_eval, grad_vars):
50 | #print('%40s: %.20f' % (gv.op.name, np.sum(gt)))
51 |
52 |
53 |
54 | #import h5py
55 | #myFile = h5py.File('/home/david/repo/TensorFace/network.h5', 'r')
56 |
57 | ## The '...' means retrieve the whole tensor
58 | #data = myFile[...]
59 | #print(data)
60 |
61 |
62 | #import h5py # HDF5 support
63 |
64 | #fileName = "/home/david/repo/TensorFace/network.h5"
65 | #f = h5py.File(fileName, "r")
66 | ##for item in f.keys():
67 | ##print item
68 | #for item in f.values():
69 | #print item
70 |
71 |
72 | #import tensorflow as tf
73 | #import numpy as np
74 | #import matplotlib.pyplot as plt
75 | #import math
76 | #import facenet
77 | #import os
78 | #import glob
79 | #from scipy import misc
80 |
81 | #def plot_triplet(apn, idx):
82 | #plt.subplot(1,3,1)
83 | #plt.imshow(np.multiply(apn[idx*3+0,:,:,:],1/256))
84 | #plt.subplot(1,3,2)
85 | #plt.imshow(np.multiply(apn[idx*3+1,:,:,:],1/256))
86 | #plt.subplot(1,3,3)
87 | #plt.imshow(np.multiply(apn[idx*3+2,:,:,:],1/256))
88 |
89 |
90 | #input_image = tf.placeholder(tf.float32, name='input_image')
91 | #phase_train = tf.placeholder(tf.bool, name='phase_train')
92 |
93 | #n_in, n_out = 3, 16
94 | #ksize = 3
95 | #stride = 1
96 | #kernel = tf.Variable(tf.truncated_normal([ksize, ksize, n_in, n_out],
97 | #stddev=math.sqrt(2/(ksize*ksize*n_out))),
98 | #name='kernel')
99 | #conv = tf.nn.conv2d(input_image, kernel, [1,stride,stride,1], padding="SAME")
100 | #conv_bn = facenet.batch_norm(conv, n_out, phase_train)
101 | #relu = tf.nn.relu(conv_bn)
102 |
103 | ## Build an initialization operation to run below.
104 | #init = tf.global_variables_initializer()
105 |
106 | ## Start running operations on the Graph.
107 | #sess = tf.Session()
108 | #sess.run(init)
109 |
110 | #path = '/home/david/datasets/fs_aligned/Zooey_Deschanel/'
111 | #files = glob.glob(os.path.join(path, '*.png'))
112 | #nrof_samples = 30
113 | #img_list = [None] * nrof_samples
114 | #for i in xrange(nrof_samples):
115 | #img_list[i] = misc.imread(files[i])
116 | #images = np.stack(img_list)
117 |
118 | #feed_dict = {
119 | #input_image: images.astype(np.float32),
120 | #phase_train: True
121 | #}
122 |
123 | #out = sess.run([relu], feed_dict=feed_dict)
124 | #print(out[0].shape)
125 |
126 | ##print(out)
127 |
128 | #plot_triplet(images, 0)
129 |
130 |
131 |
132 | #import matplotlib.pyplot as plt
133 | #import numpy as np
134 |
135 | #a=[3,4,5,6]
136 | #b = [1,a[1:3]]
137 | #print(b)
138 |
139 | ## Generate some data...
140 | #x, y = np.meshgrid(np.linspace(-2,2,200), np.linspace(-2,2,200))
141 | #x, y = x - x.mean(), y - y.mean()
142 | #z = x * np.exp(-x**2 - y**2)
143 | #print(z.shape)
144 |
145 | ## Plot the grid
146 | #plt.imshow(z)
147 | #plt.gray()
148 | #plt.show()
149 |
150 | #import numpy as np
151 |
152 | #np.random.seed(123)
153 | #rnd = 1.0*np.random.randint(1,2**32)/2**32
154 | #print(rnd)
155 |
--------------------------------------------------------------------------------
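Note: the commented-out `tf.split(0, 3, input)` above uses the pre-1.0 argument
order. Since TensorFlow 1.0 the equivalent of the reshape-and-slice approach is:

    anchor, positive, negative = tf.split(input_placeholder, 3, axis=0)

Each result has shape (3, 7), matching the resh1[i,:,:] slices.
--------------------------------------------------------------------------------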
/facenet/tmp/funnel_dataset.py:
--------------------------------------------------------------------------------
1 | """Performs face alignment and stores face thumbnails in the output directory."""
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from scipy import misc
8 | import sys
9 | import os
10 | import argparse
11 | import facenet
12 | import subprocess
13 | from contextlib import contextmanager
14 | import tempfile
15 | import shutil
16 | import numpy as np
17 |
18 | @contextmanager
19 | def TemporaryDirectory():
20 | name = tempfile.mkdtemp()
21 | try:
22 | yield name
23 | finally:
24 | shutil.rmtree(name)
25 |
26 |
27 | def main(args):
28 | funnel_cmd = 'funnelReal'
29 | funnel_model = 'people.train'
30 |
31 | output_dir = os.path.expanduser(args.output_dir)
32 | if not os.path.exists(output_dir):
33 | os.makedirs(output_dir)
34 | # Store some git revision info in a text file in the output directory
35 | src_path,_ = os.path.split(os.path.realpath(__file__))
36 | facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
37 | dataset = facenet.get_dataset(args.input_dir)
38 | np.random.shuffle(dataset)
39 | # Scale the image such that the face fills the frame when cropped to crop_size
40 | #scale = float(args.face_size) / args.image_size
41 | with TemporaryDirectory() as tmp_dir:
42 | for cls in dataset:
43 | output_class_dir = os.path.join(output_dir, cls.name)
44 | tmp_output_class_dir = os.path.join(tmp_dir, cls.name)
45 | if not os.path.exists(output_class_dir) and not os.path.exists(tmp_output_class_dir):
46 | print('Aligning class %s:' % cls.name)
47 | tmp_filenames = []
48 | if not os.path.exists(tmp_output_class_dir):
49 | os.makedirs(tmp_output_class_dir)
50 | input_list_filename = os.path.join(tmp_dir, 'input_list.txt')
51 | output_list_filename = os.path.join(tmp_dir, 'output_list.txt')
52 | input_file = open(input_list_filename, 'w')
53 | output_file = open(output_list_filename,'w')
54 | for image_path in cls.image_paths:
55 | filename = os.path.split(image_path)[1]
56 | input_file.write(image_path+'\n')
57 | output_filename = os.path.join(tmp_output_class_dir, filename)
58 | output_file.write(output_filename+'\n')
59 | tmp_filenames.append(output_filename)
60 | input_file.close()
61 | output_file.close()
62 | cmd = os.path.join(args.funnel_dir, funnel_cmd) + ' ' + input_list_filename + ' ' + os.path.join(args.funnel_dir, funnel_model) + ' ' + output_list_filename
63 | subprocess.call(cmd, shell=True)
64 |
65 | # Resize and crop images
66 | if not os.path.exists(output_class_dir):
67 | os.makedirs(output_class_dir)
68 | scale = 1.0
69 | for tmp_filename in tmp_filenames:
70 | img = misc.imread(tmp_filename)
71 | img_scale = misc.imresize(img, scale)
72 | sz1 = img.shape[1]/2
73 | sz2 = args.image_size/2
74 | img_crop = img_scale[int(sz1-sz2):int(sz1+sz2),int(sz1-sz2):int(sz1+sz2),:]
75 | filename = os.path.splitext(os.path.split(tmp_filename)[1])[0]
76 | output_filename = os.path.join(output_class_dir, filename+'.png')
77 | print('Saving image %s' % output_filename)
78 | misc.imsave(output_filename, img_crop)
79 |
80 | # Remove tmp directory with images
81 | shutil.rmtree(tmp_output_class_dir)
82 |
83 | def parse_arguments(argv):
84 | parser = argparse.ArgumentParser()
85 |
86 | parser.add_argument('input_dir', type=str, help='Directory with unaligned images.')
87 | parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.')
88 | parser.add_argument('funnel_dir', type=str, help='Directory containing the funnelReal binary and the people.train model file')
89 | parser.add_argument('--image_size', type=int,
90 | help='Image size (height, width) in pixels.', default=110)
91 | parser.add_argument('--face_size', type=int,
92 | help='Size of the face thumbnail (height, width) in pixels.', default=96)
93 | return parser.parse_args(argv)
94 |
95 | if __name__ == '__main__':
96 | main(parse_arguments(sys.argv[1:]))
97 |
--------------------------------------------------------------------------------
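Note: the script above shells out to the 'funnelReal' congealing binary (from the
LFW deep-funneling tools) and expects the people.train model next to it. A typical
invocation (the directory paths are placeholders):

    python funnel_dataset.py ~/datasets/lfw/raw ~/datasets/lfw/funneled ~/funnel

Each class directory is funneled into a temporary directory, then center-cropped
to --image_size pixels and saved as PNG.
--------------------------------------------------------------------------------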
/facenet/tmp/nn4.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | # pylint: disable=missing-docstring
24 | from __future__ import absolute_import
25 | from __future__ import division
26 | from __future__ import print_function
27 |
28 | import tensorflow as tf
29 | import models.network as network
30 |
31 | def inference(images, keep_probability, phase_train=True, weight_decay=0.0):
32 | """ Define an inference network for face recognition based
33 | on inception modules using batch normalization
34 |
35 | Args:
36 | images: The images to run inference on, dimensions batch_size x height x width x channels
37 | phase_train: True if batch normalization should operate in training mode
38 | """
39 | endpoints = {}
40 | net = network.conv(images, 3, 64, 7, 7, 2, 2, 'SAME', 'conv1_7x7', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
41 | endpoints['conv1'] = net
42 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool1')
43 | endpoints['pool1'] = net
44 | net = network.conv(net, 64, 64, 1, 1, 1, 1, 'SAME', 'conv2_1x1', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
45 | endpoints['conv2_1x1'] = net
46 | net = network.conv(net, 64, 192, 3, 3, 1, 1, 'SAME', 'conv3_3x3', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
47 | endpoints['conv3_3x3'] = net
48 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool3')
49 | endpoints['pool3'] = net
50 |
51 | net = network.inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX', 'incept3a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
52 | endpoints['incept3a'] = net
53 | net = network.inception(net, 256, 1, 64, 96, 128, 32, 64, 3, 64, 1, 'MAX', 'incept3b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
54 | endpoints['incept3b'] = net
55 | net = network.inception(net, 320, 2, 0, 128, 256, 32, 64, 3, 0, 2, 'MAX', 'incept3c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
56 | endpoints['incept3c'] = net
57 |
58 | net = network.inception(net, 640, 1, 256, 96, 192, 32, 64, 3, 128, 1, 'MAX', 'incept4a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
59 | endpoints['incept4a'] = net
60 | net = network.inception(net, 640, 1, 224, 112, 224, 32, 64, 3, 128, 1, 'MAX', 'incept4b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
61 | endpoints['incept4b'] = net
62 | net = network.inception(net, 640, 1, 192, 128, 256, 32, 64, 3, 128, 1, 'MAX', 'incept4c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
63 | endpoints['incept4c'] = net
64 | net = network.inception(net, 640, 1, 160, 144, 288, 32, 64, 3, 128, 1, 'MAX', 'incept4d', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
65 | endpoints['incept4d'] = net
66 | net = network.inception(net, 640, 2, 0, 160, 256, 64, 128, 3, 0, 2, 'MAX', 'incept4e', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
67 | endpoints['incept4e'] = net
68 |
69 | net = network.inception(net, 1024, 1, 384, 192, 384, 0, 0, 3, 128, 1, 'MAX', 'incept5a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
70 | endpoints['incept5a'] = net
71 | net = network.inception(net, 896, 1, 384, 192, 384, 0, 0, 3, 128, 1, 'MAX', 'incept5b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
72 | endpoints['incept5b'] = net
73 | net = network.apool(net, 3, 3, 1, 1, 'VALID', 'pool6')
74 | endpoints['pool6'] = net
75 | net = tf.reshape(net, [-1, 896])
76 | endpoints['prelogits'] = net
77 | net = tf.nn.dropout(net, keep_probability)
78 | endpoints['dropout'] = net
79 |
80 | return net, endpoints
81 |
--------------------------------------------------------------------------------
/facenet/tmp/nn2.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | # pylint: disable=missing-docstring
24 | from __future__ import absolute_import
25 | from __future__ import division
26 | from __future__ import print_function
27 |
28 | import tensorflow as tf
29 | import models.network as network
30 |
31 | def inference(images, keep_probability, phase_train=True, weight_decay=0.0):
32 | """ Define an inference network for face recognition based
33 | on inception modules using batch normalization
34 |
35 | Args:
36 | images: The images to run inference on, dimensions batch_size x height x width x channels
37 | phase_train: True if batch normalization should operate in training mode
38 | """
39 | endpoints = {}
40 | net = network.conv(images, 3, 64, 7, 7, 2, 2, 'SAME', 'conv1_7x7', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
41 | endpoints['conv1'] = net
42 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool1')
43 | endpoints['pool1'] = net
44 | net = network.conv(net, 64, 64, 1, 1, 1, 1, 'SAME', 'conv2_1x1', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
45 | endpoints['conv2_1x1'] = net
46 | net = network.conv(net, 64, 192, 3, 3, 1, 1, 'SAME', 'conv3_3x3', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
47 | endpoints['conv3_3x3'] = net
48 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool3')
49 | endpoints['pool3'] = net
50 |
51 | net = network.inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX', 'incept3a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
52 | endpoints['incept3a'] = net
53 | net = network.inception(net, 256, 1, 64, 96, 128, 32, 64, 3, 64, 1, 'MAX', 'incept3b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
54 | endpoints['incept3b'] = net
55 | net = network.inception(net, 320, 2, 0, 128, 256, 32, 64, 3, 0, 2, 'MAX', 'incept3c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
56 | endpoints['incept3c'] = net
57 |
58 | net = network.inception(net, 640, 1, 256, 96, 192, 32, 64, 3, 128, 1, 'MAX', 'incept4a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
59 | endpoints['incept4a'] = net
60 | net = network.inception(net, 640, 1, 224, 112, 224, 32, 64, 3, 128, 1, 'MAX', 'incept4b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
61 | endpoints['incept4b'] = net
62 | net = network.inception(net, 640, 1, 192, 128, 256, 32, 64, 3, 128, 1, 'MAX', 'incept4c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
63 | endpoints['incept4c'] = net
64 | net = network.inception(net, 640, 1, 160, 144, 288, 32, 64, 3, 128, 1, 'MAX', 'incept4d', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
65 | endpoints['incept4d'] = net
66 | net = network.inception(net, 640, 2, 0, 160, 256, 64, 128, 3, 0, 2, 'MAX', 'incept4e', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
67 | endpoints['incept4e'] = net
68 |
69 | net = network.inception(net, 1024, 1, 384, 192, 384, 48, 128, 3, 128, 1, 'MAX', 'incept5a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
70 | endpoints['incept5a'] = net
71 | net = network.inception(net, 1024, 1, 384, 192, 384, 48, 128, 3, 128, 1, 'MAX', 'incept5b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
72 | endpoints['incept5b'] = net
73 | net = network.apool(net, 7, 7, 1, 1, 'VALID', 'pool6')
74 | endpoints['pool6'] = net
75 | net = tf.reshape(net, [-1, 1024])
76 | endpoints['prelogits'] = net
77 | net = tf.nn.dropout(net, keep_probability)
78 | endpoints['dropout'] = net
79 |
80 | return net, endpoints
81 |
--------------------------------------------------------------------------------
/facenet/tmp/nn3.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | # pylint: disable=missing-docstring
24 | from __future__ import absolute_import
25 | from __future__ import division
26 | from __future__ import print_function
27 |
28 | import tensorflow as tf
29 | import models.network as network
30 |
31 | def inference(images, keep_probability, phase_train=True, weight_decay=0.0):
32 | """ Define an inference network for face recognition based
33 | on inception modules using batch normalization
34 |
35 | Args:
36 | images: The images to run inference on, dimensions batch_size x height x width x channels
37 | phase_train: True if batch normalization should operate in training mode
38 | """
39 | endpoints = {}
40 | net = network.conv(images, 3, 64, 7, 7, 2, 2, 'SAME', 'conv1_7x7', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
41 | endpoints['conv1'] = net
42 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool1')
43 | endpoints['pool1'] = net
44 | net = network.conv(net, 64, 64, 1, 1, 1, 1, 'SAME', 'conv2_1x1', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
45 | endpoints['conv2_1x1'] = net
46 | net = network.conv(net, 64, 192, 3, 3, 1, 1, 'SAME', 'conv3_3x3', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
47 | endpoints['conv3_3x3'] = net
48 | net = network.mpool(net, 3, 3, 2, 2, 'SAME', 'pool3')
49 | endpoints['pool3'] = net
50 |
51 | net = network.inception(net, 192, 1, 64, 96, 128, 16, 32, 3, 32, 1, 'MAX', 'incept3a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
52 | endpoints['incept3a'] = net
53 | net = network.inception(net, 256, 1, 64, 96, 128, 32, 64, 3, 64, 1, 'MAX', 'incept3b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
54 | endpoints['incept3b'] = net
55 | net = network.inception(net, 320, 2, 0, 128, 256, 32, 64, 3, 0, 2, 'MAX', 'incept3c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
56 | endpoints['incept3c'] = net
57 |
58 | net = network.inception(net, 640, 1, 256, 96, 192, 32, 64, 3, 128, 1, 'MAX', 'incept4a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
59 | endpoints['incept4a'] = net
60 | net = network.inception(net, 640, 1, 224, 112, 224, 32, 64, 3, 128, 1, 'MAX', 'incept4b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
61 | endpoints['incept4b'] = net
62 | net = network.inception(net, 640, 1, 192, 128, 256, 32, 64, 3, 128, 1, 'MAX', 'incept4c', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
63 | endpoints['incept4c'] = net
64 | net = network.inception(net, 640, 1, 160, 144, 288, 32, 64, 3, 128, 1, 'MAX', 'incept4d', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
65 | endpoints['incept4d'] = net
66 | net = network.inception(net, 640, 2, 0, 160, 256, 64, 128, 3, 0, 2, 'MAX', 'incept4e', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
67 | endpoints['incept4e'] = net
68 |
69 | net = network.inception(net, 1024, 1, 384, 192, 384, 48, 128, 3, 128, 1, 'MAX', 'incept5a', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
70 | endpoints['incept5a'] = net
71 | net = network.inception(net, 1024, 1, 384, 192, 384, 48, 128, 3, 128, 1, 'MAX', 'incept5b', phase_train=phase_train, use_batch_norm=True, weight_decay=weight_decay)
72 | endpoints['incept5b'] = net
73 | net = network.apool(net, 5, 5, 1, 1, 'VALID', 'pool6')
74 | endpoints['pool6'] = net
75 | net = tf.reshape(net, [-1, 1024])
76 | endpoints['prelogits'] = net
77 | net = tf.nn.dropout(net, keep_probability)
78 | endpoints['dropout'] = net
79 |
80 | return net, endpoints
81 |
--------------------------------------------------------------------------------
/facenet/tmp/mtcnn_test.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2016 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 | from __future__ import absolute_import
23 | from __future__ import division
24 | from __future__ import print_function
25 |
26 | import tensorflow as tf
27 | import numpy as np
28 | import align.detect_face
29 |
30 | g1 = tf.Graph()
31 | with g1.as_default():
32 | data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
33 | pnet = align.detect_face.PNet({'data':data})
34 | sess1 = tf.Session(graph=g1)
35 | pnet.load('../../data/det1.npy', sess1)
36 | pnet_fun = lambda img : sess1.run(('conv4-2/BiasAdd:0', 'prob1:0'), feed_dict={'input:0':img})
37 | np.random.seed(666)
38 | img = np.random.rand(1,3,150,150)
39 | img = np.transpose(img, (0,2,3,1))
40 |
41 | np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
42 |
43 | # prob1=sess1.run('prob1:0', feed_dict={data:img})
44 | # print(prob1[0,0,0,:])
45 | # conv42=sess1.run('conv4-2/BiasAdd:0', feed_dict={data:img})
46 | # print(conv42[0,0,0,:])
47 |
48 | # conv42, prob1 = pnet_fun(img)
49 | # print(prob1[0,0,0,:])
50 | # print(conv42[0,0,0,:])
51 |
52 |
53 | # [ 0.9929 0.0071] prob1, caffe
54 | # [ 0.9929 0.0071] prob1, tensorflow
55 |
56 | # [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, caffe
57 | # [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, tensorflow
58 |
59 |
60 | g2 = tf.Graph()
61 | with g2.as_default():
62 | data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
63 | rnet = align.detect_face.RNet({'data':data})
64 | sess2 = tf.Session(graph=g2)
65 | rnet.load('../../data/det2.npy', sess2)
66 | rnet_fun = lambda img : sess2.run(('conv5-2/conv5-2:0', 'prob1:0'), feed_dict={'input:0':img})
67 | np.random.seed(666)
68 | img = np.random.rand(73,3,24,24)
69 | img = np.transpose(img, (0,2,3,1))
70 |
71 | # np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
72 | #
73 | # prob1=sess2.run('prob1:0', feed_dict={data:img})
74 | # print(prob1[0,:])
75 | #
76 | # conv52=sess2.run('conv5-2/conv5-2:0', feed_dict={data:img})
77 | # print(conv52[0,:])
78 |
79 | # [ 0.9945 0.0055] prob1, caffe
80 | # [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, caffe
81 |
82 | # [ 0.9945 0.0055] prob1, tensorflow
83 | # [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, tensorflow
84 |
85 |
86 | g3 = tf.Graph()
87 | with g3.as_default():
88 | data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
89 | onet = align.detect_face.ONet({'data':data})
90 | sess3 = tf.Session(graph=g3)
91 | onet.load('../../data/det3.npy', sess3)
92 | onet_fun = lambda img : sess3.run(('conv6-2/conv6-2:0', 'conv6-3/conv6-3:0', 'prob1:0'), feed_dict={'input:0':img})
93 | np.random.seed(666)
94 | img = np.random.rand(11,3,48,48)
95 | img = np.transpose(img, (0,2,3,1))
96 |
97 | # np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
98 | #
99 | # prob1=sess3.run('prob1:0', feed_dict={data:img})
100 | # print(prob1[0,:])
101 | # print('prob1, tensorflow')
102 | #
103 | # conv62=sess3.run('conv6-2/conv6-2:0', feed_dict={data:img})
104 | # print(conv62[0,:])
105 | # print('conv6-2, tensorflow')
106 | #
107 | # conv63=sess3.run('conv6-3/conv6-3:0', feed_dict={data:img})
108 | # print(conv63[0,:])
109 | # print('conv6-3, tensorflow')
110 |
111 | # [ 0.9988 0.0012] prob1, caffe
112 | # [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, caffe
113 | # [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, caffe
114 |
115 | # [ 0.9988 0.0012] prob1, tensorflow
116 | # [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, tensorflow
117 | # [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, tensorflow
118 |
119 | #pnet_fun = lambda img : sess1.run(('conv4-2/BiasAdd:0', 'prob1:0'), feed_dict={'input:0':img})
120 |
121 |
--------------------------------------------------------------------------------
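Note: the script above eyeballs Caffe-vs-TensorFlow parity for the three MTCNN
stages by printing the first output vector of each network. A sketch of turning
the PNet comparison into an automated check, using the reference values recorded
in the comments above (the tolerance is illustrative):

    conv42, prob1 = pnet_fun(img)
    np.testing.assert_allclose(prob1[0, 0, 0, :], [0.9929, 0.0071], atol=1e-4)
    np.testing.assert_allclose(conv42[0, 0, 0, :],
                               [0.1207, -0.0116, -0.1231, -0.0463], atol=1e-4)

--------------------------------------------------------------------------------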
/facenet/tmp/vggface16.py:
--------------------------------------------------------------------------------
1 | """Load the VGG Face model into TensorFlow.
2 | Download the model from http://www.robots.ox.ac.uk/~vgg/software/vgg_face/
3 | and point to the file 'vgg_face.mat'
4 | """
5 | import numpy as np
6 | from scipy import io
7 | import tensorflow as tf
8 |
9 | def load(filename, images):
10 | #filename = '../data/vgg_face_matconvnet/data/vgg_face.mat'
11 | vgg16 = io.loadmat(filename)
12 | vgg16Layers = vgg16['net'][0][0]['layers']
13 |
14 | # A function to get the weights of the VGG layers
15 | def vbbWeights(layerNumber):
16 | W = vgg16Layers[0][layerNumber][0][0][2][0][0]
17 | W = tf.constant(W)
18 | return W
19 |
20 | def vbbConstants(layerNumber):
21 | b = vgg16Layers[0][layerNumber][0][0][2][0][1].T
22 | b = tf.constant(np.reshape(b, (b.size)))
23 | return b
24 |
25 | modelGraph = {}
26 | modelGraph['input'] = images
27 |
28 | modelGraph['conv1_1'] = tf.nn.conv2d(modelGraph['input'], filter = vbbWeights(0), strides = [1, 1, 1, 1], padding = 'SAME')
29 | modelGraph['relu1_1'] = tf.nn.relu(modelGraph['conv1_1'] + vbbConstants(0))
30 | modelGraph['conv1_2'] = tf.nn.conv2d(modelGraph['relu1_1'], filter = vbbWeights(2), strides = [1, 1, 1, 1], padding = 'SAME')
31 | modelGraph['relu1_2'] = tf.nn.relu(modelGraph['conv1_2'] + vbbConstants(2))
32 | modelGraph['pool1'] = tf.nn.max_pool(modelGraph['relu1_2'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
33 |
34 | modelGraph['conv2_1'] = tf.nn.conv2d(modelGraph['pool1'], filter = vbbWeights(5), strides = [1, 1, 1, 1], padding = 'SAME')
35 | modelGraph['relu2_1'] = tf.nn.relu(modelGraph['conv2_1'] + vbbConstants(5))
36 | modelGraph['conv2_2'] = tf.nn.conv2d(modelGraph['relu2_1'], filter = vbbWeights(7), strides = [1, 1, 1, 1], padding = 'SAME')
37 | modelGraph['relu2_2'] = tf.nn.relu(modelGraph['conv2_2'] + vbbConstants(7))
38 | modelGraph['pool2'] = tf.nn.max_pool(modelGraph['relu2_2'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
39 |
40 | modelGraph['conv3_1'] = tf.nn.conv2d(modelGraph['pool2'], filter = vbbWeights(10), strides = [1, 1, 1, 1], padding = 'SAME')
41 | modelGraph['relu3_1'] = tf.nn.relu(modelGraph['conv3_1'] + vbbConstants(10))
42 | modelGraph['conv3_2'] = tf.nn.conv2d(modelGraph['relu3_1'], filter = vbbWeights(12), strides = [1, 1, 1, 1], padding = 'SAME')
43 | modelGraph['relu3_2'] = tf.nn.relu(modelGraph['conv3_2'] + vbbConstants(12))
44 | modelGraph['conv3_3'] = tf.nn.conv2d(modelGraph['relu3_2'], filter = vbbWeights(14), strides = [1, 1, 1, 1], padding = 'SAME')
45 | modelGraph['relu3_3'] = tf.nn.relu(modelGraph['conv3_3'] + vbbConstants(14))
46 | modelGraph['pool3'] = tf.nn.max_pool(modelGraph['relu3_3'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
47 |
48 | modelGraph['conv4_1'] = tf.nn.conv2d(modelGraph['pool3'], filter = vbbWeights(17), strides = [1, 1, 1, 1], padding = 'SAME')
49 | modelGraph['relu4_1'] = tf.nn.relu(modelGraph['conv4_1'] + vbbConstants(17))
50 | modelGraph['conv4_2'] = tf.nn.conv2d(modelGraph['relu4_1'], filter = vbbWeights(19), strides = [1, 1, 1, 1], padding = 'SAME')
51 | modelGraph['relu4_2'] = tf.nn.relu(modelGraph['conv4_2'] + vbbConstants(19))
52 | modelGraph['conv4_3'] = tf.nn.conv2d(modelGraph['relu4_2'], filter = vbbWeights(21), strides = [1, 1, 1, 1], padding = 'SAME')
53 | modelGraph['relu4_3'] = tf.nn.relu(modelGraph['conv4_3'] + vbbConstants(21))
54 | modelGraph['pool4'] = tf.nn.max_pool(modelGraph['relu4_3'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
55 |
56 | modelGraph['conv5_1'] = tf.nn.conv2d(modelGraph['pool4'], filter = vbbWeights(24), strides = [1, 1, 1, 1], padding = 'SAME')
57 | modelGraph['relu5_1'] = tf.nn.relu(modelGraph['conv5_1'] + vbbConstants(24))
58 | modelGraph['conv5_2'] = tf.nn.conv2d(modelGraph['relu5_1'], filter = vbbWeights(26), strides = [1, 1, 1, 1], padding = 'SAME')
59 | modelGraph['relu5_2'] = tf.nn.relu(modelGraph['conv5_2'] + vbbConstants(26))
60 | modelGraph['conv5_3'] = tf.nn.conv2d(modelGraph['relu5_2'], filter = vbbWeights(28), strides = [1, 1, 1, 1], padding = 'SAME')
61 | modelGraph['relu5_3'] = tf.nn.relu(modelGraph['conv5_3'] + vbbConstants(28))
62 | modelGraph['pool5'] = tf.nn.max_pool(modelGraph['relu5_3'], ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
63 |
64 | modelGraph['resh1'] = tf.reshape(modelGraph['pool5'], [-1, 25088])
65 | modelGraph['fc6'] = tf.nn.relu_layer(modelGraph['resh1'], tf.reshape(vbbWeights(31), [25088, 4096]), vbbConstants(31))
66 | modelGraph['dropout1'] = tf.nn.dropout(modelGraph['fc6'], 0.5)
67 | modelGraph['fc7'] = tf.nn.relu_layer(modelGraph['dropout1'], tf.squeeze(vbbWeights(34), [0, 1]), vbbConstants(34))
68 | modelGraph['dropout2'] = tf.nn.dropout(modelGraph['fc7'], 0.5)
69 | modelGraph['fc8'] = tf.nn.relu_layer(modelGraph['dropout2'], tf.squeeze(vbbWeights(37), [0, 1]), vbbConstants(37))
70 |
71 | return modelGraph
72 |
--------------------------------------------------------------------------------
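Note: MatConvNet stores fully connected layers as convolution kernels, which is
why fc6 above reshapes vbbWeights(31) from a 7x7x512x4096 kernel into a dense
25088x4096 matrix (7 * 7 * 512 = 25088), while the 1x1 kernels of fc7 and fc8
only need their leading singleton dimensions squeezed away.
--------------------------------------------------------------------------------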
/facenet/tmp/mtcnn_test_pnet_dbg.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 | import scipy.io as io
8 | import align.detect_face
9 |
10 | #ref = io.loadmat('pnet_dbg.mat')
11 | with tf.Graph().as_default():
12 | sess = tf.Session()
13 | with sess.as_default():
14 | with tf.variable_scope('pnet'):
15 | # data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
16 | data = tf.placeholder(tf.float32, (1,1610, 1901,3), 'input')
17 | pnet = align.detect_face.PNet({'data':data})
18 | pnet.load('../../data/det1.npy', sess)
19 | # with tf.variable_scope('rnet'):
20 | # data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
21 | # rnet = align.detect_face.RNet({'data':data})
22 | # rnet.load('../../data/det2.npy', sess)
23 | # with tf.variable_scope('onet'):
24 | # data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
25 | # onet = align.detect_face.ONet({'data':data})
26 | # onet.load('../../data/det3.npy', sess)
27 |
28 | pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
29 | # rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
30 | # onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
31 |
32 |
33 | ref = io.loadmat('pnet_dbg.mat')
34 |
35 | img_x = np.expand_dims(ref['im_data'], 0)
36 | img_y = np.transpose(img_x, (0,2,1,3))
37 | out = pnet_fun(img_y)
38 | out0 = np.transpose(out[0], (0,2,1,3))
39 | out1 = np.transpose(out[1], (0,2,1,3))
40 |
41 | #np.where(abs(out0[0,:,:,:]-ref['out0'])>1e-18)
42 | qqq3 = np.where(abs(out1[0,:,:,:]-ref['out1'])>1e-7) # 3390 diffs with softmax2
43 | print(qqq3[0].shape)
44 |
45 | np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
46 |
47 | # prob1=sess1.run('prob1:0', feed_dict={data:img})
48 | # print(prob1[0,0,0,:])
49 | # conv42=sess1.run('conv4-2/BiasAdd:0', feed_dict={data:img})
50 | # print(conv42[0,0,0,:])
51 |
52 | # conv42, prob1 = pnet_fun(img)
53 | # print(prob1[0,0,0,:])
54 | # print(conv42[0,0,0,:])
55 |
56 |
57 | # [ 0.9929 0.0071] prob1, caffe
58 | # [ 0.9929 0.0071] prob1, tensorflow
59 |
60 | # [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, caffe
61 | # [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, tensorflow
62 |
63 |
64 | # g2 = tf.Graph()
65 | # with g2.as_default():
66 | # data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
67 | # rnet = align.detect_face.RNet({'data':data})
68 | # sess2 = tf.Session(graph=g2)
69 | # rnet.load('../../data/det2.npy', sess2)
70 | # rnet_fun = lambda img : sess2.run(('conv5-2/conv5-2:0', 'prob1:0'), feed_dict={'input:0':img})
71 | # np.random.seed(666)
72 | # img = np.random.rand(73,3,24,24)
73 | # img = np.transpose(img, (0,2,3,1))
74 |
75 | # np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
76 | #
77 | # prob1=sess2.run('prob1:0', feed_dict={data:img})
78 | # print(prob1[0,:])
79 | #
80 | # conv52=sess2.run('conv5-2/conv5-2:0', feed_dict={data:img})
81 | # print(conv52[0,:])
82 |
83 | # [ 0.9945 0.0055] prob1, caffe
84 | # [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, caffe
85 |
86 | # [ 0.9945 0.0055] prob1, tensorflow
87 | # [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, tensorflow
88 |
89 |
90 | # g3 = tf.Graph()
91 | # with g3.as_default():
92 | # data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
93 | # onet = align.detect_face.ONet({'data':data})
94 | # sess3 = tf.Session(graph=g3)
95 | # onet.load('../../data/det3.npy', sess3)
96 | # onet_fun = lambda img : sess3.run(('conv6-2/conv6-2:0', 'conv6-3/conv6-3:0', 'prob1:0'), feed_dict={'input:0':img})
97 | # np.random.seed(666)
98 | # img = np.random.rand(11,3,48,48)
99 | # img = np.transpose(img, (0,2,3,1))
100 |
101 | # np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
102 | #
103 | # prob1=sess3.run('prob1:0', feed_dict={data:img})
104 | # print(prob1[0,:])
105 | # print('prob1, tensorflow')
106 | #
107 | # conv62=sess3.run('conv6-2/conv6-2:0', feed_dict={data:img})
108 | # print(conv62[0,:])
109 | # print('conv6-2, tensorflow')
110 | #
111 | # conv63=sess3.run('conv6-3/conv6-3:0', feed_dict={data:img})
112 | # print(conv63[0,:])
113 | # print('conv6-3, tensorflow')
114 |
115 | # [ 0.9988 0.0012] prob1, caffe
116 | # [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, caffe
117 | # [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, caffe
118 |
119 | # [ 0.9988 0.0012] prob1, tensorflow
120 | # [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, tensorflow
121 | # [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, tensorflow
122 |
123 | #pnet_fun = lambda img : sess1.run(('conv4-2/BiasAdd:0', 'prob1:0'), feed_dict={'input:0':img})
124 |
125 |
--------------------------------------------------------------------------------
/facenet/src/freeze_graph.py:
--------------------------------------------------------------------------------
1 | """Imports a model metagraph and checkpoint file, converts the variables to constants
2 | and exports the model as a graphdef protobuf
3 | """
4 | # MIT License
5 | #
6 | # Copyright (c) 2016 David Sandberg
7 | #
8 | # Permission is hereby granted, free of charge, to any person obtaining a copy
9 | # of this software and associated documentation files (the "Software"), to deal
10 | # in the Software without restriction, including without limitation the rights
11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | # copies of the Software, and to permit persons to whom the Software is
13 | # furnished to do so, subject to the following conditions:
14 | #
15 | # The above copyright notice and this permission notice shall be included in all
16 | # copies or substantial portions of the Software.
17 | #
18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | # SOFTWARE.
25 |
26 | from __future__ import absolute_import
27 | from __future__ import division
28 | from __future__ import print_function
29 |
30 | from tensorflow.python.framework import graph_util
31 | import tensorflow as tf
32 | import argparse
33 | import os
34 | import sys
35 | import facenet
36 | from six.moves import xrange # @UnresolvedImport
37 |
38 | def main(args):
39 | with tf.Graph().as_default():
40 | with tf.Session() as sess:
41 | # Load the model metagraph and checkpoint
42 | print('Model directory: %s' % args.model_dir)
43 | meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
44 |
45 | print('Metagraph file: %s' % meta_file)
46 | print('Checkpoint file: %s' % ckpt_file)
47 |
48 | model_dir_exp = os.path.expanduser(args.model_dir)
49 | saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file), clear_devices=True)
50 | tf.get_default_session().run(tf.global_variables_initializer())
51 | tf.get_default_session().run(tf.local_variables_initializer())
52 | saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))
53 |
54 | # Retrieve the protobuf graph definition and fix the batch norm nodes
55 | input_graph_def = sess.graph.as_graph_def()
56 |
57 | # Freeze the graph def
58 | output_graph_def = freeze_graph_def(sess, input_graph_def, 'embeddings,label_batch')
59 |
60 | # Serialize and dump the output graph to the filesystem
61 | with tf.gfile.GFile(args.output_file, 'wb') as f:
62 | f.write(output_graph_def.SerializeToString())
63 | print("%d ops in the final graph: %s" % (len(output_graph_def.node), args.output_file))
64 |
65 | def freeze_graph_def(sess, input_graph_def, output_node_names):
66 | for node in input_graph_def.node:
67 | if node.op == 'RefSwitch':
68 | node.op = 'Switch'
69 | for index in xrange(len(node.input)):
70 | if 'moving_' in node.input[index]:
71 | node.input[index] = node.input[index] + '/read'
72 | elif node.op == 'AssignSub':
73 | node.op = 'Sub'
74 | if 'use_locking' in node.attr: del node.attr['use_locking']
75 | elif node.op == 'AssignAdd':
76 | node.op = 'Add'
77 | if 'use_locking' in node.attr: del node.attr['use_locking']
78 |
79 | # Get the list of important nodes
80 | whitelist_names = []
81 | for node in input_graph_def.node:
82 | if (node.name.startswith('InceptionResnet') or node.name.startswith('embeddings') or
83 | node.name.startswith('image_batch') or node.name.startswith('label_batch') or
84 | node.name.startswith('phase_train') or node.name.startswith('Logits')):
85 | whitelist_names.append(node.name)
86 |
87 | # Replace all the variables in the graph with constants of the same values
88 | output_graph_def = graph_util.convert_variables_to_constants(
89 | sess, input_graph_def, output_node_names.split(","),
90 | variable_names_whitelist=whitelist_names)
91 | return output_graph_def
92 |
93 | def parse_arguments(argv):
94 | parser = argparse.ArgumentParser()
95 |
96 | parser.add_argument('model_dir', type=str,
97 | help='Directory containing the metagraph (.meta) file and the checkpoint (ckpt) file containing model parameters')
98 | parser.add_argument('output_file', type=str,
99 | help='Filename for the exported graphdef protobuf (.pb)')
100 | return parser.parse_args(argv)
101 |
102 | if __name__ == '__main__':
103 | main(parse_arguments(sys.argv[1:]))
104 |
--------------------------------------------------------------------------------
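Note: a hedged usage sketch for the exporter above. The model directory and
output filename are placeholders; once frozen, the graph can be reloaded without
the checkpoint (tensor names such as 'embeddings:0' exist only if the original
graph defined them):

    # Freeze:  python src/freeze_graph.py ~/models/20170512-110547 frozen_model.pb
    import tensorflow as tf

    with tf.gfile.GFile('frozen_model.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        embeddings = graph.get_tensor_by_name('embeddings:0')

--------------------------------------------------------------------------------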
/facenet/src/generative/models/dfc_vae.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2017 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | """Variational autoencoder based on the paper
24 | 'Deep Feature Consistent Variational Autoencoder'
25 | (https://arxiv.org/pdf/1610.00291.pdf)
26 | """
27 |
28 | from __future__ import absolute_import
29 | from __future__ import division
30 | from __future__ import print_function
31 |
32 | import tensorflow as tf
33 | import tensorflow.contrib.slim as slim
34 | import generative.models.vae_base # @UnresolvedImport
35 |
36 |
37 | class Vae(generative.models.vae_base.Vae):
38 |
39 | def __init__(self, latent_variable_dim):
40 | super(Vae, self).__init__(latent_variable_dim, 64)
41 |
42 | def encoder(self, images, is_training):
43 | activation_fn = leaky_relu # tf.nn.relu
44 | weight_decay = 0.0
45 | with tf.variable_scope('encoder'):
46 | with slim.arg_scope([slim.batch_norm],
47 | is_training=is_training):
48 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
49 | weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
50 | weights_regularizer=slim.l2_regularizer(weight_decay),
51 | normalizer_fn=slim.batch_norm,
52 | normalizer_params=self.batch_norm_params):
53 | net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
54 | net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
55 | net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
56 | net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
57 | net = slim.flatten(net)
58 | fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
59 | fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
60 | return fc1, fc2
61 |
62 | def decoder(self, latent_var, is_training):
63 | activation_fn = leaky_relu # tf.nn.relu
64 | weight_decay = 0.0
65 | with tf.variable_scope('decoder'):
66 | with slim.arg_scope([slim.batch_norm],
67 | is_training=is_training):
68 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
69 | weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
70 | weights_regularizer=slim.l2_regularizer(weight_decay),
71 | normalizer_fn=slim.batch_norm,
72 | normalizer_params=self.batch_norm_params):
73 | net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
74 | net = tf.reshape(net, [-1,4,4,256], name='Reshape')
75 |
76 | net = tf.image.resize_nearest_neighbor(net, size=(8,8), name='Upsample_1')
77 | net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1')
78 |
79 | net = tf.image.resize_nearest_neighbor(net, size=(16,16), name='Upsample_2')
80 | net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2')
81 |
82 | net = tf.image.resize_nearest_neighbor(net, size=(32,32), name='Upsample_3')
83 | net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3')
84 |
85 | net = tf.image.resize_nearest_neighbor(net, size=(64,64), name='Upsample_4')
86 | net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4')
87 |
88 | return net
89 |
90 | def leaky_relu(x):
91 | return tf.maximum(0.1*x,x)
92 |
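
The encoder returns two fully connected outputs (`Fc_1`, `Fc_2`); in a VAE of this kind these are conventionally the latent mean and log-variance, which are combined with the reparameterization trick before decoding. A minimal sketch under that assumption:

```python
import tensorflow as tf

def sample_latent(mean, log_variance):
    # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I).
    # Assumes the two encoder outputs are the latent mean and log-variance.
    eps = tf.random_normal(tf.shape(mean))
    return mean + tf.exp(log_variance / 2) * eps
```
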
--------------------------------------------------------------------------------
/facenet/tmp/download_vgg_face_dataset.py:
--------------------------------------------------------------------------------
1 | """Download the VGG face dataset from URLs given by http://www.robots.ox.ac.uk/~vgg/data/vgg_face/vgg_face_dataset.tar.gz
2 | """
3 | # MIT License
4 | #
5 | # Copyright (c) 2016 David Sandberg
6 | #
7 | # Permission is hereby granted, free of charge, to any person obtaining a copy
8 | # of this software and associated documentation files (the "Software"), to deal
9 | # in the Software without restriction, including without limitation the rights
10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | # copies of the Software, and to permit persons to whom the Software is
12 | # furnished to do so, subject to the following conditions:
13 | #
14 | # The above copyright notice and this permission notice shall be included in all
15 | # copies or substantial portions of the Software.
16 | #
17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | # SOFTWARE.
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | from scipy import misc
30 | import numpy as np
31 | from skimage import io
32 | import sys
33 | import argparse
34 | import os
35 | import socket
36 | from six.moves.urllib.error import HTTPError, URLError
37 | from six.moves.http_client import HTTPException
38 |
39 | def main(args):
40 | socket.setdefaulttimeout(30)
41 | textfile_names = os.listdir(args.dataset_descriptor)
42 | for textfile_name in textfile_names:
43 | if textfile_name.endswith('.txt'):
44 | with open(os.path.join(args.dataset_descriptor, textfile_name), 'rt') as f:
45 | lines = f.readlines()
46 | dir_name = textfile_name.split('.')[0]
47 | class_path = os.path.join(args.dataset_descriptor, dir_name)
48 | if not os.path.exists(class_path):
49 | os.makedirs(class_path)
50 | for line in lines:
51 | x = line.split(' ')
52 | filename = x[0]
53 | url = x[1]
54 |                 box = np.rint(np.array(list(map(float, x[2:6]))))  # x1,y1,x2,y2
55 | image_path = os.path.join(args.dataset_descriptor, dir_name, filename+'.'+args.output_format)
56 | error_path = os.path.join(args.dataset_descriptor, dir_name, filename+'.err')
57 | if not os.path.exists(image_path) and not os.path.exists(error_path):
58 | try:
59 | img = io.imread(url, mode='RGB')
60 | except (HTTPException, HTTPError, URLError, IOError, ValueError, IndexError, OSError) as e:
61 | error_message = '{}: {}'.format(url, e)
62 | save_error_message_file(error_path, error_message)
63 | else:
64 | try:
65 | if img.ndim == 2:
66 | img = to_rgb(img)
67 | if img.ndim != 3:
68 | raise ValueError('Wrong number of image dimensions')
69 | hist = np.histogram(img, 255, density=True)
70 | if hist[0][0]>0.9 and hist[0][254]>0.9:
71 | raise ValueError('Image is mainly black or white')
72 | else:
73 | # Crop image according to dataset descriptor
74 | img_cropped = img[int(box[1]):int(box[3]),int(box[0]):int(box[2]),:]
75 |                                 # Scale to the requested image size (256x256 by default)
76 |                                 img_resized = misc.imresize(img_cropped, (args.image_size,args.image_size))
77 |                                 # Save the image in the requested output format
78 |                                 misc.imsave(image_path, img_resized)
79 | except ValueError as e:
80 | error_message = '{}: {}'.format(url, e)
81 | save_error_message_file(error_path, error_message)
82 |
83 | def save_error_message_file(filename, error_message):
84 | print(error_message)
85 | with open(filename, "w") as textfile:
86 | textfile.write(error_message)
87 |
88 | def to_rgb(img):
89 | w, h = img.shape
90 | ret = np.empty((w, h, 3), dtype=np.uint8)
91 | ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
92 | return ret
93 |
94 | def parse_arguments(argv):
95 | parser = argparse.ArgumentParser()
96 | parser.add_argument('dataset_descriptor', type=str,
97 | help='Directory containing the text files with the image URLs. Image files will also be placed in this directory.')
98 | parser.add_argument('--output_format', type=str, help='Format of the output images', default='png', choices=['png', 'jpg'])
99 | parser.add_argument('--image_size', type=int,
100 | help='Image size (height, width) in pixels.', default=256)
101 | return parser.parse_args(argv)
102 |
103 | if __name__ == '__main__':
104 | main(parse_arguments(sys.argv[1:]))
105 |
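
The loop above expects each descriptor line to carry at least six whitespace-separated fields: a filename, a URL, and the x1 y1 x2 y2 bounding box; any trailing fields are ignored by the parser. An illustrative line (values are made up):

```
000001 http://example.com/image.jpg 100.0 50.0 300.0 250.0
```
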
--------------------------------------------------------------------------------
/facenet/tmp/visualize.py:
--------------------------------------------------------------------------------
1 | """Visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network
2 | Based on http://nbviewer.jupyter.org/github/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
3 | """
4 | # MIT License
5 | #
6 | # Copyright (c) 2016 David Sandberg
7 | #
8 | # Permission is hereby granted, free of charge, to any person obtaining a copy
9 | # of this software and associated documentation files (the "Software"), to deal
10 | # in the Software without restriction, including without limitation the rights
11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | # copies of the Software, and to permit persons to whom the Software is
13 | # furnished to do so, subject to the following conditions:
14 | #
15 | # The above copyright notice and this permission notice shall be included in all
16 | # copies or substantial portions of the Software.
17 | #
18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | # SOFTWARE.
25 |
26 | from __future__ import absolute_import
27 | from __future__ import division
28 | from __future__ import print_function
29 |
30 | import os
31 | import numpy as np
32 | import sys
33 | import argparse
34 | import tensorflow as tf
35 | import importlib
36 | from scipy import misc
37 |
38 | def main(args):
39 |
40 | network = importlib.import_module(args.model_def, 'inference')
41 |
42 | # Start with a gray image with a little noise
43 | np.random.seed(seed=args.seed)
44 | img_noise = np.random.uniform(size=(args.image_size,args.image_size,3)) + 100.0
45 |
46 | sess = tf.Session()
47 |
48 | t_input = tf.placeholder(np.float32, shape=(args.image_size,args.image_size,3), name='input') # define the input tensor
49 | image_mean = 117.0
50 | t_preprocessed = tf.expand_dims(t_input-image_mean, 0)
51 |
52 | # Build the inference graph
53 | network.inference(t_preprocessed, 1.0,
54 | phase_train=True, weight_decay=0.0)
55 |
56 | # Create a saver for restoring variables
57 | saver = tf.train.Saver(tf.global_variables())
58 |
59 | # Restore the parameters
60 | saver.restore(sess, args.model_file)
61 |
62 | layers = [op.name for op in tf.get_default_graph().get_operations() if op.type=='Conv2D']
63 | feature_nums = {layer: int(T(layer).get_shape()[-1]) for layer in layers}
64 |
65 | print('Number of layers: %d' % len(layers))
66 |
67 | for layer in sorted(feature_nums.keys()):
68 | print('%s%d' % ((layer+': ').ljust(40), feature_nums[layer]))
69 |
70 | # Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
71 | # to have non-zero gradients for features with negative initial activations.
72 | layer = 'InceptionResnetV1/Repeat_2/block8_3/Conv2d_1x1/Conv2D'
73 | #layer = 'incept4b/in4_conv1x1_31/Conv2D'
74 | result_dir = '../data/'
75 | print('Number of features in layer "%s": %d' % (layer, feature_nums[layer]))
76 |     channels = list(range(feature_nums[layer]))
77 |     np.random.shuffle(channels)
78 | for i in range(32):
79 | print('Rendering feature %d' % channels[i])
80 | channel = channels[i]
81 | img = render_naive(sess, t_input, T(layer)[:,:,:,channel], img_noise)
82 | filename = '%s_%03d.png' % (layer.replace('/', '_'), channel)
83 | misc.imsave(os.path.join(result_dir, filename), img)
84 |
85 |
86 | def T(layer):
87 | '''Helper for getting layer output tensor'''
88 | return tf.get_default_graph().get_tensor_by_name('%s:0' % layer)
89 |
90 | def visstd(a, s=0.1):
91 | '''Normalize the image range for visualization'''
92 | return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
93 |
94 | def render_naive(sess, t_input, t_obj, img0, iter_n=20, step=1.0):
95 | t_score = tf.reduce_mean(t_obj) # defining the optimization objective
96 | t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
97 |
98 | img = img0.copy()
99 | for _ in range(iter_n):
100 | g, _ = sess.run([t_grad, t_score], {t_input:img})
101 | # normalizing the gradient, so the same step size should work
102 | g /= g.std()+1e-8 # for different layers and networks
103 | img += g*step
104 | return visstd(img)
105 |
106 | def parse_arguments(argv):
107 | parser = argparse.ArgumentParser()
108 |
109 | parser.add_argument('model_file', type=str,
110 |         help='Checkpoint file containing the model parameters to restore.')
111 | parser.add_argument('--model_def', type=str,
112 | help='Model definition. Points to a module containing the definition of the inference graph.',
113 | default='models.nn4')
114 | parser.add_argument('--image_size', type=int,
115 | help='Image size (height, width) in pixels.', default=96)
116 | parser.add_argument('--seed', type=int,
117 | help='Random seed.', default=666)
118 | return parser.parse_args(argv)
119 |
120 | if __name__ == '__main__':
121 | main(parse_arguments(sys.argv[1:]))
122 |
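
`render_naive` performs normalized gradient ascent on the input image: the objective is the mean activation of the selected channel, and each step adds the gradient scaled by its own standard deviation. In LaTeX:

```latex
x_{t+1} = x_t + \eta \cdot \frac{\nabla_x \bar{a}(x_t)}{\operatorname{std}\!\left(\nabla_x \bar{a}(x_t)\right) + 10^{-8}}
```

where \bar{a} is the mean channel activation, with step size \eta = 1.0 and 20 iterations by default; `visstd` then rescales the result for display.
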
--------------------------------------------------------------------------------
/facenet/src/generative/models/dfc_vae_large.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2017 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | """Variational autoencoder based on the paper
24 | 'Deep Feature Consistent Variational Autoencoder'
25 | (https://arxiv.org/pdf/1610.00291.pdf) but with a larger image size (128x128 pixels)
26 | """
27 |
28 | from __future__ import absolute_import
29 | from __future__ import division
30 | from __future__ import print_function
31 |
32 | import tensorflow as tf
33 | import tensorflow.contrib.slim as slim
34 | import generative.models.vae_base # @UnresolvedImport
35 |
36 |
37 | class Vae(generative.models.vae_base.Vae):
38 |
39 | def __init__(self, latent_variable_dim):
40 | super(Vae, self).__init__(latent_variable_dim, 128)
41 |
42 |
43 | def encoder(self, images, is_training):
44 | activation_fn = leaky_relu # tf.nn.relu
45 | weight_decay = 0.0
46 | with tf.variable_scope('encoder'):
47 | with slim.arg_scope([slim.batch_norm],
48 | is_training=is_training):
49 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
50 | weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
51 | weights_regularizer=slim.l2_regularizer(weight_decay),
52 | normalizer_fn=slim.batch_norm,
53 | normalizer_params=self.batch_norm_params):
54 | net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
55 | net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
56 | net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
57 | net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
58 | net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
59 | net = slim.flatten(net)
60 | fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
61 | fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
62 | return fc1, fc2
63 |
64 | def decoder(self, latent_var, is_training):
65 | activation_fn = leaky_relu # tf.nn.relu
66 | weight_decay = 0.0
67 | with tf.variable_scope('decoder'):
68 | with slim.arg_scope([slim.batch_norm],
69 | is_training=is_training):
70 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
71 | weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
72 | weights_regularizer=slim.l2_regularizer(weight_decay),
73 | normalizer_fn=slim.batch_norm,
74 | normalizer_params=self.batch_norm_params):
75 | net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
76 | net = tf.reshape(net, [-1,4,4,256], name='Reshape')
77 |
78 | net = tf.image.resize_nearest_neighbor(net, size=(8,8), name='Upsample_1')
79 | net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1')
80 |
81 | net = tf.image.resize_nearest_neighbor(net, size=(16,16), name='Upsample_2')
82 | net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2')
83 |
84 | net = tf.image.resize_nearest_neighbor(net, size=(32,32), name='Upsample_3')
85 | net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3')
86 |
87 | net = tf.image.resize_nearest_neighbor(net, size=(64,64), name='Upsample_4')
88 | net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4')
89 |
90 | net = tf.image.resize_nearest_neighbor(net, size=(128,128), name='Upsample_5')
91 | net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_5')
92 | return net
93 |
94 | def leaky_relu(x):
95 | return tf.maximum(0.1*x,x)
96 |
--------------------------------------------------------------------------------
/facenet/README.md:
--------------------------------------------------------------------------------
1 | # Face Recognition using Tensorflow [![Build Status][travis-image]][travis]
2 |
3 | [travis-image]: http://travis-ci.org/davidsandberg/facenet.svg?branch=master
4 | [travis]: http://travis-ci.org/davidsandberg/facenet
5 |
6 | This is a TensorFlow implementation of the face recognizer described in the paper
7 | ["FaceNet: A Unified Embedding for Face Recognition and Clustering"](http://arxiv.org/abs/1503.03832). The project also uses ideas from the paper ["Deep Face Recognition"](http://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf) from the [Visual Geometry Group](http://www.robots.ox.ac.uk/~vgg/) at Oxford.
8 |
9 | ## Compatibility
10 | The code is tested using Tensorflow r1.7 under Ubuntu 14.04 with Python 2.7 and Python 3.5. The test cases can be found [here](https://github.com/davidsandberg/facenet/tree/master/test) and the results can be found [here](http://travis-ci.org/davidsandberg/facenet).
11 |
12 | ## News
13 | | Date | Update |
14 | |----------|--------|
15 | | 2018-04-10 | Added new models trained on Casia-WebFace and VGGFace2 (see below). Note that the models use fixed image standardization (see [wiki](https://github.com/davidsandberg/facenet/wiki/Training-using-the-VGGFace2-dataset)). |
16 | | 2018-03-31 | Added a new, more flexible input pipeline as well as a bunch of minor updates. |
17 | | 2017-05-13 | Removed a bunch of older non-slim models. Moved the last bottleneck layer into the respective models. Corrected normalization of Center Loss. |
18 | | 2017-05-06 | Added code to [train a classifier on your own images](https://github.com/davidsandberg/facenet/wiki/Train-a-classifier-on-own-images). Renamed facenet_train.py to train_tripletloss.py and facenet_train_classifier.py to train_softmax.py. |
19 | | 2017-03-02 | Added pretrained models that generate 128-dimensional embeddings.|
20 | | 2017-02-22 | Updated to Tensorflow r1.0. Added Continuous Integration using Travis-CI.|
21 | | 2017-02-03 | Added models where only trainable variables have been stored in the checkpoint. These are therefore significantly smaller. |
22 | | 2017-01-27 | Added a model trained on a subset of the MS-Celeb-1M dataset. The LFW accuracy of this model is around 0.994. |
23 | | 2017-01-02 | Updated to run with Tensorflow r0.12. Not sure if it runs with older versions of Tensorflow though. |
24 |
25 | ## Pre-trained models
26 | | Model name | LFW accuracy | Training dataset | Architecture |
27 | |-----------------|--------------|------------------|-------------|
28 | | [20180408-102900](https://drive.google.com/open?id=1R77HmFADxe87GmoLwzfgMu_HY0IhcyBz) | 0.9905 | CASIA-WebFace | [Inception ResNet v1](https://github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py) |
29 | | [20180402-114759](https://drive.google.com/open?id=1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-) | 0.9965 | VGGFace2 | [Inception ResNet v1](https://github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py) |
30 |
31 | NOTE: If you use any of the models, please do not forget to give proper credit to those providing the training dataset as well.
32 |
33 | ## Inspiration
34 | The code is heavily inspired by the [OpenFace](https://github.com/cmusatyalab/openface) implementation.
35 |
36 | ## Training data
37 | The [CASIA-WebFace](http://www.cbsr.ia.ac.cn/english/CASIA-WebFace-Database.html) dataset has been used for training. This training set consists of a total of 453 453 images over 10 575 identities after face detection. Some performance improvement has been seen if the dataset has been filtered before training. Some more information about how this was done will come later.
38 | The best performing model has been trained on the [VGGFace2](https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/) dataset consisting of ~3.3M faces and ~9000 classes.
39 |
40 | ## Pre-processing
41 |
42 | ### Face alignment using MTCNN
43 | One problem with the above approach seems to be that the Dlib face detector misses some of the hard examples (partial occlusion, silhouettes, etc). This makes the training set too "easy" which causes the model to perform worse on other benchmarks.
44 | To solve this, other face landmark detectors have been tested. One face landmark detector that has proven to work very well in this setting is the
45 | [Multi-task CNN](https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html). A Matlab/Caffe implementation can be found [here](https://github.com/kpzhang93/MTCNN_face_detection_alignment) and this has been used for face alignment with very good results. A Python/Tensorflow implementation of MTCNN can be found [here](https://github.com/davidsandberg/facenet/tree/master/src/align). This implementation does not give identical results to the Matlab/Caffe implementation but the performance is very similar.
46 |
47 | ## Running training
48 | Currently, the best results are achieved by training the model using softmax loss. Details on how to train a model using softmax loss on the CASIA-WebFace dataset can be found on the page [Classifier training of Inception-ResNet-v1](https://github.com/davidsandberg/facenet/wiki/Classifier-training-of-inception-resnet-v1).
49 |
50 | ## Pre-trained models
51 | ### Inception-ResNet-v1 model
52 | A couple of pretrained models are provided. They are trained using softmax loss with the Inception-Resnet-v1 model. The datasets have been aligned using [MTCNN](https://github.com/davidsandberg/facenet/tree/master/src/align).
53 |
54 | ## Performance
55 | The accuracy on LFW for the model [20180402-114759](https://drive.google.com/open?id=1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-) is 0.99650+-0.00252. A description of how to run the test can be found on the page [Validate on LFW](https://github.com/davidsandberg/facenet/wiki/Validate-on-lfw). Note that the input images to the model need to be standardized using fixed image standardization (use the option `--use_fixed_image_standardization` when running e.g. `validate_on_lfw.py`).
56 |
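
For reference, fixed image standardization maps pixel values with constants that do not depend on the image, unlike per-image prewhitening. A minimal sketch of the commonly used mapping (the exact constants are an assumption here, not taken from this repository):

```python
import numpy as np

def fixed_image_standardization(image):
    # Map uint8 pixels into roughly [-1, 1] using fixed constants
    # (assumed: 127.5 and 128), independent of the image statistics.
    return (np.float32(image) - 127.5) / 128.0
```
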
--------------------------------------------------------------------------------
/facenet/src/compare.py:
--------------------------------------------------------------------------------
1 | """Performs face alignment and calculates L2 distance between the embeddings of images."""
2 |
3 | # MIT License
4 | #
5 | # Copyright (c) 2016 David Sandberg
6 | #
7 | # Permission is hereby granted, free of charge, to any person obtaining a copy
8 | # of this software and associated documentation files (the "Software"), to deal
9 | # in the Software without restriction, including without limitation the rights
10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | # copies of the Software, and to permit persons to whom the Software is
12 | # furnished to do so, subject to the following conditions:
13 | #
14 | # The above copyright notice and this permission notice shall be included in all
15 | # copies or substantial portions of the Software.
16 | #
17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | # SOFTWARE.
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | from scipy import misc
30 | import tensorflow as tf
31 | import numpy as np
32 | import sys
33 | import os
34 | import copy
35 | import argparse
36 | import facenet
37 | import align.detect_face
38 |
39 | def main(args):
40 |
41 | images = load_and_align_data(args.image_files, args.image_size, args.margin, args.gpu_memory_fraction)
42 | with tf.Graph().as_default():
43 |
44 | with tf.Session() as sess:
45 |
46 | # Load the model
47 | facenet.load_model(args.model)
48 |
49 | # Get input and output tensors
50 | images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
51 | embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
52 | phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
53 |
54 | # Run forward pass to calculate embeddings
55 | feed_dict = { images_placeholder: images, phase_train_placeholder:False }
56 | emb = sess.run(embeddings, feed_dict=feed_dict)
57 |
58 | nrof_images = len(args.image_files)
59 |
60 | print('Images:')
61 | for i in range(nrof_images):
62 | print('%1d: %s' % (i, args.image_files[i]))
63 | print('')
64 |
65 | # Print distance matrix
66 | print('Distance matrix')
67 | print(' ', end='')
68 | for i in range(nrof_images):
69 | print(' %1d ' % i, end='')
70 | print('')
71 | for i in range(nrof_images):
72 | print('%1d ' % i, end='')
73 | for j in range(nrof_images):
74 | dist = np.sqrt(np.sum(np.square(np.subtract(emb[i,:], emb[j,:]))))
75 | print(' %1.4f ' % dist, end='')
76 | print('')
77 |
78 |
79 | def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
80 |
81 | minsize = 20 # minimum size of face
82 |     threshold = [ 0.6, 0.7, 0.7 ]  # thresholds for the three detection stages
83 | factor = 0.709 # scale factor
84 |
85 | print('Creating networks and loading parameters')
86 | with tf.Graph().as_default():
87 | gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
88 | sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
89 | with sess.as_default():
90 | pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
91 |
92 | tmp_image_paths=copy.copy(image_paths)
93 | img_list = []
94 | for image in tmp_image_paths:
95 | img = misc.imread(os.path.expanduser(image), mode='RGB')
96 | img_size = np.asarray(img.shape)[0:2]
97 | bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
98 | if len(bounding_boxes) < 1:
99 | image_paths.remove(image)
100 | print("can't detect face, remove ", image)
101 | continue
102 | det = np.squeeze(bounding_boxes[0,0:4])
103 | bb = np.zeros(4, dtype=np.int32)
104 | bb[0] = np.maximum(det[0]-margin/2, 0)
105 | bb[1] = np.maximum(det[1]-margin/2, 0)
106 | bb[2] = np.minimum(det[2]+margin/2, img_size[1])
107 | bb[3] = np.minimum(det[3]+margin/2, img_size[0])
108 | cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
109 | aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
110 | prewhitened = facenet.prewhiten(aligned)
111 | img_list.append(prewhitened)
112 | images = np.stack(img_list)
113 | return images
114 |
115 | def parse_arguments(argv):
116 | parser = argparse.ArgumentParser()
117 |
118 | parser.add_argument('model', type=str,
119 | help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
120 | parser.add_argument('image_files', type=str, nargs='+', help='Images to compare')
121 | parser.add_argument('--image_size', type=int,
122 | help='Image size (height, width) in pixels.', default=160)
123 | parser.add_argument('--margin', type=int,
124 | help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
125 | parser.add_argument('--gpu_memory_fraction', type=float,
126 | help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
127 | return parser.parse_args(argv)
128 |
129 | if __name__ == '__main__':
130 | main(parse_arguments(sys.argv[1:]))
131 |
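
The nested loop above prints pairwise Euclidean distances one entry at a time; the same matrix can be computed in a single vectorized NumPy expression. A minimal sketch:

```python
import numpy as np

def distance_matrix(emb):
    # Pairwise Euclidean distances between the rows of emb: (n, d) -> (n, n).
    diff = emb[:, None, :] - emb[None, :, :]
    return np.sqrt(np.sum(np.square(diff), axis=2))
```
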
--------------------------------------------------------------------------------
/facenet/contributed/batch_represent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | from __future__ import absolute_import
5 | from __future__ import division
6 | from __future__ import print_function
7 |
8 | """
9 | Allows you to generate embeddings from a directory of images in the format shown below.
10 |
11 | Instructions:
12 |
13 | Image data directory should look like the following figure:
14 | person-1
15 | ├── image-1.jpg
16 | ├── image-2.png
17 | ...
18 | └── image-p.png
19 |
20 | ...
21 |
22 | person-m
23 | ├── image-1.png
24 | ├── image-2.jpg
25 | ...
26 | └── image-q.png
27 |
28 | Trained Model:
29 | - Both the trained model metagraph and the model parameters need to exist
30 | in the same directory, and the metagraph should have the extension '.meta'.
31 |
32 | ####
33 | USAGE:
34 | $ python batch_represent.py -d <data_dir> -o <output_dir> --trained_model_dir <model_dir>
35 | ###
36 | """
37 |
38 | """
39 | Attributions:
40 | The code is heavily inspired by David Sandberg's ../src/validate_on_lfw.py
41 | The concept is inspired by Brandon Amos' github.com/cmusatyalab/openface/blob/master/batch-represent/batch-represent.lua
42 | """
43 |
44 | #----------------------------------------------------
45 | # MIT License
46 | #
47 | # Copyright (c) 2017 Rakshak Talwar
48 | #
49 | # Permission is hereby granted, free of charge, to any person obtaining a copy
50 | # of this software and associated documentation files (the "Software"), to deal
51 | # in the Software without restriction, including without limitation the rights
52 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
53 | # copies of the Software, and to permit persons to whom the Software is
54 | # furnished to do so, subject to the following conditions:
55 | #
56 | # The above copyright notice and this permission notice shall be included in all
57 | # copies or substantial portions of the Software.
58 | #
59 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
60 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
61 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
62 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
63 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
64 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
65 | # SOFTWARE.
66 | #----------------------------------------------------
67 |
68 | import os
69 | import sys
70 | import argparse
71 | import importlib
72 | import time
73 |
74 | sys.path.insert(1, "../src")
75 | import facenet
76 | import numpy as np
77 | from sklearn.datasets import load_files
78 | import tensorflow as tf
79 | from six.moves import xrange
80 |
81 | def main(args):
82 |
83 | with tf.Graph().as_default():
84 |
85 | with tf.Session() as sess:
86 |
87 | # create output directory if it doesn't exist
88 | output_dir = os.path.expanduser(args.output_dir)
89 | if not os.path.isdir(output_dir):
90 | os.makedirs(output_dir)
91 |
92 | # load the model
93 | print("Loading trained model...\n")
94 | meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.trained_model_dir))
95 | facenet.load_model(args.trained_model_dir, meta_file, ckpt_file)
96 |
97 | # grab all image paths and labels
98 | print("Finding image paths and targets...\n")
99 | data = load_files(args.data_dir, load_content=False, shuffle=False)
100 | labels_array = data['target']
101 | paths = data['filenames']
102 |
103 | # Get input and output tensors
104 | images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
105 | embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
106 | phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
107 |
108 | image_size = images_placeholder.get_shape()[1]
109 | embedding_size = embeddings.get_shape()[1]
110 |
111 | # Run forward pass to calculate embeddings
112 | print('Generating embeddings from images...\n')
113 | start_time = time.time()
114 | batch_size = args.batch_size
115 | nrof_images = len(paths)
116 | nrof_batches = int(np.ceil(1.0*nrof_images / batch_size))
117 | emb_array = np.zeros((nrof_images, embedding_size))
118 | for i in xrange(nrof_batches):
119 | start_index = i*batch_size
120 | end_index = min((i+1)*batch_size, nrof_images)
121 | paths_batch = paths[start_index:end_index]
122 | images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False, image_size=image_size, do_prewhiten=True)
123 | feed_dict = { images_placeholder:images, phase_train_placeholder:False}
124 | emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
125 |
126 | time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
127 | print("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (time_avg_forward_pass, nrof_images))
128 |
129 | print("Finally saving embeddings and gallery to: %s" % (output_dir))
130 | # save the gallery and embeddings (signatures) as numpy arrays to disk
131 | np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
132 | np.save(os.path.join(output_dir, "signatures.npy"), emb_array)
133 |
134 | def parse_arguments(argv):
135 | parser = argparse.ArgumentParser(description="Batch-represent face embeddings from a given data directory")
136 | parser.add_argument('-d', '--data_dir', type=str,
137 | help='directory of images with structure as seen at the top of this file.')
138 | parser.add_argument('-o', '--output_dir', type=str,
139 |         help='directory where the output gallery.npy and signatures.npy files will be written.')
140 | parser.add_argument('--trained_model_dir', type=str,
141 |         help='Directory containing the trained model metagraph (.meta) and checkpoint files.')
142 | parser.add_argument('--batch_size', type=int, help='Number of images to process in a batch.', default=50)
143 |
144 | return parser.parse_args(argv)
145 |
146 |
147 | if __name__ == "__main__":
148 | main(parse_arguments(sys.argv[1:]))
149 |
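
The two arrays written at the end can be loaded back for downstream matching. A minimal sketch (the directory name is illustrative and stands for whatever was passed as --output_dir):

```python
import os
import numpy as np

output_dir = 'embeddings'  # illustrative; use the --output_dir value
labels = np.load(os.path.join(output_dir, 'gallery.npy'))         # integer class target per image
signatures = np.load(os.path.join(output_dir, 'signatures.npy'))  # (nrof_images, embedding_size)
```
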
--------------------------------------------------------------------------------
/facenet/tmp/seed_test.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import sys
4 | import time
5 | sys.path.append('../src')
6 | import facenet
7 | from tensorflow.python.ops import control_flow_ops
8 | from tensorflow.python.ops import array_ops
9 |
10 | from six.moves import xrange
11 |
12 | tf.app.flags.DEFINE_integer('batch_size', 90,
13 | """Number of images to process in a batch.""")
14 | tf.app.flags.DEFINE_integer('image_size', 96,
15 | """Image size (height, width) in pixels.""")
16 | tf.app.flags.DEFINE_float('alpha', 0.2,
17 | """Positive to negative triplet distance margin.""")
18 | tf.app.flags.DEFINE_float('learning_rate', 0.1,
19 | """Initial learning rate.""")
20 | tf.app.flags.DEFINE_float('moving_average_decay', 0.9999,
21 |                           """Exponential decay for tracking of training parameters.""")
22 |
23 | FLAGS = tf.app.flags.FLAGS
24 |
25 | def run_train():
26 |
27 | with tf.Graph().as_default():
28 |
29 | # Set the seed for the graph
30 | tf.set_random_seed(666)
31 |
32 | # Placeholder for input images
33 | images_placeholder = tf.placeholder(tf.float32, shape=(FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3), name='input')
34 |
35 | # Build the inference graph
36 | embeddings = inference_conv_test(images_placeholder)
37 | #embeddings = inference_affine_test(images_placeholder)
38 |
39 | # Split example embeddings into anchor, positive and negative
40 |         anchor, positive, negative = tf.split(embeddings, 3, 0)
41 |
42 | # Alternative implementation of the split operation
43 | # This produces the same error
44 | #resh1 = tf.reshape(embeddings, [3,int(FLAGS.batch_size/3), 128])
45 | #anchor = resh1[0,:,:]
46 | #positive = resh1[1,:,:]
47 | #negative = resh1[2,:,:]
48 |
49 | # Calculate triplet loss
50 |         pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
51 |         neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
52 |         basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), FLAGS.alpha)
53 | loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
54 |
55 | # Build a Graph that trains the model with one batch of examples and updates the model parameters
56 | opt = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
57 | #opt = tf.train.AdagradOptimizer(FLAGS.learning_rate) # Optimizer does not seem to matter
58 | grads = opt.compute_gradients(loss)
59 | train_op = opt.apply_gradients(grads)
60 |
61 | # Initialize the variables
62 | init = tf.global_variables_initializer()
63 |
64 | # Launch the graph.
65 | sess = tf.Session()
66 | sess.run(init)
67 |
68 | # Set the numpy seed
69 | np.random.seed(666)
70 |
71 | with sess.as_default():
72 | grads_eval = []
73 | all_vars = []
74 | for step in xrange(1):
75 | # Generate some random input data
76 | batch = np.random.random((FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3))
77 | feed_dict = { images_placeholder: batch }
78 | # Get the variables
79 | var_names = tf.global_variables()
80 | all_vars += sess.run(var_names, feed_dict=feed_dict)
81 | # Get the gradients
82 | grad_tensors, grad_vars = zip(*grads)
83 | grads_eval += sess.run(grad_tensors, feed_dict=feed_dict)
84 | # Run training
85 | sess.run(train_op, feed_dict=feed_dict)
86 |
87 | sess.close()
88 | return (var_names, all_vars, grad_vars, grads_eval)
89 |
90 | def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType):
91 | kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
92 | dtype=tf.float32,
93 | stddev=1e-1), name='weights')
94 | conv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)
95 |
96 | biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
97 | trainable=True, name='biases')
98 | bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
99 | conv1 = tf.nn.relu(bias)
100 | return conv1
101 |
102 | def _affine(inpOp, nIn, nOut):
103 | kernel = tf.Variable(tf.truncated_normal([nIn, nOut],
104 | dtype=tf.float32,
105 | stddev=1e-1), name='weights')
106 | biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
107 | trainable=True, name='biases')
108 | affine1 = tf.nn.relu_layer(inpOp, kernel, biases)
109 | return affine1
110 |
111 | def inference_conv_test(images):
112 | conv1 = _conv(images, 3, 64, 7, 7, 2, 2, 'SAME')
113 | resh1 = tf.reshape(conv1, [-1, 147456])
114 | affn = _affine(resh1, 147456, 128) # Affine layer not needed to reproduce the error
115 | return affn
116 |
117 | def inference_affine_test(images):
118 | resh1 = tf.reshape(images, [-1, 27648])
119 | affn1 = _affine(resh1, 27648, 1024)
120 | affn2 = _affine(affn1, 1024, 1024)
121 | affn3 = _affine(affn2, 1024, 1024)
122 | affn4 = _affine(affn3, 1024, 128)
123 | return affn4
124 |
125 | # Run two sessions with the same seed. These runs should produce the same result.
126 | var_names1, all_vars1, grad_names1, all_grads1 = run_train()
127 | var_names2, all_vars2, grad_names2, all_grads2 = run_train()
128 |
129 | all_vars_close = [None] * len(all_vars1)
130 | for i in range(len(all_vars1)):
131 | all_vars_close[i] = np.allclose(all_vars1[i], all_vars2[i], rtol=1.e-16)
132 | print('%d var %s: %s' % (i, var_names1[i].op.name, all_vars_close[i]))
133 |
134 | all_grads_close = [None] * len(all_grads1)
135 | for i in range(len(all_grads1)):
136 | all_grads_close[i] = np.allclose(all_grads1[i], all_grads2[i], rtol=1.e-16)
137 | print('%d grad %s: %s' % (i, grad_names1[i].op.name, all_grads_close[i]))
138 |
139 | assert all(all_vars_close), 'Variable values differ between the two sessions (with the same seed)'
140 | assert all(all_grads_close), 'Gradient values differ between the two sessions (with the same seed)'
141 |
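
The loss assembled above is the standard FaceNet triplet loss; per triplet of anchor a, positive p, and negative n it is

```latex
L = \max\left( \lVert f(a) - f(p) \rVert_2^2 - \lVert f(a) - f(n) \rVert_2^2 + \alpha,\; 0 \right)
```

averaged over the batch, with margin \alpha = 0.2 (the `alpha` flag).
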
--------------------------------------------------------------------------------
/facenet/src/calculate_filtering_metrics.py:
--------------------------------------------------------------------------------
1 | """Calculate filtering metrics for a dataset and store in a .hdf file.
2 | """
3 | # MIT License
4 | #
5 | # Copyright (c) 2016 David Sandberg
6 | #
7 | # Permission is hereby granted, free of charge, to any person obtaining a copy
8 | # of this software and associated documentation files (the "Software"), to deal
9 | # in the Software without restriction, including without limitation the rights
10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | # copies of the Software, and to permit persons to whom the Software is
12 | # furnished to do so, subject to the following conditions:
13 | #
14 | # The above copyright notice and this permission notice shall be included in all
15 | # copies or substantial portions of the Software.
16 | #
17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | # SOFTWARE.
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | import tensorflow as tf
30 | import numpy as np
31 | import argparse
32 | import facenet
33 | import os
34 | import sys
35 | import time
36 | import h5py
37 | import math
38 | from tensorflow.python.platform import gfile
39 | from six import iteritems
40 |
41 | def main(args):
42 | dataset = facenet.get_dataset(args.dataset_dir)
43 |
44 | with tf.Graph().as_default():
45 |
46 | # Get a list of image paths and their labels
47 | image_list, label_list = facenet.get_image_paths_and_labels(dataset)
48 | nrof_images = len(image_list)
49 | image_indices = range(nrof_images)
50 |
51 | image_batch, label_batch = facenet.read_and_augment_data(image_list,
52 | image_indices, args.image_size, args.batch_size, None,
53 | False, False, False, nrof_preprocess_threads=4, shuffle=False)
54 |
55 | model_exp = os.path.expanduser(args.model_file)
56 | with gfile.FastGFile(model_exp,'rb') as f:
57 | graph_def = tf.GraphDef()
58 | graph_def.ParseFromString(f.read())
59 | input_map={'input':image_batch, 'phase_train':False}
60 | tf.import_graph_def(graph_def, input_map=input_map, name='net')
61 |
62 | embeddings = tf.get_default_graph().get_tensor_by_name("net/embeddings:0")
63 |
64 | with tf.Session() as sess:
65 | tf.train.start_queue_runners(sess=sess)
66 |
67 | embedding_size = int(embeddings.get_shape()[1])
68 | nrof_batches = int(math.ceil(nrof_images / args.batch_size))
69 | nrof_classes = len(dataset)
70 | label_array = np.array(label_list)
71 | class_names = [cls.name for cls in dataset]
72 | nrof_examples_per_class = [ len(cls.image_paths) for cls in dataset ]
73 | class_variance = np.zeros((nrof_classes,))
74 | class_center = np.zeros((nrof_classes,embedding_size))
75 | distance_to_center = np.ones((len(label_list),))*np.NaN
76 | emb_array = np.zeros((0,embedding_size))
77 | idx_array = np.zeros((0,), dtype=np.int32)
78 | lab_array = np.zeros((0,), dtype=np.int32)
79 | index_arr = np.append(0, np.cumsum(nrof_examples_per_class))
80 | for i in range(nrof_batches):
81 | t = time.time()
82 | emb, idx = sess.run([embeddings, label_batch])
83 | emb_array = np.append(emb_array, emb, axis=0)
84 | idx_array = np.append(idx_array, idx, axis=0)
85 | lab_array = np.append(lab_array, label_array[idx], axis=0)
86 | for cls in set(lab_array):
87 | cls_idx = np.where(lab_array==cls)[0]
88 | if cls_idx.shape[0]==nrof_examples_per_class[cls]:
89 | # We have calculated all the embeddings for this class
90 | i2 = np.argsort(idx_array[cls_idx])
91 | emb_class = emb_array[cls_idx,:]
92 | emb_sort = emb_class[i2,:]
93 | center = np.mean(emb_sort, axis=0)
94 | diffs = emb_sort - center
95 | dists_sqr = np.sum(np.square(diffs), axis=1)
96 | class_variance[cls] = np.mean(dists_sqr)
97 | class_center[cls,:] = center
98 | distance_to_center[index_arr[cls]:index_arr[cls+1]] = np.sqrt(dists_sqr)
99 | emb_array = np.delete(emb_array, cls_idx, axis=0)
100 | idx_array = np.delete(idx_array, cls_idx, axis=0)
101 | lab_array = np.delete(lab_array, cls_idx, axis=0)
102 |
103 |
104 | print('Batch %d in %.3f seconds' % (i, time.time()-t))
105 |
106 | print('Writing filtering data to %s' % args.data_file_name)
107 | mdict = {'class_names':class_names, 'image_list':image_list, 'label_list':label_list, 'distance_to_center':distance_to_center }
108 | with h5py.File(args.data_file_name, 'w') as f:
109 | for key, value in iteritems(mdict):
110 | f.create_dataset(key, data=value)
111 |
112 | def parse_arguments(argv):
113 | parser = argparse.ArgumentParser()
114 |
115 | parser.add_argument('dataset_dir', type=str,
116 | help='Path to the directory containing aligned dataset.')
117 | parser.add_argument('model_file', type=str,
118 | help='File containing the frozen model in protobuf (.pb) format to use for feature extraction.')
119 | parser.add_argument('data_file_name', type=str,
120 | help='The name of the file to store filtering data in.')
121 | parser.add_argument('--image_size', type=int,
122 | help='Image size.', default=160)
123 | parser.add_argument('--batch_size', type=int,
124 | help='Number of images to process in a batch.', default=90)
125 | return parser.parse_args(argv)
126 |
127 | if __name__ == '__main__':
128 | main(parse_arguments(sys.argv[1:]))
129 |
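
The resulting .hdf file can be read back with h5py, for example to filter out images that lie far from their class center. A minimal sketch (the file name and threshold are illustrative):

```python
import h5py
import numpy as np

with h5py.File('filtering_metrics.hdf', 'r') as f:  # illustrative file name
    distance_to_center = np.array(f['distance_to_center'])
    image_list = list(f['image_list'])

keep_mask = distance_to_center < 1.0  # illustrative threshold
kept_images = [img for img, keep in zip(image_list, keep_mask) if keep]
```
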
--------------------------------------------------------------------------------
/facenet/contributed/face.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """Face Detection and Recognition"""
3 | # MIT License
4 | #
5 | # Copyright (c) 2017 François Gervais
6 | #
7 | # This is the work of David Sandberg and shanren7 remodelled into a
8 | # high level container. It's an attempt to simplify the use of such
9 | # technology and provide an easy to use facial recognition package.
10 | #
11 | # https://github.com/davidsandberg/facenet
12 | # https://github.com/shanren7/real_time_face_recognition
13 | #
14 | # Permission is hereby granted, free of charge, to any person obtaining a copy
15 | # of this software and associated documentation files (the "Software"), to deal
16 | # in the Software without restriction, including without limitation the rights
17 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18 | # copies of the Software, and to permit persons to whom the Software is
19 | # furnished to do so, subject to the following conditions:
20 | #
21 | # The above copyright notice and this permission notice shall be included in all
22 | # copies or substantial portions of the Software.
23 | #
24 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 | # SOFTWARE.
31 |
32 | import pickle
33 | import os
34 |
35 | import cv2
36 | import numpy as np
37 | import tensorflow as tf
38 | from scipy import misc
39 |
40 | import align.detect_face
41 | import facenet
42 |
43 |
44 | gpu_memory_fraction = 0.3
45 | facenet_model_checkpoint = os.path.dirname(__file__) + "/../model_checkpoints/20170512-110547"
46 | classifier_model = os.path.dirname(__file__) + "/../model_checkpoints/my_classifier_1.pkl"
47 | debug = False
48 |
49 |
50 | class Face:
51 | def __init__(self):
52 | self.name = None
53 | self.bounding_box = None
54 | self.image = None
55 | self.container_image = None
56 | self.embedding = None
57 |
58 |
59 | class Recognition:
60 | def __init__(self):
61 | self.detect = Detection()
62 | self.encoder = Encoder()
63 | self.identifier = Identifier()
64 |
65 | def add_identity(self, image, person_name):
66 | faces = self.detect.find_faces(image)
67 |
68 | if len(faces) == 1:
69 | face = faces[0]
70 | face.name = person_name
71 | face.embedding = self.encoder.generate_embedding(face)
72 | return faces
73 |
74 | def identify(self, image):
75 | faces = self.detect.find_faces(image)
76 |
77 | for i, face in enumerate(faces):
78 | if debug:
79 | cv2.imshow("Face: " + str(i), face.image)
80 | face.embedding = self.encoder.generate_embedding(face)
81 | face.name = self.identifier.identify(face)
82 |
83 | return faces
84 |
85 |
86 | class Identifier:
87 | def __init__(self):
88 | with open(classifier_model, 'rb') as infile:
89 | self.model, self.class_names = pickle.load(infile)
90 |
91 | def identify(self, face):
92 | if face.embedding is not None:
93 | predictions = self.model.predict_proba([face.embedding])
94 | best_class_indices = np.argmax(predictions, axis=1)
95 | return self.class_names[best_class_indices[0]]
96 |
97 |
98 | class Encoder:
99 | def __init__(self):
100 | self.sess = tf.Session()
101 | with self.sess.as_default():
102 | facenet.load_model(facenet_model_checkpoint)
103 |
104 | def generate_embedding(self, face):
105 | # Get input and output tensors
106 | images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
107 | embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
108 | phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
109 |
110 | prewhiten_face = facenet.prewhiten(face.image)
111 |
112 | # Run forward pass to calculate embeddings
113 | feed_dict = {images_placeholder: [prewhiten_face], phase_train_placeholder: False}
114 | return self.sess.run(embeddings, feed_dict=feed_dict)[0]
115 |
116 |
117 | class Detection:
118 | # face detection parameters
119 | minsize = 20 # minimum size of face
120 |     threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
121 | factor = 0.709 # scale factor
122 |
123 | def __init__(self, face_crop_size=160, face_crop_margin=32):
124 | self.pnet, self.rnet, self.onet = self._setup_mtcnn()
125 | self.face_crop_size = face_crop_size
126 | self.face_crop_margin = face_crop_margin
127 |
128 | def _setup_mtcnn(self):
129 | with tf.Graph().as_default():
130 | gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
131 | sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
132 | with sess.as_default():
133 | return align.detect_face.create_mtcnn(sess, None)
134 |
135 | def find_faces(self, image):
136 | faces = []
137 |
138 | bounding_boxes, _ = align.detect_face.detect_face(image, self.minsize,
139 | self.pnet, self.rnet, self.onet,
140 | self.threshold, self.factor)
141 | for bb in bounding_boxes:
142 | face = Face()
143 | face.container_image = image
144 | face.bounding_box = np.zeros(4, dtype=np.int32)
145 |
146 | img_size = np.asarray(image.shape)[0:2]
147 | face.bounding_box[0] = np.maximum(bb[0] - self.face_crop_margin / 2, 0)
148 | face.bounding_box[1] = np.maximum(bb[1] - self.face_crop_margin / 2, 0)
149 | face.bounding_box[2] = np.minimum(bb[2] + self.face_crop_margin / 2, img_size[1])
150 | face.bounding_box[3] = np.minimum(bb[3] + self.face_crop_margin / 2, img_size[0])
151 | cropped = image[face.bounding_box[1]:face.bounding_box[3], face.bounding_box[0]:face.bounding_box[2], :]
152 | face.image = misc.imresize(cropped, (self.face_crop_size, self.face_crop_size), interp='bilinear')
153 |
154 | faces.append(face)
155 |
156 | return faces
157 |
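
Putting the three classes together, typical use is to detect, encode, and identify every face in an image. A minimal sketch using the API above (the image path is illustrative):

```python
import cv2
from face import Recognition  # this module

recognition = Recognition()
image = cv2.imread('group_photo.jpg')  # illustrative path
for face in recognition.identify(image):
    print(face.name, face.bounding_box)
```
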
--------------------------------------------------------------------------------
/facenet/src/generative/models/dfc_vae_resnet.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2017 David Sandberg
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | """Variational autoencoder based on the paper
24 | 'Deep Feature Consistent Variational Autoencoder'
25 | (https://arxiv.org/pdf/1610.00291.pdf)
26 | """
27 |
28 | from __future__ import absolute_import
29 | from __future__ import division
30 | from __future__ import print_function
31 |
32 | import tensorflow as tf
33 | import tensorflow.contrib.slim as slim
34 | import generative.models.vae_base # @UnresolvedImport
35 |
36 |
37 | class Vae(generative.models.vae_base.Vae):
38 |
39 |     def __init__(self, latent_variable_dim):
40 |         super(Vae, self).__init__(latent_variable_dim, 64)  # 64x64 input images
41 | 
42 |     def encoder(self, images, is_training):
43 |         activation_fn = leaky_relu  # alternative: tf.nn.relu
44 |         weight_decay = 0.0
45 |         with tf.variable_scope('encoder'):
46 |             with slim.arg_scope([slim.batch_norm],
47 |                                 is_training=is_training):
48 |                 with slim.arg_scope([slim.conv2d, slim.fully_connected],
49 |                                     weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
50 |                                     weights_regularizer=slim.l2_regularizer(weight_decay),
51 |                                     normalizer_fn=slim.batch_norm,
52 |                                     normalizer_params=self.batch_norm_params):
53 |                     net = images
54 | 
55 |                     net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
56 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
57 | 
58 |                     net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
59 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
60 | 
61 |                     net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
62 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
63 | 
64 |                     net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
65 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
66 | 
67 |                     net = slim.flatten(net)
68 |                     fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')  # latent mean
69 |                     fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')  # latent log-variance
70 |                     return fc1, fc2
71 |
72 |     def decoder(self, latent_var, is_training):
73 |         activation_fn = leaky_relu  # alternative: tf.nn.relu
74 |         weight_decay = 0.0
75 |         with tf.variable_scope('decoder'):
76 |             with slim.arg_scope([slim.batch_norm],
77 |                                 is_training=is_training):
78 |                 with slim.arg_scope([slim.conv2d, slim.fully_connected],
79 |                                     weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
80 |                                     weights_regularizer=slim.l2_regularizer(weight_decay),
81 |                                     normalizer_fn=slim.batch_norm,
82 |                                     normalizer_params=self.batch_norm_params):
83 |                     net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
84 |                     net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')
85 | 
86 |                     net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
87 |                     net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
88 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')
89 | 
90 |                     net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
91 |                     net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
92 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')
93 | 
94 |                     net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
95 |                     net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
96 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')
97 | 
98 |                     net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
99 |                     net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
100 |                     net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
101 |                     net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')  # final RGB output, no activation
102 | 
103 |                     return net
104 |
105 | def conv2d_block(inp, scale, *args, **kwargs):
106 |     return inp + slim.conv2d(inp, *args, **kwargs) * scale  # residual connection; conv branch damped by `scale`
107 | 
108 | def leaky_relu(x):
109 |     return tf.maximum(0.1 * x, x)  # leaky ReLU: slope 0.1 for negative inputs
110 |
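A minimal sketch (illustrative; the placeholder, latent size, and TF1-style calls are assumptions, not code from this file) of how the two encoder heads above are typically consumed: `Fc_1` as the latent mean and `Fc_2` as the latent log-variance, combined with the reparameterization trick and fed back through the decoder.

images = tf.placeholder(tf.float32, [None, 64, 64, 3])  # the model is built for 64x64 inputs
vae = Vae(latent_variable_dim=100)  # hypothetical latent size
mean, log_variance = vae.encoder(images, is_training=True)
epsilon = tf.random_normal(tf.shape(mean))  # one noise sample per example
latent_var = mean + tf.exp(0.5 * log_variance) * epsilon  # z = mu + sigma * eps
reconstructed = vae.decoder(latent_var, is_training=True)  # -> [None, 64, 64, 3]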
--------------------------------------------------------------------------------