├── .gitignore
├── LICENSE
├── README.md
├── architecture
│   └── fig.png
├── configs
│   ├── mobilevit_atk.yaml
│   ├── mobilevit_cd.yaml
│   ├── mobilevit_eofn.yaml
│   ├── mobilevit_fas.yaml
│   ├── mobilevit_fc.yaml
│   ├── mobilevit_ms.yaml
│   ├── mobilevit_norm.yaml
│   ├── mobilevit_s.yaml
│   ├── mobilevit_xs.yaml
│   └── mobilevit_xxs.yaml
├── data_label
│   └── generate_label.py
├── experiment
└── m
│ ├── config.py
│ ├── dg_test.py
│ ├── logs
│ └── runs
│ │ ├── Accuracy_train
│ │ ├── events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.1
│ │ ├── events.out.tfevents.1667751802.public2-2.288376.1
│ │ ├── events.out.tfevents.1667752838.public2-2.292092.1
│ │ ├── events.out.tfevents.1667752898.public2-2.292404.1
│ │ ├── events.out.tfevents.1667753917.public2-2.296856.1
│ │ ├── events.out.tfevents.1667754066.public2-2.297489.1
│ │ ├── events.out.tfevents.1667757204.public2-2.309449.1
│ │ ├── events.out.tfevents.1667791088.public2-2.345886.1
│ │ ├── events.out.tfevents.1668090397.public2-2.765533.1
│ │ ├── events.out.tfevents.1668091334.public2-2.769344.1
│ │ ├── events.out.tfevents.1668091449.public2-2.769899.1
│ │ ├── events.out.tfevents.1668091849.public2-2.771620.1
│ │ ├── events.out.tfevents.1668091915.public2-2.772017.1
│ │ ├── events.out.tfevents.1668092026.public2-2.772597.1
│ │ ├── events.out.tfevents.1668092195.public2-2.773242.1
│ │ ├── events.out.tfevents.1678743287.public2-2.531760.1
│ │ ├── events.out.tfevents.1678743366.public2-2.532089.1
│ │ ├── events.out.tfevents.1678743508.public2-2.532654.1
│ │ ├── events.out.tfevents.1678743717.public2-2.533330.1
│ │ ├── events.out.tfevents.1678743959.public2-2.534478.1
│ │ ├── events.out.tfevents.1678744019.public2-2.534792.1
│ │ ├── events.out.tfevents.1678744350.public2-2.536001.1
│ │ ├── events.out.tfevents.1678750528.public2-2.567922.1
│ │ └── events.out.tfevents.1678750631.public2-2.568450.1
│ │ ├── Accuracy_valid
│ │ ├── events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.2
│ │ ├── events.out.tfevents.1667751802.public2-2.288376.2
│ │ ├── events.out.tfevents.1667752838.public2-2.292092.2
│ │ ├── events.out.tfevents.1667752898.public2-2.292404.2
│ │ ├── events.out.tfevents.1667753917.public2-2.296856.2
│ │ ├── events.out.tfevents.1667754066.public2-2.297489.2
│ │ ├── events.out.tfevents.1667757204.public2-2.309449.2
│ │ ├── events.out.tfevents.1667791088.public2-2.345886.2
│ │ ├── events.out.tfevents.1668090397.public2-2.765533.2
│ │ ├── events.out.tfevents.1668091334.public2-2.769344.2
│ │ ├── events.out.tfevents.1668091449.public2-2.769899.2
│ │ ├── events.out.tfevents.1668091849.public2-2.771620.2
│ │ ├── events.out.tfevents.1668091915.public2-2.772017.2
│ │ ├── events.out.tfevents.1668092026.public2-2.772597.2
│ │ ├── events.out.tfevents.1668092195.public2-2.773242.2
│ │ ├── events.out.tfevents.1678743287.public2-2.531760.2
│ │ ├── events.out.tfevents.1678743366.public2-2.532089.2
│ │ ├── events.out.tfevents.1678743508.public2-2.532654.2
│ │ ├── events.out.tfevents.1678743717.public2-2.533330.2
│ │ ├── events.out.tfevents.1678743959.public2-2.534478.2
│ │ ├── events.out.tfevents.1678744019.public2-2.534792.2
│ │ ├── events.out.tfevents.1678744350.public2-2.536001.2
│ │ ├── events.out.tfevents.1678750528.public2-2.567922.2
│ │ └── events.out.tfevents.1678750631.public2-2.568450.2
│ │ ├── Loss_train
│ │ ├── events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.3
│ │ ├── events.out.tfevents.1667751802.public2-2.288376.3
│ │ ├── events.out.tfevents.1667752838.public2-2.292092.3
│ │ ├── events.out.tfevents.1667752898.public2-2.292404.3
│ │ ├── events.out.tfevents.1667753917.public2-2.296856.3
│ │ ├── events.out.tfevents.1667754066.public2-2.297489.3
│ │ ├── events.out.tfevents.1667757204.public2-2.309449.3
│ │ ├── events.out.tfevents.1667791088.public2-2.345886.3
│ │ ├── events.out.tfevents.1668090397.public2-2.765533.3
│ │ ├── events.out.tfevents.1668091334.public2-2.769344.3
│ │ ├── events.out.tfevents.1668091449.public2-2.769899.3
│ │ ├── events.out.tfevents.1668091849.public2-2.771620.3
│ │ ├── events.out.tfevents.1668091915.public2-2.772017.3
│ │ ├── events.out.tfevents.1668092026.public2-2.772597.3
│ │ ├── events.out.tfevents.1668092195.public2-2.773242.3
│ │ ├── events.out.tfevents.1678743287.public2-2.531760.3
│ │ ├── events.out.tfevents.1678743366.public2-2.532089.3
│ │ ├── events.out.tfevents.1678743508.public2-2.532654.3
│ │ ├── events.out.tfevents.1678743717.public2-2.533330.3
│ │ ├── events.out.tfevents.1678743959.public2-2.534478.3
│ │ ├── events.out.tfevents.1678744019.public2-2.534792.3
│ │ ├── events.out.tfevents.1678744350.public2-2.536001.3
│ │ ├── events.out.tfevents.1678750528.public2-2.567922.3
│ │ └── events.out.tfevents.1678750631.public2-2.568450.3
│ │ ├── Loss_valid
│ │ ├── events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.4
│ │ ├── events.out.tfevents.1667751802.public2-2.288376.4
│ │ ├── events.out.tfevents.1667752838.public2-2.292092.4
│ │ ├── events.out.tfevents.1667752898.public2-2.292404.4
│ │ ├── events.out.tfevents.1667753917.public2-2.296856.4
│ │ ├── events.out.tfevents.1667754066.public2-2.297489.4
│ │ ├── events.out.tfevents.1667757204.public2-2.309449.4
│ │ ├── events.out.tfevents.1667791088.public2-2.345886.4
│ │ ├── events.out.tfevents.1668090397.public2-2.765533.4
│ │ ├── events.out.tfevents.1668091334.public2-2.769344.4
│ │ ├── events.out.tfevents.1668091449.public2-2.769899.4
│ │ ├── events.out.tfevents.1668091849.public2-2.771620.4
│ │ ├── events.out.tfevents.1668091915.public2-2.772017.4
│ │ ├── events.out.tfevents.1668092026.public2-2.772597.4
│ │ ├── events.out.tfevents.1668092195.public2-2.773242.4
│ │ ├── events.out.tfevents.1678743287.public2-2.531760.4
│ │ ├── events.out.tfevents.1678743366.public2-2.532089.4
│ │ ├── events.out.tfevents.1678743508.public2-2.532654.4
│ │ ├── events.out.tfevents.1678743717.public2-2.533330.4
│ │ ├── events.out.tfevents.1678743959.public2-2.534478.4
│ │ ├── events.out.tfevents.1678744019.public2-2.534792.4
│ │ ├── events.out.tfevents.1678744350.public2-2.536001.4
│ │ ├── events.out.tfevents.1678750528.public2-2.567922.4
│ │ └── events.out.tfevents.1678750631.public2-2.568450.4
│ │ ├── events.out.tfevents.1653557730.l444ysctr1653533547001-j4nrx.4482.0
│ │ ├── events.out.tfevents.1667632455.public2-2.53048.0
│ │ ├── events.out.tfevents.1667632486.public2-2.53189.0
│ │ ├── events.out.tfevents.1667632618.public2-2.53530.0
│ │ ├── events.out.tfevents.1667632644.public2-2.53627.0
│ │ ├── events.out.tfevents.1667751731.public2-2.288133.0
│ │ ├── events.out.tfevents.1667751760.public2-2.288255.0
│ │ ├── events.out.tfevents.1667751798.public2-2.288376.0
│ │ ├── events.out.tfevents.1667752832.public2-2.292092.0
│ │ ├── events.out.tfevents.1667752892.public2-2.292404.0
│ │ ├── events.out.tfevents.1667753909.public2-2.296856.0
│ │ ├── events.out.tfevents.1667754058.public2-2.297489.0
│ │ ├── events.out.tfevents.1667757180.public2-2.309360.0
│ │ ├── events.out.tfevents.1667757191.public2-2.309449.0
│ │ ├── events.out.tfevents.1667791057.public2-2.345761.0
│ │ ├── events.out.tfevents.1667791079.public2-2.345886.0
│ │ ├── events.out.tfevents.1668086014.public2-2.754938.0
│ │ ├── events.out.tfevents.1668087845.public2-2.758793.0
│ │ ├── events.out.tfevents.1668087899.public2-2.758979.0
│ │ ├── events.out.tfevents.1668087912.public2-2.759057.0
│ │ ├── events.out.tfevents.1668087926.public2-2.759154.0
│ │ ├── events.out.tfevents.1668087950.public2-2.759313.0
│ │ ├── events.out.tfevents.1668087981.public2-2.759407.0
│ │ ├── events.out.tfevents.1668088002.public2-2.759535.0
│ │ ├── events.out.tfevents.1668088018.public2-2.759623.0
│ │ ├── events.out.tfevents.1668088135.public2-2.759938.0
│ │ ├── events.out.tfevents.1668088146.public2-2.760028.0
│ │ ├── events.out.tfevents.1668088205.public2-2.760242.0
│ │ ├── events.out.tfevents.1668088258.public2-2.760423.0
│ │ ├── events.out.tfevents.1668088295.public2-2.760547.0
│ │ ├── events.out.tfevents.1668089319.public2-2.763614.0
│ │ ├── events.out.tfevents.1668090177.public2-2.765042.0
│ │ ├── events.out.tfevents.1668090281.public2-2.765284.0
│ │ ├── events.out.tfevents.1668090388.public2-2.765533.0
│ │ ├── events.out.tfevents.1668091229.public2-2.769027.0
│ │ ├── events.out.tfevents.1668091308.public2-2.769255.0
│ │ ├── events.out.tfevents.1668091327.public2-2.769344.0
│ │ ├── events.out.tfevents.1668091440.public2-2.769899.0
│ │ ├── events.out.tfevents.1668091841.public2-2.771620.0
│ │ ├── events.out.tfevents.1668091871.public2-2.771800.0
│ │ ├── events.out.tfevents.1668091895.public2-2.771913.0
│ │ ├── events.out.tfevents.1668091909.public2-2.772017.0
│ │ ├── events.out.tfevents.1668092020.public2-2.772597.0
│ │ ├── events.out.tfevents.1668092132.public2-2.773061.0
│ │ ├── events.out.tfevents.1668092188.public2-2.773242.0
│ │ ├── events.out.tfevents.1678743240.public2-2.531562.0
│ │ ├── events.out.tfevents.1678743265.public2-2.531675.0
│ │ ├── events.out.tfevents.1678743273.public2-2.531760.0
│ │ ├── events.out.tfevents.1678743352.public2-2.532089.0
│ │ ├── events.out.tfevents.1678743494.public2-2.532654.0
│ │ ├── events.out.tfevents.1678743703.public2-2.533330.0
│ │ ├── events.out.tfevents.1678743945.public2-2.534478.0
│ │ ├── events.out.tfevents.1678744005.public2-2.534792.0
│ │ ├── events.out.tfevents.1678744337.public2-2.536001.0
│ │ ├── events.out.tfevents.1678750514.public2-2.567922.0
│ │ ├── events.out.tfevents.1678750618.public2-2.568450.0
│ │ └── events.out.tfevents.1678750763.public2-2.569212.0
│ ├── test_checkpoint
│ └── dgua_fas
│ │ ├── DGFANet
│ │ └── _checkpoint.pth.tar
│ │ └── best_model
│ │ └── best_model.pth.tar
│ ├── train.py
│ └── utils
│ ├── __init__.py
│ ├── checkpoint_utils.py
│ ├── color_map.py
│ ├── common_utils.py
│ ├── ddp_utils.py
│ ├── download_utils.py
│ ├── logger.py
│ ├── math_utils.py
│ ├── pytorch_to_coreml.py
│ ├── tensor_utils.py
│ └── visualization_utils.py
├── option.py
├── pretrained_model
│   └── mobilevit_s.pt
└── util
    ├── dataset.py
    ├── evaluate.py
    ├── get_loader.py
    ├── statistic.py
    └── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 AI^2 Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DGUA-FAS
2 |
3 | The implementation of [Domain-Generalized Face Anti-Spoofing with Unknown Attacks](https://ieeexplore.ieee.org/abstract/document/10223078)
4 |
5 | The architecture of the proposed DGUA-FAS method:
6 |
7 |
8 | ![DGUA-FAS architecture](./architecture/fig.png)
9 |
10 |
11 | ## Configuration Environment
12 |
13 | - python 3.9.12
14 | - torch 1.10.0
15 | - torchvision 0.11.1
16 | - cuda 12.0
17 |
18 | ## Requirements
19 |
20 | ### Install MobileViT and replace base_cls.py with [our version](https://drive.google.com/file/d/1shq23SpC4X2OoYFELFjHMWpYyolmMEpj/view?usp=share_link)
21 |
22 | ```bash
23 | git clone https://github.com/apple/ml-cvnets
24 | cd ml-cvnets
25 | git checkout 84d992f413e52c0468f86d23196efd9dad885e6f
26 |
27 | # replace ./cvnets/models/classification/base_cls.py with our version
28 | pip install -r requirements.txt
29 | pip install --editable .
30 | pip install pandas
31 | pip install tensorboard
32 | cd ..
33 | ```
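
A minimal sketch of the replacement step (assuming the modified `base_cls.py` from the Google Drive link above has been downloaded into the current directory; the paths are illustrative):

```python
# illustrative: overwrite the ml-cvnets copy of base_cls.py with the
# modified version downloaded from the Google Drive link above
import shutil

shutil.copy("base_cls.py", "ml-cvnets/cvnets/models/classification/base_cls.py")
```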
34 |
35 | ### Our data pre-processing follows SSDG, so please refer to their dataset setup
36 |
37 | ```bash
38 | # After setting the dataset path (data_dir in generate_label.py), run the commands below.
39 | cd ./data_label
40 | python generate_label.py
41 | ```
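
Each generated `*_label.json` file is a list of `{photo_path, photo_label}` entries, where label 1 denotes a real face, 0 a print attack, and 2 any other attack (e.g., replay). A minimal sketch to inspect one of the generated files (the `./msu/` path assumes the script was run from `data_label/`):

```python
import json

# load a label file written by generate_label.py
with open('./msu/train_label.json') as f:
    entries = json.load(f)

print(len(entries), entries[0])
# e.g. {'photo_path': '/disk2/face-anti-dataset/msu_256/...', 'photo_label': 1}
```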
42 |
43 | ## Training
44 |
45 | ```bash
46 | cd ./experiment/m/
47 | python train.py
48 | ```
49 |
50 | The file `config.py` contains all the hyper-parameters used during training.
51 |
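For reference, the most commonly edited fields of `DefaultConfigs` in `experiment/m/config.py` (reproduced from that file) are the optimization settings and the source/target datasets:

```python
# key fields of DefaultConfigs in experiment/m/config.py
init_lr = 1e-4          # initial learning rate
batch_size = 4
max_iter = 4000         # training iterations
lambda_triplet = 1      # loss weights
lambda_adreal = 0.5
src1_data = 'oulu'      # source domains
src2_data = 'casia'
src3_data = 'msu'
tgt_data = 'replay'     # target (test) dataset
```
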
52 | ## Testing
53 |
54 | Change the name of the testing dataset (`tgt_data`) in `config.py` and
55 | run:
56 |
57 | ```bash
58 | python dg_test.py
59 | ```
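
As implemented in `experiment/m/dg_test.py`, this loads `configs/mobilevit_s.yaml`, restores the saved checkpoint together with its decision threshold, and computes video-level accuracy, EER, HTER, and AUC on the target dataset.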
60 |
61 | We also provide our pretrained model [[Google drive]](https://drive.google.com/drive/folders/1D8WZjO62Kv4uzzNouzJWs2BrBayZq_0l?usp=sharing)
62 |
63 | ## Acknowledgment
64 |
65 | This work could not have been finished without the following works; many thanks to the authors for their contributions:
66 |
67 | [SSDG](https://github.com/YYuanAnyVision/mxnet_mtcnn_face_detection), [ml-cvnets](https://github.com/apple/ml-cvnets), [DiVT](https://openaccess.thecvf.com/content/WACV2023/html/Liao_Domain_Invariant_Vision_Transformer_Learning_for_Face_Anti-Spoofing_WACV_2023_paper.html)
68 |
69 | ## Citation
70 |
71 | Please cite our work if the code is helpful to your research.
72 |
73 | ```
74 | @INPROCEEDINGS{10223078,
75 | author={Hong, Zong-Wei and Lin, Yu-Chen and Liu, Hsuan-Tung and Yeh, Yi-Ren and Chen, Chu-Song},
76 | booktitle={2023 IEEE International Conference on Image Processing (ICIP)},
77 | title={Domain-Generalized Face Anti-Spoofing with Unknown Attacks},
78 | year={2023},
79 | volume={},
80 | number={},
81 | pages={820-824},
82 | doi={10.1109/ICIP49359.2023.10223078}}
83 | ```
84 |
--------------------------------------------------------------------------------
/architecture/fig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/architecture/fig.png
--------------------------------------------------------------------------------
/configs/mobilevit_atk.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-atk"
4 | classifier_dropout: 0.5
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 3
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_cd.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-cd"
4 | classifier_dropout: 0.1
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 2
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_eofn.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-eofn"
4 | classifier_dropout: 0.5
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 2
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_fas.yaml:
--------------------------------------------------------------------------------
1 | dataset:
2 | category: "fas"
3 | model:
4 | fas:
5 | name: "mobilevit-fas"
6 | classifier_dropout: 0.1
7 | mit:
8 | mode: "small"
9 | ffn_dropout: 0.0
10 | attn_dropout: 0.0
11 | dropout: 0.1
12 | number_heads: 4
13 | no_fuse_local_global_features: false
14 | conv_kernel_size: 3
15 | activation:
16 | name: "swish"
17 | n_classes: 2
18 | normalization:
19 | name: "batch_norm_2d"
20 | momentum: 0.1
21 | activation:
22 | name: "swish"
23 | layer:
24 | global_pool: "mean"
25 | conv_init: "kaiming_normal"
26 | linear_init: "trunc_normal"
27 | linear_init_std_dev: 0.02
28 |
--------------------------------------------------------------------------------
/configs/mobilevit_fc.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-fc"
4 | classifier_dropout: 0.1
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 2
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_ms.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-ms"
4 | classifier_dropout: 0.5
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 2
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_norm.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | classification:
3 | name: "mobilevit-norm"
4 | classifier_dropout: 0.5
5 | mit:
6 | mode: "small"
7 | ffn_dropout: 0.0
8 | attn_dropout: 0.0
9 | dropout: 0.1
10 | number_heads: 4
11 | no_fuse_local_global_features: false
12 | conv_kernel_size: 3
13 | activation:
14 | name: "swish"
15 | n_classes: 2
16 | normalization:
17 | name: "batch_norm_2d"
18 | momentum: 0.1
19 | activation:
20 | name: "swish"
21 | layer:
22 | global_pool: "mean"
23 | conv_init: "kaiming_normal"
24 | linear_init: "trunc_normal"
25 | linear_init_std_dev: 0.02
26 |
--------------------------------------------------------------------------------
/configs/mobilevit_s.yaml:
--------------------------------------------------------------------------------
1 | common:
2 | run_label: "run_1"
3 | log_freq: 500
4 | auto_resume: true
5 | mixed_precision: true
6 | dataset:
7 | root_train: "/mnt/imagenet/training"
8 | root_val: "/mnt/imagenet/validation"
9 | name: "imagenet"
10 | category: "classification"
11 | train_batch_size0: 128
12 | val_batch_size0: 100
13 | eval_batch_size0: 100
14 | workers: 4
15 | persistent_workers: false
16 | pin_memory: true
17 | sampler:
18 | name: "batch_sampler"
19 | bs:
20 | crop_size_width: 256
21 | crop_size_height: 256
22 | model:
23 | classification:
24 | name: "mobilevit"
25 | classifier_dropout: 0.1
26 | mit:
27 | mode: "small"
28 | ffn_dropout: 0.0
29 | attn_dropout: 0.0
30 | dropout: 0.1
31 | number_heads: 4
32 | no_fuse_local_global_features: false
33 | conv_kernel_size: 3
34 | activation:
35 | name: "swish"
36 | n_classes: 3
37 | normalization:
38 | name: "batch_norm_2d"
39 | momentum: 0.1
40 | activation:
41 | name: "swish"
42 | layer:
43 | global_pool: "mean"
44 | conv_init: "kaiming_normal"
45 | linear_init: "trunc_normal"
46 | linear_init_std_dev: 0.02
47 | ddp:
48 | enable: true
49 | rank: 0
50 | world_size: -1
51 | dist_port: 30786
52 | stats:
53 | name: [ "loss", "top1", "top5" ]
54 | checkpoint_metric: "top1"
55 | checkpoint_metric_max: true
56 |
--------------------------------------------------------------------------------
/configs/mobilevit_xs.yaml:
--------------------------------------------------------------------------------
1 | common:
2 | run_label: "run_1"
3 | log_freq: 500
4 | auto_resume: true
5 | mixed_precision: true
6 | dataset:
7 | root_train: "/mnt/imagenet/training"
8 | root_val: "/mnt/imagenet/validation"
9 | name: "imagenet"
10 | category: "classification"
11 | train_batch_size0: 128
12 | val_batch_size0: 100
13 | eval_batch_size0: 100
14 | workers: 4
15 | persistent_workers: false
16 | pin_memory: true
17 | sampler:
18 | name: "batch_sampler"
19 | bs:
20 | crop_size_width: 256
21 | crop_size_height: 256
22 | model:
23 | classification:
24 | name: "mobilevit"
25 | classifier_dropout: 0.1
26 | mit:
27 | mode: "x_small"
28 | ffn_dropout: 0.0
29 | attn_dropout: 0.0
30 | dropout: 0.1
31 | number_heads: 4
32 | no_fuse_local_global_features: false
33 | conv_kernel_size: 3
34 | activation:
35 | name: "swish"
36 | n_classes: 3
37 | normalization:
38 | name: "batch_norm_2d"
39 | momentum: 0.1
40 | activation:
41 | name: "swish"
42 | layer:
43 | global_pool: "mean"
44 | conv_init: "kaiming_normal"
45 | linear_init: "trunc_normal"
46 | linear_init_std_dev: 0.02
47 | ddp:
48 | enable: true
49 | rank: 0
50 | world_size: -1
51 | dist_port: 30786
52 | stats:
53 | name: [ "loss", "top1", "top5" ]
54 | checkpoint_metric: "top1"
55 | checkpoint_metric_max: true
56 |
--------------------------------------------------------------------------------
/configs/mobilevit_xxs.yaml:
--------------------------------------------------------------------------------
1 | common:
2 | run_label: "run_1"
3 | log_freq: 500
4 | auto_resume: true
5 | mixed_precision: true
6 | dataset:
7 | root_train: "/mnt/imagenet/training"
8 | root_val: "/mnt/imagenet/validation"
9 | name: "imagenet"
10 | category: "classification"
11 | train_batch_size0: 128
12 | val_batch_size0: 100
13 | eval_batch_size0: 100
14 | workers: 4
15 | persistent_workers: false
16 | pin_memory: true
17 | sampler:
18 | name: "batch_sampler"
19 | bs:
20 | crop_size_width: 256
21 | crop_size_height: 256
22 | model:
23 | classification:
24 | name: "mobilevit"
25 | classifier_dropout: 0.1
26 | mit:
27 | mode: "xx_small"
28 | ffn_dropout: 0.0
29 | attn_dropout: 0.0
30 | dropout: 0.05
31 | number_heads: 4
32 | no_fuse_local_global_features: false
33 | conv_kernel_size: 3
34 | activation:
35 | name: "swish"
36 | n_classes: 2
37 | normalization:
38 | name: "batch_norm_2d"
39 | momentum: 0.1
40 | activation:
41 | name: "swish"
42 | layer:
43 | global_pool: "mean"
44 | conv_init: "kaiming_normal"
45 | linear_init: "trunc_normal"
46 | linear_init_std_dev: 0.02
47 | ddp:
48 | enable: true
49 | rank: 0
50 | world_size: -1
51 | dist_port: 30786
52 | stats:
53 | name: [ "loss", "top1", "top5" ]
54 | checkpoint_metric: "top1"
55 | checkpoint_metric_max: true
56 |
--------------------------------------------------------------------------------
/data_label/generate_label.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import glob
4 |
5 | # change your data path
6 | # data_dir = '../../../datasets/data/'
7 | data_dir = '/disk2/face-anti-dataset/'
8 |
9 | def msu_test_process():
10 | test_list = []
11 | # data_label for msu
12 | for line in open(f'{data_dir}msu_256/test_sub_list.txt', 'r'):
13 | test_list.append(line[0:2])
14 | train_list = []
15 | for line in open(f'{data_dir}msu_256/train_sub_list.txt', 'r'):
16 | train_list.append(line[0:2])
17 | print(test_list)
18 | print(train_list)
19 | train_final_json = []
20 | test_final_json = []
21 | all_final_json = []
22 | real_final_json = []
23 | fake_final_json = []
24 | label_save_dir = './msu_test/'
25 | if not os.path.exists(label_save_dir):
26 | os.makedirs(label_save_dir)
27 | f_train = open(label_save_dir + 'train_label.json', 'w')
28 | f_test = open(label_save_dir + 'test_label.json', 'w')
29 | f_all = open(label_save_dir + 'all_label.json', 'w')
30 | f_real = open(label_save_dir + 'real_label.json', 'w')
31 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
32 | dataset_path = data_dir + 'msu_256/'
33 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
34 | path_list.sort()
35 | for i in range(len(path_list)):
36 | flag = path_list[i].find('/real/')
37 | if(flag != -1):
38 | label = 1
39 | else:
40 | # label = 0
41 | if path_list[i].find('printed') != -1:
42 | label = 0
43 | else:
44 | label = 2
45 |
46 | dict = {}
47 | dict['photo_path'] = path_list[i]
48 | dict['photo_label'] = label
49 | # video_num = path_list[i].split('/')[-2].split('_')[0]
50 | video_num = path_list[i].split('/')[-2].split('_')[1][-2:]
51 | if (video_num in test_list):
52 | train_final_json.append(dict)
53 |
54 | all_final_json.append(dict)
55 | if(label == 1):
56 | real_final_json.append(dict)
57 | else:
58 | fake_final_json.append(dict)
59 | print('\nMSU: ', len(path_list))
60 | print('MSU(train): ', len(train_final_json))
61 | print('MSU(test): ', len(test_final_json))
62 | print('MSU(all): ', len(all_final_json))
63 | print('MSU(real): ', len(real_final_json))
64 | print('MSU(fake): ', len(fake_final_json))
65 | json.dump(train_final_json, f_train, indent=4)
66 | f_train.close()
67 | json.dump(test_final_json, f_test, indent=4)
68 | f_test.close()
69 | json.dump(all_final_json, f_all, indent=4)
70 | f_all.close()
71 | json.dump(real_final_json, f_real, indent=4)
72 | f_real.close()
73 | json.dump(fake_final_json, f_fake, indent=4)
74 | f_fake.close()
75 |
76 | def msu_train_process():
77 | test_list = []
78 | # data_label for msu
79 | for line in open(f'{data_dir}msu_256/test_sub_list.txt', 'r'):
80 | test_list.append(line[0:2])
81 | train_list = []
82 | for line in open(f'{data_dir}msu_256/train_sub_list.txt', 'r'):
83 | train_list.append(line[0:2])
84 | print(test_list)
85 | print(train_list)
86 | train_final_json = []
87 | test_final_json = []
88 | all_final_json = []
89 | real_final_json = []
90 | fake_final_json = []
91 | label_save_dir = './msu_train/'
92 | if not os.path.exists(label_save_dir):
93 | os.makedirs(label_save_dir)
94 | f_train = open(label_save_dir + 'train_label.json', 'w')
95 | f_test = open(label_save_dir + 'test_label.json', 'w')
96 | f_all = open(label_save_dir + 'all_label.json', 'w')
97 | f_real = open(label_save_dir + 'real_label.json', 'w')
98 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
99 | dataset_path = data_dir + 'msu_256/'
100 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
101 | path_list.sort()
102 | for i in range(len(path_list)):
103 | flag = path_list[i].find('/real/')
104 | if(flag != -1):
105 | label = 1
106 | else:
107 | if path_list[i].find('printed') != -1:
108 | label = 0
109 | else:
110 | label = 2
111 |
112 | dict = {}
113 | dict['photo_path'] = path_list[i]
114 | dict['photo_label'] = label
115 | # video_num = path_list[i].split('/')[-2].split('_')[0]
116 | video_num = path_list[i].split('/')[-2].split('_')[1][-2:]
117 | if (video_num in train_list):
118 | train_final_json.append(dict)
119 |
120 | all_final_json.append(dict)
121 | if(label == 1):
122 | real_final_json.append(dict)
123 | else:
124 | fake_final_json.append(dict)
125 | print('\nMSU: ', len(path_list))
126 | print('MSU(train): ', len(train_final_json))
127 | print('MSU(test): ', len(test_final_json))
128 | print('MSU(all): ', len(all_final_json))
129 | print('MSU(real): ', len(real_final_json))
130 | print('MSU(fake): ', len(fake_final_json))
131 | json.dump(train_final_json, f_train, indent=4)
132 | f_train.close()
133 | json.dump(test_final_json, f_test, indent=4)
134 | f_test.close()
135 | json.dump(all_final_json, f_all, indent=4)
136 | f_all.close()
137 | json.dump(real_final_json, f_real, indent=4)
138 | f_real.close()
139 | json.dump(fake_final_json, f_fake, indent=4)
140 | f_fake.close()
141 |
142 | def msu_process():
143 | test_list = []
144 | # data_label for msu
145 | for line in open(f'{data_dir}msu_256/test_sub_list.txt', 'r'):
146 | test_list.append(line[0:2])
147 | train_list = []
148 | for line in open(f'{data_dir}msu_256/train_sub_list.txt', 'r'):
149 | train_list.append(line[0:2])
150 | print(test_list)
151 | print(train_list)
152 | train_final_json = []
153 | test_final_json = []
154 | all_final_json = []
155 | real_final_json = []
156 | fake_final_json = []
157 | label_save_dir = './msu/'
158 | if not os.path.exists(label_save_dir):
159 | os.makedirs(label_save_dir)
160 | f_train = open(label_save_dir + 'train_label.json', 'w')
161 | f_test = open(label_save_dir + 'test_label.json', 'w')
162 | f_all = open(label_save_dir + 'all_label.json', 'w')
163 | f_real = open(label_save_dir + 'real_label.json', 'w')
164 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
165 | dataset_path = data_dir + 'msu_256/'
166 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
167 | path_list.sort()
168 | for i in range(len(path_list)):
169 | flag = path_list[i].find('/real/')
170 | if(flag != -1):
171 | label = 1
172 | else:
173 | # label = 0
174 | if path_list[i].find('printed') != -1:
175 | label = 0
176 | else:
177 | label = 2
178 |
179 | dict = {}
180 | dict['photo_path'] = path_list[i]
181 | dict['photo_label'] = label
182 | # video_num = path_list[i].split('/')[-2].split('_')[0]
183 | video_num = path_list[i].split('/')[-2].split('_')[1][-2:]
184 | if (video_num in train_list):
185 | train_final_json.append(dict)
186 | else:
187 | test_final_json.append(dict)
188 | all_final_json.append(dict)
189 | if(label == 1):
190 | real_final_json.append(dict)
191 | else:
192 | fake_final_json.append(dict)
193 | print('\nMSU: ', len(path_list))
194 | print('MSU(train): ', len(train_final_json))
195 | print('MSU(test): ', len(test_final_json))
196 | print('MSU(all): ', len(all_final_json))
197 | print('MSU(real): ', len(real_final_json))
198 | print('MSU(fake): ', len(fake_final_json))
199 | json.dump(train_final_json, f_train, indent=4)
200 | f_train.close()
201 | json.dump(test_final_json, f_test, indent=4)
202 | f_test.close()
203 | json.dump(all_final_json, f_all, indent=4)
204 | f_all.close()
205 | json.dump(real_final_json, f_real, indent=4)
206 | f_real.close()
207 | json.dump(fake_final_json, f_fake, indent=4)
208 | f_fake.close()
209 |
210 |
211 | def casia_test_process():
212 | train_final_json = []
213 | test_final_json = []
214 | all_final_json = []
215 | real_final_json = []
216 | fake_final_json = []
217 | label_save_dir = './casia_test/'
218 | if not os.path.exists(label_save_dir):
219 | os.makedirs(label_save_dir)
220 | f_train = open(label_save_dir + 'train_label.json', 'w')
221 | f_test = open(label_save_dir + 'test_label.json', 'w')
222 | f_all = open(label_save_dir + 'all_label.json', 'w')
223 | f_real = open(label_save_dir + 'real_label.json', 'w')
224 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
225 | dataset_path = data_dir + 'casia_256/'
226 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
227 | path_list.sort()
228 | for i in range(len(path_list)):
229 | flag = path_list[i].split('/')[-2]
230 | if (flag == '1' or flag == '2' or flag == 'HR_1'):
231 | label = 1
232 | else:
233 | # label = 0
234 |
235 | if flag in {'3', '4', '5', '6', 'HR_2', 'HR_3'}:
236 | label = 0
237 | else:
238 | label = 2
239 |
240 | dict = {}
241 | dict['photo_path'] = path_list[i]
242 | dict['photo_label'] = label
243 | flag = path_list[i].find('/train_release/')
244 | if (flag == -1):
245 | train_final_json.append(dict)
246 | if (label == 1):
247 | real_final_json.append(dict)
248 | else:
249 | fake_final_json.append(dict)
250 | all_final_json.append(dict)
251 |
252 | print('\nCasia: ', len(path_list))
253 | print('Casia(train): ', len(train_final_json))
254 | print('Casia(test): ', len(test_final_json))
255 | print('Casia(all): ', len(all_final_json))
256 | print('Casia(real): ', len(real_final_json))
257 | print('Casia(fake): ', len(fake_final_json))
258 | json.dump(train_final_json, f_train, indent=4)
259 | f_train.close()
260 | json.dump(test_final_json, f_test, indent=4)
261 | f_test.close()
262 | json.dump(all_final_json, f_all, indent=4)
263 | f_all.close()
264 | json.dump(real_final_json, f_real, indent=4)
265 | f_real.close()
266 | json.dump(fake_final_json, f_fake, indent=4)
267 | f_fake.close()
268 |
269 | def casia_train_process():
270 | train_final_json = []
271 | test_final_json = []
272 | all_final_json = []
273 | real_final_json = []
274 | fake_final_json = []
275 | label_save_dir = './casia_train/'
276 | if not os.path.exists(label_save_dir):
277 | os.makedirs(label_save_dir)
278 | f_train = open(label_save_dir + 'train_label.json', 'w')
279 | f_test = open(label_save_dir + 'test_label.json', 'w')
280 | f_all = open(label_save_dir + 'all_label.json', 'w')
281 | f_real = open(label_save_dir + 'real_label.json', 'w')
282 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
283 | dataset_path = data_dir + 'casia_256/'
284 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
285 | path_list.sort()
286 | for i in range(len(path_list)):
287 | flag = path_list[i].split('/')[-2]
288 | if (flag == '1' or flag == '2' or flag == 'HR_1'):
289 | label = 1
290 | else:
291 | # label = 0
292 |
293 | if flag in {'3', '4', '5', '6', 'HR_2', 'HR_3'}:
294 | label = 0
295 | else:
296 | label = 2
297 |
298 | dict = {}
299 | dict['photo_path'] = path_list[i]
300 | dict['photo_label'] = label
301 | flag = path_list[i].find('/train_release/')
302 | if (flag != -1):
303 | train_final_json.append(dict)
304 | all_final_json.append(dict)
305 | if (label == 1):
306 | real_final_json.append(dict)
307 | else:
308 | fake_final_json.append(dict)
309 | print('\nCasia: ', len(path_list))
310 | print('Casia(train): ', len(train_final_json))
311 | print('Casia(test): ', len(test_final_json))
312 | print('Casia(all): ', len(all_final_json))
313 | print('Casia(real): ', len(real_final_json))
314 | print('Casia(fake): ', len(fake_final_json))
315 | json.dump(train_final_json, f_train, indent=4)
316 | f_train.close()
317 | json.dump(test_final_json, f_test, indent=4)
318 | f_test.close()
319 | json.dump(all_final_json, f_all, indent=4)
320 | f_all.close()
321 | json.dump(real_final_json, f_real, indent=4)
322 | f_real.close()
323 | json.dump(fake_final_json, f_fake, indent=4)
324 | f_fake.close()
325 |
326 | def casia_process():
327 | train_final_json = []
328 | test_final_json = []
329 | all_final_json = []
330 | real_final_json = []
331 | fake_final_json = []
332 | label_save_dir = './casia/'
333 | if not os.path.exists(label_save_dir):
334 | os.makedirs(label_save_dir)
335 | f_train = open(label_save_dir + 'train_label.json', 'w')
336 | f_test = open(label_save_dir + 'test_label.json', 'w')
337 | f_all = open(label_save_dir + 'all_label.json', 'w')
338 | f_real = open(label_save_dir + 'real_label.json', 'w')
339 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
340 | dataset_path = data_dir + 'casia_256/'
341 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
342 | path_list.sort()
343 | for i in range(len(path_list)):
344 | flag = path_list[i].split('/')[-2]
345 | if (flag == '1' or flag == '2' or flag == 'HR_1'):
346 | label = 1
347 | else:
348 | # label = 0
349 |
350 | if flag in {'3', '4', '5', '6', 'HR_2', 'HR_3'}:
351 | label = 0
352 | else:
353 | label = 2
354 |
355 | dict = {}
356 | dict['photo_path'] = path_list[i]
357 | dict['photo_label'] = label
358 | flag = path_list[i].find('/train_release/')
359 | if (flag != -1):
360 | train_final_json.append(dict)
361 | else:
362 | test_final_json.append(dict)
363 | all_final_json.append(dict)
364 | if (label == 1):
365 | real_final_json.append(dict)
366 | else:
367 | fake_final_json.append(dict)
368 | print('\nCasia: ', len(path_list))
369 | print('Casia(train): ', len(train_final_json))
370 | print('Casia(test): ', len(test_final_json))
371 | print('Casia(all): ', len(all_final_json))
372 | print('Casia(real): ', len(real_final_json))
373 | print('Casia(fake): ', len(fake_final_json))
374 | json.dump(train_final_json, f_train, indent=4)
375 | f_train.close()
376 | json.dump(test_final_json, f_test, indent=4)
377 | f_test.close()
378 | json.dump(all_final_json, f_all, indent=4)
379 | f_all.close()
380 | json.dump(real_final_json, f_real, indent=4)
381 | f_real.close()
382 | json.dump(fake_final_json, f_fake, indent=4)
383 | f_fake.close()
384 |
385 | def replay_test_process():
386 | train_final_json = []
387 | valid_final_json = []
388 | test_final_json = []
389 | all_final_json = []
390 | real_final_json = []
391 | fake_final_json = []
392 | label_save_dir = './replay_test/'
393 | if not os.path.exists(label_save_dir):
394 | os.makedirs(label_save_dir)
395 | f_train = open(label_save_dir + 'train_label.json', 'w')
396 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
397 | f_test = open(label_save_dir + 'test_label.json', 'w')
398 | f_all = open(label_save_dir + 'all_label.json', 'w')
399 | f_real = open(label_save_dir + 'real_label.json', 'w')
400 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
401 | dataset_path = data_dir + 'replay_256/'
402 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
403 | path_list.sort()
404 | for i in range(len(path_list)):
405 | flag = path_list[i].find('/real/')
406 | if (flag != -1):
407 | label = 1
408 | else:
409 | # label = 0
410 |
411 | if path_list[i].find('print') != -1:
412 | label = 0
413 | else:
414 | label = 2
415 |
416 | dict = {}
417 | dict['photo_path'] = path_list[i]
418 | dict['photo_label'] = label
419 | if (path_list[i].find('/replayattack-train/') != -1):
420 | pass
421 | elif(path_list[i].find('/replayattack-devel/') != -1):
422 | valid_final_json.append(dict)
423 | else:
424 | all_final_json.append(dict)
425 | train_final_json.append(dict)
426 | if (label == 1):
427 | real_final_json.append(dict)
428 | else:
429 | fake_final_json.append(dict)
430 |
431 | print('\nReplay: ', len(path_list))
432 | print('Replay(train): ', len(train_final_json))
433 | print('Replay(valid): ', len(valid_final_json))
434 | print('Replay(test): ', len(test_final_json))
435 | print('Replay(all): ', len(all_final_json))
436 | print('Replay(real): ', len(real_final_json))
437 | print('Replay(fake): ', len(fake_final_json))
438 | json.dump(train_final_json, f_train, indent=4)
439 | f_train.close()
440 | json.dump(valid_final_json, f_valid, indent=4)
441 | f_valid.close()
442 | json.dump(test_final_json, f_test, indent=4)
443 | f_test.close()
444 | json.dump(all_final_json, f_all, indent=4)
445 | f_all.close()
446 | json.dump(real_final_json, f_real, indent=4)
447 | f_real.close()
448 | json.dump(fake_final_json, f_fake, indent=4)
449 | f_fake.close()
450 | def replay_train_process():
451 | train_final_json = []
452 | valid_final_json = []
453 | test_final_json = []
454 | all_final_json = []
455 | real_final_json = []
456 | fake_final_json = []
457 | label_save_dir = './replay_train/'
458 | if not os.path.exists(label_save_dir):
459 | os.makedirs(label_save_dir)
460 | f_train = open(label_save_dir + 'train_label.json', 'w')
461 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
462 | f_test = open(label_save_dir + 'test_label.json', 'w')
463 | f_all = open(label_save_dir + 'all_label.json', 'w')
464 | f_real = open(label_save_dir + 'real_label.json', 'w')
465 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
466 | dataset_path = data_dir + 'replay_256/'
467 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
468 | path_list.sort()
469 | for i in range(len(path_list)):
470 | flag = path_list[i].find('/real/')
471 | if (flag != -1):
472 | label = 1
473 | else:
474 | # label = 0
475 |
476 | if path_list[i].find('print') != -1:
477 | label = 0
478 | else:
479 | label = 2
480 |
481 | dict = {}
482 | dict['photo_path'] = path_list[i]
483 | dict['photo_label'] = label
484 | if (path_list[i].find('/replayattack-train/') != -1):
485 | train_final_json.append(dict)
486 | all_final_json.append(dict)
487 | if (label == 1):
488 | real_final_json.append(dict)
489 | else:
490 | fake_final_json.append(dict)
491 |
492 | print('\nReplay: ', len(path_list))
493 | print('Replay(train): ', len(train_final_json))
494 | print('Replay(valid): ', len(valid_final_json))
495 | print('Replay(test): ', len(test_final_json))
496 | print('Replay(all): ', len(all_final_json))
497 | print('Replay(real): ', len(real_final_json))
498 | print('Replay(fake): ', len(fake_final_json))
499 | json.dump(train_final_json, f_train, indent=4)
500 | f_train.close()
501 | json.dump(valid_final_json, f_valid, indent=4)
502 | f_valid.close()
503 | json.dump(test_final_json, f_test, indent=4)
504 | f_test.close()
505 | json.dump(all_final_json, f_all, indent=4)
506 | f_all.close()
507 | json.dump(real_final_json, f_real, indent=4)
508 | f_real.close()
509 | json.dump(fake_final_json, f_fake, indent=4)
510 | f_fake.close()
511 |
512 | def replay_process():
513 | train_final_json = []
514 | valid_final_json = []
515 | test_final_json = []
516 | all_final_json = []
517 | real_final_json = []
518 | fake_final_json = []
519 | label_save_dir = './replay/'
520 | if not os.path.exists(label_save_dir):
521 | os.makedirs(label_save_dir)
522 | f_train = open(label_save_dir + 'train_label.json', 'w')
523 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
524 | f_test = open(label_save_dir + 'test_label.json', 'w')
525 | f_all = open(label_save_dir + 'all_label.json', 'w')
526 | f_real = open(label_save_dir + 'real_label.json', 'w')
527 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
528 | dataset_path = data_dir + 'replay_256/'
529 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
530 | path_list.sort()
531 | for i in range(len(path_list)):
532 | flag = path_list[i].find('/real/')
533 | if (flag != -1):
534 | label = 1
535 | else:
536 | # label = 0
537 |
538 | if path_list[i].find('print') != -1:
539 | label = 0
540 | else:
541 | label = 2
542 |
543 | dict = {}
544 | dict['photo_path'] = path_list[i]
545 | dict['photo_label'] = label
546 | if (path_list[i].find('/replayattack-train/') != -1):
547 | train_final_json.append(dict)
548 | elif(path_list[i].find('/replayattack-devel/') != -1):
549 | valid_final_json.append(dict)
550 | else:
551 | test_final_json.append(dict)
552 | if(path_list[i].find('/replayattack-devel/') != -1):
553 | continue
554 | else:
555 | all_final_json.append(dict)
556 | if (label == 1):
557 | real_final_json.append(dict)
558 | else:
559 | fake_final_json.append(dict)
560 | print('\nReplay: ', len(path_list))
561 | print('Replay(train): ', len(train_final_json))
562 | print('Replay(valid): ', len(valid_final_json))
563 | print('Replay(test): ', len(test_final_json))
564 | print('Replay(all): ', len(all_final_json))
565 | print('Replay(real): ', len(real_final_json))
566 | print('Replay(fake): ', len(fake_final_json))
567 | json.dump(train_final_json, f_train, indent=4)
568 | f_train.close()
569 | json.dump(valid_final_json, f_valid, indent=4)
570 | f_valid.close()
571 | json.dump(test_final_json, f_test, indent=4)
572 | f_test.close()
573 | json.dump(all_final_json, f_all, indent=4)
574 | f_all.close()
575 | json.dump(real_final_json, f_real, indent=4)
576 | f_real.close()
577 | json.dump(fake_final_json, f_fake, indent=4)
578 | f_fake.close()
579 |
580 | def oulu_train_process():
581 | train_final_json = []
582 | valid_final_json = []
583 | test_final_json = []
584 | all_final_json = []
585 | real_final_json = []
586 | fake_final_json = []
587 | label_save_dir = './oulu_train/'
588 | if not os.path.exists(label_save_dir):
589 | os.makedirs(label_save_dir)
590 | f_train = open(label_save_dir + 'train_label.json', 'w')
591 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
592 | f_test = open(label_save_dir + 'test_label.json', 'w')
593 | f_all = open(label_save_dir + 'all_label.json', 'w')
594 | f_real = open(label_save_dir + 'real_label.json', 'w')
595 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
596 | dataset_path = data_dir + 'oulu_256/'
597 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
598 | path_list.sort()
599 | for i in range(len(path_list)):
600 | flag = int(path_list[i].split('/')[-2].split('_')[-1])
601 | if (flag == 1):
602 | label = 1
603 | else:
604 | #label = 0
605 |
606 | if flag == 2 or flag == 3:
607 | label = 0
608 | else:
609 | label = 2
610 |
611 | dict = {}
612 | dict['photo_path'] = path_list[i]
613 | dict['photo_label'] = label
614 | if (path_list[i].find('/Train_files/') != -1):
615 | train_final_json.append(dict)
616 | all_final_json.append(dict)
617 | if (label == 1):
618 | real_final_json.append(dict)
619 | else:
620 | fake_final_json.append(dict)
621 |
622 |
623 | print('\nOulu: ', len(path_list))
624 | print('Oulu(train): ', len(train_final_json))
625 | print('Oulu(valid): ', len(valid_final_json))
626 | print('Oulu(test): ', len(test_final_json))
627 | print('Oulu(all): ', len(all_final_json))
628 | print('Oulu(real): ', len(real_final_json))
629 | print('Oulu(fake): ', len(fake_final_json))
630 | json.dump(train_final_json, f_train, indent=4)
631 | f_train.close()
632 | json.dump(valid_final_json, f_valid, indent=4)
633 | f_valid.close()
634 | json.dump(test_final_json, f_test, indent=4)
635 | f_test.close()
636 | json.dump(all_final_json, f_all, indent=4)
637 | f_all.close()
638 | json.dump(real_final_json, f_real, indent=4)
639 | f_real.close()
640 | json.dump(fake_final_json, f_fake, indent=4)
641 | f_fake.close()
642 |
643 | def oulu_test_process():
644 | train_final_json = []
645 | valid_final_json = []
646 | test_final_json = []
647 | all_final_json = []
648 | real_final_json = []
649 | fake_final_json = []
650 | label_save_dir = './oulu_test/'
651 | if not os.path.exists(label_save_dir):
652 | os.makedirs(label_save_dir)
653 | f_train = open(label_save_dir + 'train_label.json', 'w')
654 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
655 | f_test = open(label_save_dir + 'test_label.json', 'w')
656 | f_all = open(label_save_dir + 'all_label.json', 'w')
657 | f_real = open(label_save_dir + 'real_label.json', 'w')
658 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
659 | dataset_path = data_dir + 'oulu_256/'
660 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
661 | path_list.sort()
662 | for i in range(len(path_list)):
663 | flag = int(path_list[i].split('/')[-2].split('_')[-1])
664 | if (flag == 1):
665 | label = 1
666 | else:
667 | #label = 0
668 |
669 | if flag == 2 or flag == 3:
670 | label = 0
671 | else:
672 | label = 2
673 |
674 | dict = {}
675 | dict['photo_path'] = path_list[i]
676 | dict['photo_label'] = label
677 | if (path_list[i].find('/Train_files/') != -1):
678 | pass
679 | elif(path_list[i].find('/Dev_files/') != -1):
680 | valid_final_json.append(dict)
681 | else:
682 | train_final_json.append(dict)
683 | all_final_json.append(dict)
684 | if (label == 1):
685 | real_final_json.append(dict)
686 | else:
687 | fake_final_json.append(dict)
688 |
689 |
690 | print('\nOulu: ', len(path_list))
691 | print('Oulu(train): ', len(train_final_json))
692 | print('Oulu(valid): ', len(valid_final_json))
693 | print('Oulu(test): ', len(test_final_json))
694 | print('Oulu(all): ', len(all_final_json))
695 | print('Oulu(real): ', len(real_final_json))
696 | print('Oulu(fake): ', len(fake_final_json))
697 | json.dump(train_final_json, f_train, indent=4)
698 | f_train.close()
699 | json.dump(valid_final_json, f_valid, indent=4)
700 | f_valid.close()
701 | json.dump(test_final_json, f_test, indent=4)
702 | f_test.close()
703 | json.dump(all_final_json, f_all, indent=4)
704 | f_all.close()
705 | json.dump(real_final_json, f_real, indent=4)
706 | f_real.close()
707 | json.dump(fake_final_json, f_fake, indent=4)
708 | f_fake.close()
709 | def oulu_process():
710 | train_final_json = []
711 | valid_final_json = []
712 | test_final_json = []
713 | all_final_json = []
714 | real_final_json = []
715 | fake_final_json = []
716 | label_save_dir = './oulu/'
717 | if not os.path.exists(label_save_dir):
718 | os.makedirs(label_save_dir)
719 | f_train = open(label_save_dir + 'train_label.json', 'w')
720 | f_valid = open(label_save_dir + 'valid_label.json', 'w')
721 | f_test = open(label_save_dir + 'test_label.json', 'w')
722 | f_all = open(label_save_dir + 'all_label.json', 'w')
723 | f_real = open(label_save_dir + 'real_label.json', 'w')
724 | f_fake = open(label_save_dir + 'fake_label.json', 'w')
725 | dataset_path = data_dir + 'oulu_256/'
726 | path_list = glob.glob(dataset_path + '**/*.png', recursive=True)
727 | path_list.sort()
728 | for i in range(len(path_list)):
729 | flag = int(path_list[i].split('/')[-2].split('_')[-1])
730 | if (flag == 1):
731 | label = 1
732 | else:
733 | #label = 0
734 |
735 | if flag == 2 or flag == 3:
736 | label = 0
737 | else:
738 | label = 2
739 |
740 | dict = {}
741 | dict['photo_path'] = path_list[i]
742 | dict['photo_label'] = label
743 | if (path_list[i].find('/Train_files/') != -1):
744 | train_final_json.append(dict)
745 | elif(path_list[i].find('/Dev_files/') != -1):
746 | valid_final_json.append(dict)
747 | else:
748 | test_final_json.append(dict)
749 | if(path_list[i].find('/Dev_files/') != -1):
750 | continue
751 | else:
752 | all_final_json.append(dict)
753 | if (label == 1):
754 | real_final_json.append(dict)
755 | else:
756 | fake_final_json.append(dict)
757 | print('\nOulu: ', len(path_list))
758 | print('Oulu(train): ', len(train_final_json))
759 | print('Oulu(valid): ', len(valid_final_json))
760 | print('Oulu(test): ', len(test_final_json))
761 | print('Oulu(all): ', len(all_final_json))
762 | print('Oulu(real): ', len(real_final_json))
763 | print('Oulu(fake): ', len(fake_final_json))
764 | json.dump(train_final_json, f_train, indent=4)
765 | f_train.close()
766 | json.dump(valid_final_json, f_valid, indent=4)
767 | f_valid.close()
768 | json.dump(test_final_json, f_test, indent=4)
769 | f_test.close()
770 | json.dump(all_final_json, f_all, indent=4)
771 | f_all.close()
772 | json.dump(real_final_json, f_real, indent=4)
773 | f_real.close()
774 | json.dump(fake_final_json, f_fake, indent=4)
775 | f_fake.close()
776 |
777 |
778 | if __name__=="__main__":
779 | msu_process()
780 | oulu_process()
781 | casia_process()
782 | replay_process()
783 |
784 |
--------------------------------------------------------------------------------
/experiment/m/config.py:
--------------------------------------------------------------------------------
1 | class DefaultConfigs(object):
2 | seed = 666
3 | # SGD
4 | weight_decay = 1e-6
5 | momentum = 0.9
6 | # learning rate
7 | init_lr = 1e-4
8 | lr_epoch_1 = 0
9 | lr_epoch_2 = 150
10 | # model
11 | pretrained = True
12 | model = 'dgua_fas'
13 | # training parameters
14 | gpus = "0"
15 | batch_size = 4
16 | norm_flag = True
17 | max_iter = 4000
18 | lambda_triplet = 1
19 | lambda_adreal = 0.5
20 | # test model name
21 | tgt_best_model_name = 'best_model.pth.tar' #'model_best_0.08_29.pth.tar'
22 | # source data information
23 | src1_data = 'oulu'
24 | src1_train_num_frames = 1
25 | src2_data = 'casia'
26 | src2_train_num_frames = 1
27 | src3_data = 'msu'
28 | src3_train_num_frames = 1
29 | # target data information
30 | tgt_data = 'replay'
31 | tgt_test_num_frames = 3
32 | # paths information
33 | checkpoint_path = './test_checkpoint/' + model + '/DGFANet/'
34 | best_model_path = './test_checkpoint/' + model + '/best_model/'
35 | logs = './logs/'
36 |
37 | config = DefaultConfigs()
38 |
--------------------------------------------------------------------------------
/experiment/m/dg_test.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys.path.append('../../')
4 | import os
5 | import numpy as np
6 | import torch
7 | from torch.utils.data import DataLoader
8 | from torch.autograd import Variable
9 | from torch.nn import functional as F
10 | from config import config
11 | from util.utils import sample_frames
12 | from util.dataset import YunpeiDataset
13 | from util.utils import AverageMeter, accuracy, draw_roc
14 | from util.statistic import get_EER_states, get_HTER_at_thr, calculate_threshold
15 | from sklearn.metrics import roc_auc_score
16 | from option import get_training_arguments
17 | from cvnets import get_model
18 |
19 |
20 |
21 |
22 |
23 | os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
24 |
25 | accs = [[], [], []]
26 | accs2 = [[], [], []]
27 | def test(test_dataloader, model, threshold):
28 |     valid_top1 = AverageMeter()
29 |     prob_dict = {}
30 |     label_dict = {}
31 |     p_dict = {}
32 |     model.eval()
33 |     output_dict_tmp = {}
34 |     target_dict_tmp = {}
35 |     number = 0
36 |
37 |     tmp = []
38 |     na = []
39 |     test_features_dict = {}
40 |     with torch.no_grad():
41 |         for iter, (input, target, videoID) in enumerate(test_dataloader):
42 |             if (iter % 100 == 0):
43 |                 print('**Testing** ', iter, ' photos done!')
44 |             input = Variable(input).cuda()
45 |             target = Variable(torch.from_numpy(np.array(target)).long()).cuda()
46 |             cls_out = model(input, config.norm_flag)[0]
47 |             prob = F.softmax(cls_out, dim=1).cpu().data.numpy()[:, 1]
48 |             p = F.softmax(cls_out, dim=1).detach().cpu().numpy()
49 |
50 |             tmp += [[p, torch.argmax(cls_out, dim=1)[0].item(), target.detach().cpu()[0].item()]]
51 |             # novel attack
52 |             if tmp[-1][2] == 3:
53 |                 na += [prob]
54 |             label = target.cpu().data.numpy()
55 |             videoID = videoID.cpu().data.numpy()
56 |
57 |
58 |             for i in range(len(prob)):
59 |                 if (videoID[i] in prob_dict.keys()):
60 |                     prob_dict[videoID[i]].append(prob[i])
61 |                     label_dict[videoID[i]].append(label[i])
62 |                     p_dict[videoID[i]].append(p[i])
63 |                     output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))
64 |                     target_dict_tmp[videoID[i]].append(target[i].view(1))
65 |                 else:
66 |                     prob_dict[videoID[i]] = []
67 |                     label_dict[videoID[i]] = []
68 |                     p_dict[videoID[i]] = []
69 |                     prob_dict[videoID[i]].append(prob[i])
70 |                     label_dict[videoID[i]].append(label[i])
71 |                     p_dict[videoID[i]].append(p[i])
72 |                     output_dict_tmp[videoID[i]] = []
73 |                     target_dict_tmp[videoID[i]] = []
74 |                     output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))
75 |                     target_dict_tmp[videoID[i]].append(target[i].view(1))
76 |                     # 1*3*256*256
77 |                     input = input[i].reshape(1, input[i].shape[0], input[i].shape[1], input[i].shape[2])
78 |                     # feature: 640*8*8; flatten: 40960
79 |                     feature = torch.flatten(model.extract_features(input.cuda())).detach().cpu()
80 |                     test_features_dict[videoID[i]] = feature.numpy()
81 |                 number += 1
82 |
83 |     print('**Testing** ', number, ' photos done!')
84 |
85 |     prob_list = []
86 |     label_list = []
87 |     p_list = []
88 |     test_features = []
89 |     for key in prob_dict.keys():
90 |         avg_single_video_prob = sum(prob_dict[key]) / len(prob_dict[key])
91 |         avg_single_video_label = sum(label_dict[key]) / len(label_dict[key])
92 |         avg_single_video_p = sum(p_dict[key]) / len(p_dict[key])
93 |         test_features.append(test_features_dict[key])
94 |         prob_list = np.append(prob_list, avg_single_video_prob)
95 |         label_list = np.append(label_list, avg_single_video_label)
96 |         p_list.append(avg_single_video_p)
97 |         # compute loss and acc for every video
98 |         avg_single_video_output = sum(output_dict_tmp[key]) / len(output_dict_tmp[key])
99 |         avg_single_video_target = sum(target_dict_tmp[key]) / len(target_dict_tmp[key])
100 |         acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,))
101 |         valid_top1.update(acc_valid[0])
102 |
103 |     binary_label = np.where(label_list == 1, np.ones_like(label_list), np.zeros_like(label_list))
104 |     cur_EER_valid, threshold, FRR_list, FAR_list = get_EER_states(prob_list, binary_label)
105 |     ACC_threshold = calculate_threshold(prob_list, binary_label, threshold)
106 |     auc_score = roc_auc_score(binary_label, prob_list)
107 |     draw_roc(FRR_list, FAR_list, auc_score)
108 |     cur_HTER_valid = get_HTER_at_thr(prob_list, binary_label, threshold)
109 |
110 |
111 |     return [valid_top1.avg, cur_EER_valid, cur_HTER_valid, auc_score, ACC_threshold, threshold]
112 |
113 | def main():
114 |
115 |     opts = get_training_arguments(config_path='./../../configs/mobilevit_s.yaml')
116 |     net = get_model(opts)
117 |     net.cuda()
118 |     net_ = torch.load(config.best_model_path + config.tgt_best_model_name)
119 |     net.load_state_dict(net_["state_dict"])
120 |     threshold = net_["threshold"]
121 |     net.eval()
122 |
123 |
124 |     test_data = sample_frames(flag=2, num_frames=config.tgt_test_num_frames, dataset_name=config.tgt_data)
125 |     test_dataloader = DataLoader(YunpeiDataset(test_data, train=False), batch_size=1, shuffle=False)
126 |
127 |
128 |
129 |     print('\n')
130 |     print("**Testing** Get test files done!")
131 |     # test model
132 |     test_args = test(test_dataloader, net, threshold)
133 |     print('\n===========Test Info===========\n')
134 |     print(config.tgt_data, 'Test acc: %5.4f' % (test_args[0]))
135 |     print(config.tgt_data, 'Test EER: %5.4f' % (test_args[1]))
136 |     print(config.tgt_data, 'Test HTER: %5.4f' % (test_args[2]))
137 |     print(config.tgt_data, 'Test AUC: %5.4f' % (test_args[3]))
138 |     print(config.tgt_data, 'Test ACC_threshold: %5.4f' % (test_args[4]))
139 |     print('\n===============================\n')
140 |
141 |
142 |
143 | if __name__ == '__main__':
144 |     main()
145 |
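The script averages per-frame live probabilities over each video before scoring, then reports EER, HTER, and AUC; HTER is the mean of the false-acceptance and false-rejection rates at the threshold stored in the checkpoint. A stand-alone sketch of that metric, assumed to mirror what util.statistic.get_HTER_at_thr computes (that module is not shown in this section):

    import numpy as np

    def hter_at_threshold(probs, labels, thr):
        # probs: per-video P(live); labels: 1 = live, 0 = attack
        far = np.mean(probs[labels == 0] >= thr)  # attacks accepted as live
        frr = np.mean(probs[labels == 1] < thr)   # live videos rejected
        return (far + frr) / 2.0

    # e.g. hter_at_threshold(prob_list, binary_label, threshold)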
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667751802.public2-2.288376.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667751802.public2-2.288376.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667752838.public2-2.292092.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667752838.public2-2.292092.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667752898.public2-2.292404.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667752898.public2-2.292404.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667753917.public2-2.296856.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667753917.public2-2.296856.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667754066.public2-2.297489.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667754066.public2-2.297489.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667757204.public2-2.309449.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667757204.public2-2.309449.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667791088.public2-2.345886.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1667791088.public2-2.345886.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668090397.public2-2.765533.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668090397.public2-2.765533.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091334.public2-2.769344.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091334.public2-2.769344.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091449.public2-2.769899.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091449.public2-2.769899.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091849.public2-2.771620.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091849.public2-2.771620.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091915.public2-2.772017.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668091915.public2-2.772017.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668092026.public2-2.772597.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668092026.public2-2.772597.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668092195.public2-2.773242.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1668092195.public2-2.773242.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743287.public2-2.531760.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743287.public2-2.531760.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743366.public2-2.532089.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743366.public2-2.532089.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743508.public2-2.532654.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743508.public2-2.532654.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743717.public2-2.533330.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743717.public2-2.533330.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743959.public2-2.534478.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678743959.public2-2.534478.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678744019.public2-2.534792.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678744019.public2-2.534792.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678744350.public2-2.536001.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678744350.public2-2.536001.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678750528.public2-2.567922.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678750528.public2-2.567922.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678750631.public2-2.568450.1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_train/events.out.tfevents.1678750631.public2-2.568450.1
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667751802.public2-2.288376.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667751802.public2-2.288376.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667752838.public2-2.292092.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667752838.public2-2.292092.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667752898.public2-2.292404.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667752898.public2-2.292404.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667753917.public2-2.296856.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667753917.public2-2.296856.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667754066.public2-2.297489.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667754066.public2-2.297489.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667757204.public2-2.309449.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667757204.public2-2.309449.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667791088.public2-2.345886.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1667791088.public2-2.345886.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668090397.public2-2.765533.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668090397.public2-2.765533.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091334.public2-2.769344.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091334.public2-2.769344.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091449.public2-2.769899.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091449.public2-2.769899.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091849.public2-2.771620.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091849.public2-2.771620.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091915.public2-2.772017.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668091915.public2-2.772017.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668092026.public2-2.772597.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668092026.public2-2.772597.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668092195.public2-2.773242.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1668092195.public2-2.773242.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743287.public2-2.531760.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743287.public2-2.531760.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743366.public2-2.532089.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743366.public2-2.532089.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743508.public2-2.532654.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743508.public2-2.532654.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743717.public2-2.533330.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743717.public2-2.533330.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743959.public2-2.534478.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678743959.public2-2.534478.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678744019.public2-2.534792.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678744019.public2-2.534792.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678744350.public2-2.536001.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678744350.public2-2.536001.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678750528.public2-2.567922.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678750528.public2-2.567922.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678750631.public2-2.568450.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Accuracy_valid/events.out.tfevents.1678750631.public2-2.568450.2
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667751802.public2-2.288376.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667751802.public2-2.288376.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667752838.public2-2.292092.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667752838.public2-2.292092.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667752898.public2-2.292404.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667752898.public2-2.292404.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667753917.public2-2.296856.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667753917.public2-2.296856.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667754066.public2-2.297489.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667754066.public2-2.297489.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667757204.public2-2.309449.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667757204.public2-2.309449.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667791088.public2-2.345886.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1667791088.public2-2.345886.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668090397.public2-2.765533.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668090397.public2-2.765533.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091334.public2-2.769344.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091334.public2-2.769344.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091449.public2-2.769899.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091449.public2-2.769899.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091849.public2-2.771620.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091849.public2-2.771620.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091915.public2-2.772017.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668091915.public2-2.772017.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668092026.public2-2.772597.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668092026.public2-2.772597.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668092195.public2-2.773242.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1668092195.public2-2.773242.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743287.public2-2.531760.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743287.public2-2.531760.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743366.public2-2.532089.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743366.public2-2.532089.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743508.public2-2.532654.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743508.public2-2.532654.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743717.public2-2.533330.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743717.public2-2.533330.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743959.public2-2.534478.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678743959.public2-2.534478.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678744019.public2-2.534792.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678744019.public2-2.534792.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678744350.public2-2.536001.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678744350.public2-2.536001.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678750528.public2-2.567922.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678750528.public2-2.567922.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678750631.public2-2.568450.3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_train/events.out.tfevents.1678750631.public2-2.568450.3
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1653557809.l444ysctr1653533547001-j4nrx.4482.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667751802.public2-2.288376.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667751802.public2-2.288376.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667752838.public2-2.292092.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667752838.public2-2.292092.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667752898.public2-2.292404.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667752898.public2-2.292404.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667753917.public2-2.296856.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667753917.public2-2.296856.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667754066.public2-2.297489.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667754066.public2-2.297489.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667757204.public2-2.309449.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667757204.public2-2.309449.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667791088.public2-2.345886.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1667791088.public2-2.345886.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668090397.public2-2.765533.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668090397.public2-2.765533.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091334.public2-2.769344.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091334.public2-2.769344.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091449.public2-2.769899.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091449.public2-2.769899.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091849.public2-2.771620.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091849.public2-2.771620.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091915.public2-2.772017.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668091915.public2-2.772017.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668092026.public2-2.772597.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668092026.public2-2.772597.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668092195.public2-2.773242.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1668092195.public2-2.773242.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743287.public2-2.531760.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743287.public2-2.531760.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743366.public2-2.532089.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743366.public2-2.532089.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743508.public2-2.532654.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743508.public2-2.532654.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743717.public2-2.533330.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743717.public2-2.533330.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743959.public2-2.534478.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678743959.public2-2.534478.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678744019.public2-2.534792.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678744019.public2-2.534792.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678744350.public2-2.536001.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678744350.public2-2.536001.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678750528.public2-2.567922.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678750528.public2-2.567922.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678750631.public2-2.568450.4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/Loss_valid/events.out.tfevents.1678750631.public2-2.568450.4
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1653557730.l444ysctr1653533547001-j4nrx.4482.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1653557730.l444ysctr1653533547001-j4nrx.4482.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667632455.public2-2.53048.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667632455.public2-2.53048.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667632486.public2-2.53189.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667632486.public2-2.53189.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667632618.public2-2.53530.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667632618.public2-2.53530.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667632644.public2-2.53627.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667632644.public2-2.53627.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667751731.public2-2.288133.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667751731.public2-2.288133.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667751760.public2-2.288255.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667751760.public2-2.288255.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667751798.public2-2.288376.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667751798.public2-2.288376.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667752832.public2-2.292092.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667752832.public2-2.292092.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667752892.public2-2.292404.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667752892.public2-2.292404.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667753909.public2-2.296856.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667753909.public2-2.296856.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667754058.public2-2.297489.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667754058.public2-2.297489.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667757180.public2-2.309360.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667757180.public2-2.309360.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667757191.public2-2.309449.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667757191.public2-2.309449.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667791057.public2-2.345761.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667791057.public2-2.345761.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1667791079.public2-2.345886.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1667791079.public2-2.345886.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668086014.public2-2.754938.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668086014.public2-2.754938.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087845.public2-2.758793.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087845.public2-2.758793.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087899.public2-2.758979.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087899.public2-2.758979.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087912.public2-2.759057.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087912.public2-2.759057.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087926.public2-2.759154.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087926.public2-2.759154.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087950.public2-2.759313.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087950.public2-2.759313.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668087981.public2-2.759407.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668087981.public2-2.759407.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088002.public2-2.759535.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088002.public2-2.759535.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088018.public2-2.759623.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088018.public2-2.759623.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088135.public2-2.759938.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088135.public2-2.759938.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088146.public2-2.760028.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088146.public2-2.760028.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088205.public2-2.760242.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088205.public2-2.760242.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088258.public2-2.760423.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088258.public2-2.760423.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668088295.public2-2.760547.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668088295.public2-2.760547.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668089319.public2-2.763614.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668089319.public2-2.763614.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668090177.public2-2.765042.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668090177.public2-2.765042.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668090281.public2-2.765284.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668090281.public2-2.765284.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668090388.public2-2.765533.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668090388.public2-2.765533.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091229.public2-2.769027.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091229.public2-2.769027.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091308.public2-2.769255.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091308.public2-2.769255.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091327.public2-2.769344.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091327.public2-2.769344.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091440.public2-2.769899.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091440.public2-2.769899.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091841.public2-2.771620.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091841.public2-2.771620.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091871.public2-2.771800.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091871.public2-2.771800.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091895.public2-2.771913.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091895.public2-2.771913.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668091909.public2-2.772017.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668091909.public2-2.772017.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668092020.public2-2.772597.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668092020.public2-2.772597.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668092132.public2-2.773061.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668092132.public2-2.773061.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1668092188.public2-2.773242.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1668092188.public2-2.773242.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743240.public2-2.531562.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743240.public2-2.531562.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743265.public2-2.531675.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743265.public2-2.531675.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743273.public2-2.531760.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743273.public2-2.531760.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743352.public2-2.532089.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743352.public2-2.532089.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743494.public2-2.532654.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743494.public2-2.532654.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743703.public2-2.533330.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743703.public2-2.533330.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678743945.public2-2.534478.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678743945.public2-2.534478.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678744005.public2-2.534792.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678744005.public2-2.534792.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678744337.public2-2.536001.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678744337.public2-2.536001.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678750514.public2-2.567922.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678750514.public2-2.567922.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678750618.public2-2.568450.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678750618.public2-2.568450.0
--------------------------------------------------------------------------------
/experiment/m/logs/runs/events.out.tfevents.1678750763.public2-2.569212.0:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/logs/runs/events.out.tfevents.1678750763.public2-2.569212.0
--------------------------------------------------------------------------------
/experiment/m/test_checkpoint/dgua_fas/DGFANet/_checkpoint.pth.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/test_checkpoint/dgua_fas/DGFANet/_checkpoint.pth.tar
--------------------------------------------------------------------------------
/experiment/m/test_checkpoint/dgua_fas/best_model/best_model.pth.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/test_checkpoint/dgua_fas/best_model/best_model.pth.tar
--------------------------------------------------------------------------------
/experiment/m/train.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 |
4 | from util.utils import save_checkpoint, AverageMeter, Logger, accuracy, mkdirs, time_to_str
5 | from util.evaluate import eval
6 | from util.get_loader import get_dataset
7 |
8 | import random
9 | import numpy as np
10 | from config import config
11 | from datetime import datetime
12 | import time
13 | from timeit import default_timer as timer
14 | import os
15 | import torch
16 | import torch.nn as nn
17 | import torch.optim as optim
18 | import torch.nn.functional as F
19 | from torch.utils.tensorboard import SummaryWriter
20 |
21 | from cvnets.models import get_model
22 | from option import get_training_arguments
23 |
24 | class SmoothCrossEntropy(nn.Module):
25 | def __init__(self, alpha=0.5):
26 | super(SmoothCrossEntropy, self).__init__()
27 | self.alpha = alpha
28 |
29 | def forward(self, logits, labels):
30 | num_classes = logits.shape[-1]
31 | alpha_div_k = self.alpha / num_classes
32 | target_probs = nn.functional.one_hot(labels, num_classes=num_classes).float() * \
33 | (1. - self.alpha) + alpha_div_k
34 | loss = -(target_probs * torch.log_softmax(logits, dim=-1)).sum(dim=-1)
35 | return loss.mean()
36 |
37 |
38 | random.seed(config.seed)
39 | np.random.seed(config.seed)
40 | torch.manual_seed(config.seed)
41 | torch.cuda.manual_seed_all(config.seed)
42 | torch.cuda.manual_seed(config.seed)
43 | os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
44 | torch.backends.cudnn.benchmark = False
45 | torch.backends.cudnn.deterministic = True
46 | device = 'cuda'
47 |
48 | def train():
49 | mkdirs(config.checkpoint_path, config.best_model_path, config.logs)
50 | # load data
51 | src1_train_dataloader_fake, src1_train_dataloader_real, \
52 | src2_train_dataloader_fake, src2_train_dataloader_real, \
53 | src3_train_dataloader_fake, src3_train_dataloader_real, \
54 | tgt_valid_dataloader = get_dataset(config.src1_data, config.src1_train_num_frames,
55 | config.src2_data, config.src2_train_num_frames,
56 | config.src3_data, config.src3_train_num_frames,
57 | config.tgt_data, config.tgt_test_num_frames, config.batch_size)
58 |
59 | best_model_ACC = 0.0
60 | best_model_HTER = 1.0
61 | best_model_ACER = 1.0
62 | best_model_AUC = 0.0
63 |     # 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold
64 | valid_args = [np.inf, 0, 0, 0, 0, 0, 0, 0]
65 |
66 | loss_classifier = AverageMeter()
67 |     classifier_top1 = AverageMeter()
68 |
69 | opts = get_training_arguments(config_path='./../../configs/mobilevit_s.yaml')
70 | net = get_model(opts).to(device)
71 | net2 = get_model(opts).to(device)
72 |
73 | state_dict = torch.load('./../../pretrained_model/mobilevit_s.pt')
74 | del state_dict['classifier.fc.weight']
75 | del state_dict['classifier.fc.bias']
76 | net.load_state_dict(state_dict, strict=False)
77 | net2.load_state_dict(state_dict, strict=False)
78 |
79 | writer = SummaryWriter('./logs/runs')
80 | log = Logger()
81 | log.open(config.logs + config.tgt_data + '_log.txt', mode='a')
82 | log.write("\n----------------------------------------------- [START %s] %s\n\n" % (
83 | datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '-' * 51))
84 |
85 | log.write('** start training target model! **\n')
86 | log.write(
87 | '--------|------------- VALID -------------|--- classifier ---|------ Current Best ------|--------------|\n')
88 | log.write(
89 | ' iter | loss top-1 HTER AUC | loss top-1 | top-1 HTER AUC | time |\n')
90 | log.write(
91 | '-------------------------------------------------------------------------------------------------------|\n')
92 | start = timer()
93 | criterion = {
94 | 'softmax': nn.CrossEntropyLoss(label_smoothing=0.1).cuda(),
95 | 'l1': nn.L1Loss().cuda(),
96 |         'lsr_hard': SmoothCrossEntropy(0.5),
97 |         'lsr_easy': SmoothCrossEntropy(1.0)
98 | }
99 | optimizer_dict = [
100 | {"params": filter(lambda p: p.requires_grad, net.parameters()), "lr": config.init_lr},
101 | ]
102 | optimizer_dict2 = [
103 | {"params": filter(lambda p: p.requires_grad, net2.parameters()), "lr": config.init_lr},
104 | ]
105 |
106 | optimizer = optim.Adam(optimizer_dict, lr=config.init_lr, weight_decay=config.weight_decay)
107 | optimizer2 = optim.Adam(optimizer_dict2, lr=config.init_lr, weight_decay=config.weight_decay)
108 | init_param_lr = []
109 | for param_group in optimizer.param_groups:
110 | init_param_lr.append(param_group["lr"])
111 |
112 | iter_per_epoch = 10
113 |
114 | src1_train_iter_real = iter(src1_train_dataloader_real)
115 | src1_iter_per_epoch_real = len(src1_train_iter_real)
116 | src2_train_iter_real = iter(src2_train_dataloader_real)
117 | src2_iter_per_epoch_real = len(src2_train_iter_real)
118 | src3_train_iter_real = iter(src3_train_dataloader_real)
119 | src3_iter_per_epoch_real = len(src3_train_iter_real)
120 |
121 | src1_train_iter_fake = iter(src1_train_dataloader_fake)
122 | src1_iter_per_epoch_fake = len(src1_train_iter_fake)
123 | src2_train_iter_fake = iter(src2_train_dataloader_fake)
124 | src2_iter_per_epoch_fake = len(src2_train_iter_fake)
125 | src3_train_iter_fake = iter(src3_train_dataloader_fake)
126 | src3_iter_per_epoch_fake = len(src3_train_iter_fake)
127 |
128 | max_iter = config.max_iter
129 | epoch = 1
130 |     if len(config.gpus) > 1:
131 | net = torch.nn.DataParallel(net).cuda()
132 |         net2 = torch.nn.DataParallel(net2).cuda()
133 |
134 | for iter_num in range(max_iter+1):
135 | if (iter_num % src1_iter_per_epoch_real == 0):
136 | src1_train_iter_real = iter(src1_train_dataloader_real)
137 | if (iter_num % src2_iter_per_epoch_real == 0):
138 | src2_train_iter_real = iter(src2_train_dataloader_real)
139 | if (iter_num % src3_iter_per_epoch_real == 0):
140 | src3_train_iter_real = iter(src3_train_dataloader_real)
141 |
142 | if (iter_num % src1_iter_per_epoch_fake == 0):
143 | src1_train_iter_fake = iter(src1_train_dataloader_fake)
144 | if (iter_num % src2_iter_per_epoch_fake == 0):
145 | src2_train_iter_fake = iter(src2_train_dataloader_fake)
146 | if (iter_num % src3_iter_per_epoch_fake == 0):
147 | src3_train_iter_fake = iter(src3_train_dataloader_fake)
148 |
149 | if (iter_num != 0 and iter_num % iter_per_epoch == 0):
150 | epoch = epoch + 1
151 | param_lr_tmp = []
152 | for param_group in optimizer.param_groups:
153 | param_lr_tmp.append(param_group["lr"])
154 |
155 | net.train(True)
156 | optimizer.zero_grad()
157 |
158 | ######### data prepare #########
159 |         src1_img_real, src1_label_real = next(src1_train_iter_real)
160 | src1_img_real = src1_img_real.cuda()
161 | src1_label_real = src1_label_real.cuda()
162 |
163 |         src2_img_real, src2_label_real = next(src2_train_iter_real)
164 | src2_img_real = src2_img_real.cuda()
165 | src2_label_real = src2_label_real.cuda()
166 |
167 |         src3_img_real, src3_label_real = next(src3_train_iter_real)
168 | src3_img_real = src3_img_real.cuda()
169 | src3_label_real = src3_label_real.cuda()
170 |
171 |         src1_img_fake, src1_label_fake = next(src1_train_iter_fake)
172 | src1_img_fake = src1_img_fake.cuda()
173 | src1_label_fake = src1_label_fake.cuda()
174 |
175 |
176 |         src2_img_fake, src2_label_fake = next(src2_train_iter_fake)
177 | src2_img_fake = src2_img_fake.cuda()
178 | src2_label_fake = src2_label_fake.cuda()
179 |
180 |
181 |         src3_img_fake, src3_label_fake = next(src3_train_iter_fake)
182 | src3_img_fake = src3_img_fake.cuda()
183 | src3_label_fake = src3_label_fake.cuda()
184 |
185 |
186 | input_data = torch.cat([src1_img_real, src1_img_fake, src2_img_real, src2_img_fake, src3_img_real, src3_img_fake], dim=0)
187 |
188 | source_label = torch.cat([src1_label_real, src1_label_fake,
189 | src2_label_real, src2_label_fake,
190 | src3_label_real, src3_label_fake,
191 | ], dim=0)
192 |
193 | ######### forward #########
194 | ######### Copycat train #########
195 | bsz = source_label.size(0)
196 | net.train(False)
197 | net2.train(True) # Copycat Model
198 | optimizer2.zero_grad()
199 | classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
200 | classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
201 |
202 |         pullloss1 = criterion["l1"](x11.reshape(bsz, -1), x21.reshape(bsz, -1))
203 |         pullloss2 = criterion["l1"](x12.reshape(bsz, -1), x22.reshape(bsz, -1))
204 | cls_loss = criterion["softmax"](classifier_label_out2.narrow(0, 0, input_data.size(0)), source_label)
205 |
206 | pullloss = (pullloss1 + pullloss2) / 2
207 |
208 | cls_loss = cls_loss + pullloss
209 | cls_loss.backward()
210 | optimizer2.step()
211 |
212 |
213 | ######## MainModel train ########
214 | net.train(True)
215 | net2.train(False) # Copycat Model
216 | optimizer.zero_grad()
217 | classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
218 | classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
219 |
220 |
221 |         out21 = net(input_data, x1=x21)
222 |         out22 = net(input_data, x2=x22)
223 |         out23 = net(input_data, x3=x23)
224 |
225 | klu0 = criterion["lsr_hard"](out21, source_label)
226 | klu1 = criterion["lsr_hard"](out22, source_label)
227 | klu2 = criterion["lsr_easy"](out23, source_label)
228 | klu = (klu0 + klu1 + klu2) / 3
229 |
230 | # features_dim = 20*640*8*8
231 | real_features = net.extract_features(input_data[source_label == 1])
232 |
233 | l1_loss = criterion["l1"](real_features, torch.zeros_like(real_features))
234 |
235 | ######### cross-entropy loss #########
236 | cls_loss = criterion["softmax"](classifier_label_out.narrow(0, 0, input_data.size(0)), source_label)
237 |
238 | ######### backward #########
239 | total_loss = cls_loss + l1_loss + 0.1 * klu
240 | total_loss.backward()
241 | optimizer.step()
242 | optimizer.zero_grad()
243 |
244 | loss_classifier.update(cls_loss.item())
245 | acc = accuracy(classifier_label_out.narrow(0, 0, input_data.size(0)), source_label, topk=(1,))
246 |         classifier_top1.update(acc[0])
247 | print('\r', end='', flush=True)
248 | print(
249 | ' %4.1f | %5.3f %6.3f %6.3f %6.3f | %6.3f %6.3f | %6.3f %6.3f %6.3f | %s'
250 | % (
251 | (iter_num+1) / iter_per_epoch,
252 | valid_args[0], valid_args[1], valid_args[3] * 100, valid_args[4] * 100,
253 |                 loss_classifier.avg, classifier_top1.avg,
254 | float(best_model_ACC), float(best_model_HTER * 100), float(best_model_AUC * 100),
255 | time_to_str(timer() - start, 'min'))
256 | , end='', flush=True)
257 |
258 | if (iter_num != 0 and (iter_num+1) % iter_per_epoch == 0):
259 | train_loss = loss_classifier.avg
260 |             train_acc = classifier_top1.avg
261 | # 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold
262 | valid_args = eval(tgt_valid_dataloader, net)
263 |             # select the best model according to AUC
264 | is_best = valid_args[4] > best_model_AUC
265 | best_model_AUC = max(valid_args[4], best_model_AUC)
266 | threshold = valid_args[5]
267 | if is_best:
268 | best_model_ACC = valid_args[1]
269 | best_model_HTER = valid_args[3]
270 |
271 | save_list = [epoch, valid_args, best_model_HTER, best_model_ACC, best_model_ACER, threshold]
272 | save_checkpoint(save_list, is_best, net, config.gpus, config.checkpoint_path, config.best_model_path)
273 | print('\r', end='', flush=True)
274 | log.write(
275 | ' %4.1f | %5.3f %6.3f %6.3f %6.3f | %6.3f %6.3f | %6.3f %6.3f %6.3f | %s %s'
276 | % (
277 | (iter_num+1) / iter_per_epoch,
278 | valid_args[0], valid_args[1], valid_args[3] * 100, valid_args[4] * 100,
280 |                     loss_classifier.avg, classifier_top1.avg,
280 | float(best_model_ACC), float(best_model_HTER * 100), float(best_model_AUC * 100),
281 | time_to_str(timer() - start, 'min'),
282 | param_lr_tmp[0]))
283 | log.write('\n')
284 | writer.add_scalars(f'Accuracy', {'train': train_acc[0].detach(),
285 | 'valid': valid_args[1][0].detach()}, (iter_num + 1) // iter_per_epoch)
286 | writer.add_scalars(f'Loss', {'train': train_loss,
287 | 'valid': valid_args[0]}, (iter_num + 1) // iter_per_epoch)
288 | writer.add_scalar(f'AUC', valid_args[4], (iter_num + 1) // iter_per_epoch)
289 | writer.add_scalar(f'HTER', valid_args[3], (iter_num + 1) // iter_per_epoch)
290 | time.sleep(0.01)
291 |
292 | if __name__ == '__main__':
293 | train()
294 |
295 |
296 |
--------------------------------------------------------------------------------
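Note on the criterion dictionary in train.py: SmoothCrossEntropy(alpha) trains against the smoothed target distribution (1 - alpha) * one_hot(y) + alpha / num_classes, so 'lsr_hard' (alpha=0.5) mixes the label with a uniform prior, while 'lsr_easy' (alpha=1.0) is a purely uniform target that ignores the label. For alpha < 1 this is the same quantity PyTorch computes via the label_smoothing argument; a minimal self-contained sanity check (toy logits and labels, illustrative only):

import torch
import torch.nn as nn

torch.manual_seed(0)
logits = torch.randn(4, 3)           # toy batch: 4 samples, 3 classes
labels = torch.tensor([0, 2, 1, 0])
alpha = 0.5

# Smoothed targets, built exactly as SmoothCrossEntropy builds them
num_classes = logits.shape[-1]
target_probs = nn.functional.one_hot(labels, num_classes).float() * (1.0 - alpha) + alpha / num_classes
loss = -(target_probs * torch.log_softmax(logits, dim=-1)).sum(dim=-1).mean()

# Agrees with PyTorch's built-in label smoothing
ref = nn.CrossEntropyLoss(label_smoothing=alpha)(logits, labels)
assert torch.allclose(loss, ref, atol=1e-6)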
/experiment/m/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/experiment/m/utils/__init__.py
--------------------------------------------------------------------------------
/experiment/m/utils/checkpoint_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import os
7 | import torch
8 | from typing import Optional, Union
9 | import math
10 | import glob
11 |
12 | from cvnets import EMA
13 | from optim import BaseOptim
14 | from utils import logger
15 | from utils.ddp_utils import is_master
16 | from utils.download_utils import get_local_path
17 |
18 | CHECKPOINT_EXTN = "pt"
19 |
20 |
21 | def get_model_state_dict(model):
22 | if isinstance(model, EMA):
23 | return get_model_state_dict(model.ema_model)
24 | else:
25 | return (
26 | model.module.state_dict()
27 | if hasattr(model, "module")
28 | else model.state_dict()
29 | )
30 |
31 |
32 | def load_state_dict(model, state_dict):
33 | if hasattr(model, "module"):
34 | model.module.load_state_dict(state_dict)
35 | else:
36 | model.load_state_dict(state_dict)
37 | return model
38 |
39 |
40 | def average_ckpts(ckpt_loc_list: list):
41 | avg_state_dict = dict()
42 | key_count = dict()
43 | key_dtype = dict()
44 |
45 | for c in ckpt_loc_list:
46 | if not os.path.isfile(c):
47 |             continue
48 | ckpt_state_dict = torch.load(c, map_location="cpu")
49 |
50 | for k, v in ckpt_state_dict.items():
51 | if k not in avg_state_dict:
52 | key_dtype[k] = v.dtype
53 | avg_state_dict[k] = v.clone().to(dtype=torch.float64)
54 | key_count[k] = 1
55 | else:
56 | avg_state_dict[k] += v.to(dtype=torch.float64)
57 | key_count[k] += 1
58 |
59 | for k, v in avg_state_dict.items():
60 | avg_state_dict[k] = v.div(key_count[k]).to(dtype=key_dtype[k])
61 | return avg_state_dict
62 |
63 |
64 | def avg_n_save_k_checkpoints(
65 | model_state, best_metric, k_best_checkpoints, max_ckpt_metric, ckpt_str
66 | ):
67 | try:
68 | ckpt_fname = "{}_score_{:.4f}.{}".format(ckpt_str, best_metric, CHECKPOINT_EXTN)
69 | torch.save(model_state, ckpt_fname)
70 |
71 | best_fnames = glob.glob("{}_score_*".format(ckpt_str))
72 | best_scores = [
73 | float(f.split("_score_")[-1].replace(".{}".format(CHECKPOINT_EXTN), ""))
74 | for f in best_fnames
75 | ]
76 |
77 | best_scores_keep = []
78 | if len(best_scores) > k_best_checkpoints:
79 | best_scores = sorted(best_scores)
80 | if not max_ckpt_metric:
81 | best_scores = best_scores[::-1]
82 | best_scores_keep = best_scores[-k_best_checkpoints:]
83 | for k in best_scores:
84 | if k in best_scores_keep:
85 | continue
86 | rm_ckpt = "{}_score_{:.4f}.{}".format(ckpt_str, k, CHECKPOINT_EXTN)
87 | os.remove(rm_ckpt)
88 | logger.log("Deleting checkpoint: {}".format(rm_ckpt))
89 | #
90 | if len(best_scores_keep) > 1:
91 | avg_fnames = [
92 | "{}_score_{:.4f}.{}".format(ckpt_str, k, CHECKPOINT_EXTN)
93 | for k in best_scores_keep
94 | ]
95 | logger.log(
96 | "Averaging checkpoints: {}".format(
97 | [f.split("/")[-1] for f in avg_fnames]
98 | )
99 | )
100 | # save the average model
101 | avg_model_state = average_ckpts(ckpt_loc_list=avg_fnames)
102 | ckpt_fname = "{}_avg.{}".format(ckpt_str, CHECKPOINT_EXTN)
103 | if avg_model_state:
104 | torch.save(avg_model_state, ckpt_fname)
105 | logger.log("Averaged checkpoint saved at: {}".format(ckpt_fname))
106 | except Exception as e:
107 | logger.log("Error in k-best-checkpoint")
108 | print(e)
109 |
110 |
111 | def save_checkpoint(
112 | iterations: int,
113 | epoch: int,
114 | model: torch.nn.Module,
115 | optimizer: Union[BaseOptim, torch.optim.Optimizer],
116 | best_metric: float,
117 | is_best: bool,
118 | save_dir: str,
119 | gradient_scalar: torch.cuda.amp.GradScaler,
120 | model_ema: Optional[torch.nn.Module] = None,
121 | is_ema_best: Optional[bool] = False,
122 | ema_best_metric: Optional[float] = None,
123 | max_ckpt_metric: Optional[bool] = False,
124 | k_best_checkpoints: Optional[int] = -1,
125 | save_all_checkpoints: Optional[bool] = False,
126 | *args,
127 | **kwargs
128 | ) -> None:
129 | model_state = get_model_state_dict(model)
130 | checkpoint = {
131 | "iterations": iterations,
132 | "epoch": epoch,
133 | "model_state_dict": model_state,
134 | "optim_state_dict": optimizer.state_dict(),
135 | "best_metric": best_metric,
136 | "gradient_scalar_state_dict": gradient_scalar.state_dict(),
137 | }
138 | ckpt_str = "{}/checkpoint".format(save_dir)
139 |
140 | if is_best:
141 | best_model_fname = "{}_best.{}".format(ckpt_str, CHECKPOINT_EXTN)
142 | if os.path.isfile(best_model_fname):
143 | os.remove(best_model_fname)
144 | torch.save(model_state, best_model_fname)
145 | logger.log(
146 | "Best checkpoint with score {:.2f} saved at {}".format(
147 | best_metric, best_model_fname
148 | )
149 | )
150 |
151 | if k_best_checkpoints > 1:
152 | avg_n_save_k_checkpoints(
153 | model_state, best_metric, k_best_checkpoints, max_ckpt_metric, ckpt_str
154 | )
155 |
156 | if model_ema is not None:
157 | checkpoint["ema_state_dict"] = get_model_state_dict(model_ema)
158 | ema_fname = "{}_ema.{}".format(ckpt_str, CHECKPOINT_EXTN)
159 | torch.save(checkpoint["ema_state_dict"], ema_fname)
160 | if save_all_checkpoints:
161 | ema_fname = "{}_ema_epoch{}.{}".format(ckpt_str, epoch, CHECKPOINT_EXTN)
162 | torch.save(checkpoint["ema_state_dict"], ema_fname)
163 |
164 | if is_ema_best:
165 | ema_best_fname = "{}_ema_best.{}".format(ckpt_str, CHECKPOINT_EXTN)
166 | if os.path.isfile(ema_best_fname):
167 | os.remove(ema_best_fname)
168 | torch.save(checkpoint["ema_state_dict"], ema_best_fname)
169 | logger.log(
170 | "Best EMA checkpoint with score {:.2f} saved at {}".format(
171 | ema_best_metric, ema_best_fname
172 | )
173 | )
174 |
175 | if k_best_checkpoints > 1 and ema_best_metric is not None:
176 | avg_n_save_k_checkpoints(
177 | model_state=checkpoint["ema_state_dict"],
178 | best_metric=ema_best_metric,
179 | k_best_checkpoints=k_best_checkpoints,
180 | max_ckpt_metric=max_ckpt_metric,
181 | ckpt_str="{}_ema".format(ckpt_str),
182 | )
183 |
184 | ckpt_fname = "{}.{}".format(ckpt_str, CHECKPOINT_EXTN)
185 | torch.save(checkpoint, ckpt_fname)
186 |
187 | ckpt_fname = "{}_last.{}".format(ckpt_str, CHECKPOINT_EXTN)
188 | torch.save(model_state, ckpt_fname)
189 |
190 | if save_all_checkpoints:
191 | ckpt_fname = "{}_epoch{}.{}".format(ckpt_str, epoch, CHECKPOINT_EXTN)
192 | torch.save(model_state, ckpt_fname)
193 |
194 |
195 | def load_checkpoint(
196 | opts,
197 | model: torch.nn.Module,
198 | optimizer: Union[BaseOptim, torch.optim.Optimizer],
199 | gradient_scalar: torch.cuda.amp.GradScaler,
200 | model_ema: Optional[torch.nn.Module] = None,
201 | ):
202 | resume_loc = getattr(opts, "common.resume", None)
203 | dev_id = getattr(opts, "dev.device_id", None)
204 | device = getattr(opts, "dev.device", torch.device("cpu"))
205 | start_epoch = start_iteration = 0
206 | best_metric = (
207 | 0.0 if getattr(opts, "stats.checkpoint_metric_max", False) else math.inf
208 | )
209 | auto_resume = getattr(opts, "common.auto_resume", False)
210 | exp_dir = getattr(opts, "common.exp_loc", None)
211 | is_master_node = is_master(opts)
212 | if resume_loc is None and auto_resume and exp_dir is not None:
213 | resume_loc = "{}/checkpoint.{}".format(exp_dir, CHECKPOINT_EXTN)
214 |
215 | resume_loc = get_local_path(opts, path=resume_loc)
216 | if resume_loc is not None and os.path.isfile(resume_loc):
217 | if dev_id is None:
218 | checkpoint = torch.load(resume_loc, map_location=device)
219 | else:
220 | checkpoint = torch.load(resume_loc, map_location="cuda:{}".format(dev_id))
221 |
222 | start_epoch = checkpoint["epoch"] + 1
223 | start_iteration = checkpoint["iterations"] + 1
224 | best_metric = checkpoint["best_metric"]
225 |
226 | model = load_state_dict(model, checkpoint["model_state_dict"])
227 | optimizer.load_state_dict(checkpoint["optim_state_dict"])
228 | gradient_scalar.load_state_dict(checkpoint["gradient_scalar_state_dict"])
229 |
230 | if model_ema is not None and "ema_state_dict" in checkpoint:
231 | model_ema.ema_model = load_state_dict(
232 | model_ema.ema_model, checkpoint["ema_state_dict"]
233 | )
234 |
235 | if is_master_node:
236 | logger.log("Loaded checkpoint from {}".format(resume_loc))
237 | logger.log("Resuming training for epoch {}".format(start_epoch))
238 | else:
239 | if is_master_node:
240 | logger.log("No checkpoint found at '{}'".format(resume_loc))
241 | return (
242 | model,
243 | optimizer,
244 | gradient_scalar,
245 | start_epoch,
246 | start_iteration,
247 | best_metric,
248 | model_ema,
249 | )
250 |
251 |
252 | def load_model_state(opts, model, model_ema=None):
253 | dev_id = getattr(opts, "dev.device_id", None)
254 | device = getattr(opts, "dev.device", torch.device("cpu"))
255 | finetune_loc = getattr(opts, "common.finetune_imagenet1k", None)
256 | finetune_ema_loc = getattr(opts, "common.finetune_ema", None)
257 |
258 | def load_state(path):
259 | path = get_local_path(opts, path=path)
260 | if dev_id is None:
261 | model_state = torch.load(path, map_location=device)
262 | else:
263 | model_state = torch.load(path, map_location="cuda:{}".format(dev_id))
264 | return model_state
265 |
266 | if finetune_loc is not None and os.path.isfile(finetune_loc):
267 | # load model dict
268 | model = load_state_dict(model, load_state(finetune_loc))
269 |
270 | # load ema dict
271 | if model_ema is not None and os.path.isfile(finetune_ema_loc):
272 | model_ema = load_state_dict(model, load_state(finetune_ema_loc))
273 |
274 | return model, model_ema
275 |
276 |
277 | def copy_weights(
278 | model_src: torch.nn.Module, model_tgt: torch.nn.Module
279 | ) -> torch.nn.Module:
280 | with torch.no_grad():
281 | model_state = get_model_state_dict(model=model_src)
282 | return load_state_dict(model=model_tgt, state_dict=model_state)
283 |
--------------------------------------------------------------------------------
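average_ckpts accumulates every tensor in float64, divides by the number of checkpoints that contained the key, and casts back to the original dtype, which keeps the k-best averaging in avg_n_save_k_checkpoints numerically stable even for fp16 weights. A toy usage, assuming average_ckpts above is in scope (paths are illustrative):

import torch

# Two toy "checkpoints", saved the same way save_checkpoint stores model_state
torch.save({"w": torch.tensor([1.0, 2.0])}, "/tmp/ckpt_a.pt")
torch.save({"w": torch.tensor([3.0, 4.0])}, "/tmp/ckpt_b.pt")

avg = average_ckpts(ckpt_loc_list=["/tmp/ckpt_a.pt", "/tmp/ckpt_b.pt"])
print(avg["w"])  # tensor([2., 3.]) -- element-wise mean, cast back to float32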
/experiment/m/utils/color_map.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import numpy as np
7 | from typing import Optional, List
8 |
9 |
10 | class Colormap(object):
11 | """
12 | Generate colormap for visualizing segmentation masks or bounding boxes.
13 |
14 |     This is based on the MATLAB code in the PASCAL VOC repository:
15 | http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
16 | """
17 |
18 | def __init__(self, n: Optional[int] = 256, normalized: Optional[bool] = False):
19 | super(Colormap, self).__init__()
20 | self.n = n
21 | self.normalized = normalized
22 |
23 | @staticmethod
24 | def get_bit_at_idx(val, idx):
25 | return (val & (1 << idx)) != 0
26 |
27 | def get_color_map(self) -> np.ndarray:
28 |
29 | dtype = "float32" if self.normalized else "uint8"
30 | color_map = np.zeros((self.n, 3), dtype=dtype)
31 | for i in range(self.n):
32 | r = g = b = 0
33 | c = i
34 | for j in range(8):
35 | r = r | (self.get_bit_at_idx(c, 0) << 7 - j)
36 | g = g | (self.get_bit_at_idx(c, 1) << 7 - j)
37 | b = b | (self.get_bit_at_idx(c, 2) << 7 - j)
38 | c = c >> 3
39 |
40 | color_map[i] = np.array([r, g, b])
41 | color_map = color_map / 255 if self.normalized else color_map
42 | return color_map
43 |
44 | def get_box_color_codes(self) -> List:
45 | box_codes = []
46 |
47 | for i in range(self.n):
48 | r = g = b = 0
49 | c = i
50 | for j in range(8):
51 | r = r | (self.get_bit_at_idx(c, 0) << 7 - j)
52 | g = g | (self.get_bit_at_idx(c, 1) << 7 - j)
53 | b = b | (self.get_bit_at_idx(c, 2) << 7 - j)
54 | c = c >> 3
55 | box_codes.append((int(r), int(g), int(b)))
56 | return box_codes
57 |
58 | def get_color_map_list(self) -> List:
59 | cmap = self.get_color_map()
60 | cmap = np.asarray(cmap).flatten()
61 | return list(cmap)
62 |
--------------------------------------------------------------------------------
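The bit manipulation in get_color_map reproduces the standard PASCAL VOC palette: over eight rounds, the three lowest bits of the (repeatedly right-shifted) label index are pushed into successively lower bits of R, G and B. For reference, the first few entries, assuming the Colormap class above:

cmap = Colormap(n=4).get_color_map()
print(cmap)
# [[  0   0   0]    index 0: background (black)
#  [128   0   0]    index 1: maroon
#  [  0 128   0]    index 2: green
#  [128 128   0]]   index 3: olive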
/experiment/m/utils/common_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import random
7 | import torch
8 | import numpy as np
9 | import os
10 | from typing import Union, Dict, Optional, Tuple
11 | from torch import Tensor
12 | from sys import platform
13 |
14 | from utils import logger
15 | from utils.ddp_utils import is_master
16 | from cvnets.layers import norm_layers_tuple
17 |
18 |
19 | def check_compatibility():
20 | ver = torch.__version__.split(".")
21 | major_version = int(ver[0])
22 |     minor_version = int(ver[1])
23 |
24 |     if major_version < 1 or (major_version == 1 and minor_version < 7):
25 | logger.error(
26 | "Min pytorch version required is 1.7.0. Got: {}".format(".".join(ver))
27 | )
28 |
29 |
30 | def check_frozen_norm_layer(model: torch.nn.Module) -> (bool, int):
31 |
32 | if hasattr(model, "module"):
33 | model = model.module
34 |
35 | count_norm = 0
36 | frozen_state = False
37 | for m in model.modules():
38 | if isinstance(m, norm_layers_tuple):
39 |             frozen_state = m.weight.requires_grad
40 |             count_norm += 1
41 | return frozen_state, count_norm
42 |
43 |
44 | def device_setup(opts):
45 | random_seed = getattr(opts, "common.seed", 0)
46 | random.seed(random_seed)
47 | torch.manual_seed(random_seed)
48 | np.random.seed(random_seed)
49 |
50 | is_master_node = is_master(opts)
51 | if is_master_node:
52 | logger.log("Random seeds are set to {}".format(random_seed))
53 | logger.log("Using PyTorch version {}".format(torch.__version__))
54 |
55 | n_gpus = torch.cuda.device_count()
56 | if n_gpus == 0:
57 | if is_master_node:
58 | logger.warning("No GPUs available. Using CPU")
59 | device = torch.device("cpu")
60 | n_gpus = 0
61 | else:
62 | if is_master_node:
63 | logger.log("Available GPUs: {}".format(n_gpus))
64 | device = torch.device("cuda")
65 |
66 | if torch.backends.cudnn.is_available():
67 | import torch.backends.cudnn as cudnn
68 |
69 | torch.backends.cudnn.enabled = True
70 | cudnn.benchmark = False
71 | cudnn.deterministic = True
72 | if is_master_node:
73 | logger.log("CUDNN is enabled")
74 |
75 | setattr(opts, "dev.device", device)
76 | setattr(opts, "dev.num_gpus", n_gpus)
77 |
78 | return opts
79 |
80 |
81 | def create_directories(dir_path: str, is_master_node: bool) -> None:
82 | if not os.path.isdir(dir_path):
83 | os.makedirs(dir_path)
84 | if is_master_node:
85 | logger.log("Directory created at: {}".format(dir_path))
86 | else:
87 | if is_master_node:
88 | logger.log("Directory exists at: {}".format(dir_path))
89 |
90 |
91 | def move_to_device(
92 | opts,
93 | x: Union[Dict, Tensor],
94 | device: Optional[str] = "cpu",
95 | non_blocking: Optional[bool] = True,
96 | *args,
97 | **kwargs
98 | ) -> Union[Dict, Tensor]:
99 |
100 | # if getattr(opts, "dataset.decode_data_on_gpu", False):
101 | # # data is already on GPU
102 | # return x
103 |
104 | if isinstance(x, Dict):
105 |         # return as-is if the data is already on the GPU
106 | if "on_gpu" in x and x["on_gpu"]:
107 | return x
108 |
109 | for k, v in x.items():
110 | if isinstance(v, Dict):
111 | x[k] = move_to_device(opts=opts, x=v, device=device)
112 | elif isinstance(v, Tensor):
113 | x[k] = v.to(device=device, non_blocking=non_blocking)
114 |
115 | elif isinstance(x, Tensor):
116 | x = x.to(device=device, non_blocking=non_blocking)
117 | else:
118 | logger.error(
119 | "Inputs of type Tensor or Dict of Tensors are only supported right now"
120 | )
121 | return x
122 |
123 |
124 | def is_coreml_conversion(opts) -> bool:
125 | coreml_convert = getattr(opts, "common.enable_coreml_compatible_module", False)
126 | if coreml_convert:
127 | return True
128 | return False
129 |
--------------------------------------------------------------------------------
/experiment/m/utils/ddp_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 |
7 | import socket
8 | import torch
9 | import torch.distributed as dist
10 | from utils import logger
11 |
12 |
13 | def is_master(opts) -> bool:
14 | node_rank = getattr(opts, "ddp.rank", 0)
15 | return node_rank == 0
16 |
17 |
18 | def dist_barrier():
19 | dist.barrier()
20 |
21 |
22 | def is_start_rank_node(opts) -> bool:
23 | node_rank = getattr(opts, "ddp.rank", 0)
24 | def_rank = getattr(opts, "ddp.start_rank", 0)
25 | return node_rank == def_rank
26 |
27 |
28 | def distributed_init(opts) -> int:
29 | ddp_url = getattr(opts, "ddp.dist_url", None)
30 | is_master_node = is_master(opts)
31 | if ddp_url is None:
32 | ddp_port = getattr(opts, "ddp.dist_port", 6006)
33 | hostname = socket.gethostname()
34 | ddp_url = "tcp://{}:{}".format(hostname, ddp_port)
35 | setattr(opts, "ddp.dist_url", ddp_url)
36 |
37 | node_rank = getattr(opts, "ddp.rank", 0)
38 | world_size = getattr(opts, "ddp.world_size", 0)
39 | if torch.distributed.is_initialized():
40 | logger.warning("DDP is already initialized and cannot be initialize twice!")
41 | else:
42 | logger.info("distributed init (rank {}): {}".format(node_rank, ddp_url))
43 |
44 | dist_backend = getattr(opts, "ddp.backend", "nccl") # "gloo"
45 |
46 | if dist_backend is None and dist.is_nccl_available():
47 | dist_backend = "nccl"
48 | if is_master_node:
49 | logger.log(
50 | "Using NCCL as distributed backend with version={}".format(
51 | torch.cuda.nccl.version()
52 | )
53 | )
54 | elif dist_backend is None:
55 | dist_backend = "gloo"
56 |
57 | dist.init_process_group(
58 | backend=dist_backend,
59 | init_method=ddp_url,
60 | world_size=world_size,
61 | rank=node_rank,
62 | )
63 |
64 | # perform a dummy all-reduce to initialize the NCCL communicator
65 | if torch.cuda.is_available():
66 | dist.all_reduce(torch.zeros(1).cuda())
67 |
68 | node_rank = torch.distributed.get_rank()
69 | setattr(opts, "ddp.rank", node_rank)
70 | return node_rank
71 |
--------------------------------------------------------------------------------
/experiment/m/utils/download_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import os
7 | import copy
8 | import torch.distributed as dist
9 | from urllib import request
10 |
11 | from common import TMP_CACHE_LOC
12 | from utils.ddp_utils import is_start_rank_node
13 |
14 |
15 | try:
16 | from utils_internal.blobby_utils import get_local_path_blobby
17 |
18 | def get_local_path(opts, path, recursive=False, *args, **kwargs):
19 | """
20 | If File is in S3, download to /tmp and then return the local path. Otherwise, don't do anything
21 | """
22 | return get_local_path_blobby(opts=opts, path=path, recursive=recursive)
23 |
24 |
25 | except ModuleNotFoundError as mnfe:
26 |
27 | def get_local_path(opts, path, *args, **kwargs):
28 | """
29 | If File name is a URL, download to TMP_CACHE_LOC and then return the local path. Otherwise, don't do anything
30 | """
31 | if (
32 | path.find("s3://") > -1
33 | or path.find("http://") > -1
34 | or path.find("https://") > -1
35 | ):
36 | url_path = copy.deepcopy(path)
37 | ckpt_name = path.split(os.sep)[-1]
38 | local_path = "{}/{}".format(TMP_CACHE_LOC, ckpt_name)
39 | local_path = str(local_path).strip()
40 |
41 | if os.path.isfile(local_path) and is_start_rank_node(opts):
42 | # If file exists, remove it and then download again
43 | # This is important because if we are downloading from bolt tasks, then checkpoint names are the same
44 | os.remove(local_path)
45 |
46 | if not os.path.isfile(local_path) and is_start_rank_node(opts):
47 | request.urlretrieve(url_path, local_path)
48 |
49 | if getattr(opts, "ddp.use_distributed", False):
50 |             # synchronize across processes
51 | dist.barrier()
52 | return local_path
53 | return path
54 |
--------------------------------------------------------------------------------
/experiment/m/utils/logger.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import time
7 | from typing import Optional
8 | import sys
9 | import os
10 |
11 | text_colors = {
12 | "logs": "\033[34m", # 033 is the escape code and 34 is the color code
13 | "info": "\033[32m",
14 | "warning": "\033[33m",
15 | "debug": "\033[93m",
16 | "error": "\033[31m",
17 | "bold": "\033[1m",
18 | "end_color": "\033[0m",
19 | "light_red": "\033[36m",
20 | }
21 |
22 |
23 | def get_curr_time_stamp() -> str:
24 | return time.strftime("%Y-%m-%d %H:%M:%S")
25 |
26 |
27 | def error(message: str) -> None:
28 | time_stamp = get_curr_time_stamp()
29 | error_str = (
30 | text_colors["error"]
31 | + text_colors["bold"]
32 | + "ERROR "
33 | + text_colors["end_color"]
34 | )
35 | print("{} - {} - {}".format(time_stamp, error_str, message), flush=True)
36 | print("{} - {} - {}".format(time_stamp, error_str, "Exiting!!!"), flush=True)
37 | exit(-1)
38 |
39 |
40 | def color_text(in_text: str) -> str:
41 | return text_colors["light_red"] + in_text + text_colors["end_color"]
42 |
43 |
44 | def log(message: str) -> None:
45 | time_stamp = get_curr_time_stamp()
46 | log_str = (
47 | text_colors["logs"] + text_colors["bold"] + "LOGS " + text_colors["end_color"]
48 | )
49 | print("{} - {} - {}".format(time_stamp, log_str, message))
50 |
51 |
52 | def warning(message: str) -> None:
53 | time_stamp = get_curr_time_stamp()
54 | warn_str = (
55 | text_colors["warning"]
56 | + text_colors["bold"]
57 | + "WARNING"
58 | + text_colors["end_color"]
59 | )
60 | print("{} - {} - {}".format(time_stamp, warn_str, message))
61 |
62 |
63 | def info(message: str, print_line: Optional[bool] = False) -> None:
64 | time_stamp = get_curr_time_stamp()
65 | info_str = (
66 | text_colors["info"] + text_colors["bold"] + "INFO " + text_colors["end_color"]
67 | )
68 | print("{} - {} - {}".format(time_stamp, info_str, message))
69 | if print_line:
70 | double_dash_line(dashes=150)
71 |
72 |
73 | def debug(message: str) -> None:
74 | time_stamp = get_curr_time_stamp()
75 | log_str = (
76 | text_colors["debug"]
77 | + text_colors["bold"]
78 | + "DEBUG "
79 | + text_colors["end_color"]
80 | )
81 | print("{} - {} - {}".format(time_stamp, log_str, message))
82 |
83 |
84 | def double_dash_line(dashes: Optional[int] = 75) -> None:
85 | print(text_colors["error"] + "=" * dashes + text_colors["end_color"])
86 |
87 |
88 | def singe_dash_line(dashes: Optional[int] = 67) -> None:
89 | print("-" * dashes)
90 |
91 |
92 | def print_header(header: str) -> None:
93 | double_dash_line()
94 | print(
95 | text_colors["info"]
96 | + text_colors["bold"]
97 | + "=" * 50
98 | + str(header)
99 | + text_colors["end_color"]
100 | )
101 | double_dash_line()
102 |
103 |
104 | def print_header_minor(header: str) -> None:
105 | print(
106 | text_colors["warning"]
107 | + text_colors["bold"]
108 | + "=" * 25
109 | + str(header)
110 | + text_colors["end_color"]
111 | )
112 |
113 |
114 | def disable_printing():
115 | sys.stdout = open(os.devnull, "w")
116 |
117 |
118 | def enable_printing():
119 | sys.stdout = sys.__stdout__
120 |
--------------------------------------------------------------------------------
/experiment/m/utils/math_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | from typing import Union, Optional
7 |
8 |
9 | def make_divisible(
10 | v: Union[float, int],
11 | divisor: Optional[int] = 8,
12 | min_value: Optional[Union[float, int]] = None,
13 | ) -> Union[float, int]:
14 | """
15 | This function is taken from the original tf repo.
16 | It ensures that all layers have a channel number that is divisible by 8
17 | It can be seen here:
18 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
19 | :param v:
20 | :param divisor:
21 | :param min_value:
22 | :return:
23 | """
24 | if min_value is None:
25 | min_value = divisor
26 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
27 | # Make sure that round down does not go down by more than 10%.
28 | if new_v < 0.9 * v:
29 | new_v += divisor
30 | return new_v
31 |
32 |
33 | def bound_fn(
34 | min_val: Union[float, int], max_val: Union[float, int], value: Union[float, int]
35 | ) -> Union[float, int]:
36 | return max(min_val, min(max_val, value))
37 |
--------------------------------------------------------------------------------
/experiment/m/utils/pytorch_to_coreml.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import torch
7 | from torch import Tensor
8 | import coremltools as ct
9 | from typing import Optional, Dict, Tuple, Union
10 | import numpy as np
11 | from PIL import Image
12 | from torchvision.transforms import functional as F
13 |
14 |
15 | from utils.tensor_utils import create_rand_tensor
16 | from torch.utils.mobile_optimizer import optimize_for_mobile
17 |
18 |
19 | def convert_pytorch_to_coreml(
20 | opts,
21 | pytorch_model: torch.nn.Module,
22 | jit_model_only: Optional[bool] = False,
23 | *args,
24 | **kwargs
25 | ) -> Dict:
26 | """
27 | Convert Pytorch model to CoreML
28 |
29 | :param opts: Arguments
30 | :param pytorch_model: Pytorch model that needs to be converted to JIT or CoreML
31 |     :param jit_model_only: If True, skip the CoreML conversion and return only the traced JIT model
32 | :return: CoreML model or package
33 | """
34 |
35 | input_image_path = getattr(opts, "conversion.input_image_path", None)
36 | if input_image_path is not None:
37 | input_pil_img = Image.open(input_image_path).convert("RGB")
38 | input_pil_img = F.resize(
39 | img=input_pil_img, size=256, interpolation=F.InterpolationMode.BILINEAR
40 | )
41 | input_pil_img = F.center_crop(img=input_pil_img, output_size=224)
42 | input_tensor = F.pil_to_tensor(input_pil_img).float()
43 | input_tensor.div_(255.0)
44 | input_tensor = input_tensor.unsqueeze(0) # add dummy batch dimension
45 | else:
46 | input_pil_img = None
47 | input_tensor = create_rand_tensor(opts=opts, device="cpu")
48 |
49 | if pytorch_model.training:
50 | pytorch_model.eval()
51 |
52 | with torch.no_grad():
53 | pytorch_out = pytorch_model(input_tensor)
54 |
55 | jit_model = torch.jit.trace(pytorch_model, input_tensor)
56 | jit_out = jit_model(input_tensor)
57 | assertion_check(py_out=pytorch_out, jit_out=jit_out)
58 |
59 | jit_model_optimized = optimize_for_mobile(jit_model)
60 |     jit_optimized_out = jit_model_optimized(input_tensor)
61 |     assertion_check(py_out=pytorch_out, jit_out=jit_optimized_out)
62 |
63 | if jit_model_only and torch.cuda.device_count() > 0:
64 | # For inference on GPU
65 | return {"coreml": None, "jit": jit_model, "jit_optimized": None}
66 | elif jit_model_only and torch.cuda.device_count() == 0:
67 | # For inference on CPU
68 | return {"coreml": None, "jit": jit_model_optimized, "jit_optimized": None}
69 |
70 | coreml_model = ct.convert(
71 | model=jit_model,
72 | inputs=[
73 | ct.ImageType(name="input", shape=input_tensor.shape, scale=1.0 / 255.0)
74 | ],
75 | convert_to="neuralnetwork",
76 | # preprocessing_args={"scale": 1.0/255.0},
77 | # minimum_deployment_target=ct.target.iOS15,
78 | # compute_precision=ct.precision.FLOAT16
79 | )
80 |
81 | if input_pil_img is not None:
82 | out = coreml_model.predict({"input": input_pil_img})
83 |
84 | return {
85 | "coreml": coreml_model,
86 | "jit": jit_model,
87 | "jit_optimized": jit_model_optimized,
88 | }
89 |
90 |
91 | def assertion_check(
92 | py_out: Union[Tensor, Dict, Tuple], jit_out: Union[Tensor, Dict, Tuple]
93 | ) -> None:
94 | if isinstance(py_out, Dict):
95 | assert isinstance(jit_out, Dict)
96 | keys = py_out.keys()
97 | for k in keys:
98 | np.testing.assert_almost_equal(
99 | py_out[k].cpu().numpy(),
100 | jit_out[k].cpu().numpy(),
101 | decimal=3,
102 | verbose=True,
103 | )
104 | elif isinstance(py_out, Tensor):
105 | assert isinstance(jit_out, Tensor)
106 | np.testing.assert_almost_equal(
107 | py_out.cpu().numpy(), jit_out.cpu().numpy(), decimal=3, verbose=True
108 | )
109 | elif isinstance(py_out, Tuple):
110 | assert isinstance(jit_out, Tuple)
111 | for x, y in zip(py_out, jit_out):
112 | np.testing.assert_almost_equal(
113 | x.cpu().numpy(), y.cpu().numpy(), decimal=3, verbose=True
114 | )
115 |
116 | else:
117 | raise NotImplementedError(
118 | "Only Dictionary[Tensors] or Tuple[Tensors] or Tensors are supported as outputs"
119 | )
120 |
--------------------------------------------------------------------------------
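For reference, a minimal usage sketch of convert_pytorch_to_coreml. get_training_arguments comes from /option.py; build_model is a hypothetical stand-in for whatever factory constructs the MobileViT classifier in this repo, and the output paths are examples:

import torch
from option import get_training_arguments

opts = get_training_arguments(config_path='./configs/mobilevit_s.yaml')
model = build_model(opts)  # hypothetical helper; substitute the repo's model constructor

results = convert_pytorch_to_coreml(opts=opts, pytorch_model=model)
results['coreml'].save('mobilevit_s.mlmodel')        # coremltools MLModel.save
torch.jit.save(results['jit'], 'mobilevit_s.jit.pt')  # plain traced TorchScript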
/experiment/m/utils/tensor_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | import numpy as np
7 | import torch
8 | from torch import Tensor
9 | from torch import distributed as dist
10 | from typing import Union, Optional, Tuple
11 |
12 | from common import (
13 | DEFAULT_IMAGE_HEIGHT,
14 | DEFAULT_IMAGE_WIDTH,
15 | DEFAULT_IMAGE_CHANNELS,
16 | DEFAULT_VIDEO_FRAMES,
17 | )
18 | from .ddp_utils import dist_barrier
19 |
20 |
21 | def image_size_from_opts(opts) -> Tuple[int, int]:
22 | try:
23 | sampler_name = getattr(opts, "sampler.name", "variable_batch_sampler").lower()
24 | if sampler_name.find("var") > -1:
25 | im_w = getattr(opts, "sampler.vbs.crop_size_width", DEFAULT_IMAGE_WIDTH)
26 | im_h = getattr(opts, "sampler.vbs.crop_size_height", DEFAULT_IMAGE_HEIGHT)
27 | elif sampler_name.find("multi") > -1:
28 | im_w = getattr(opts, "sampler.msc.crop_size_width", DEFAULT_IMAGE_WIDTH)
29 | im_h = getattr(opts, "sampler.msc.crop_size_height", DEFAULT_IMAGE_HEIGHT)
30 | else:
31 | im_w = getattr(opts, "sampler.bs.crop_size_width", DEFAULT_IMAGE_WIDTH)
32 | im_h = getattr(opts, "sampler.bs.crop_size_height", DEFAULT_IMAGE_HEIGHT)
33 | except Exception as e:
34 | im_h = DEFAULT_IMAGE_HEIGHT
35 | im_w = DEFAULT_IMAGE_WIDTH
36 | return im_h, im_w
37 |
38 |
39 | def video_size_from_opts(opts) -> Tuple[int, int, int]:
40 | try:
41 | sampler_name = getattr(opts, "sampler.name", "video_batch_sampler").lower()
42 | if sampler_name.find("var") > -1:
43 | im_w = getattr(opts, "sampler.vbs.crop_size_width", DEFAULT_IMAGE_WIDTH)
44 | im_h = getattr(opts, "sampler.vbs.crop_size_height", DEFAULT_IMAGE_HEIGHT)
45 | n_frames = getattr(
46 | opts, "sampler.vbs.num_frames_per_clip", DEFAULT_VIDEO_FRAMES
47 | )
48 | else:
49 | im_w = getattr(opts, "sampler.bs.crop_size_width", DEFAULT_IMAGE_WIDTH)
50 | im_h = getattr(opts, "sampler.bs.crop_size_height", DEFAULT_IMAGE_HEIGHT)
51 | n_frames = getattr(
52 | opts, "sampler.bs.num_frames_per_clip", DEFAULT_VIDEO_FRAMES
53 | )
54 | except Exception as e:
55 | im_h = DEFAULT_IMAGE_HEIGHT
56 | im_w = DEFAULT_IMAGE_WIDTH
57 | n_frames = DEFAULT_VIDEO_FRAMES
58 | return im_h, im_w, n_frames
59 |
60 |
61 | def create_rand_tensor(
62 | opts, device: Optional[str] = "cpu", batch_size: Optional[int] = 1
63 | ) -> Tensor:
64 | sampler = getattr(opts, "sampler.name", "batch_sampler")
65 | if sampler.lower().find("video") > -1:
66 | video_stack = getattr(opts, "video_reader.frame_stack_format", "channel_first")
67 | im_h, im_w, n_frames = video_size_from_opts(opts=opts)
68 | if video_stack == "channel_first":
69 | inp_tensor = torch.randint(
70 | low=0,
71 | high=255,
72 | size=(batch_size, DEFAULT_IMAGE_CHANNELS, n_frames, im_h, im_w),
73 | device=device,
74 | )
75 | else:
76 | inp_tensor = torch.randint(
77 | low=0,
78 | high=255,
79 | size=(batch_size, n_frames, DEFAULT_IMAGE_CHANNELS, im_h, im_w),
80 | device=device,
81 | )
82 | else:
83 | im_h, im_w = image_size_from_opts(opts=opts)
84 | inp_tensor = torch.randint(
85 | low=0,
86 | high=255,
87 | size=(batch_size, DEFAULT_IMAGE_CHANNELS, im_h, im_w),
88 | device=device,
89 | )
90 | inp_tensor = inp_tensor.float().div(255.0)
91 | return inp_tensor
92 |
93 |
94 | def reduce_tensor(inp_tensor: torch.Tensor) -> torch.Tensor:
95 | size = dist.get_world_size() if dist.is_initialized() else 1
96 | inp_tensor_clone = inp_tensor.clone().detach()
97 | # dist_barrier()
98 | dist.all_reduce(inp_tensor_clone, op=dist.ReduceOp.SUM)
99 | inp_tensor_clone /= size
100 | return inp_tensor_clone
101 |
102 |
103 | def reduce_tensor_sum(inp_tensor: torch.Tensor) -> torch.Tensor:
104 | inp_tensor_clone = inp_tensor.clone().detach()
105 | # dist_barrier()
106 | dist.all_reduce(inp_tensor_clone, op=dist.ReduceOp.SUM)
107 | return inp_tensor_clone
108 |
109 |
110 | def all_gather_list(data):
111 | world_size = dist.get_world_size()
112 | data_list = [None] * world_size
113 | # dist_barrier()
114 | dist.all_gather_object(data_list, data)
115 | return data_list
116 |
117 |
118 | def tensor_to_python_float(
119 | inp_tensor: Union[int, float, torch.Tensor], is_distributed: bool
120 | ) -> Union[int, float, np.ndarray]:
121 | if is_distributed and isinstance(inp_tensor, torch.Tensor):
122 | inp_tensor = reduce_tensor(inp_tensor=inp_tensor)
123 |
124 | if isinstance(inp_tensor, torch.Tensor) and inp_tensor.numel() > 1:
125 | # For IOU, we get a C-dimensional tensor (C - number of classes)
126 | # so, we convert here to a numpy array
127 | return inp_tensor.cpu().numpy()
128 | elif hasattr(inp_tensor, "item"):
129 | return inp_tensor.item()
130 | elif isinstance(inp_tensor, (int, float)):
131 | return inp_tensor * 1.0
132 | else:
133 | raise NotImplementedError(
134 | "The data type is not supported yet in tensor_to_python_float function"
135 | )
136 |
137 |
138 | def to_numpy(img_tensor: torch.Tensor) -> np.ndarray:
139 | # [0, 1] --> [0, 255]
140 | img_tensor = torch.mul(img_tensor, 255.0)
141 | # BCHW --> BHWC
142 | img_tensor = img_tensor.permute(0, 2, 3, 1)
143 |
144 | img_np = img_tensor.byte().cpu().numpy()
145 | return img_np
146 |
--------------------------------------------------------------------------------
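Since the option object stores values under dotted attribute names (as /option.py does via argparse), create_rand_tensor can be exercised standalone with a SimpleNamespace; a small sketch, assuming DEFAULT_IMAGE_CHANNELS == 3 in common:

from types import SimpleNamespace

opts = SimpleNamespace()
setattr(opts, 'sampler.name', 'variable_batch_sampler')
setattr(opts, 'sampler.vbs.crop_size_width', 256)
setattr(opts, 'sampler.vbs.crop_size_height', 256)

x = create_rand_tensor(opts, device='cpu')
print(x.shape)  # torch.Size([1, 3, 256, 256]) if DEFAULT_IMAGE_CHANNELS == 3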
/experiment/m/utils/visualization_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # For licensing see accompanying LICENSE file.
3 | # Copyright (C) 2022 Apple Inc. All Rights Reserved.
4 | #
5 |
6 | from torch import Tensor
7 | import cv2
8 | import numpy as np
9 | import copy
10 | from typing import Optional, List
11 |
12 | from utils.color_map import Colormap
13 | from utils import logger
14 |
15 | FONT_SIZE = cv2.FONT_HERSHEY_PLAIN
16 | LABEL_COLOR = [255, 255, 255]
17 | TEXT_THICKNESS = 1
18 | RECT_BORDER_THICKNESS = 2
19 |
20 |
21 | def visualize_boxes_xyxy(image: np.ndarray, boxes: np.ndarray) -> np.ndarray:
22 | """Utility function to draw bounding boxes of objects on a given image"""
23 | boxes = boxes.astype(int)  # np.int was removed in NumPy 1.24
24 |
25 | new_image = copy.deepcopy(image)
26 | for box_idx in range(boxes.shape[0]):
27 | coords = boxes[box_idx]
28 | r, g, b = 255, 0, 0
29 | # top-left corner
30 | start_coord = (coords[0], coords[1])
31 | # bottom-right corner
32 | end_coord = (coords[2], coords[3])
33 | cv2.rectangle(new_image, end_coord, start_coord, (r, g, b), thickness=1)
34 | return new_image
35 |
36 |
37 | def draw_bounding_boxes(
38 | image: np.ndarray,
39 | boxes: np.ndarray,
40 | labels: np.ndarray,
41 | scores: np.ndarray,
42 | color_map: Optional[List] = None,
43 | object_names: Optional[List] = None,
44 | is_bgr_format: Optional[bool] = False,
45 | save_path: Optional[str] = None,
46 | ) -> None:
47 | """Utility function to draw bounding boxes of objects along with their labels and score on a given image"""
48 | boxes = boxes.astype(int)  # np.int was removed in NumPy 1.24
49 |
50 | if is_bgr_format:
51 | # convert from BGR to RGB colorspace
52 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
53 |
54 | if color_map is None:
55 | color_map = Colormap().get_box_color_codes()
56 |
57 | for label, score, coords in zip(labels, scores, boxes):
58 | r, g, b = color_map[label]
59 | c1 = (coords[0], coords[1])
60 | c2 = (coords[2], coords[3])
61 |
62 | cv2.rectangle(image, c1, c2, (r, g, b), thickness=RECT_BORDER_THICKNESS)
63 | if object_names is not None:
64 | label_text = "{label}: {score:.2f}".format(
65 | label=object_names[label], score=score
66 | )
67 | t_size = cv2.getTextSize(label_text, FONT_SIZE, 1, TEXT_THICKNESS)[0]
68 | new_c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
69 | cv2.rectangle(image, c1, new_c2, (r, g, b), -1)
70 | cv2.putText(
71 | image,
72 | label_text,
73 | (c1[0], c1[1] + t_size[1] + 4),
74 | FONT_SIZE,
75 | 1,
76 | LABEL_COLOR,
77 | TEXT_THICKNESS,
78 | )
79 |
80 | if save_path is not None:
81 | cv2.imwrite(save_path, image)
82 | logger.log("Detection results stored at: {}".format(save_path))
83 | return image
84 |
85 |
86 | def convert_to_cityscape_format(img: Tensor) -> Tensor:
87 | """Utility to map predicted segmentation labels to cityscapes format"""
88 | img[img == 19] = 255
89 | img[img == 18] = 33
90 | img[img == 17] = 32
91 | img[img == 16] = 31
92 | img[img == 15] = 28
93 | img[img == 14] = 27
94 | img[img == 13] = 26
95 | img[img == 12] = 25
96 | img[img == 11] = 24
97 | img[img == 10] = 23
98 | img[img == 9] = 22
99 | img[img == 8] = 21
100 | img[img == 7] = 20
101 | img[img == 6] = 19
102 | img[img == 5] = 17
103 | img[img == 4] = 13
104 | img[img == 3] = 12
105 | img[img == 2] = 11
106 | img[img == 1] = 8
107 | img[img == 0] = 7
108 | img[img == 255] = 0
109 | return img
110 |
--------------------------------------------------------------------------------
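The chain of in-place assignments in convert_to_cityscape_format is order-sensitive; an equivalent and arguably clearer formulation is a lookup table. A sketch implementing the same mapping (including train ID 19 ending up at the 'unlabeled' ID 0), assuming all input values lie in [0, 19]:

import torch
from torch import Tensor

# index = train ID (0-19), value = Cityscapes label ID
TRAIN_ID_TO_LABEL_ID = torch.tensor(
    [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 0]
)

def convert_to_cityscape_format_lut(img: Tensor) -> Tensor:
    # vectorized remap; no ordering concerns since the input is read only once
    return TRAIN_ID_TO_LABEL_ID[img.long()]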
/option.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from options.utils import load_config_file
3 | from cvnets import arguments_model, arguments_nn_layers
4 | from typing import Optional
5 |
6 | def get_training_arguments(parse_args=True, config_path=None):
7 | parser = argparse.ArgumentParser()
8 | parser = arguments_nn_layers(parser=parser)
9 | parser = arguments_model(parser=parser)
10 | parser.add_argument('--common.config-file', type=str, default='./../../configs/mobilevit_xs.yaml')
11 | parser.add_argument('--dataset.category', type=str, default='classification')
12 | if parse_args:
13 | if config_path:
14 | opts = parser.parse_args(['--common.config-file', config_path])
15 | else:
16 | opts = parser.parse_args()
17 | opts = load_config_file(opts)
18 | return opts
19 | else:
20 | return parser
21 |
--------------------------------------------------------------------------------
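A short sketch of the two ways get_training_arguments is meant to be called; the config path below is one of the files shipped under /configs:

# parse with an explicit config file
opts = get_training_arguments(config_path='./configs/mobilevit_s.yaml')
print(getattr(opts, 'dataset.category'))  # 'classification' unless the yaml overrides it

# or retrieve the bare parser to compose further arguments
parser = get_training_arguments(parse_args=False)
parser.add_argument('--my.extra-flag', action='store_true')  # illustrative extra flag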
/pretrained_model/mobilevit_s.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AI-Application-and-Integration-Lab/DGUA_FAS/4a33c6f85c9fe40e647754768474281e6684bebc/pretrained_model/mobilevit_s.pt
--------------------------------------------------------------------------------
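A hedged sketch of pulling these weights into a model; the checkpoint's exact layout is an assumption (a bare state_dict versus a wrapper dict with a 'state_dict' key), so both cases are handled:

import torch

ckpt = torch.load('pretrained_model/mobilevit_s.pt', map_location='cpu')
state_dict = ckpt.get('state_dict', ckpt) if isinstance(ckpt, dict) else ckpt
# model.load_state_dict(state_dict)  # model construction omitted here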
/util/dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torchvision import transforms as T
3 | from torch.utils.data import Dataset
4 | from PIL import Image
5 |
6 | class YunpeiDataset(Dataset):
7 | def __init__(self, data_pd, transforms=None, train=True):
8 | self.train = train
9 | self.photo_path = data_pd['photo_path'].tolist()
10 | self.photo_label = data_pd['photo_label'].tolist()
11 | self.photo_belong_to_video_ID = data_pd['photo_belong_to_video_ID'].tolist()
12 | if transforms is None:
13 | if not train:
14 | self.transforms = T.Compose([
15 | T.ToTensor(),
16 | T.Normalize(mean=[0.485, 0.456, 0.406],
17 | std=[0.229, 0.224, 0.225])
18 | ])
19 | else:
20 | self.transforms = T.Compose([
21 | T.RandomHorizontalFlip(),
22 | T.ToTensor(),
23 | T.Normalize(mean=[0.485, 0.456, 0.406],
24 | std=[0.229, 0.224, 0.225])
25 | ])
26 | else:
27 | self.transforms = transforms
28 |
29 | def __len__(self):
30 | return len(self.photo_path)
31 |
32 | def __getitem__(self, item):
33 | if self.train:
34 | img_path = self.photo_path[item]
35 | label = self.photo_label[item]
36 | img = Image.open(img_path).convert('RGB').resize((256, 256))  # force 3 channels so Normalize's per-channel stats apply
37 | img = self.transforms(img)
38 | return img, label
39 | else:
40 | img_path = self.photo_path[item]
41 | label = self.photo_label[item]
42 | videoID = self.photo_belong_to_video_ID[item]
43 | img = Image.open(img_path).convert('RGB').resize((256, 256))  # force 3 channels so Normalize's per-channel stats apply
44 | img = self.transforms(img)
45 | return img, label, videoID
46 |
--------------------------------------------------------------------------------
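YunpeiDataset expects a pandas DataFrame with the three columns referenced above; a minimal self-contained sketch (the image path is a placeholder, and the 1 = real / 0 = fake convention follows the flags used in util/get_loader.py):

import pandas as pd
from torch.utils.data import DataLoader

data_pd = pd.DataFrame({
    'photo_path': ['/path/to/frames/000.png'],  # placeholder path
    'photo_label': [1],                         # 1 = real, 0 = fake
    'photo_belong_to_video_ID': [0],
})
test_loader = DataLoader(YunpeiDataset(data_pd, train=False), batch_size=32, shuffle=False)
# each test batch yields (img, label, videoID)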
/util/evaluate.py:
--------------------------------------------------------------------------------
1 | from util.utils import AverageMeter, accuracy
2 | from util.statistic import get_EER_states, get_HTER_at_thr, calculate, calculate_threshold
3 | from sklearn.metrics import roc_auc_score
4 | from torch.autograd import Variable
5 | import torch
6 | import torch.nn as nn
7 | from torch.nn import functional as F
8 | import numpy as np
9 |
10 | def eval(valid_dataloader, model):
11 | criterion = nn.CrossEntropyLoss()
12 | valid_losses = AverageMeter()
13 | valid_top1 = AverageMeter()
14 | prob_dict = {}
15 | label_dict = {}
16 | model.eval()
17 | output_dict_tmp = {}
18 | target_dict_tmp = {}
19 |
20 | with torch.no_grad():
21 | for batch_idx, (input, target, videoID) in enumerate(valid_dataloader):
22 | input = Variable(input).cuda()
23 | target = Variable(torch.from_numpy(np.array(target)).long()).cuda()
24 | cls_out = model(input)
25 | prob = F.softmax(cls_out, dim=1).cpu().data.numpy()[:, 1]
26 | label = target.cpu().data.numpy()
27 | videoID = videoID.cpu().data.numpy()
28 | for i in range(len(prob)):
29 | if(videoID[i] in prob_dict.keys()):
30 | prob_dict[videoID[i]].append(prob[i])
31 | label_dict[videoID[i]].append(label[i])
32 | output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))
33 | target_dict_tmp[videoID[i]].append(target[i].view(1))
34 | else:
35 | prob_dict[videoID[i]] = []
36 | label_dict[videoID[i]] = []
37 | prob_dict[videoID[i]].append(prob[i])
38 | label_dict[videoID[i]].append(label[i])
39 | output_dict_tmp[videoID[i]] = []
40 | target_dict_tmp[videoID[i]] = []
41 | output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))
42 | target_dict_tmp[videoID[i]].append(target[i].view(1))
43 | prob_list = []
44 | label_list = []
45 | na = []
46 | for key in prob_dict.keys():
47 | avg_single_video_prob = sum(prob_dict[key]) / len(prob_dict[key])
48 | avg_single_video_label = sum(label_dict[key]) / len(label_dict[key])
49 | prob_list = np.append(prob_list, avg_single_video_prob)
50 | label_list = np.append(label_list, avg_single_video_label)
51 | # compute loss and acc for every video
52 | avg_single_video_output = sum(output_dict_tmp[key]) / len(output_dict_tmp[key])
53 | avg_single_video_target = (sum(target_dict_tmp[key]) / len(target_dict_tmp[key])).long()
54 | loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0))
55 | acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,))
58 | valid_losses.update(loss.item())
59 | valid_top1.update(acc_valid[0])
60 |
61 | if avg_single_video_label == 2:
62 | na += [avg_single_video_prob]
63 |
64 | label_list = np.where(np.array(label_list) == 1, np.ones_like(label_list), np.zeros_like(label_list))
65 | auc_score = roc_auc_score(label_list, prob_list)
66 | cur_EER_valid, threshold, _, _ = get_EER_states(prob_list, label_list)
67 | ACC_threshold = calculate_threshold(prob_list, label_list, threshold)
68 | cur_HTER_valid = get_HTER_at_thr(prob_list, label_list, threshold)
69 |
70 | na_acc = torch.mean((torch.tensor(na) < threshold).type(torch.float)).item()
71 | return [valid_losses.avg, valid_top1.avg, cur_EER_valid, cur_HTER_valid, auc_score, threshold, ACC_threshold*100, na_acc]
72 |
73 |
--------------------------------------------------------------------------------
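The eval routine averages frame-level softmax scores into per-video scores before computing the metrics. A hedged calling sketch; the model and loader names are assumptions (the code above reshapes logits to 3 classes, and label 2 appears to mark an extra category whose accuracy is reported as na_acc):

# model: a 3-way classifier on GPU; tgt_dataloader: a test loader from get_loader.py
valid_args = eval(valid_dataloader=tgt_dataloader, model=model)
loss_avg, top1, EER, HTER, AUC, thr, acc_at_thr, na_acc = valid_args
print('EER %.4f | HTER %.4f | AUC %.4f @ thr %.4f' % (EER, HTER, AUC, thr))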
/util/get_loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import numpy as np
4 | import pandas as pd
5 | import torch
6 | from sklearn.model_selection import train_test_split
7 | from torch.utils.data import DataLoader
8 | from util.dataset import YunpeiDataset
9 | from util.utils import sample_frames
10 |
11 | def get_dataset(src1_data, src1_train_num_frames, src2_data, src2_train_num_frames, src3_data, src3_train_num_frames,
12 | tgt1_data, tgt_test_num_frames, batch_size):
13 | print('Load Source Data')
14 | print('Source Data: ', src1_data)
15 | src1_train_data_fake = sample_frames(flag=0, num_frames=src1_train_num_frames, dataset_name=src1_data)
16 | src1_train_data_real = sample_frames(flag=1, num_frames=src1_train_num_frames, dataset_name=src1_data)
17 | print('Source Data: ', src2_data)
18 | src2_train_data_fake = sample_frames(flag=0, num_frames=src2_train_num_frames, dataset_name=src2_data)
19 | src2_train_data_real = sample_frames(flag=1, num_frames=src2_train_num_frames, dataset_name=src2_data)
20 | print('Source Data: ', src3_data)
21 | src3_train_data_fake = sample_frames(flag=0, num_frames=src3_train_num_frames, dataset_name=src3_data)
22 | src3_train_data_real = sample_frames(flag=1, num_frames=src3_train_num_frames, dataset_name=src3_data)
23 |
24 |
25 | print('Load Target Data')
26 | print('Target Data: ', tgt1_data)
27 | tgt_test_data = sample_frames(flag=2, num_frames=tgt_test_num_frames, dataset_name=tgt1_data)
28 |
29 |
30 | src1_train_dataloader_fake = DataLoader(YunpeiDataset(src1_train_data_fake, train=True),
31 | batch_size=batch_size, shuffle=True)
32 | src1_train_dataloader_real = DataLoader(YunpeiDataset(src1_train_data_real, train=True),
33 | batch_size=batch_size, shuffle=True)
34 | src2_train_dataloader_fake = DataLoader(YunpeiDataset(src2_train_data_fake, train=True),
35 | batch_size=batch_size, shuffle=True)
36 | src2_train_dataloader_real = DataLoader(YunpeiDataset(src2_train_data_real, train=True),
37 | batch_size=batch_size, shuffle=True)
38 | src3_train_dataloader_fake = DataLoader(YunpeiDataset(src3_train_data_fake, train=True),
39 | batch_size=batch_size, shuffle=True)
40 | src3_train_dataloader_real = DataLoader(YunpeiDataset(src3_train_data_real, train=True),
41 | batch_size=batch_size, shuffle=True)
42 |
43 |
44 | tgt_dataloader = DataLoader(YunpeiDataset(tgt_test_data, train=False), batch_size=batch_size, shuffle=False)
45 | return src1_train_dataloader_fake, src1_train_dataloader_real, \
46 | src2_train_dataloader_fake, src2_train_dataloader_real, \
47 | src3_train_dataloader_fake, src3_train_dataloader_real, \
48 | tgt_dataloader
49 |
--------------------------------------------------------------------------------
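A calling sketch under the usual leave-one-dataset-out protocol; the dataset names are examples and must match the folders under data_label/:

loaders = get_dataset(
    src1_data='casia', src1_train_num_frames=1,
    src2_data='replay', src2_train_num_frames=1,
    src3_data='msu', src3_train_num_frames=1,
    tgt1_data='oulu', tgt_test_num_frames=2,
    batch_size=10,
)
(src1_fake, src1_real,
 src2_fake, src2_real,
 src3_fake, src3_real,
 tgt_loader) = loaders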
/util/statistic.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 |
4 | def eval_state(probs, labels, thr):
5 | predict = probs >= thr
6 | TN = np.sum((labels == 0) & (predict == False))
7 | FN = np.sum((labels == 1) & (predict == False))
8 | FP = np.sum((labels == 0) & (predict == True))
9 | TP = np.sum((labels == 1) & (predict == True))
10 | return TN, FN, FP, TP
11 |
12 | def calculate(probs, labels):
13 | TN, FN, FP, TP = eval_state(probs, labels, 0.5)
14 | APCER = 1.0 if (FP + TN == 0) else FP / float(FP + TN)
15 | NPCER = 1.0 if (FN + TP == 0) else FN / float(FN + TP)
16 | ACER = (APCER + NPCER) / 2.0
17 | ACC = (TP + TN) / labels.shape[0]
18 | return APCER, NPCER, ACER, ACC
19 |
20 | def calculate_threshold(probs, labels, threshold):
21 | TN, FN, FP, TP = eval_state(probs, labels, threshold)
22 | ACC = (TP + TN) / labels.shape[0]
23 | return ACC
24 |
25 | def get_threshold(probs, grid_density):
26 | thresholds = []
27 | for i in range(grid_density + 1):
28 | thresholds.append(0.0 + i * 1.0 / float(grid_density))
29 | thresholds.append(1.1)  # catch-all threshold above any probability
30 | return thresholds
32 |
33 | def get_EER_states(probs, labels, grid_density = 10000):
34 | thresholds = get_threshold(probs, grid_density)
35 | min_dist = 1.0
36 | min_dist_states = []
37 | FRR_list = []
38 | FAR_list = []
39 | for thr in thresholds:
40 | TN, FN, FP, TP = eval_state(probs, labels, thr)
41 | if(FN + TP == 0):
42 | FRR = TPR = 1.0
43 | FAR = FP / float(FP + TN)
44 | TNR = TN / float(TN + FP)
45 | elif(FP + TN == 0):
46 | TNR = FAR = 1.0
47 | FRR = FN / float(FN + TP)
48 | TPR = TP / float(TP + FN)
49 | else:
50 | FAR = FP / float(FP + TN)
51 | FRR = FN / float(FN + TP)
52 | TNR = TN / float(TN + FP)
53 | TPR = TP / float(TP + FN)
54 | dist = math.fabs(FRR - FAR)
55 | FAR_list.append(FAR)
56 | FRR_list.append(FRR)
57 | if dist <= min_dist:
58 | min_dist = dist
59 | min_dist_states = [FAR, FRR, thr]
60 | EER = (min_dist_states[0] + min_dist_states[1]) / 2.0
61 | thr = min_dist_states[2]
62 | return EER, thr, FRR_list, FAR_list
63 |
64 | def get_HTER_at_thr(probs, labels, thr):
65 | TN, FN, FP, TP = eval_state(probs, labels, thr)
66 | if (FN + TP == 0):
67 | FRR = 1.0
68 | FAR = FP / float(FP + TN)
69 | elif(FP + TN == 0):
70 | FAR = 1.0
71 | FRR = FN / float(FN + TP)
72 | else:
73 | FAR = FP / float(FP + TN)
74 | FRR = FN / float(FN + TP)
75 | HTER = (FAR + FRR) / 2.0
76 | return HTER
77 |
--------------------------------------------------------------------------------
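A tiny worked example of the helpers above on a perfectly separable toy set; since every threshold between 0.3 and 0.7 separates the classes, EER and HTER should both come out as 0.0:

import numpy as np

probs = np.array([0.2, 0.3, 0.7, 0.9])  # predicted probability of the positive (real) class
labels = np.array([0, 0, 1, 1])

EER, thr, FRR_list, FAR_list = get_EER_states(probs, labels)
HTER = get_HTER_at_thr(probs, labels, thr)
APCER, NPCER, ACER, ACC = calculate(probs, labels)
print(EER, HTER, ACC)  # expected: 0.0 0.0 1.0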
/util/utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import math
3 | import pandas as pd
4 | import torch
5 | import os
6 | import sys
7 | import shutil
8 |
9 | def adjust_learning_rate(optimizer, epoch, init_param_lr, lr_epoch_1, lr_epoch_2):
10 | i = 0
11 | for param_group in optimizer.param_groups:
12 | init_lr = init_param_lr[i]
13 | i += 1
14 | if(epoch <= lr_epoch_1):
15 | param_group['lr'] = init_lr * 0.1 ** 0
16 | elif(epoch <= lr_epoch_2):
17 | param_group['lr'] = init_lr * 0.1 ** 1
18 | else:
19 | param_group['lr'] = init_lr * 0.1 ** 2
20 |
21 | import matplotlib.pyplot as plt  # deferred import: only draw_roc needs matplotlib
22 | def draw_roc(frr_list, far_list, roc_auc):
23 | plt.switch_backend('agg')
24 | plt.rcParams['figure.figsize'] = (6.0, 6.0)
25 | plt.title('ROC')
26 | for i, d in enumerate(frr_list):
27 | frr_list[i] = 1.0 - d
28 | plt.plot(far_list, frr_list, 'b', label='AUC = %0.4f' % roc_auc)
29 | plt.legend(loc='upper right')
30 | plt.plot([0, 1], [0, 1], 'r--')
31 | plt.grid(ls='--')
32 | plt.ylabel('True Positive Rate')
33 | plt.xlabel('False Positive Rate')
34 | save_dir = './save_results/ROC/'
35 | if not os.path.exists(save_dir):
36 | os.makedirs(save_dir)
37 | plt.savefig('./save_results/ROC/ROC.png')
38 | file = open('./save_results/ROC/FAR_FRR.txt', 'w')
39 | save_json = []
40 | # note: frr_list was converted to TPR (1 - FRR) above for plotting
41 | roc_dict = {'FAR': far_list, 'FRR': frr_list}
42 | save_json.append(roc_dict)
43 | json.dump(save_json, file, indent=4)
44 | file.close()
45 |
46 | def sample_frames(flag, num_frames, dataset_name):
47 | '''
48 | Sample num_frames frames from every video for testing.
49 | return: the chosen frames' paths and labels
50 | '''
51 | # The process is a little cumbersome; feel free to adapt it to your own data layout
52 | root_path = '../../data_label/' + dataset_name
53 | if(flag == 0): # select the fake images
54 | label_path = root_path + '/fake_label.json'
55 | save_label_path = root_path + '/choose_fake_label.json'
56 | elif(flag == 1): # select the real images
57 | label_path = root_path + '/real_label.json'
58 | save_label_path = root_path + '/choose_real_label.json'
59 | else: # select all the real and fake images
60 | label_path = root_path + '/all_label.json'
61 | save_label_path = root_path + '/choose_all_label.json'
62 |
63 | all_label_json = json.load(open(label_path, 'r'))
64 | f_sample = open(save_label_path, 'w')
65 | length = len(all_label_json)
66 | # three components: frame_prefix, frame_num, png
67 | saved_frame_prefix = '/'.join(all_label_json[0]['photo_path'].split('/')[:-1])
68 | final_json = []
69 | video_number = 0
70 | single_video_frame_list = []
71 | single_video_frame_num = 0
72 | single_video_label = 0
73 | for i in range(length):
74 | photo_path = all_label_json[i]['photo_path']
75 | photo_label = all_label_json[i]['photo_label']
76 | frame_prefix = '/'.join(photo_path.split('/')[:-1])
77 | # the last frame
78 | if (i == length - 1):
79 | photo_frame = int(photo_path.split('/')[-1].split('.')[0])
80 | single_video_frame_list.append(photo_frame)
81 | single_video_frame_num += 1
82 | single_video_label = photo_label
83 | # a new video, so process the saved one
84 | if (frame_prefix != saved_frame_prefix or i == length - 1):
85 | # [1, 2, 3, 4,.....]
86 | single_video_frame_list.sort()
87 | frame_interval = math.floor(single_video_frame_num / num_frames)
88 | for j in range(num_frames):
89 | frame_dict = {}
90 | if dataset_name not in {'cefa', 'wmca_train', 'wmca_test'}:
91 | frame_dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[j * frame_interval]):03d}' + '.png'
92 | elif dataset_name == 'cefa':
93 | frame_dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[6 + j * frame_interval]):04d}' + '.jpg'
94 | else:
95 | frame_dict['photo_path'] = saved_frame_prefix + '/' + f'{(single_video_frame_list[j * frame_interval]):03d}' + '.jpg'
96 | frame_dict['photo_label'] = single_video_label
97 | frame_dict['photo_belong_to_video_ID'] = video_number
98 | final_json.append(frame_dict)
103 | video_number += 1
104 | saved_frame_prefix = frame_prefix
105 | single_video_frame_list.clear()
106 | single_video_frame_num = 0
107 | # get every frame information
108 | photo_frame = int(photo_path.split('/')[-1].split('.')[0])
109 | single_video_frame_list.append(photo_frame)
110 | single_video_frame_num += 1
111 | single_video_label = photo_label
112 | if(flag == 0):
113 | print("Total video number(fake): ", video_number, dataset_name)
114 | elif(flag == 1):
115 | print("Total video number(real): ", video_number, dataset_name)
116 | else:
117 | print("Total video number(target): ", video_number, dataset_name)
118 | json.dump(final_json, f_sample, indent=4)
119 | f_sample.close()
120 |
121 | f_json = open(save_label_path)
122 | sample_data_pd = pd.read_json(f_json)
123 | return sample_data_pd
124 |
125 | class AverageMeter(object):
126 | """Computes and stores the average and current value"""
127 | def __init__(self):
128 | self.reset()
129 |
130 | def reset(self):
131 | self.val = 0
132 | self.avg = 0
133 | self.sum = 0
134 | self.count = 0
135 |
136 | def update(self, val, n=1):
137 | self.val = val
138 | self.sum += val * n
139 | self.count += n
140 | self.avg = self.sum / self.count
141 |
142 | def accuracy(output, target, topk=(1,)):
143 | """Computes the accuracy over the k top predictions for the specified values of k"""
144 | with torch.no_grad():
145 | maxk = max(topk)
146 | batch_size = target.size(0)
147 |
148 | _, pred = output.topk(maxk, 1, True, True)
149 | pred = pred.t()
150 | correct = pred.eq(target.view(1, -1).expand_as(pred))
151 |
152 | res = []
153 | for k in topk:
154 | correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
155 | res.append(correct_k.mul_(100.0 / batch_size))
156 | return res
157 |
158 | def mkdirs(checkpoint_path, best_model_path, logs):
159 | if not os.path.exists(checkpoint_path):
160 | os.makedirs(checkpoint_path)
161 | if not os.path.exists(best_model_path):
162 | os.makedirs(best_model_path)
163 | if not os.path.exists(logs):
164 | os.mkdir(logs)
165 |
166 | def time_to_str(t, mode='min'):
167 | if mode=='min':
168 | t = int(t)/60
169 | hr = t//60
170 | min = t%60
171 | return '%2d hr %02d min'%(hr,min)
172 | elif mode=='sec':
173 | t = int(t)
174 | min = t//60
175 | sec = t%60
176 | return '%2d min %02d sec'%(min,sec)
177 | else:
178 | raise NotImplementedError
179 |
180 | class Logger(object):
181 | def __init__(self):
182 | self.terminal = sys.stdout
183 | self.file = None
184 |
185 | def open(self, file, mode=None):
186 | if mode is None:
187 | mode = 'w'
188 | self.file = open(file, mode)
189 | def write(self, message, is_terminal=1, is_file=1):
190 | if '\r' in message:
191 | is_file = 0
192 | if is_terminal == 1:
193 | self.terminal.write(message)
194 | self.terminal.flush()
195 | if is_file == 1:
196 | self.file.write(message)
197 | self.file.flush()
198 |
199 | def flush(self):
200 | # this flush method is needed for python 3 compatibility.
201 | # this handles the flush command by doing nothing.
202 | # you might want to specify some extra behavior here.
203 | pass
204 |
205 | def save_checkpoint(save_list, is_best, model, gpus, checkpoint_path, best_model_path, filename='_checkpoint.pth.tar'):
206 | epoch = save_list[0]
207 | valid_args = save_list[1]
208 | best_model_HTER = round(save_list[2], 5)
209 | best_model_ACC = save_list[3]
210 | best_model_ACER = save_list[4]
211 | threshold = save_list[5]
212 | if(len(gpus) > 1):
213 | old_state_dict = model.state_dict()
214 | from collections import OrderedDict
215 | new_state_dict = OrderedDict()
216 | for k, v in old_state_dict.items():
217 | flag = k.find('.module.')
218 | if (flag != -1):
219 | k = k.replace('.module.', '.')
220 | new_state_dict[k] = v
221 | state = {
222 | "epoch": epoch,
223 | "state_dict": new_state_dict,
224 | "valid_arg": valid_args,
225 | "best_model_EER": best_model_HTER,
226 | "best_model_ACER": best_model_ACER,
227 | "best_model_ACC": best_model_ACC,
228 | "threshold": threshold
229 | }
230 | else:
231 | state = {
232 | "epoch": epoch,
233 | "state_dict": model.state_dict(),
234 | "valid_arg": valid_args,
235 | "best_model_EER": best_model_HTER,
236 | "best_model_ACER": best_model_ACER,
237 | "best_model_ACC": best_model_ACC,
238 | "threshold": threshold
239 | }
240 | filepath = checkpoint_path + filename
241 | torch.save(state, filepath)
242 | # just save best model
243 | if is_best:
244 | # shutil.copy(filepath, best_model_path + 'model_best_' + str(best_model_HTER) + '_' + str(epoch) + '.pth.tar')
245 | shutil.copy(filepath, best_model_path + 'best_model.pth.tar')
246 |
247 | def zero_param_grad(params):
248 | for p in params:
249 | if p.grad is not None:
250 | p.grad.zero_()
251 |
--------------------------------------------------------------------------------
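To make the staged decay in adjust_learning_rate concrete, a small self-contained sketch (the epoch boundaries and learning rate are chosen for illustration):

import torch

params = [torch.zeros(1, requires_grad=True)]
optimizer = torch.optim.SGD(params, lr=0.01)

for epoch in (1, 10, 20):
    adjust_learning_rate(optimizer, epoch, init_param_lr=[0.01], lr_epoch_1=9, lr_epoch_2=19)
    print(epoch, optimizer.param_groups[0]['lr'])
# expected: 0.01 for epoch <= 9, then 0.001 up to epoch 19, then 0.0001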