├── .gitignore ├── README.md ├── figures └── framework.jpg ├── methods ├── TAST │ ├── CODE_OF_CONDUCT.md │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── Pipfile │ ├── Pipfile.lock │ ├── README.md │ ├── SECURITY.md │ ├── docker │ │ └── Dockerfile │ ├── domainbed │ │ ├── __init__.py │ │ ├── adapt_algorithms.py │ │ ├── algorithms.py │ │ ├── command_launchers.py │ │ ├── datasets.py │ │ ├── hparams_registry.py │ │ ├── lib │ │ │ ├── big_transfer.py │ │ │ ├── fast_data_loader.py │ │ │ ├── misc.py │ │ │ ├── mlp_mixer.py │ │ │ ├── query.py │ │ │ ├── reporting.py │ │ │ ├── vision_transformer.py │ │ │ └── wide_resnet.py │ │ ├── misc │ │ │ ├── domain_net_duplicates.txt │ │ │ ├── test_sweep_data │ │ │ │ ├── 0657090f9a83ff76efe083a104fde93a │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 06db52bd7fcbb8172f97f11a62015261 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 07ea1841921ad29c18ae52563b274925 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 0c53bbff83d887850721788187907586 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 0ec227d205744455c681614d9f55d841 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 0fe0ed57077c0c9291931a388ba21be2 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 1b0678ef843d122c17404ab8bd138523 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 1b424e4ac8bc11c9d3f36b1729e19547 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 24c1684361b7442877526ab118da7117 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 24cf797be205aaef612b14beefc4c1a3 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ 
│ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 2b696be39395e8830222b505f6aa45d8 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 2dd075c39b257eb019b4a8d813525113 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 3539ff8139b8f1797865a2f26e51c70f │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 371b3e2afe1e7a754e49b2324bf159b6 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 41b0ac2ee570d8ace449c34ada3fdd01 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 4a18a8be66b762f1ad5f45408bc27c78 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 4ccfd57ae38cfc8fd5fba4293614ab26 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 539c70bc47514b76736c480df7036b8b │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 63837f74bf4ac60044c74aa87114b386 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 66006bc6faa9f96db95a5bcfc3e4340a │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 66779ee52d1111eddfcc6dafa8ae983c │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 691f8b51c9f69b380113a6a2645392bb │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 6d481a40ca86768fad6a5088cb58458e │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 708942ac219532c45db7898ef9cfb955 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 728347e87d1c533379956bf94dca6fef │ │ 
│ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 7a6119601f2d7f4ce36e0d5d478332dd │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 85964cf17f520330ea56101aed9602e5 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 86394db2b6c2ecd1e3b08e99e14759f2 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 8cfbf830754065d02f9723c57abc992e │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 90961e3a45300a2d4771fc090627166e │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── 9f1d308cb3d13c7358eefd027ba1de04 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── bf09cd8e443d5445cc15b7503c14264d │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── bfce2823ee1c49ab624fde5c5e2c1143 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── c62625063d3aee2f08e5c908e7677e83 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── ca571be94ad9fdb0c2bece0061ff3f89 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── cf42c3176baf91b96bb7dd0ff3c686cc │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── d093618124c5748762707da1c6804d75 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── ea7d2d5149dd9167b364d433bb355be1 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ ├── ee8f05db2b9ae5a36273cc0d2161f8c0 │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ 
├── f61766414e6b0db40063d7bc4ecdaa2b │ │ │ │ │ ├── done │ │ │ │ │ ├── err.txt │ │ │ │ │ ├── out.txt │ │ │ │ │ └── results.jsonl │ │ │ │ └── results.txt │ │ │ ├── test_sweep_results.txt │ │ │ └── vlcs_files.txt │ │ ├── model_selection.py │ │ ├── modeling │ │ │ ├── losses.py │ │ │ ├── mixstyle.py │ │ │ ├── model_manager.py │ │ │ ├── nets.py │ │ │ ├── resnet.py │ │ │ └── tent.py │ │ ├── networks.py │ │ ├── results │ │ │ └── 2020_10_06_7df6f06 │ │ │ │ ├── results.png │ │ │ │ └── results.tex │ │ ├── scripts │ │ │ ├── collect_results.py │ │ │ ├── download.py │ │ │ ├── list_top_hparams.py │ │ │ ├── save_images.py │ │ │ ├── supervised_adaptation.py │ │ │ ├── sweep.py │ │ │ ├── train.py │ │ │ ├── unsup_adapt.py │ │ │ └── unsupervised_adaptation.py │ │ └── test │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── lib │ │ │ ├── __init__.py │ │ │ ├── test_misc.py │ │ │ └── test_query.py │ │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── test_collect_results.py │ │ │ ├── test_sweep.py │ │ │ └── test_train.py │ │ │ ├── test_datasets.py │ │ │ ├── test_hparams_registry.py │ │ │ ├── test_model_selection.py │ │ │ ├── test_models.py │ │ │ └── test_networks.py │ ├── scripts │ │ ├── hparam_search.sh │ │ ├── launch.sh │ │ ├── source.sh │ │ ├── sup.sh │ │ └── unsup.sh │ └── test_log │ │ ├── done │ │ ├── done_T3A │ │ ├── out.txt │ │ ├── out_T3A.txt │ │ ├── results.jsonl │ │ └── results_T3A.jsonl └── TSD │ ├── LICENSE │ ├── README.md │ └── code │ ├── adapt_algorithm.py │ ├── alg │ ├── alg.py │ ├── algs │ │ ├── ANDMask.py │ │ ├── CORAL.py │ │ ├── DANN.py │ │ ├── ERM.py │ │ ├── GDRNet.py │ │ ├── GroupDRO.py │ │ ├── MLDG.py │ │ ├── MMD.py │ │ ├── Mixup.py │ │ ├── RSC.py │ │ ├── VREx.py │ │ └── base.py │ ├── modelopera.py │ └── opt.py │ ├── datautil │ ├── getdataloader.py │ ├── imgdata │ │ ├── imgdataload.py │ │ └── util.py │ ├── mydataloader.py │ └── util.py │ ├── modeling │ ├── losses.py │ ├── mixstyle.py │ ├── model_manager.py │ ├── nets.py │ ├── resnet.py │ └── tent.py │ ├── network │ ├── 
Adver_network.py │ ├── common_network.py │ ├── img_network.py │ └── util.py │ ├── train.py │ ├── tta_output │ ├── err.txt │ └── out.txt │ ├── unsupervise_adapt.py │ └── utils │ └── util.py └── run ├── table_1_origin_DG_MFIDDR_DRTiD.sh ├── table_1_target_DG_DRTiD.sh ├── table_1_target_DG_MFIDDR.sh ├── table_1_target_DG_MFIDDR_quarter.sh ├── table_1_target_DG_MFIDDR_quarter_patient_level.sh ├── table_2_tta_compare_TENT_Adacontrast_ours.sh ├── table_3_ablation_study.sh ├── table_4.sh ├── table_s1_target_ESDG_DRTiD.sh └── table_s1_target_ESDG_MFIDDR.sh /.gitignore: -------------------------------------------------------------------------------- 1 | datasets/ 2 | **/*.cpython-311* 3 | **/*.pyc 4 | **/*.tar 5 | **/*.log 6 | **/wandb/** 7 | trained_models/Result_1view/* 8 | trained_models/Result_2view/* 9 | trained_models/Result_4view/* 10 | retigen/output 11 | run/output 12 | .gitignore -------------------------------------------------------------------------------- /figures/framework.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/figures/framework.jpg -------------------------------------------------------------------------------- /methods/TAST/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Facebook has adopted a Code of Conduct that we expect project participants to adhere to. 4 | Please read the [full text](https://code.fb.com/codeofconduct/) 5 | so that you can understand what actions will and will not be tolerated. 6 | -------------------------------------------------------------------------------- /methods/TAST/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to `DomainBed` 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 
4 | 5 | ## Pull Requests 6 | We actively welcome your pull requests. 7 | 8 | 1. Fork the repo and create your branch from `master`. 9 | 2. If you've added code that should be tested, add tests. 10 | 3. If you've changed APIs, update the documentation. 11 | 4. Ensure the test suite passes. 12 | 5. Make sure your code lints. 13 | 6. If you haven't already, complete the Contributor License Agreement ("CLA"). 14 | 15 | ## Contributor License Agreement ("CLA") 16 | In order to accept your pull request, we need you to submit a CLA. You only need 17 | to do this once to work on any of Facebook's open source projects. 18 | 19 | Complete your CLA here: 20 | 21 | ## Issues 22 | We use GitHub issues to track public bugs. Please ensure your description is 23 | clear and has sufficient instructions to be able to reproduce the issue. 24 | 25 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe 26 | disclosure of security bugs. In those cases, please go through the process 27 | outlined on that page and do not file a public issue. 28 | 29 | ## License 30 | By contributing to `DomainBed`, you agree that your contributions 31 | will be licensed under the LICENSE file in the root directory of this source 32 | tree. 33 | -------------------------------------------------------------------------------- /methods/TAST/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Facebook, Inc. and its affiliates. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | -------------------------------------------------------------------------------- /methods/TAST/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | torch = "*" 8 | torchvision = "*" 9 | torchaudio = "*" 10 | tqdm = "*" 11 | einops = "*" 12 | python-dotenv = "*" 13 | rarfile = "*" 14 | scipy = "*" 15 | jupyterlab = "*" 16 | wilds = "*" 17 | gdown = "*" 18 | wget = "*" 19 | timm = "*" 20 | 21 | [dev-packages] 22 | pipenv = "*" 23 | flake8 = "*" 24 | autopep8 = "*" 25 | wandb = "*" 26 | jupyter = "*" 27 | 28 | [requires] 29 | python_version = "3.7" 30 | -------------------------------------------------------------------------------- /methods/TAST/README.md: -------------------------------------------------------------------------------- 1 | ### TAST ### 2 | This code is for the paper "Test-time Adaptation via Self-training with Nearest Neighbor information (TAST)", accetped to ICLR'23. 3 | We use the publicly released code "https://github.com/matsuolab/T3A". 4 | You can follow the descriptions about installation and experiments in "https://github.com/matsuolab/T3A". 5 | 6 | ### Dependencies ### 7 | Python 3.7.11 8 | PyTorch 1.8.0 9 | Torchvision 0.9.0 10 | CUDA 10.2 11 | CUDNN 7605 12 | NumPy 1.2 13 | PIL 8.4.0 14 | 15 | ### Data ### 16 | You can download the domain generalization benchmarks, namely VLCS, PACS, OfficeHome, and TerraIncognita by following the procedure. 17 | e.g.) python -m domainbed.scripts.download --data_dir=/my/datasets/path --dataset pacs 18 | 19 | You can change pacs to vlcs, office_home, terra_incognita to download other datasets. 20 | 21 | ### Train ### 22 | You can train a model on training domains. 23 | e.g.) 
python -m domainbed.scripts.train\ 24 | --data_dir /my/datasets/path\ 25 | --output_dir /my/pretrain/path\ 26 | --algorithm ERM\ 27 | --dataset PACS\ 28 | --hparams "{\"backbone\": \"resnet18-BN\"}" 29 | 30 | You can use backbone networks such as resnet50-BN, resnet50 which are presented in the train.py file. 31 | The trained network and information about the training are recorded in "/my/pretrain/path" 32 | 33 | ### Test-time adaptation ### 34 | While testing, we adapt trained classifiers. 35 | e.g.) python -m domainbed.scripts.unsupervised_adaptation\ 36 | --input_dir=/my/pretrain/path\ 37 | --adapt_algorithm=TAST 38 | 39 | You can use the test-time adaptation algorithms such as T3A, TAST, and TAST_bn which are presented in the adapt_algorithms.py file. 40 | 41 | Then, the test reulsts will be recorded in "/my/pretrain/path/out_TAST.txt" and "/my/pretrain/path/results_TAST.jsonl" 42 | -------------------------------------------------------------------------------- /methods/TAST/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Use this section to tell people about which versions of your project are 6 | currently being supported with security updates. 7 | 8 | | Version | Supported | 9 | | ------- | ------------------ | 10 | | 5.1.x | :white_check_mark: | 11 | | 5.0.x | :x: | 12 | | 4.0.x | :white_check_mark: | 13 | | < 4.0 | :x: | 14 | 15 | ## Reporting a Vulnerability 16 | 17 | Use this section to tell people how to report a vulnerability. 18 | 19 | Tell them where to go, how often they can expect to get an update on a 20 | reported vulnerability, what to expect if the vulnerability is accepted or 21 | declined, etc. 
22 | -------------------------------------------------------------------------------- /methods/TAST/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # [1] base setting 2 | FROM nvidia/cuda:10.2-cudnn7-devel 3 | 4 | ENV DEBIAN_FRONTEND=noninteractive 5 | ENV HOME /root 6 | 7 | 8 | # [2] zsh 9 | RUN apt-get update && apt-get -y upgrade && \ 10 | apt-get install -y \ 11 | wget \ 12 | curl \ 13 | git \ 14 | vim-athena \ 15 | zsh \ 16 | tmux \ 17 | unzip --no-install-recommends 18 | 19 | SHELL ["/bin/zsh", "-c"] 20 | RUN wget http://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | zsh 21 | 22 | 23 | # [3] pyenv 24 | 25 | RUN apt-get update && \ 26 | apt-get install -y \ 27 | make \ 28 | build-essential \ 29 | libssl-dev \ 30 | zlib1g-dev \ 31 | libbz2-dev \ 32 | libreadline-dev \ 33 | libsqlite3-dev \ 34 | llvm \ 35 | libncurses5-dev \ 36 | libncursesw5-dev \ 37 | xz-utils \ 38 | tk-dev \ 39 | libffi-dev \ 40 | liblzma-dev \ 41 | python-openssl --no-install-recommends 42 | 43 | RUN curl https://pyenv.run | zsh && \ 44 | echo '' >> $HOME/.zshrc && \ 45 | echo 'export PATH="$HOME/.pyenv/bin:$PATH"' >> $HOME/.zshrc && \ 46 | echo 'eval "$(pyenv init -)"' >> $HOME/.zshrc && \ 47 | echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.zshrc 48 | 49 | ENV PYENV_ROOT $HOME/.pyenv 50 | ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH 51 | 52 | RUN pyenv install 3.7.4 && \ 53 | pyenv global 3.7.4 && \ 54 | pyenv rehash 55 | 56 | 57 | # [4] python 58 | RUN apt-get update && apt-get install -y ffmpeg nodejs npm 59 | RUN pip install pipenv 60 | 61 | 62 | CMD ["/bin/zsh"] 63 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | 3 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/command_launchers.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | """ 4 | A command launcher launches a list of commands on a cluster; implement your own 5 | launcher to add support for your cluster. We've provided an example launcher 6 | which runs all commands serially on the local machine. 7 | """ 8 | 9 | import subprocess 10 | import time 11 | import torch 12 | 13 | def local_launcher(commands): 14 | """Launch commands serially on the local machine.""" 15 | for cmd in commands: 16 | subprocess.call(cmd, shell=True) 17 | 18 | def dummy_launcher(commands): 19 | """ 20 | Doesn't run anything; instead, prints each command. 21 | Useful for testing. 22 | """ 23 | for cmd in commands: 24 | print(f'Dummy launcher: {cmd}') 25 | 26 | def multi_gpu_launcher(commands): 27 | """ 28 | Launch commands on the local machine, using all GPUs in parallel. 29 | """ 30 | print('WARNING: using experimental multi_gpu_launcher.') 31 | n_gpus = torch.cuda.device_count() 32 | procs_by_gpu = [None]*n_gpus 33 | 34 | while len(commands) > 0: 35 | for gpu_idx in range(n_gpus): 36 | proc = procs_by_gpu[gpu_idx] 37 | if (proc is None) or (proc.poll() is not None): 38 | # Nothing is running on this GPU; launch a command. 
39 | cmd = commands.pop(0) 40 | new_proc = subprocess.Popen( 41 | f'CUDA_VISIBLE_DEVICES={gpu_idx} {cmd}', shell=True) 42 | procs_by_gpu[gpu_idx] = new_proc 43 | break 44 | time.sleep(1) 45 | 46 | # Wait for the last few tasks to finish before returning 47 | for p in procs_by_gpu: 48 | if p is not None: 49 | p.wait() 50 | 51 | REGISTRY = { 52 | 'local': local_launcher, 53 | 'dummy': dummy_launcher, 54 | 'multi_gpu': multi_gpu_launcher 55 | } 56 | 57 | try: 58 | from domainbed import facebook 59 | facebook.register_command_launchers(REGISTRY) 60 | except ImportError: 61 | pass 62 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/lib/fast_data_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | import torch 4 | 5 | class _InfiniteSampler(torch.utils.data.Sampler): 6 | """Wraps another Sampler to yield an infinite stream.""" 7 | def __init__(self, sampler): 8 | self.sampler = sampler 9 | 10 | def __iter__(self): 11 | while True: 12 | for batch in self.sampler: 13 | yield batch 14 | 15 | class InfiniteDataLoader: 16 | def __init__(self, dataset, weights, batch_size, num_workers): 17 | super().__init__() 18 | 19 | if weights is None: 20 | sampler = torch.utils.data.RandomSampler(dataset, 21 | replacement=True) 22 | else: 23 | sampler = torch.utils.data.WeightedRandomSampler(weights, 24 | replacement=True, 25 | num_samples=batch_size) 26 | 27 | # if weights is None: 28 | # weights = torch.ones(len(dataset)) 29 | 30 | batch_sampler = torch.utils.data.BatchSampler( 31 | sampler, 32 | batch_size=batch_size, 33 | drop_last=True) 34 | 35 | self._infinite_iterator = iter(torch.utils.data.DataLoader( 36 | dataset, 37 | num_workers=num_workers, 38 | batch_sampler=_InfiniteSampler(batch_sampler) 39 | )) 40 | 41 | def __iter__(self): 42 | while True: 43 | yield next(self._infinite_iterator) 44 | 45 | def 
__len__(self): 46 | raise ValueError 47 | 48 | class FastDataLoader: 49 | """DataLoader wrapper with slightly improved speed by not respawning worker 50 | processes at every epoch.""" 51 | def __init__(self, dataset, batch_size, num_workers): 52 | super().__init__() 53 | 54 | batch_sampler = torch.utils.data.BatchSampler( 55 | torch.utils.data.RandomSampler(dataset, replacement=False), 56 | batch_size=batch_size, 57 | drop_last=False 58 | ) 59 | 60 | self._infinite_iterator = iter(torch.utils.data.DataLoader( 61 | dataset, 62 | num_workers=num_workers, 63 | batch_sampler=_InfiniteSampler(batch_sampler) 64 | )) 65 | 66 | self._length = len(batch_sampler) 67 | 68 | def __iter__(self): 69 | for _ in range(len(self)): 70 | yield next(self._infinite_iterator) 71 | 72 | def __len__(self): 73 | return self._length 74 | 75 | 76 | class DataParallelPassthrough(torch.nn.DataParallel): 77 | def __getattr__(self, name): 78 | try: 79 | return super().__getattr__(name) 80 | except AttributeError: 81 | return getattr(self.module, name) -------------------------------------------------------------------------------- /methods/TAST/domainbed/lib/mlp_mixer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import timm 3 | from domainbed.lib.vision_transformer import Identity 4 | 5 | 6 | class MLPMixer(torch.nn.Module): 7 | KNOWN_MODELS = { 8 | 'Mixer-B16': timm.models.mlp_mixer.mixer_b16_224_in21k, 9 | 'Mixer-L16': timm.models.mlp_mixer.mixer_l16_224_in21k, 10 | } 11 | def __init__(self, input_shape, hparams): 12 | super().__init__() 13 | func = self.KNOWN_MODELS[hparams['backbone']] 14 | self.network = func(pretrained=True) 15 | self.n_outputs = self.network.norm.normalized_shape[0] 16 | self.network.head = Identity() 17 | self.hparams = hparams 18 | 19 | def forward(self, x): 20 | """Encode x into a feature vector of size n_outputs.""" 21 | return self.network(x) 22 | 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/lib/reporting.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | import collections 4 | 5 | import json 6 | import os 7 | 8 | import tqdm 9 | 10 | from domainbed.lib.query import Q 11 | 12 | def load_records(path): 13 | records = [] 14 | for i, subdir in tqdm.tqdm(list(enumerate(os.listdir(path))), 15 | ncols=80, 16 | leave=False): 17 | results_path = os.path.join(path, subdir, "results.jsonl") 18 | try: 19 | with open(results_path, "r") as f: 20 | for line in f: 21 | records.append(json.loads(line[:-1])) 22 | except IOError: 23 | pass 24 | 25 | return Q(records) 26 | 27 | def get_grouped_records(records): 28 | """Group records by (trial_seed, dataset, algorithm, test_env). Because 29 | records can have multiple test envs, a given record may appear in more than 30 | one group.""" 31 | result = collections.defaultdict(lambda: []) 32 | for r in records: 33 | for test_env in r["args"]["test_envs"]: 34 | group = (r["args"]["trial_seed"], 35 | r["args"]["dataset"], 36 | r["args"]["algorithm"], 37 | test_env) 38 | result[group].append(r) 39 | return Q([{"trial_seed": t, "dataset": d, "algorithm": a, "test_env": e, 40 | "records": Q(r)} for (t,d,a,e),r in result.items()]) 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/lib/wide_resnet.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | 3 | """ 4 | From https://github.com/meliketoy/wide-resnet.pytorch 5 | """ 6 | 7 | import sys 8 | 9 | import numpy as np 10 | import torch 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | import torch.nn.init as init 14 | from torch.autograd import Variable 15 | 16 | 17 | def conv3x3(in_planes, out_planes, stride=1): 18 | return nn.Conv2d( 19 | in_planes, 20 | out_planes, 21 | kernel_size=3, 22 | stride=stride, 23 | padding=1, 24 | bias=True) 25 | 26 | 27 | def conv_init(m): 28 | classname = m.__class__.__name__ 29 | if classname.find('Conv') != -1: 30 | init.xavier_uniform_(m.weight, gain=np.sqrt(2)) 31 | init.constant_(m.bias, 0) 32 | elif classname.find('BatchNorm') != -1: 33 | init.constant_(m.weight, 1) 34 | init.constant_(m.bias, 0) 35 | 36 | 37 | class wide_basic(nn.Module): 38 | def __init__(self, in_planes, planes, dropout_rate, stride=1): 39 | super(wide_basic, self).__init__() 40 | self.bn1 = nn.BatchNorm2d(in_planes) 41 | self.conv1 = nn.Conv2d( 42 | in_planes, planes, kernel_size=3, padding=1, bias=True) 43 | self.dropout = nn.Dropout(p=dropout_rate) 44 | self.bn2 = nn.BatchNorm2d(planes) 45 | self.conv2 = nn.Conv2d( 46 | planes, planes, kernel_size=3, stride=stride, padding=1, bias=True) 47 | 48 | self.shortcut = nn.Sequential() 49 | if stride != 1 or in_planes != planes: 50 | self.shortcut = nn.Sequential( 51 | nn.Conv2d( 52 | in_planes, planes, kernel_size=1, stride=stride, 53 | bias=True), ) 54 | 55 | def forward(self, x): 56 | out = self.dropout(self.conv1(F.relu(self.bn1(x)))) 57 | out = self.conv2(F.relu(self.bn2(out))) 58 | out += self.shortcut(x) 59 | 60 | return out 61 | 62 | 63 | class Wide_ResNet(nn.Module): 64 | """Wide Resnet with the softmax layer chopped off""" 65 | def __init__(self, input_shape, depth, widen_factor, dropout_rate): 66 | super(Wide_ResNet, self).__init__() 67 | self.in_planes = 16 68 | 69 | assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4' 70 | n = (depth - 4) / 
6 71 | k = widen_factor 72 | 73 | # print('| Wide-Resnet %dx%d' % (depth, k)) 74 | nStages = [16, 16 * k, 32 * k, 64 * k] 75 | 76 | self.conv1 = conv3x3(input_shape[0], nStages[0]) 77 | self.layer1 = self._wide_layer( 78 | wide_basic, nStages[1], n, dropout_rate, stride=1) 79 | self.layer2 = self._wide_layer( 80 | wide_basic, nStages[2], n, dropout_rate, stride=2) 81 | self.layer3 = self._wide_layer( 82 | wide_basic, nStages[3], n, dropout_rate, stride=2) 83 | self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9) 84 | 85 | self.n_outputs = nStages[3] 86 | 87 | def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride): 88 | strides = [stride] + [1] * (int(num_blocks) - 1) 89 | layers = [] 90 | 91 | for stride in strides: 92 | layers.append(block(self.in_planes, planes, dropout_rate, stride)) 93 | self.in_planes = planes 94 | 95 | return nn.Sequential(*layers) 96 | 97 | def forward(self, x): 98 | out = self.conv1(x) 99 | out = self.layer1(out) 100 | out = self.layer2(out) 101 | out = self.layer3(out) 102 | out = F.relu(self.bn1(out)) 103 | out = F.avg_pool2d(out, 8) 104 | return out[:, :, 0, 0] 105 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0657090f9a83ff76efe083a104fde93a/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0657090f9a83ff76efe083a104fde93a/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/0657090f9a83ff76efe083a104fde93a/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0657090f9a83ff76efe083a104fde93a/out.txt: 
-------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/0657090f9a83ff76efe083a104fde93a 18 | save_model_every_checkpoint: False 19 | seed: 360234358 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 2] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6448763251 0.6572438163 0.4602352941 0.4821092279 0.4028941356 0.3856707317 0.4883376527 0.4888888889 0.0000000000 1.6960320473 0 0.9077303410 37 | 1.0000000000 1.0000000000 0.5694117647 0.5536723164 0.7517136329 0.7225609756 0.9303961496 0.8592592593 10.335689045 0.2295612923 300 0.2678673498 38 | 0.9991166078 1.0000000000 0.5957647059 0.5800376648 0.7635186596 0.7240853659 0.9440947797 0.8548148148 20.671378091 0.0907488818 600 0.2698669426 39 | 1.0000000000 1.0000000000 0.5976470588 0.6082862524 0.7559025133 0.7256097561 0.9800074047 0.8503703704 31.007067137 0.0480223160 900 0.2695488143 40 | 1.0000000000 1.0000000000 0.5680000000 0.5687382298 0.7482863671 0.7362804878 0.9840799704 0.8474074074 34.452296819 0.0351698661 1000 0.2753722453 41 | -------------------------------------------------------------------------------- 
/methods/TAST/domainbed/misc/test_sweep_data/06db52bd7fcbb8172f97f11a62015261/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/06db52bd7fcbb8172f97f11a62015261/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/06db52bd7fcbb8172f97f11a62015261/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/06db52bd7fcbb8172f97f11a62015261/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/06db52bd7fcbb8172f97f11a62015261 18 | save_model_every_checkpoint: False 19 | seed: 1826196677 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.1475265018 0.1342756184 0.0672941176 0.0753295669 0.2429550647 0.2240853659 0.1384672344 0.1555555556 0.0000000000 1.8871159554 0 0.6768667698 37 | 
0.9867491166 0.9964664311 0.7336470588 0.7193973635 0.7715156131 0.7606707317 0.8393187708 0.8192592593 2.1201413428 0.7141554105 300 0.1475044028 38 | 0.9902826855 0.9858657244 0.7788235294 0.7495291902 0.8015993907 0.8094512195 0.8656053314 0.7940740741 4.2402826855 0.5276730498 600 0.1483345437 39 | 0.9823321555 0.9858657244 0.7825882353 0.7193973635 0.8423457730 0.7881097561 0.8852276934 0.8237037037 6.3604240283 0.4728276532 900 0.1456738242 40 | 0.9832155477 0.9823321555 0.8009411765 0.7514124294 0.8488194973 0.8109756098 0.8992965568 0.8444444444 7.0671378092 0.4487797840 1000 0.1817230749 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/07ea1841921ad29c18ae52563b274925/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/07ea1841921ad29c18ae52563b274925/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/07ea1841921ad29c18ae52563b274925/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/07ea1841921ad29c18ae52563b274925/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/07ea1841921ad29c18ae52563b274925 18 | save_model_every_checkpoint: False 19 | seed: 
164938159 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 2] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6369257951 0.6537102473 0.5082352941 0.5348399247 0.4508758568 0.4375000000 0.4427989633 0.4607407407 0.0000000000 1.6150231361 0 2.2098460197 37 | 0.9876325088 0.9858657244 0.8108235294 0.7947269303 0.6972581874 0.6783536585 0.8881895594 0.8325925926 10.335689045 0.5566045662 300 0.7824950083 38 | 0.9876325088 0.9858657244 0.8978823529 0.7853107345 0.7102056359 0.7134146341 0.9511292114 0.8340740741 20.671378091 0.3126574263 600 0.7610859227 39 | 0.9885159011 0.9858657244 0.9331764706 0.7476459510 0.7170601676 0.7012195122 0.9707515735 0.8311111111 31.007067137 0.1981815844 900 0.7655067587 40 | 0.9805653710 0.9717314488 0.9421176471 0.7853107345 0.7307692308 0.6798780488 0.9637171418 0.8207407407 34.452296819 0.1589800572 1000 0.7399253964 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0c53bbff83d887850721788187907586/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0c53bbff83d887850721788187907586/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/0c53bbff83d887850721788187907586/err.txt 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0c53bbff83d887850721788187907586/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/0c53bbff83d887850721788187907586 18 | save_model_every_checkpoint: False 19 | seed: 883692786 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6139575972 0.6183745583 0.4654117647 0.4613935970 0.3769992384 0.4192073171 0.4527952610 0.4059259259 0.0000000000 1.5639189482 0 1.3415405750 37 | 0.9982332155 0.9929328622 0.6927058824 0.6798493409 0.8549124143 0.8064024390 0.7963717142 0.7674074074 8.4805653710 0.2506012543 300 0.2245095642 38 | 1.0000000000 0.9893992933 0.6254117647 0.6120527307 0.9440213252 0.8185975610 0.7023324695 0.6814814815 16.961130742 0.1403411952 600 0.2259919771 39 | 0.9973498233 0.9929328622 0.6009411765 0.5894538606 0.9257425743 0.7713414634 0.6823398741 0.6755555556 25.441696113 0.0984130776 900 0.2255344065 40 | 1.0000000000 0.9964664311 0.6174117647 0.6045197740 0.9676313785 0.8109756098 0.6564235468 0.6488888889 28.268551236 0.0749892714 1000 0.2303549671 41 | 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0ec227d205744455c681614d9f55d841/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0ec227d205744455c681614d9f55d841/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/0ec227d205744455c681614d9f55d841/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0ec227d205744455c681614d9f55d841/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/0ec227d205744455c681614d9f55d841 18 | save_model_every_checkpoint: False 19 | seed: 1652397067 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 2] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6289752650 0.6466431095 0.4720000000 0.4934086629 0.3888042650 0.3856707317 0.4535357275 0.4474074074 
0.0000000000 1.5672284365 0 0.8409721851 37 | 0.9761484099 0.9787985866 0.7877647059 0.7099811676 0.7616146230 0.7301829268 0.9222510181 0.8414814815 8.4805653710 0.5017355401 300 0.4830384008 38 | 0.9726148410 0.9646643110 0.8748235294 0.7608286252 0.6816450876 0.6615853659 0.9263235839 0.8296296296 16.961130742 0.3284906636 600 0.4819568117 39 | 0.9752650177 0.9787985866 0.9256470588 0.7589453861 0.7086824067 0.6966463415 0.9559422436 0.8177777778 25.441696113 0.2250106066 900 0.4664689159 40 | 0.9681978799 0.9646643110 0.9138823529 0.7325800377 0.7349581112 0.6890243902 0.9766753054 0.8355555556 28.268551236 0.1948434772 1000 0.4899235868 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0fe0ed57077c0c9291931a388ba21be2/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0fe0ed57077c0c9291931a388ba21be2/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/0fe0ed57077c0c9291931a388ba21be2/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/0fe0ed57077c0c9291931a388ba21be2/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/0fe0ed57077c0c9291931a388ba21be2 18 | 
save_model_every_checkpoint: False 19 | seed: 232202035 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6139575972 0.6183745583 0.4668235294 0.4613935970 0.3766184311 0.4192073171 0.4535357275 0.4059259259 0.0000000000 1.5417273045 0 1.5597932339 37 | 0.9717314488 0.9575971731 0.8437647059 0.7570621469 0.5761614623 0.6006097561 0.6797482414 0.6696296296 8.4805653710 0.3278960832 300 0.5082177607 38 | 1.0000000000 1.0000000000 0.8785882353 0.7664783427 0.5670220868 0.5807926829 0.7012217697 0.6622222222 16.961130742 0.2037799085 600 0.5176990946 39 | 1.0000000000 1.0000000000 0.9317647059 0.7570621469 0.6245239909 0.6539634146 0.7515734913 0.7259259259 25.441696113 0.1357027507 900 0.5146216901 40 | 0.9991166078 0.9893992933 0.9228235294 0.7438794727 0.6054836253 0.6326219512 0.7319511292 0.7022222222 28.268551236 0.0931368149 1000 0.4918298554 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b0678ef843d122c17404ab8bd138523/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b0678ef843d122c17404ab8bd138523/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/1b0678ef843d122c17404ab8bd138523/err.txt 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b0678ef843d122c17404ab8bd138523/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/1b0678ef843d122c17404ab8bd138523 18 | save_model_every_checkpoint: False 19 | seed: 703675087 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6033568905 0.6007067138 0.3477647059 0.3521657250 0.3335872049 0.3780487805 0.3791188449 0.3318518519 0.0000000000 1.6503455639 0 1.3420743942 37 | 0.8966431095 0.8692579505 0.7712941176 0.7514124294 0.8042650419 0.7865853659 0.7049241022 0.6829629630 2.1201413428 0.7344291466 300 0.1374709209 38 | 0.8984098940 0.8763250883 0.7802352941 0.7438794727 0.8297791318 0.8201219512 0.7334320622 0.7155555556 4.2402826855 0.5958860209 600 0.1401097918 39 | 0.4355123675 0.4628975265 0.7924705882 0.7401129944 0.8191165270 0.7713414634 0.6467974824 0.6311111111 6.3604240283 0.5318177843 900 0.1377514847 40 | 0.9107773852 0.8727915194 0.8061176471 0.7740112994 0.8206397563 0.8003048780 0.7600888560 0.7200000000 7.0671378092 0.4978464527 1000 0.1623143768 
41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b424e4ac8bc11c9d3f36b1729e19547/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b424e4ac8bc11c9d3f36b1729e19547/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/1b424e4ac8bc11c9d3f36b1729e19547/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/1b424e4ac8bc11c9d3f36b1729e19547/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/1b424e4ac8bc11c9d3f36b1729e19547 18 | save_model_every_checkpoint: False 19 | seed: 808031485 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6033568905 0.6148409894 0.4550588235 0.4595103578 0.3450114242 
0.3932926829 0.4420584969 0.3985185185 0.0000000000 1.4451200962 0 1.4165942669 37 | 0.9867491166 0.9787985866 0.7491764706 0.7325800377 0.5639756283 0.6006097561 0.7001110700 0.6518518519 2.1201413428 0.4410370264 300 0.1582184227 38 | 0.9991166078 0.9929328622 0.7783529412 0.7288135593 0.5662604722 0.5807926829 0.6878933728 0.6681481481 4.2402826855 0.3040031821 600 0.1537931506 39 | 1.0000000000 1.0000000000 0.8084705882 0.7288135593 0.5982482864 0.6112804878 0.7230655313 0.6888888889 6.3604240283 0.2854706001 900 0.1461815945 40 | 0.9991166078 1.0000000000 0.8141176471 0.7532956685 0.6587966489 0.6493902439 0.7152906331 0.6992592593 7.0671378092 0.2706131497 1000 0.1883794379 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24c1684361b7442877526ab118da7117/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24c1684361b7442877526ab118da7117/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/24c1684361b7442877526ab118da7117/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24c1684361b7442877526ab118da7117/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: 
domainbed/misc/test_sweep_data/24c1684361b7442877526ab118da7117 18 | save_model_every_checkpoint: False 19 | seed: 845862410 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 1] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6157243816 0.6219081272 0.4663529412 0.4613935970 0.3769992384 0.4207317073 0.4539059608 0.4103703704 0.0000000000 1.6230642796 0 0.5895545483 37 | 0.9611307420 0.9646643110 0.6536470588 0.6290018832 0.8651942117 0.8445121951 0.8974453906 0.8251851852 8.4805653710 0.4414077417 300 0.2258998156 38 | 0.9708480565 0.9681978799 0.6094117647 0.5800376648 0.9116527037 0.8140243902 0.9433543132 0.8266666667 16.961130742 0.2386230343 600 0.2265182082 39 | 0.9726148410 0.9752650177 0.6315294118 0.6346516008 0.9638233054 0.8216463415 0.9733432062 0.8548148148 25.441696113 0.1686591896 900 0.2260356387 40 | 0.9814487633 0.9787985866 0.6785882353 0.6723163842 0.9535415080 0.8307926829 0.9637171418 0.8355555556 28.268551236 0.1337041207 1000 0.2317301798 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24cf797be205aaef612b14beefc4c1a3/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24cf797be205aaef612b14beefc4c1a3/err.txt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/24cf797be205aaef612b14beefc4c1a3/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/24cf797be205aaef612b14beefc4c1a3/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/24cf797be205aaef612b14beefc4c1a3 18 | save_model_every_checkpoint: False 19 | seed: 2080818722 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6121908127 0.6289752650 0.4597647059 0.4896421846 0.3846153846 0.3871951220 0.4435394298 0.4459259259 0.0000000000 1.4719194174 0 0.7130000591 37 | 0.9955830389 0.9823321555 0.6569411765 0.6572504708 0.8419649657 0.7652439024 0.9026286561 0.8385185185 8.4805653710 0.2995765518 300 0.3290495388 38 | 0.9982332155 0.9929328622 0.6635294118 0.6572504708 0.9105102818 0.7759146341 0.9540910774 0.8429629630 16.961130742 0.1636779740 600 0.3299173093 39 | 0.9982332155 0.9964664311 0.6371764706 0.6177024482 0.9565879665 0.7850609756 0.9663087745 0.8429629630 25.441696113 0.1089462244 900 0.3275924150 40 | 
0.9982332155 0.9964664311 0.6658823529 0.6817325800 0.9527798934 0.7896341463 0.9433543132 0.8281481481 28.268551236 0.0757257283 1000 0.3295744514 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2b696be39395e8830222b505f6aa45d8/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2b696be39395e8830222b505f6aa45d8/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/2b696be39395e8830222b505f6aa45d8/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2b696be39395e8830222b505f6aa45d8/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/2b696be39395e8830222b505f6aa45d8 18 | save_model_every_checkpoint: False 19 | seed: 555331067 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc 
env3_out_acc epoch loss step step_time 36 | 0.6210247350 0.6219081272 0.4677647059 0.4595103578 0.3769992384 0.4237804878 0.4838948538 0.4488888889 0.0000000000 1.5794914961 0 1.2731909752 37 | 0.9991166078 0.9964664311 0.8108235294 0.7476459510 0.8594821021 0.8307926829 0.7734172529 0.7629629630 8.4805653710 0.3850402999 300 0.5130404655 38 | 0.9814487633 0.9646643110 0.8625882353 0.7137476460 0.9089870526 0.8140243902 0.7223250648 0.7125925926 16.961130742 0.2540220108 600 0.5093434628 39 | 0.9991166078 0.9929328622 0.7576470588 0.6741996234 0.9158415842 0.7987804878 0.7500925583 0.7407407407 25.441696113 0.1773750270 900 0.4844152689 40 | 1.0000000000 0.9964664311 0.9265882353 0.7325800377 0.9398324448 0.7667682927 0.6915957053 0.6948148148 28.268551236 0.1285581028 1000 0.4817661285 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2dd075c39b257eb019b4a8d813525113/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2dd075c39b257eb019b4a8d813525113/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/2dd075c39b257eb019b4a8d813525113/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/2dd075c39b257eb019b4a8d813525113/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | 
holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/2dd075c39b257eb019b4a8d813525113 18 | save_model_every_checkpoint: False 19 | seed: 1451105084 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6148409894 0.6254416961 0.5562352941 0.5404896422 0.4714394516 0.4527439024 0.4668641244 0.4651851852 0.0000000000 1.5553741455 0 0.7478101254 37 | 0.8595406360 0.8586572438 0.8244705882 0.7853107345 0.8625285605 0.7591463415 0.7478711588 0.7377777778 8.4805653710 0.5564234919 300 0.5294828455 38 | 0.3533568905 0.3498233216 0.8141176471 0.6779661017 0.9002284844 0.7698170732 0.6749352092 0.6918518519 16.961130742 0.3541142742 600 0.6940090084 39 | 0.4125441696 0.3992932862 0.9270588235 0.7495291902 0.9584920030 0.7881097561 0.7164013328 0.7185185185 25.441696113 0.2470643952 900 0.6397310909 40 | 0.3409893993 0.3074204947 0.9308235294 0.7288135593 0.9504950495 0.7804878049 0.6364309515 0.6518518519 28.268551236 0.1845126691 1000 0.6637606668 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/3539ff8139b8f1797865a2f26e51c70f/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/3539ff8139b8f1797865a2f26e51c70f/err.txt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/3539ff8139b8f1797865a2f26e51c70f/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/3539ff8139b8f1797865a2f26e51c70f/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/3539ff8139b8f1797865a2f26e51c70f 18 | save_model_every_checkpoint: False 19 | seed: 77312117 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 2] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6121908127 0.6289752650 0.4597647059 0.4896421846 0.3849961919 0.3871951220 0.4442798963 0.4459259259 0.0000000000 1.6168131828 0 1.5035538673 37 | 0.9911660777 0.9611307420 0.4945882353 0.4990583804 0.6031987814 0.5914634146 0.8637541651 0.7837037037 8.4805653710 0.2213101814 300 0.2264140566 38 | 0.9982332155 1.0000000000 0.6588235294 0.6779661017 0.7220106626 0.6996951220 0.9274342836 0.8385185185 16.961130742 0.1078731784 600 0.2273491073 39 | 0.9814487633 0.9717314488 0.6320000000 0.6195856874 0.6721249048 0.6371951220 0.8844872270 0.7925925926 25.441696113 0.0730464640 900 0.2263356590 40 | 
1.0000000000 0.9964664311 0.5811764706 0.5800376648 0.7555217060 0.7271341463 0.9785264717 0.8533333333 28.268551236 0.0753941641 1000 0.2314931631 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/371b3e2afe1e7a754e49b2324bf159b6/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/371b3e2afe1e7a754e49b2324bf159b6/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/371b3e2afe1e7a754e49b2324bf159b6/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/371b3e2afe1e7a754e49b2324bf159b6/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/371b3e2afe1e7a754e49b2324bf159b6 18 | save_model_every_checkpoint: False 19 | seed: 673138363 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 1] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc 
env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.2791519435 0.2650176678 0.0489411765 0.0489642185 0.0872048743 0.1036585366 0.2436134765 0.2325925926 0.0000000000 1.6690740585 0 0.5699374676 37 | 0.9823321555 0.9717314488 0.6056470588 0.5856873823 0.8153084539 0.8033536585 0.8670862643 0.8385185185 2.1201413428 0.6575384592 300 0.0850741275 38 | 0.9858657244 0.9858657244 0.6960000000 0.6911487759 0.8297791318 0.7972560976 0.8681969641 0.8355555556 4.2402826855 0.4300726643 600 0.0843147270 39 | 0.9611307420 0.9363957597 0.6588235294 0.6384180791 0.8587204874 0.8445121951 0.8926323584 0.8251851852 6.3604240283 0.3910656881 900 0.0857653062 40 | 0.9602473498 0.9575971731 0.6268235294 0.6101694915 0.8712871287 0.8216463415 0.8907811922 0.8281481481 7.0671378092 0.3222925671 1000 0.0998253107 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/41b0ac2ee570d8ace449c34ada3fdd01/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/41b0ac2ee570d8ace449c34ada3fdd01/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/41b0ac2ee570d8ace449c34ada3fdd01/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/41b0ac2ee570d8ace449c34ada3fdd01/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: 
/checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/41b0ac2ee570d8ace449c34ada3fdd01 18 | save_model_every_checkpoint: False 19 | seed: 1402607286 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6113074205 0.6289752650 0.4489411765 0.4821092279 0.3625285605 0.3719512195 0.4409477971 0.4533333333 0.0000000000 1.5662012100 0 1.9600167274 37 | 1.0000000000 0.9964664311 0.8291764706 0.7702448211 0.6416603199 0.6021341463 0.7426878934 0.7407407407 10.335689045 0.3412963475 300 0.5609163912 38 | 1.0000000000 0.9929328622 0.8964705882 0.7853107345 0.5799695354 0.5838414634 0.7256571640 0.7259259259 20.671378091 0.1954872701 600 0.5597918383 39 | 0.9991166078 1.0000000000 0.9477647059 0.7608286252 0.6359482102 0.5990853659 0.7378748612 0.7348148148 31.007067137 0.1185131688 900 0.5421174677 40 | 1.0000000000 1.0000000000 0.9454117647 0.7890772128 0.5990099010 0.5807926829 0.7064050352 0.7081481481 34.452296819 0.0762012539 1000 0.5556480336 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4a18a8be66b762f1ad5f45408bc27c78/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4a18a8be66b762f1ad5f45408bc27c78/err.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/4a18a8be66b762f1ad5f45408bc27c78/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4a18a8be66b762f1ad5f45408bc27c78/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/4a18a8be66b762f1ad5f45408bc27c78 18 | save_model_every_checkpoint: False 19 | seed: 1355770594 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6139575972 0.6183745583 0.4705882353 0.4595103578 0.3777608530 0.4192073171 0.4535357275 0.4014814815 0.0000000000 1.8560335636 0 1.0693266392 37 | 1.0000000000 0.9858657244 0.5774117647 0.5894538606 0.7688499619 0.7728658537 0.9222510181 0.8355555556 8.4805653710 0.2207562274 300 0.2226435788 38 | 0.9964664311 0.9929328622 0.5524705882 0.5687382298 0.7440974867 0.7332317073 0.9366901148 0.8192592593 16.961130742 0.1166370596 600 0.2226641949 39 | 0.9991166078 0.9964664311 0.5567058824 0.5612052731 0.7296268088 0.7332317073 
0.9511292114 0.8192592593 25.441696113 0.0710875637 900 0.2238802059 40 | 1.0000000000 0.9964664311 0.5515294118 0.5442561205 0.7288651942 0.7012195122 0.9733432062 0.8414814815 28.268551236 0.0552595345 1000 0.2269736028 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4ccfd57ae38cfc8fd5fba4293614ab26/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4ccfd57ae38cfc8fd5fba4293614ab26/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/4ccfd57ae38cfc8fd5fba4293614ab26/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/4ccfd57ae38cfc8fd5fba4293614ab26/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/4ccfd57ae38cfc8fd5fba4293614ab26 18 | save_model_every_checkpoint: False 19 | seed: 225583337 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 
0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.5803886926 0.6007067138 0.5736470588 0.5762711864 0.4249809596 0.4161585366 0.4150314698 0.4074074074 0.0000000000 1.6056649685 0 1.2535927296 37 | 0.6439929329 0.6678445230 0.7957647059 0.7363465160 0.8846153846 0.8125000000 0.7512032581 0.7274074074 10.335689045 0.5884232441 300 0.6497526010 38 | 0.6925795053 0.7632508834 0.8818823529 0.7890772128 0.9280274181 0.7637195122 0.7312106627 0.7170370370 20.671378091 0.3515189211 600 0.6339190245 39 | 0.5468197880 0.5795053004 0.9312941176 0.7514124294 0.9634424981 0.7835365854 0.7234357645 0.7318518519 31.007067137 0.2306714023 900 0.6368054978 40 | 0.4717314488 0.4664310954 0.9487058824 0.7645951036 0.9619192688 0.7942073171 0.7171417993 0.7229629630 34.452296819 0.1516468529 1000 0.6133238769 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/539c70bc47514b76736c480df7036b8b/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/539c70bc47514b76736c480df7036b8b/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/539c70bc47514b76736c480df7036b8b/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/539c70bc47514b76736c480df7036b8b/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 
| algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/539c70bc47514b76736c480df7036b8b 18 | save_model_every_checkpoint: False 19 | seed: 365467527 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.2994699647 0.2897526502 0.5430588235 0.4896421846 0.4779131759 0.5015243902 0.4879674195 0.4459259259 0.0000000000 1.7162715197 0 1.5893950462 37 | 0.9885159011 0.9717314488 0.8277647059 0.7796610169 0.7372429551 0.7682926829 0.9211403184 0.8325925926 8.4805653710 0.5158454158 300 0.4736440802 38 | 0.9584805654 0.9540636042 0.8320000000 0.7645951036 0.6751713633 0.7057926829 0.9218807849 0.8207407407 16.961130742 0.3099103693 600 0.4764646832 39 | 0.9787985866 0.9752650177 0.9232941176 0.7344632768 0.7296268088 0.7378048780 0.9585338763 0.8059259259 25.441696113 0.2075849420 900 0.4813308350 40 | 0.9893992933 0.9717314488 0.9402352941 0.7532956685 0.7204874334 0.7240853659 0.9689004073 0.8177777778 28.268551236 0.1533024151 1000 0.4948641443 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/63837f74bf4ac60044c74aa87114b386/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/63837f74bf4ac60044c74aa87114b386/err.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/63837f74bf4ac60044c74aa87114b386/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/63837f74bf4ac60044c74aa87114b386/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/63837f74bf4ac60044c74aa87114b386 18 | save_model_every_checkpoint: False 19 | seed: 1154273106 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6121908127 0.6395759717 0.4710588235 0.4651600753 0.3187357197 0.3658536585 0.4439096631 0.4014814815 0.0000000000 1.7305045128 0 0.7390449047 37 | 0.9991166078 0.9929328622 0.7510588235 0.7306967985 0.7303884235 0.7256097561 0.8593113662 0.8074074074 2.1201413428 0.4912618790 300 0.1476260916 38 | 0.9955830389 0.9929328622 0.7821176471 0.7457627119 0.7269611577 0.7012195122 0.8378378378 0.8059259259 4.2402826855 0.3587874381 600 0.1536783393 39 | 0.9991166078 1.0000000000 0.8192941176 
0.7589453861 0.7463823305 0.7530487805 0.9107737875 0.8414814815 6.3604240283 0.3129132905 900 0.1507747587 40 | 0.9982332155 0.9964664311 0.7934117647 0.7532956685 0.6980198020 0.7164634146 0.8870788597 0.8192592593 7.0671378092 0.2962619931 1000 0.1803278637 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66006bc6faa9f96db95a5bcfc3e4340a/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66006bc6faa9f96db95a5bcfc3e4340a/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/66006bc6faa9f96db95a5bcfc3e4340a/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66006bc6faa9f96db95a5bcfc3e4340a/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/66006bc6faa9f96db95a5bcfc3e4340a 18 | save_model_every_checkpoint: False 19 | seed: 1721972278 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | 
weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6227915194 0.6431095406 0.4569411765 0.4783427495 0.4154607768 0.4298780488 0.4613106257 0.4592592593 0.0000000000 1.8584471941 0 1.0781459808 37 | 0.9876325088 0.9787985866 0.8000000000 0.7514124294 0.8667174410 0.7774390244 0.9107737875 0.8592592593 8.4805653710 0.5497589125 300 0.5222175368 38 | 0.9699646643 0.9752650177 0.8075294118 0.6930320151 0.8827113481 0.7606707317 0.9170677527 0.8014814815 16.961130742 0.3329716441 600 0.5305303041 39 | 0.9496466431 0.9505300353 0.9058823529 0.7382297552 0.9230769231 0.7621951220 0.9596445761 0.8355555556 25.441696113 0.2295369956 900 0.5154033097 40 | 0.9637809187 0.9505300353 0.8498823529 0.7325800377 0.9169840061 0.7454268293 0.9596445761 0.8444444444 28.268551236 0.1873281671 1000 0.5267591643 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66779ee52d1111eddfcc6dafa8ae983c/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66779ee52d1111eddfcc6dafa8ae983c/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/66779ee52d1111eddfcc6dafa8ae983c/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/66779ee52d1111eddfcc6dafa8ae983c/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | 
algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/66779ee52d1111eddfcc6dafa8ae983c 18 | save_model_every_checkpoint: False 19 | seed: 121752067 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6219081272 0.6183745583 0.4630588235 0.4538606403 0.3800456969 0.4314024390 0.4535357275 0.4059259259 0.0000000000 1.6938700676 0 1.5393824577 37 | 0.9973498233 0.9964664311 0.6625882353 0.6384180791 0.8743335872 0.8384146341 0.8878193262 0.8192592593 8.4805653710 0.2932585667 300 0.3279992390 38 | 0.9991166078 0.9823321555 0.6362352941 0.6233521657 0.9166031988 0.8262195122 0.9411329137 0.8400000000 16.961130742 0.1662088908 600 0.3277589742 39 | 1.0000000000 0.9964664311 0.6141176471 0.5932203390 0.9295506474 0.7881097561 0.9681599408 0.8251851852 25.441696113 0.1098899398 900 0.3271942870 40 | 1.0000000000 0.9964664311 0.6512941176 0.6365348399 0.9531607007 0.8262195122 0.9800074047 0.8385185185 28.268551236 0.0795394431 1000 0.3333628297 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/691f8b51c9f69b380113a6a2645392bb/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/691f8b51c9f69b380113a6a2645392bb/err.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/691f8b51c9f69b380113a6a2645392bb/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/691f8b51c9f69b380113a6a2645392bb/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/691f8b51c9f69b380113a6a2645392bb 18 | save_model_every_checkpoint: False 19 | seed: 1308297739 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 1] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.3286219081 0.3533568905 0.4724705882 0.5084745763 0.3773800457 0.3932926829 0.3702332469 0.3437037037 0.0000000000 1.8032251596 0 1.7875323296 37 | 0.9920494700 0.9929328622 0.6385882353 0.6384180791 0.8903274943 0.7972560976 0.9203998519 0.8488888889 10.335689045 0.4660329765 300 0.2698347815 38 | 0.9929328622 0.9964664311 0.6070588235 0.5951035782 0.9402132521 0.8079268293 0.9552017771 0.8696296296 20.671378091 0.2222180948 600 0.2707187851 39 | 0.9664310954 0.9717314488 0.6498823529 
0.6610169492 0.9550647372 0.7804878049 0.9651980748 0.8607407407 31.007067137 0.1402724676 900 0.2694199697 40 | 0.9796819788 0.9752650177 0.6696470588 0.6798493409 0.9702970297 0.8003048780 0.9829692706 0.8785185185 34.452296819 0.1025925899 1000 0.2757954836 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/6d481a40ca86768fad6a5088cb58458e/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/6d481a40ca86768fad6a5088cb58458e/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/6d481a40ca86768fad6a5088cb58458e/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/6d481a40ca86768fad6a5088cb58458e/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/6d481a40ca86768fad6a5088cb58458e 18 | save_model_every_checkpoint: False 19 | seed: 1155380425 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | 
resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6095406360 0.6289752650 0.4268235294 0.4538606403 0.4002284844 0.3963414634 0.4450203628 0.4459259259 0.0000000000 1.5691214800 0 1.3228244781 37 | 0.9681978799 0.9575971731 0.8042352941 0.7627118644 0.8476770754 0.7759146341 0.8863383932 0.8488888889 10.335689045 0.5472711667 300 0.6333832312 38 | 0.9655477032 0.9787985866 0.8672941176 0.7608286252 0.9192688500 0.7637195122 0.9363198815 0.8385185185 20.671378091 0.3273245532 600 0.6328919633 39 | 0.9761484099 0.9717314488 0.8917647059 0.7627118644 0.9554455446 0.8003048780 0.9626064421 0.8622222222 31.007067137 0.2252053858 900 0.6284170349 40 | 0.9549469965 0.9611307420 0.9388235294 0.7551789077 0.9691546078 0.8033536585 0.9629766753 0.8385185185 34.452296819 0.1616993903 1000 0.6197570014 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/708942ac219532c45db7898ef9cfb955/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/708942ac219532c45db7898ef9cfb955/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/708942ac219532c45db7898ef9cfb955/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/708942ac219532c45db7898ef9cfb955/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | 
NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/708942ac219532c45db7898ef9cfb955 18 | save_model_every_checkpoint: False 19 | seed: 759729212 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6121908127 0.6289752650 0.4663529412 0.4934086629 0.3910891089 0.3871951220 0.4439096631 0.4459259259 0.0000000000 1.7968641520 0 0.7578103542 37 | 0.9955830389 0.9929328622 0.6004705882 0.6082862524 0.8781416603 0.7942073171 0.7275083302 0.7555555556 8.4805653710 0.2684762215 300 0.2228098536 38 | 1.0000000000 1.0000000000 0.5971764706 0.5875706215 0.9226961158 0.7774390244 0.6749352092 0.6711111111 16.961130742 0.1445222108 600 0.2233129327 39 | 0.9973498233 1.0000000000 0.6522352941 0.6723163842 0.9683929931 0.8140243902 0.7574972233 0.7659259259 25.441696113 0.0898542125 900 0.2237123227 40 | 0.9982332155 1.0000000000 0.6508235294 0.6610169492 0.9565879665 0.7850609756 0.7189929656 0.7244444444 28.268551236 0.0731842542 1000 0.2280582023 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/728347e87d1c533379956bf94dca6fef/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- 
/methods/TAST/domainbed/misc/test_sweep_data/728347e87d1c533379956bf94dca6fef/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/728347e87d1c533379956bf94dca6fef/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/728347e87d1c533379956bf94dca6fef/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/728347e87d1c533379956bf94dca6fef 18 | save_model_every_checkpoint: False 19 | seed: 876870413 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6492932862 0.6537102473 0.4743529412 0.4538606403 0.3651942117 0.3978658537 0.3843021103 0.3644444444 0.0000000000 1.6890671253 0 1.6487302780 37 | 1.0000000000 0.9929328622 0.6014117647 0.6139359699 0.7372429551 0.7240853659 0.8607922991 0.8370370370 2.1201413428 0.3830932904 300 0.0842626444 38 | 0.9964664311 0.9929328622 0.5538823529 0.5461393597 0.7448591013 0.7484756098 0.8955942244 0.8311111111 
4.2402826855 0.2157828625 600 0.0850240620 39 | 0.9982332155 0.9964664311 0.5929411765 0.5819209040 0.7760853008 0.7743902439 0.9107737875 0.8488888889 6.3604240283 0.1769324361 900 0.0844715873 40 | 0.9973498233 0.9929328622 0.5929411765 0.5856873823 0.7757044935 0.7972560976 0.9089226213 0.8400000000 7.0671378092 0.1479099048 1000 0.0993904734 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/7a6119601f2d7f4ce36e0d5d478332dd/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/7a6119601f2d7f4ce36e0d5d478332dd/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/7a6119601f2d7f4ce36e0d5d478332dd/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/7a6119601f2d7f4ce36e0d5d478332dd/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/7a6119601f2d7f4ce36e0d5d478332dd 18 | save_model_every_checkpoint: False 19 | seed: 745093665 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 
5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6272084806 0.6395759717 0.4964705882 0.4632768362 0.4329779132 0.4710365854 0.4742687893 0.4237037037 0.0000000000 1.6059685946 0 1.8818829060 37 | 0.9911660777 0.9787985866 0.7374117647 0.7099811676 0.8476770754 0.8094512195 0.9059607553 0.8296296296 8.4805653710 0.5164279704 300 0.6003480299 38 | 0.9628975265 0.9611307420 0.8588235294 0.7589453861 0.8933739528 0.8201219512 0.9278045168 0.8207407407 16.961130742 0.3321266067 600 0.6016749573 39 | 0.9761484099 0.9752650177 0.9091764706 0.7419962335 0.9424980960 0.7865853659 0.9685301740 0.8222222222 25.441696113 0.2240007397 900 0.5920131238 40 | 0.9779151943 0.9681978799 0.9322352941 0.7532956685 0.9531607007 0.7942073171 0.9592743428 0.8325925926 28.268551236 0.1744494830 1000 0.5965073538 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/85964cf17f520330ea56101aed9602e5/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/85964cf17f520330ea56101aed9602e5/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/85964cf17f520330ea56101aed9602e5/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/85964cf17f520330ea56101aed9602e5/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | 
Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/85964cf17f520330ea56101aed9602e5 18 | save_model_every_checkpoint: False 19 | seed: 969090155 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6484098940 0.6678445230 0.4936470588 0.5178907721 0.4025133283 0.4131097561 0.4653831914 0.4548148148 0.0000000000 1.7983697653 0 1.2256424427 37 | 1.0000000000 1.0000000000 0.8155294118 0.7853107345 0.8739527799 0.7728658537 0.7686042207 0.7525925926 8.4805653710 0.4000296687 300 0.8405228472 38 | 0.9982332155 0.9964664311 0.8061176471 0.7118644068 0.9185072353 0.7804878049 0.7782302851 0.7881481481 16.961130742 0.2543533290 600 0.6019054683 39 | 1.0000000000 1.0000000000 0.9162352941 0.7702448211 0.9265041889 0.7469512195 0.6730840429 0.6696296296 25.441696113 0.1691624259 900 0.5962682295 40 | 0.9991166078 0.9858657244 0.9284705882 0.7608286252 0.9158415842 0.7271341463 0.6571640133 0.6651851852 28.268551236 0.1234844617 1000 0.5267507792 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/86394db2b6c2ecd1e3b08e99e14759f2/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- 
/methods/TAST/domainbed/misc/test_sweep_data/86394db2b6c2ecd1e3b08e99e14759f2/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/86394db2b6c2ecd1e3b08e99e14759f2/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/86394db2b6c2ecd1e3b08e99e14759f2/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/86394db2b6c2ecd1e3b08e99e14759f2 18 | save_model_every_checkpoint: False 19 | seed: 664692933 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.2181978799 0.2190812721 0.0418823529 0.0640301318 0.3297791318 0.2942073171 0.1355053684 0.1688888889 0.0000000000 1.5366128683 0 1.3182864189 37 | 0.9973498233 1.0000000000 0.7072941176 0.6911487759 0.8137852247 0.7804878049 0.7649018882 0.7540740741 2.1201413428 0.4162907769 300 0.0860185695 38 | 0.9991166078 0.9964664311 0.6687058824 0.6421845574 0.8198781417 0.8094512195 0.8019252129 0.7629629630 
4.2402826855 0.2737542759 600 0.0876681225 39 | 1.0000000000 1.0000000000 0.6508235294 0.6308851224 0.8735719726 0.8246951220 0.7526841910 0.7422222222 6.3604240283 0.2153730621 900 0.0867732620 40 | 0.9938162544 1.0000000000 0.6221176471 0.6233521657 0.8069306931 0.7332317073 0.6319881525 0.6518518519 7.0671378092 0.2005969730 1000 0.1003058171 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/8cfbf830754065d02f9723c57abc992e/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/8cfbf830754065d02f9723c57abc992e/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/8cfbf830754065d02f9723c57abc992e/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/8cfbf830754065d02f9723c57abc992e/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/8cfbf830754065d02f9723c57abc992e 18 | save_model_every_checkpoint: False 19 | seed: 1878899245 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 
2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.7544169611 0.7349823322 0.4640000000 0.4990583804 0.4185072353 0.4344512195 0.4439096631 0.4459259259 0.0000000000 1.6586600542 0 0.8204424381 37 | 1.0000000000 1.0000000000 0.6381176471 0.6195856874 0.9021325209 0.7942073171 0.7460199926 0.7688888889 10.335689045 0.2694484687 300 0.2729239146 38 | 0.9991166078 0.9964664311 0.6084705882 0.5969868173 0.9405940594 0.7942073171 0.7141799334 0.7200000000 20.671378091 0.1227226931 600 0.2742725794 39 | 1.0000000000 1.0000000000 0.6475294118 0.6572504708 0.9630616908 0.8003048780 0.7671232877 0.7762962963 31.007067137 0.0694726440 900 0.2802266463 40 | 1.0000000000 0.9964664311 0.6244705882 0.6101694915 0.9813404417 0.8079268293 0.7778600518 0.7777777778 34.452296819 0.0363020070 1000 0.2752757215 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/90961e3a45300a2d4771fc090627166e/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/90961e3a45300a2d4771fc090627166e/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/90961e3a45300a2d4771fc090627166e/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/90961e3a45300a2d4771fc090627166e/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 
3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/90961e3a45300a2d4771fc090627166e 18 | save_model_every_checkpoint: False 19 | seed: 733096875 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6298586572 0.6431095406 0.4014117647 0.4369114878 0.4059405941 0.3932926829 0.4487226953 0.4429629630 0.0000000000 1.6435878277 0 1.4049792290 37 | 0.9991166078 1.0000000000 0.6625882353 0.6591337100 0.8899466870 0.7987804878 0.9244724176 0.8651851852 10.335689045 0.3126775041 300 0.4027417898 38 | 0.9973498233 0.9893992933 0.5948235294 0.5969868173 0.9173648134 0.7667682927 0.9581636431 0.8592592593 20.671378091 0.1523421495 600 0.4037892016 39 | 1.0000000000 0.9964664311 0.6536470588 0.6497175141 0.9657273420 0.7987804878 0.9759348389 0.8829629630 31.007067137 0.1036048375 900 0.4036473759 40 | 1.0000000000 0.9964664311 0.6578823529 0.6553672316 0.9706778370 0.7865853659 0.9748241392 0.8814814815 34.452296819 0.0652515952 1000 0.4080266762 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/9f1d308cb3d13c7358eefd027ba1de04/done: -------------------------------------------------------------------------------- 1 | done 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/9f1d308cb3d13c7358eefd027ba1de04/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/9f1d308cb3d13c7358eefd027ba1de04/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/9f1d308cb3d13c7358eefd027ba1de04/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/9f1d308cb3d13c7358eefd027ba1de04 18 | save_model_every_checkpoint: False 19 | seed: 1443892482 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [1] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6095406360 0.6219081272 0.4070588235 0.3992467043 0.3549124143 0.3414634146 0.3661606812 0.3792592593 0.0000000000 1.6126624346 0 1.0119540691 37 | 0.9982332155 0.9964664311 0.6141176471 0.5988700565 0.8309215537 0.7987804878 0.8507960015 0.7985185185 2.1201413428 0.4554009163 300 0.1058194629 38 | 0.9991166078 0.9929328622 
0.6310588235 0.6082862524 0.8518659558 0.8323170732 0.8933728249 0.8400000000 4.2402826855 0.2957518518 600 0.1057730643 39 | 1.0000000000 0.9929328622 0.5642352941 0.5630885122 0.8526275704 0.8094512195 0.8952239911 0.8444444444 6.3604240283 0.2582681263 900 0.1059892249 40 | 1.0000000000 0.9964664311 0.6197647059 0.6026365348 0.8659558264 0.8185975610 0.8918918919 0.8133333333 7.0671378092 0.2397152161 1000 0.1159045529 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bf09cd8e443d5445cc15b7503c14264d/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bf09cd8e443d5445cc15b7503c14264d/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/bf09cd8e443d5445cc15b7503c14264d/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bf09cd8e443d5445cc15b7503c14264d/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/bf09cd8e443d5445cc15b7503c14264d 18 | save_model_every_checkpoint: False 19 | seed: 267264279 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2, 3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | 
batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6333922261 0.6466431095 0.4720000000 0.5047080979 0.3990860625 0.4009146341 0.4450203628 0.4488888889 0.0000000000 1.5340688229 0 1.5293178558 37 | 0.9964664311 1.0000000000 0.7821176471 0.7363465160 0.5913937548 0.5838414634 0.6375416512 0.6281481481 8.4805653710 0.3256840587 300 0.4966490340 38 | 0.9982332155 0.9964664311 0.8837647059 0.7777777778 0.5194211729 0.4893292683 0.6904850056 0.6592592593 16.961130742 0.2106622866 600 0.5106681673 39 | 1.0000000000 0.9964664311 0.9416470588 0.7325800377 0.6127189642 0.5609756098 0.6638282118 0.6251851852 25.441696113 0.1415937501 900 0.5148218004 40 | 1.0000000000 0.9964664311 0.9374117647 0.7740112994 0.5529322163 0.5503048780 0.6704924102 0.6488888889 28.268551236 0.1061877130 1000 0.5078280520 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bfce2823ee1c49ab624fde5c5e2c1143/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bfce2823ee1c49ab624fde5c5e2c1143/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/bfce2823ee1c49ab624fde5c5e2c1143/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/bfce2823ee1c49ab624fde5c5e2c1143/out.txt: 
-------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/bfce2823ee1c49ab624fde5c5e2c1143 18 | save_model_every_checkpoint: False 19 | seed: 729020776 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6192579505 0.6254416961 0.4724705882 0.4670433145 0.3773800457 0.4146341463 0.4535357275 0.4074074074 0.0000000000 1.5559741259 0 1.1550295353 37 | 0.9991166078 0.9929328622 0.8305882353 0.7570621469 0.7395277989 0.7560975610 0.9166975194 0.8325925926 8.4805653710 0.3378844495 300 0.5076199762 38 | 1.0000000000 0.9823321555 0.8832941176 0.7532956685 0.6622239147 0.6905487805 0.9403924472 0.8237037037 16.961130742 0.2088435666 600 0.4945691800 39 | 1.0000000000 0.9964664311 0.9327058824 0.7419962335 0.7117288652 0.7317073171 0.9722325065 0.8340740741 25.441696113 0.1441033643 900 0.4829492307 40 | 0.9982332155 0.9964664311 0.9327058824 0.7608286252 0.6664127951 0.6570121951 0.9592743428 0.8014814815 28.268551236 0.1133815604 1000 0.5154592729 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/c62625063d3aee2f08e5c908e7677e83/done: 
-------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/c62625063d3aee2f08e5c908e7677e83/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/c62625063d3aee2f08e5c908e7677e83/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/c62625063d3aee2f08e5c908e7677e83/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/c62625063d3aee2f08e5c908e7677e83 18 | save_model_every_checkpoint: False 19 | seed: 99481980 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.5821554770 0.5971731449 0.4663529412 0.4613935970 0.3716679360 0.4146341463 0.4424287301 0.4014814815 0.0000000000 1.7584050894 0 1.1681911945 37 | 0.7579505300 0.7738515901 0.7632941176 0.7024482109 0.8632901752 0.7942073171 0.7023324695 0.7155555556 8.4805653710 
0.5978830648 300 0.4919224509 38 | 0.7279151943 0.6749116608 0.8592941176 0.7306967985 0.9116527037 0.8109756098 0.7045538689 0.6711111111 16.961130742 0.3988586284 600 0.4802287292 39 | 0.3683745583 0.3568904594 0.9143529412 0.7325800377 0.9592536177 0.8079268293 0.7067752684 0.7125925926 25.441696113 0.2422290696 900 0.4851771371 40 | 0.7393992933 0.7279151943 0.9298823529 0.7683615819 0.9306930693 0.7957317073 0.7467604591 0.7525925926 28.268551236 0.1837190475 1000 0.5015410733 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ca571be94ad9fdb0c2bece0061ff3f89/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ca571be94ad9fdb0c2bece0061ff3f89/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/ca571be94ad9fdb0c2bece0061ff3f89/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ca571be94ad9fdb0c2bece0061ff3f89/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/ca571be94ad9fdb0c2bece0061ff3f89 18 | save_model_every_checkpoint: False 19 | seed: 99890861 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2] 24 | 
trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6113074205 0.6289752650 0.4597647059 0.4896421846 0.3846153846 0.3871951220 0.4435394298 0.4459259259 0.0000000000 1.5248144865 0 1.3489863873 37 | 0.9982332155 1.0000000000 0.7924705882 0.7325800377 0.7300076161 0.7012195122 0.8959644576 0.8370370370 8.4805653710 0.3458396457 300 0.4882594180 38 | 1.0000000000 0.9964664311 0.8489411765 0.7419962335 0.6839299315 0.6798780488 0.9215105516 0.8325925926 16.961130742 0.2202832393 600 0.4904143016 39 | 0.9991166078 0.9964664311 0.8983529412 0.7777777778 0.6949733435 0.6646341463 0.9396519807 0.8281481481 25.441696113 0.1531876612 900 0.4965575910 40 | 0.9991166078 0.9964664311 0.9440000000 0.7890772128 0.7220106626 0.6981707317 0.9692706405 0.8325925926 28.268551236 0.1111858229 1000 0.5122962880 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/cf42c3176baf91b96bb7dd0ff3c686cc/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/cf42c3176baf91b96bb7dd0ff3c686cc/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/cf42c3176baf91b96bb7dd0ff3c686cc/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/cf42c3176baf91b96bb7dd0ff3c686cc/out.txt: 
-------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/cf42c3176baf91b96bb7dd0ff3c686cc 18 | save_model_every_checkpoint: False 19 | seed: 1726329315 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [3] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.1316254417 0.1236749117 0.3920000000 0.3672316384 0.2494287890 0.2423780488 0.1417993336 0.1407407407 0.0000000000 1.8617510796 0 1.3313741684 37 | 0.9982332155 1.0000000000 0.7567058824 0.7401129944 0.7947448591 0.7606707317 0.7319511292 0.7200000000 2.1201413428 0.5276519541 300 0.1504819067 38 | 0.9991166078 0.9929328622 0.7840000000 0.7382297552 0.8347296268 0.8033536585 0.7726767864 0.7674074074 4.2402826855 0.3625088304 600 0.1520125484 39 | 0.9982332155 0.9964664311 0.7924705882 0.7269303202 0.8244478294 0.7393292683 0.6823398741 0.6948148148 6.3604240283 0.3448445238 900 0.1523122589 40 | 1.0000000000 0.9964664311 0.8089411765 0.7702448211 0.8434881950 0.7743902439 0.7652721214 0.7614814815 7.0671378092 0.3251545057 1000 0.1789008522 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/d093618124c5748762707da1c6804d75/done: 
-------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/d093618124c5748762707da1c6804d75/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/d093618124c5748762707da1c6804d75/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/d093618124c5748762707da1c6804d75/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/d093618124c5748762707da1c6804d75 18 | save_model_every_checkpoint: False 19 | seed: 794352299 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [3] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.5768551237 0.6007067138 0.4447058824 0.4783427495 0.3591012947 0.3506097561 0.4116993706 0.4074074074 0.0000000000 1.8192925453 0 1.2970163822 37 | 0.9982332155 0.9964664311 0.8131764706 0.7645951036 0.8709063214 0.7881097561 0.7789707516 
0.7911111111 10.335689045 0.4202846115 300 0.6344150265 38 | 1.0000000000 1.0000000000 0.8696470588 0.7683615819 0.9017517136 0.7759146341 0.7900777490 0.7733333333 20.671378091 0.2483884268 600 0.6257179348 39 | 0.9982332155 0.9929328622 0.9176470588 0.7419962335 0.9569687738 0.7926829268 0.7367641614 0.7170370370 31.007067137 0.1585837312 900 0.6380308660 40 | 1.0000000000 0.9964664311 0.9162352941 0.7476459510 0.9760091394 0.7881097561 0.7726767864 0.7629629630 34.452296819 0.1284457469 1000 0.6444939446 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ea7d2d5149dd9167b364d433bb355be1/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ea7d2d5149dd9167b364d433bb355be1/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/ea7d2d5149dd9167b364d433bb355be1/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ea7d2d5149dd9167b364d433bb355be1/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 0 17 | output_dir: domainbed/misc/test_sweep_data/ea7d2d5149dd9167b364d433bb355be1 18 | save_model_every_checkpoint: False 19 | seed: 560039459 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | 
test_envs: [0, 1] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 32 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 5e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.0 34 | weight_decay: 0.0 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.5901060071 0.5971731449 0.3717647059 0.3804143126 0.3865194212 0.3810975610 0.4368752314 0.4414814815 0.0000000000 1.6423946619 0 1.4854812622 37 | 0.9885159011 0.9964664311 0.6032941176 0.5988700565 0.8735719726 0.7621951220 0.9078119215 0.8311111111 8.4805653710 0.4036260696 300 0.2337139837 38 | 0.9743816254 0.9752650177 0.6470588235 0.6478342750 0.9367859863 0.8094512195 0.9500185117 0.8592592593 16.961130742 0.2497328627 600 0.2362791340 39 | 0.9743816254 0.9858657244 0.6000000000 0.5932203390 0.9520182788 0.7881097561 0.9700111070 0.8355555556 25.441696113 0.1506629159 900 0.2351563136 40 | 0.9655477032 0.9717314488 0.6277647059 0.6327683616 0.9748667174 0.7865853659 0.9759348389 0.8370370370 28.268551236 0.1228756825 1000 0.2404113364 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ee8f05db2b9ae5a36273cc0d2161f8c0/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/ee8f05db2b9ae5a36273cc0d2161f8c0/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/ee8f05db2b9ae5a36273cc0d2161f8c0/err.txt -------------------------------------------------------------------------------- 
/methods/TAST/domainbed/misc/test_sweep_data/ee8f05db2b9ae5a36273cc0d2161f8c0/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/ee8f05db2b9ae5a36273cc0d2161f8c0 18 | save_model_every_checkpoint: False 19 | seed: 901962056 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [2] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 39 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.7028930742148706e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 0.00044832883881609976 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.6130742049 0.6325088339 0.4564705882 0.4896421846 0.3865194212 0.3826219512 0.4357645317 0.4222222222 0.0000000000 1.5491540432 0 1.1524951458 37 | 1.0000000000 1.0000000000 0.8183529412 0.7664783427 0.7117288652 0.6890243902 0.9059607553 0.8518518519 10.335689045 0.3856955582 300 0.6124396364 38 | 0.9991166078 1.0000000000 0.8541176471 0.7401129944 0.7520944402 0.7439024390 0.9489078119 0.8444444444 20.671378091 0.2231503439 600 0.6130792896 39 | 0.9982332155 0.9964664311 0.9261176471 0.7777777778 0.6984006093 0.6753048780 0.9603850426 0.8237037037 31.007067137 0.1381842596 900 0.6130368471 40 | 1.0000000000 1.0000000000 0.9449411765 0.7683615819 0.7349581112 0.7195121951 0.9766753054 0.8562962963 34.452296819 0.1258270860 1000 0.6275756717 41 | 
-------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/f61766414e6b0db40063d7bc4ecdaa2b/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/f61766414e6b0db40063d7bc4ecdaa2b/err.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/methods/TAST/domainbed/misc/test_sweep_data/f61766414e6b0db40063d7bc4ecdaa2b/err.txt -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/f61766414e6b0db40063d7bc4ecdaa2b/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.6 3 | PyTorch: 1.7.0 4 | Torchvision: 0.8.1 5 | CUDA: 9.2 6 | CUDNN: 7603 7 | NumPy: 1.19.4 8 | PIL: 8.1.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: /checkpoint/dlp/datasets_new 13 | dataset: VLCS 14 | holdout_fraction: 0.2 15 | hparams: None 16 | hparams_seed: 1 17 | output_dir: domainbed/misc/test_sweep_data/f61766414e6b0db40063d7bc4ecdaa2b 18 | save_model_every_checkpoint: False 19 | seed: 512619814 20 | skip_model_save: False 21 | steps: 1001 22 | task: domain_generalization 23 | test_envs: [0, 2] 24 | trial_seed: 1 25 | uda_holdout_fraction: 0 26 | HParams: 27 | batch_size: 8 28 | class_balanced: False 29 | data_augmentation: True 30 | lr: 2.2352558725944602e-05 31 | nonlinear_classifier: False 32 | resnet18: False 33 | resnet_dropout: 0.5 34 | weight_decay: 1.9967320578799288e-06 35 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 36 | 0.0662544170 0.0459363958 0.2922352941 0.2617702448 0.2859862909 
0.2530487805 0.1899296557 0.1940740741 0.0000000000 1.7476719618 0 1.3853642941 37 | 0.9779151943 0.9681978799 0.7562352941 0.7438794727 0.6637471439 0.7012195122 0.8293224732 0.7807407407 2.1201413428 0.7123324679 300 0.1359348575 38 | 0.9885159011 0.9752650177 0.7821176471 0.7777777778 0.7124904798 0.7088414634 0.8704183636 0.8148148148 4.2402826855 0.5137957147 600 0.1346128742 39 | 0.9637809187 0.9646643110 0.7891764706 0.7382297552 0.6774562072 0.6981707317 0.8685671973 0.8118518519 6.3604240283 0.4774057284 900 0.1330896823 40 | 0.9646643110 0.9505300353 0.7680000000 0.7363465160 0.7696115765 0.7987804878 0.8870788597 0.8370370370 7.0671378092 0.4129467555 1000 0.1624276757 41 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/misc/test_sweep_data/results.txt: -------------------------------------------------------------------------------- 1 | Total records: 200 2 | 3 | -------- Dataset: VLCS, model selection method: training-domain validation set 4 | Algorithm C L S V Avg 5 | ERM 98.0 +/- 0.2 64.2 +/- 0.8 74.1 +/- 0.4 77.1 +/- 0.2 78.3 6 | 7 | -------- Averages, model selection method: training-domain validation set 8 | Algorithm VLCS Avg 9 | ERM 78.3 +/- 0.0 78.3 10 | 11 | -------- Dataset: VLCS, model selection method: leave-one-domain-out cross-validation 12 | Algorithm C L S V Avg 13 | ERM 96.9 +/- 1.0 64.4 +/- 0.9 70.5 +/- 0.5 76.7 +/- 0.1 77.1 14 | 15 | -------- Averages, model selection method: leave-one-domain-out cross-validation 16 | Algorithm VLCS Avg 17 | ERM 77.1 +/- 0.1 77.1 18 | 19 | -------- Dataset: VLCS, model selection method: test-domain validation set (oracle) 20 | Algorithm C L S V Avg 21 | ERM 96.9 +/- 1.0 65.9 +/- 0.5 71.6 +/- 1.3 76.9 +/- 0.3 77.8 22 | 23 | -------- Averages, model selection method: test-domain validation set (oracle) 24 | Algorithm VLCS Avg 25 | ERM 77.8 +/- 0.3 77.8 26 | -------------------------------------------------------------------------------- 
import random
from contextlib import contextmanager

import torch
import torch.nn as nn


def deactivate_mixstyle(m):
    """``Module.apply`` hook: switch a MixStyle layer off."""
    if type(m) is MixStyle:
        m.set_activation_status(False)


def activate_mixstyle(m):
    """``Module.apply`` hook: switch a MixStyle layer on."""
    if type(m) is MixStyle:
        m.set_activation_status(True)


def random_mixstyle(m):
    """``Module.apply`` hook: make a MixStyle layer mix across the whole batch."""
    if type(m) is MixStyle:
        m.update_mix_method('random')


def crossdomain_mixstyle(m):
    """``Module.apply`` hook: make a MixStyle layer mix across batch halves."""
    if type(m) is MixStyle:
        m.update_mix_method('crossdomain')


@contextmanager
def run_without_mixstyle(model):
    """Temporarily disable every MixStyle layer inside *model*.

    Assumes MixStyle was initially activated; reactivation happens on exit
    even if the body raises.
    """
    try:
        model.apply(deactivate_mixstyle)
        yield
    finally:
        model.apply(activate_mixstyle)


@contextmanager
def run_with_mixstyle(model, mix=None):
    """Temporarily enable every MixStyle layer inside *model*.

    Assumes MixStyle was initially deactivated. If *mix* is 'random' or
    'crossdomain', the mixing method is switched first; layers are
    deactivated again on exit even if the body raises.
    """
    method_hook = {'random': random_mixstyle,
                   'crossdomain': crossdomain_mixstyle}.get(mix)
    if method_hook is not None:
        model.apply(method_hook)

    try:
        model.apply(activate_mixstyle)
        yield
    finally:
        model.apply(deactivate_mixstyle)


class MixStyle(nn.Module):
    """Feature-statistics mixing layer.

    Reference:
        Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
    """

    def __init__(self, p=0.5, alpha=0.1, eps=1e-6, mix='random'):
        """
        Args:
            p (float): probability of using MixStyle.
            alpha (float): parameter of the Beta distribution.
            eps (float): scaling parameter to avoid numerical issues.
            mix (str): how to mix ('random' or 'crossdomain').
        """
        super().__init__()
        self.p = p
        self.beta = torch.distributions.Beta(alpha, alpha)
        self.eps = eps
        self.alpha = alpha
        self.mix = mix
        self._activated = True

    def __repr__(self):
        return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})'

    def set_activation_status(self, status=True):
        # Toggled by the module-level apply-hooks above.
        self._activated = status

    def update_mix_method(self, mix='random'):
        self.mix = mix

    def _make_permutation(self, batch):
        """Return the batch permutation used to pick partner statistics."""
        if self.mix == 'random':
            # Partner each sample with a uniformly random other sample.
            return torch.randperm(batch)

        if self.mix == 'crossdomain':
            # Reverse the batch so the two halves face each other, then
            # shuffle within each half independently.
            reversed_idx = torch.arange(batch - 1, -1, -1)
            second_half, first_half = reversed_idx.chunk(2)
            second_half = second_half[torch.randperm(batch // 2)]
            first_half = first_half[torch.randperm(batch // 2)]
            return torch.cat([second_half, first_half], 0)

        raise NotImplementedError

    def forward(self, x):
        # Identity outside training, or when explicitly switched off.
        if not self.training or not self._activated:
            return x
        if random.random() > self.p:
            return x

        batch = x.size(0)

        # Per-sample, per-channel statistics over the spatial dimensions.
        mean = x.mean(dim=[2, 3], keepdim=True)
        var = x.var(dim=[2, 3], keepdim=True)
        std = (var + self.eps).sqrt()
        # Gradients do not flow through the style statistics.
        mean, std = mean.detach(), std.detach()
        normalized = (x - mean) / std

        lam = self.beta.sample((batch, 1, 1, 1)).to(x.device)

        partner = self._make_permutation(batch)
        mean_mix = mean * lam + mean[partner] * (1 - lam)
        std_mix = std * lam + std[partner] * (1 - lam)

        return normalized * std_mix + mean_mix
# Save some representative images from each dataset to disk.
import random
import torch
import argparse
from domainbed import hparams_registry
from domainbed import datasets
import imageio
import os
from tqdm import tqdm

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Domain generalization')
    parser.add_argument('--data_dir', type=str)    # root directory holding the datasets
    parser.add_argument('--output_dir', type=str)  # where the PNG files are written
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']

    for dataset_name in tqdm(datasets_to_save):
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        # Instantiate with every environment index so all envs are iterated below.
        dataset = datasets.get_dataset_class(dataset_name)(
            args.data_dir,
            list(range(datasets.num_environments(dataset_name))),
            hparams)
        for env_idx, env in enumerate(tqdm(dataset)):
            # Save 50 random examples per environment.
            for i in tqdm(range(50)):
                idx = random.choice(list(range(len(env))))
                x, y = env[idx]
                # Rejection-sample until the label is <= 10, limiting output
                # to a small comparable set of classes per dataset.
                while y > 10:
                    idx = random.choice(list(range(len(env))))
                    x, y = env[idx]
                # Two-channel tensors (e.g. ColoredMNIST) get a zero third
                # channel so they can be written as RGB.
                if x.shape[0] == 2:
                    x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
                # Negative values indicate ImageNet-style normalization; undo it
                # to recover the [0, 1] pixel range.
                if x.min() < 0:
                    mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
                    std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
                    x = (x * std) + mean
                assert(x.min() >= 0)
                assert(x.max() <= 1)
                x = (x * 255.99)  # scale [0, 1] -> [0, 255] without ever reaching 256
                x = x.numpy().astype('uint8').transpose(1,2,0)  # CHW -> HWC for imageio
                imageio.imwrite(
                    os.path.join(args.output_dir,
                        f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
                    x)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import torch

# Small synthetic datasets used to smoke-test algorithms without real data.
DEBUG_DATASETS = ['Debug28', 'Debug224']

def make_minibatches(dataset, batch_size):
    """Test helper to make a minibatches array like train.py

    Builds one (X, y) pair per environment by stacking the first
    ``batch_size`` examples of each env and moving them to the GPU.
    Returns a list of (inputs, labels) tensor tuples, one per environment.
    """
    minibatches = []
    for env in dataset:
        X = torch.stack([env[i][0] for i in range(batch_size)]).cuda()
        y = torch.stack([torch.as_tensor(env[i][1])
            for i in range(batch_size)]).cuda()
        minibatches.append((X, y))
    return minibatches
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import unittest
from domainbed.lib import misc

class TestMisc(unittest.TestCase):
    """Unit tests for domainbed.lib.misc helpers."""

    def test_make_weights_for_balanced_classes(self):
        """Weights sum to 1, agree within a class, and scale inversely with
        class frequency."""
        # Class 0 has three examples; classes 1, 2, 3 have one each.
        dataset = [('A', 0), ('B', 1), ('C', 0), ('D', 2), ('E', 3), ('F', 0)]
        result = misc.make_weights_for_balanced_classes(dataset)
        self.assertEqual(result.sum(), 1)
        # Examples 0 and 2 share class 0, so their weights are identical.
        self.assertEqual(result[0], result[2])
        # Classes 1 and 2 are both singletons, so their weights match.
        self.assertEqual(result[1], result[3])
        # Class 0 is 3x as frequent as class 1, so each member weighs 1/3 as much.
        self.assertEqual(3 * result[0], result[1])
self.assertEqual(fn(struct), (1, 2)) 46 | 47 | def test_unique(self): 48 | numbers = Q([1,2,1,3,2,1,3,1,2,3]) 49 | self.assertEqual(numbers.unique(), [1,2,3]) 50 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/test/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | 4 | -------------------------------------------------------------------------------- /methods/TAST/domainbed/test/scripts/test_collect_results.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | 3 | import argparse 4 | import itertools 5 | import json 6 | import os 7 | import subprocess 8 | import sys 9 | import time 10 | import unittest 11 | import uuid 12 | 13 | import torch 14 | 15 | from domainbed import datasets 16 | from domainbed import hparams_registry 17 | from domainbed import algorithms 18 | from domainbed import networks 19 | from domainbed.test import helpers 20 | from domainbed.scripts import collect_results 21 | 22 | from parameterized import parameterized 23 | import io 24 | import textwrap 25 | 26 | class TestCollectResults(unittest.TestCase): 27 | 28 | def test_format_mean(self): 29 | self.assertEqual( 30 | collect_results.format_mean([0.1, 0.2, 0.3], False)[2], 31 | '20.0 +/- 4.7') 32 | self.assertEqual( 33 | collect_results.format_mean([0.1, 0.2, 0.3], True)[2], 34 | '20.0 $\pm$ 4.7') 35 | 36 | def test_print_table_non_latex(self): 37 | temp_out = io.StringIO() 38 | sys.stdout = temp_out 39 | table = [['1', '2'], ['3', '4']] 40 | collect_results.print_table(table, 'Header text', ['R1', 'R2'], 41 | ['C1', 'C2'], colwidth=10, latex=False) 42 | sys.stdout = sys.__stdout__ 43 | self.assertEqual( 44 | temp_out.getvalue(), 45 | textwrap.dedent(""" 46 | -------- Header text 47 | C1 C2 48 | R1 
1 2 49 | R2 3 4 50 | """) 51 | ) 52 | 53 | def test_print_table_latex(self): 54 | temp_out = io.StringIO() 55 | sys.stdout = temp_out 56 | table = [['1', '2'], ['3', '4']] 57 | collect_results.print_table(table, 'Header text', ['R1', 'R2'], 58 | ['C1', 'C2'], colwidth=10, latex=True) 59 | sys.stdout = sys.__stdout__ 60 | self.assertEqual( 61 | temp_out.getvalue(), 62 | textwrap.dedent(r""" 63 | \begin{center} 64 | \adjustbox{max width=\textwidth}{% 65 | \begin{tabular}{lcc} 66 | \toprule 67 | \textbf{C1 & \textbf{C2 \\ 68 | \midrule 69 | R1 & 1 & 2 \\ 70 | R2 & 3 & 4 \\ 71 | \bottomrule 72 | \end{tabular}} 73 | \end{center} 74 | """) 75 | ) 76 | 77 | def test_get_grouped_records(self): 78 | pass # TODO 79 | 80 | def test_print_results_tables(self): 81 | pass # TODO 82 | 83 | def test_load_records(self): 84 | pass # TODO 85 | 86 | def test_end_to_end(self): 87 | """ 88 | Test that collect_results.py's output matches a manually-verified 89 | ground-truth when run on a given directory of test sweep data. 90 | 91 | If you make any changes to the output of collect_results.py, you'll need 92 | to update the ground-truth and manually verify that it's still 93 | correct. The command used to update the ground-truth is: 94 | 95 | python -m domainbed.scripts.collect_results --input_dir=domainbed/misc/test_sweep_data \ 96 | | tee domainbed/misc/test_sweep_results.txt 97 | 98 | Furthermore, if you make any changes to the data format, you'll also 99 | need to rerun the test sweep. 
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import json
import os
import subprocess
import unittest
import uuid

import torch

class TestTrain(unittest.TestCase):
    """End-to-end smoke test for domainbed.scripts.train."""

    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_end_to_end(self):
        """Test that train.py successfully completes one step"""
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        os.makedirs(output_dir, exist_ok=True)

        subprocess.run(f'python -m domainbed.scripts.train --dataset RotatedMNIST '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--steps=501', shell=True)

        with open(os.path.join(output_dir, 'results.jsonl')) as f:
            lines = [l[:-1] for l in f]
            last_epoch = json.loads(lines[-1])
        # 501 steps with logging every checkpoint means the final record is
        # step 500.
        self.assertEqual(last_epoch['step'], 500)
        # Conservative values; anything lower and something's likely wrong.
        # (Fixed: the original asserted env3_in_acc twice and so duplicated
        # one check instead of adding coverage.)
        self.assertGreater(last_epoch['env0_in_acc'], 0.80)
        self.assertGreater(last_epoch['env1_in_acc'], 0.95)
        self.assertGreater(last_epoch['env2_in_acc'], 0.95)
        self.assertGreater(last_epoch['env3_in_acc'], 0.95)

        with open(os.path.join(output_dir, 'out.txt')) as f:
            text = f.read()
        # The step counter should appear in the human-readable log too.
        self.assertTrue('500' in text)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import unittest
import itertools

from domainbed import hparams_registry
from domainbed import datasets
from domainbed import algorithms

from parameterized import parameterized

class TestHparamsRegistry(unittest.TestCase):
    """Tests for hparams_registry, run over every (algorithm, dataset) pair."""

    @parameterized.expand(itertools.product(algorithms.ALGORITHMS, datasets.DATASETS))
    def test_random_hparams_deterministic(self, algorithm_name, dataset_name):
        """Test that hparams_registry.random_hparams is deterministic"""
        # The same (algorithm, dataset, seed) triple must yield identical
        # hyperparameters on every call.
        a = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        b = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        self.assertEqual(a.keys(), b.keys())
        for key in a.keys():
            # Third argument is the failure message: names the offending key.
            self.assertEqual(a[key], b[key], key)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

"""Unit tests."""

import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid

import torch

from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers

from parameterized import parameterized


class TestAlgorithms(unittest.TestCase):
    """Smoke tests run over every (debug dataset, algorithm) pair."""

    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS, algorithms.ALGORITHMS))
    def test_init_update_predict(self, dataset_name, algorithm_name):
        """Test that a given algorithm inits, updates and predicts without raising
        errors."""
        batch_size = 8
        hparams = hparams_registry.default_hparams(algorithm_name, dataset_name)
        # Debug datasets take an empty data path and no test envs.
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        minibatches = helpers.make_minibatches(dataset, batch_size)
        algorithm_class = algorithms.get_algorithm_class(algorithm_name)
        algorithm = algorithm_class(dataset.input_shape, dataset.num_classes, len(dataset),
            hparams).cuda()
        # A few update steps: enough to surface shape/NaN errors cheaply.
        for _ in range(3):
            self.assertIsNotNone(algorithm.update(minibatches))
        algorithm.eval()
        # Predictions must be one score per class for each example.
        self.assertEqual(list(algorithm.predict(minibatches[0][0]).shape),
            [batch_size, dataset.num_classes])
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid

import torch

from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers

from parameterized import parameterized


class TestNetworks(unittest.TestCase):
    """Smoke tests for the network factory, run over each debug dataset."""

    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS))
    def test_featurizer(self, dataset_name):
        """Test that Featurizer() returns a module which can take a
        correctly-sized input and return a correctly-sized output."""
        batch_size = 8
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        # Debug datasets take an empty data path and no test envs.
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        input_ = helpers.make_minibatches(dataset, batch_size)[0][0]
        input_shape = dataset.input_shape
        algorithm = networks.Featurizer(input_shape, hparams).cuda()
        output = algorithm(input_)
        # Output must be a flat feature vector of the advertised width.
        self.assertEqual(list(output.shape), [batch_size, algorithm.n_outputs])
#!/bin/sh
# Fixed: shebang was '#!/bin.sh $6' (typo'd interpreter path plus stray
# argument), which fails when the script is executed directly.
#
# Dispatch DomainBed sweeps over the four benchmark datasets.
#
# $1 : mode (pretrain/sup/unsup)
# $2 : backbone name (resnet50, ViT-B16, etc)
# $3 : n_trials_from
# $4 : n_trials
# $5 : command launcher (local/multi_gpu)
#
#
# sh scripts/launch.sh pretrain resnet50 10 3 local
# sh scripts/launch.sh sup resnet50 10 3 local
# sh scripts/launch.sh unsup resnet50 10 3 local


# Positional parameters are quoted so an empty/missing argument produces the
# "Invalid option" branch instead of a '[' syntax error.
if [ "$1" = "unsup" ]; then
    echo "unsupervised adaptation"
    sh scripts/unsup.sh "$2" PACS "$3" "$4" "$5"
    sh scripts/unsup.sh "$2" VLCS "$3" "$4" "$5"
    sh scripts/unsup.sh "$2" OfficeHome "$3" "$4" "$5"
    sh scripts/unsup.sh "$2" TerraIncognita "$3" "$4" "$5"
elif [ "$1" = "sup" ]; then
    echo "supervised adaptation"
    sh scripts/sup.sh "$2" PACS "$3" "$4" "$5"
    sh scripts/sup.sh "$2" VLCS "$3" "$4" "$5"
    sh scripts/sup.sh "$2" OfficeHome "$3" "$4" "$5"
    sh scripts/sup.sh "$2" TerraIncognita "$3" "$4" "$5"
elif [ "$1" = "pretrain" ]; then
    echo "pretraining the ERM model"
    sh scripts/source.sh "$2" PACS "$3" "$4" "$5"
    sh scripts/source.sh "$2" VLCS "$3" "$4" "$5"
    sh scripts/source.sh "$2" OfficeHome "$3" "$4" "$5"
    sh scripts/source.sh "$2" TerraIncognita "$3" "$4" "$5"
else
    echo "Invalid option"
fi
unsupervised_adaptation\ 6 | --data_dir=./dataset \ 7 | --output_dir=./sweep/$1 \ 8 | --command_launcher multi_gpu\ 9 | --algorithms ERM\ 10 | --datasets $2\ 11 | --n_hparams 1\ 12 | --n_trials_from $3 \ 13 | --n_trials $4 \ 14 | --single_test_envs \ 15 | --hparams "{\"backbone\": \"$1\"}" \ 16 | --skip_confirmation 17 | -------------------------------------------------------------------------------- /methods/TAST/test_log/done: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/test_log/done_T3A: -------------------------------------------------------------------------------- 1 | done -------------------------------------------------------------------------------- /methods/TAST/test_log/out.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.4 3 | PyTorch: 1.8.1+cu102 4 | Torchvision: 0.9.1+cu102 5 | CUDA: 10.2 6 | CUDNN: 7605 7 | NumPy: 1.20.3 8 | PIL: 8.2.0 9 | Args: 10 | algorithm: ERM 11 | checkpoint_freq: None 12 | data_dir: ./dataset 13 | dataset: PACS 14 | holdout_fraction: 0.2 15 | hparams: {"backbone": "resnet50"} 16 | hparams_seed: 0 17 | output_dir: test_log 18 | save_model_every_checkpoint: False 19 | seed: 0 20 | skip_model_save: False 21 | steps: None 22 | task: domain_generalization 23 | test_envs: [0] 24 | trial_seed: 0 25 | uda_holdout_fraction: 0 26 | HParams: 27 | backbone: resnet50 28 | batch_size: 32 29 | class_balanced: False 30 | data_augmentation: True 31 | lr: 5e-05 32 | nonlinear_classifier: False 33 | resnet18: False 34 | resnet_dropout: 0.0 35 | weight_decay: 0.0 36 | env0_in_acc env0_out_acc env1_in_acc env1_out_acc env2_in_acc env2_out_acc env3_in_acc env3_out_acc epoch loss step step_time 37 | 0.1348383160 0.1613691932 0.2084221748 0.1880341880 0.1085329341 0.1077844311 0.2465012723 0.2560509554 0.0000000000 2.0145323277 0 0.5652303696 
38 | 0.8157413057 0.7946210269 0.9701492537 0.9401709402 0.9970059880 0.9760479042 0.9570610687 0.9312101911 7.1856287425 0.2431723745 300 0.3051173544 39 | 0.8084197682 0.7946210269 0.9925373134 0.9487179487 0.9955089820 0.9730538922 0.9850508906 0.9426751592 14.371257485 0.0595255844 600 0.3056812501 40 | 0.8639414277 0.8410757946 0.9962686567 0.9529914530 0.9985029940 0.9850299401 0.9888676845 0.9503184713 21.556886227 0.0358117026 900 0.3060787042 41 | 0.8596705308 0.8679706601 0.9920042644 0.9658119658 0.9992514970 0.9700598802 0.9898218830 0.9439490446 28.742514970 0.0245259488 1200 0.3063416107 42 | 0.8657718121 0.8606356968 0.9978678038 0.9572649573 0.9985029940 0.9670658683 0.9961832061 0.9528662420 35.928143712 0.0199237736 1500 0.3064441387 43 | 0.8029286150 0.8117359413 0.9845415778 0.9487179487 0.9977544910 0.9670658683 0.9856870229 0.9528662420 43.113772455 0.0186860652 1800 0.3063919266 44 | 0.7870652837 0.7921760391 0.9941364606 0.9423076923 0.9977544910 0.9580838323 0.9933206107 0.9515923567 50.299401197 0.0189342222 2100 0.3065292684 45 | 0.8090298963 0.8288508557 0.9946695096 0.9636752137 0.9985029940 0.9700598802 0.9971374046 0.9592356688 57.485029940 0.0168044657 2400 0.3063244462 46 | 0.8804148871 0.8997555012 0.9893390192 0.9444444444 0.9970059880 0.9640718563 0.9945928753 0.9439490446 64.670658682 0.0136970092 2700 0.3064418070 47 | 0.8700427090 0.8728606357 0.9941364606 0.9529914530 0.9977544910 0.9730538922 0.9898218830 0.9375796178 71.856287425 0.0146767327 3000 0.3062377556 48 | 0.8541793777 0.8361858191 0.9978678038 0.9636752137 1.0000000000 0.9790419162 0.9945928753 0.9528662420 79.041916167 0.0163570924 3300 0.3058830595 49 | 0.8602806589 0.8435207824 0.9989339019 0.9572649573 0.9985029940 0.9760479042 0.9974554707 0.9605095541 86.227544910 0.0103801637 3600 0.3064088925 50 | 0.8212324588 0.8092909535 0.9962686567 0.9401709402 0.9970059880 0.9670658683 0.9939567430 0.9426751592 93.413173652 0.0147323909 3900 0.3064320850 51 | 
0.8370957901 0.8410757946 0.9973347548 0.9551282051 0.9992514970 0.9670658683 0.9965012723 0.9515923567 100.59880239 0.0118218101 4200 0.3062431264 52 | 0.8206223307 0.8557457213 0.9989339019 0.9551282051 0.9985029940 0.9670658683 0.9971374046 0.9490445860 107.78443113 0.0083560429 4500 0.3066036995 53 | 0.8517388652 0.8557457213 0.9978678038 0.9508547009 1.0000000000 0.9730538922 0.9980916031 0.9554140127 114.97005988 0.0091357930 4800 0.3063246751 54 | 0.8090298963 0.8361858191 0.9989339019 0.9679487179 0.9977544910 0.9610778443 0.9968193384 0.9566878981 119.76047904 0.0102657288 5000 0.3059591365 55 | -------------------------------------------------------------------------------- /methods/TAST/test_log/out_T3A.txt: -------------------------------------------------------------------------------- 1 | Environment: 2 | Python: 3.7.4 3 | PyTorch: 1.8.1+cu102 4 | Torchvision: 0.9.1+cu102 5 | CUDA: 10.2 6 | CUDNN: 7605 7 | NumPy: 1.20.3 8 | PIL: 8.2.0 9 | Args: 10 | adapt_algorithm: T3A 11 | algorithm: ERM 12 | checkpoint_freq: None 13 | data_dir: ./dataset 14 | dataset: PACS 15 | holdout_fraction: 0.2 16 | hparams: {"backbone": "resnet50"} 17 | hparams_seed: 0 18 | input_dir: test_log 19 | output_dir: test_log 20 | save_model_every_checkpoint: False 21 | seed: 0 22 | skip_model_save: False 23 | steps: None 24 | task: domain_generalization 25 | test_envs: [0] 26 | trial_seed: 0 27 | uda_holdout_fraction: 0 28 | HParams: 29 | backbone: resnet50 30 | batch_size: 32 31 | class_balanced: False 32 | data_augmentation: True 33 | lr: 5e-05 34 | nonlinear_classifier: False 35 | resnet18: False 36 | resnet_dropout: 0.0 37 | weight_decay: 0.0 38 | Base model's results 39 | env0_in_acc env0_in_ent env0_out_acc env0_out_ent env1_in_acc env1_in_ent env1_out_acc env1_out_ent env2_in_acc env2_in_ent env2_out_acc env2_out_ent env3_in_acc env3_in_ent env3_out_acc env3_out_ent 40 | 0.8541793777 0.1449299917 0.8361858191 0.1328322474 0.9989339019 0.0056033220 0.9444444444 0.0438111851 
1.0000000000 0.0023905056 0.9760479042 0.0200748501 0.9939567430 0.0173334001 0.9579617834 0.0455281054 41 | 42 | After T3A 43 | env0_in_acc env0_in_ent env0_out_acc env0_out_ent env1_in_acc env1_in_ent env1_out_acc env1_out_ent env2_in_acc env2_in_ent env2_out_acc env2_out_ent env3_in_acc env3_in_ent env3_out_acc env3_out_ent filter_K 44 | 0.8615196078 0.0300979523 0.8463541667 0.0195607367 0.9983836207 0.0008239143 0.9665178571 0.0048839899 0.9992378049 0.0000014360 0.9687500000 0.0079756258 0.9926658163 0.0040930418 0.9531250000 0.0098382344 1 45 | 0.8639705882 0.0303275467 0.8515625000 0.0209141834 0.9978448276 0.0007431315 0.9620535714 0.0076167837 0.9992378049 0.0000021623 0.9687500000 0.0054733354 0.9942602041 0.0031892638 0.9518229167 0.0104223108 5 46 | 0.8676470588 0.0291756524 0.8515625000 0.0168399035 0.9978448276 0.0011644578 0.9620535714 0.0065066690 0.9992378049 0.0000008054 0.9687500000 0.0074157378 0.9933035714 0.0033154711 0.9505208333 0.0109221917 20 47 | 0.8762254902 0.0294459265 0.8437500000 0.0226431394 0.9983836207 0.0012714628 0.9575892857 0.0074657881 0.9992378049 0.0000007235 0.9718750000 0.0074016270 0.9942602041 0.0030418858 0.9505208333 0.0100194169 50 48 | 0.8768382353 0.0323306602 0.8437500000 0.0212254503 0.9989224138 0.0011713412 0.9575892857 0.0089458891 0.9992378049 0.0000009033 0.9718750000 0.0074016274 0.9955357143 0.0026035005 0.9505208333 0.0110478771 100 49 | 0.8713235294 0.0330403700 0.8489583333 0.0175633694 0.9989224138 0.0014108214 0.9642857143 0.0100299865 0.9992378049 0.0000015491 0.9718750000 0.0060503826 0.9875637755 0.0039479561 0.9466145833 0.0148385064 -1 50 | -------------------------------------------------------------------------------- /methods/TSD/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 SakurajimaMaiii 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and 
associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /methods/TSD/README.md: -------------------------------------------------------------------------------- 1 | # Feature Alignment and Uniformity for Test Time Adaptation 2 | __This repo is the official PyTorch implementation of Feature Alignment and Uniformity for Test Time Adaptation (CVPR 2023) by [Shuai Wang](https://scholar.google.com/citations?user=UbGMEyQAAAAJ&hl=en), [Daoan Zhang](https://dwan.ch/), [Zipei Yan](https://yanzipei.github.io/), [Jianguo Zhang](https://scholar.google.com/citations?user=ypSmZtIAAAAJ&hl=en), [Rui Li](https://scholar.google.com/citations?user=zTByNnsAAAAJ&hl=en&oi=ao).__ 3 | This paper can be found at [arXiv](https://arxiv.org/abs/2303.10902), [open access](https://openaccess.thecvf.com/content/CVPR2023/html/Wang_Feature_Alignment_and_Uniformity_for_Test_Time_Adaptation_CVPR_2023_paper.html) and [IEEEXplore](https://ieeexplore.ieee.org/document/10203978).
4 | This codebase is mainly based on [T3A](https://github.com/matsuolab/T3A) and [DeepDG](https://github.com/jindongwang/transferlearning/tree/master/code/DeepDG). 5 | ## 💻 Dependence 6 | We use `python==3.8.13`, other packages including: 7 | ``` 8 | torch==1.12.0 9 | torchvision==0.13.0 10 | numpy==1.22.3 11 | tqdm==4.65.0 12 | timm==0.6.12 13 | scikit-learn==1.2.2 14 | pillow==9.0.1 15 | ``` 16 | If you want to use efficientnet, please confirm `torchvision>=0.11.0`. 17 | ## Dataset 18 | Download datasets used in our paper from: 19 | [PACS](https://drive.google.com/uc?id=1JFr8f805nMUelQWWmfnJR3y4_SYoN5Pd) 20 | [OfficeHome](https://drive.google.com/uc?id=1uY0pj7oFsjMxRwaD3Sxy0jgel0fsYXLC) 21 | [VLCS](https://drive.google.com/uc?id=1skwblH1_okBwxWxmRsp9_qi15hyPpxg8) 22 | [DomainNet](http://ai.bu.edu/M3SDA/) 23 | Download them from the above links, and organize them as follows. 24 | ``` 25 | |-your_data_dir 26 | |-PACS 27 | |-art_painting 28 | |-cartoon 29 | |-photo 30 | |-sketch 31 | |-OfficeHome 32 | |-Art 33 | |-Clipart 34 | |-Product 35 | |-RealWorld 36 | |-VLCS 37 | |-Caltech101 38 | |-LabelMe 39 | |-SUN09 40 | |-VOC2007 41 | |-DomainNet 42 | |-clipart 43 | |-infograph 44 | |-painting 45 | |-quickdraw 46 | |-real 47 | |-sketch 48 | ``` 49 | ## Train source model 50 | Please use `train.py` to train the source model. For example: 51 | ``` 52 | cd code 53 | python train.py --dataset PACS --data_dir your_data_dir --opt_type Adam --lr 5e-5 --max_epoch 50 54 | ``` 55 | Change `--dataset PACS` for other datasets, such as `office-home`, `VLCS`, `DomainNet`. 56 | Set `--net` to use different backbones, such as `resnext50`, `ViT-B16`. 57 | ## Test time adaptation 58 | ``` 59 | python unsupervise_adapt.py --dataset PACS\ 60 | --data_dir your_data_dir\ 61 | --adapt_alg TSD\ 62 | --pretrain_dir your_pretrain_model_dir\ 63 | --lr 1e-4 64 | ``` 65 | Change `--adapt_alg TSD` to use different methods of test time adaptation, e.g. `T3A`, `SHOT-IM`, `Tent`. 
66 | `--pretrain_dir` denotes the path of source model, e.g. `./train_outputs/model.pkl`. 67 | Empirically, set `--lr` to 1e-4 or 1e-5 achieves good performance. 68 | You can also search it using _training domain validation set_. 69 | ## 📝 Citation 70 | If this repo is useful for your research, please consider citing our paper: 71 | ``` 72 | @inproceedings{wang2023feature, 73 | title={Feature alignment and uniformity for test time adaptation}, 74 | author={Wang, Shuai and Zhang, Daoan and Yan, Zipei and Zhang, Jianguo and Li, Rui}, 75 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 76 | pages={20050--20060}, 77 | year={2023} 78 | } 79 | ``` 80 | ## ✉️ Contact 81 | Please contact bit.ybws@gmail.com 82 | 83 | -------------------------------------------------------------------------------- /methods/TSD/code/alg/alg.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from alg.algs.ERM import ERM 3 | # from alg.algs.MMD import MMD 4 | # from alg.algs.CORAL import CORAL 5 | # from alg.algs.DANN import DANN 6 | # from alg.algs.RSC import RSC 7 | # from alg.algs.Mixup import Mixup 8 | # from alg.algs.MLDG import MLDG 9 | # from alg.algs.GroupDRO import GroupDRO 10 | # from alg.algs.ANDMask import ANDMask 11 | # from alg.algs.VREx import VREx 12 | from alg.algs.GDRNet import GDRNet 13 | 14 | ALGORITHMS = [ 15 | 'ERM', 16 | 'GDRNet' 17 | ] 18 | 19 | # ALGORITHMS = [ 20 | # 'ERM', 21 | # 'Mixup', 22 | # 'CORAL', 23 | # 'MMD', 24 | # 'DANN', 25 | # 'MLDG', 26 | # 'GroupDRO', 27 | # 'RSC', 28 | # 'ANDMask', 29 | # 'VREx', 30 | # 'GDRNet' 31 | # ] 32 | 33 | 34 | def get_algorithm_class(algorithm_name): 35 | if algorithm_name not in globals(): 36 | raise NotImplementedError( 37 | "Algorithm not found: {}".format(algorithm_name)) 38 | return globals()[algorithm_name] 39 | -------------------------------------------------------------------------------- 
# ---- methods/TSD/code/alg/algs/ANDMask.py ----
# coding=utf-8
import torch
import torch.nn.functional as F
from alg.algs.ERM import ERM
import torch.autograd as autograd


class ANDMask(ERM):
    """AND-Mask: keep only gradient components whose sign agrees across
    environments, then apply the masked, rescaled average gradient."""

    def __init__(self, args):
        super(ANDMask, self).__init__(args)
        # Agreement threshold: a component survives when the mean gradient
        # sign across environments has magnitude >= tau.
        self.tau = args.tau

    def update(self, minibatches, opt, sch):
        """One optimization step over a list of per-environment minibatches."""
        total_loss = 0
        # One list per parameter; each list collects that parameter's
        # per-environment gradients.
        param_gradients = [[] for _ in self.network.parameters()]
        all_x = torch.cat([data[0].cuda().float() for data in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        for i, data in enumerate(minibatches):
            x, y = data[0].cuda().float(), data[1].cuda().long()
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]

            env_loss = F.cross_entropy(logits, y)
            total_loss += env_loss

            # retain_graph: we take grads once per environment from the same
            # forward pass.
            env_grads = autograd.grad(
                env_loss, self.network.parameters(), retain_graph=True)
            for grads, env_grad in zip(param_gradients, env_grads):
                grads.append(env_grad)

        mean_loss = total_loss / len(minibatches)

        opt.zero_grad()
        self.mask_grads(self.tau, param_gradients, self.network.parameters())
        opt.step()
        if sch:
            sch.step()

        return {'total': mean_loss.item()}

    def mask_grads(self, tau, gradients, params):
        """Write masked, rescaled average gradients into each ``param.grad``.

        Args:
            tau: agreement threshold in [0, 1].
            gradients: per-parameter lists of per-environment gradients.
            params: iterable of the network's parameters (same order).
        """
        for param, grads in zip(params, gradients):
            grads = torch.stack(grads, dim=0)
            grad_signs = torch.sign(grads)
            # Fix: honor the ``tau`` argument instead of silently reading
            # ``self.tau`` (the caller passes self.tau, so behavior is equal).
            mask = torch.mean(grad_signs, dim=0).abs() >= tau
            mask = mask.to(torch.float32)
            avg_grad = torch.mean(grads, dim=0)

            # Rescale by the surviving fraction so the update magnitude is
            # comparable regardless of how much was masked out.
            mask_t = (mask.sum() / mask.numel())
            param.grad = mask * avg_grad
            param.grad *= (1. / (1e-10 + mask_t))

        return 0


# ---- methods/TSD/code/alg/algs/CORAL.py ----
# coding=utf-8
import torch
import torch.nn.functional as F
from alg.algs.ERM import ERM


class CORAL(ERM):
    """Deep CORAL: align first and second feature moments across domains."""

    def __init__(self, args):
        super(CORAL, self).__init__(args)
        self.args = args
        self.kernel_type = "mean_cov"

    def coral(self, x, y):
        """Squared distance between the means and covariances of two batches."""
        mean_x = x.mean(0, keepdim=True)
        mean_y = y.mean(0, keepdim=True)
        cent_x = x - mean_x
        cent_y = y - mean_y
        cova_x = (cent_x.t() @ cent_x) / (len(x) - 1)
        cova_y = (cent_y.t() @ cent_y) / (len(y) - 1)

        mean_diff = (mean_x - mean_y).pow(2).mean()
        cova_diff = (cova_x - cova_y).pow(2).mean()

        return mean_diff + cova_diff

    def update(self, minibatches, opt, sch):
        """Cross-entropy plus a pairwise CORAL penalty between all domains."""
        objective = 0
        penalty = 0
        nmb = len(minibatches)

        features = [self.featurizer(
            data[0].cuda().float()) for data in minibatches]
        classifs = [self.classifier(fi) for fi in features]
        targets = [data[1].cuda().long() for data in minibatches]

        for i in range(nmb):
            objective += F.cross_entropy(classifs[i], targets[i])
            for j in range(i + 1, nmb):
                penalty += self.coral(features[i], features[j])

        objective /= nmb
        if nmb > 1:
            # Average over the number of domain pairs.
            penalty /= (nmb * (nmb - 1) / 2)

        opt.zero_grad()
        (objective + (self.args.mmd_gamma * penalty)).backward()
        opt.step()
        if sch:
            sch.step()
        if torch.is_tensor(penalty):
            penalty = penalty.item()

        return {'class': objective.item(), 'coral': penalty,
                'total': (objective.item() + (self.args.mmd_gamma * penalty))}
# ---- methods/TSD/code/alg/algs/DANN.py ----
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F

from alg.modelopera import get_fea
from network import Adver_network, common_network
from alg.algs.base import Algorithm


class DANN(Algorithm):
    """Domain-Adversarial Neural Network.

    A discriminator learns to recognize each sample's source domain, while
    the featurizer — through a gradient-reversal layer — learns features
    that fool it.
    """

    def __init__(self, args):
        super(DANN, self).__init__(args)
        self.featurizer = get_fea(args)
        self.classifier = common_network.feat_classifier(
            args.num_classes, self.featurizer.in_features, args.classifier)
        # One discriminator output per training domain.
        self.discriminator = Adver_network.Discriminator(
            self.featurizer.in_features, args.dis_hidden,
            args.domain_num - len(args.test_envs))
        self.args = args

    def update(self, minibatches, opt, sch):
        """Jointly minimize class loss and (reversed) domain loss."""
        inputs = torch.cat([data[0].cuda().float() for data in minibatches])
        labels = torch.cat([data[1].cuda().long() for data in minibatches])
        feats = self.featurizer(inputs)

        # Gradient reversal: identity forward, gradient scaled by -alpha.
        reversed_feats = Adver_network.ReverseLayerF.apply(
            feats, self.args.alpha)
        domain_logits = self.discriminator(reversed_feats)
        domain_targets = torch.cat([
            torch.full((data[0].shape[0], ), i,
                       dtype=torch.int64, device='cuda')
            for i, data in enumerate(minibatches)
        ])
        disc_loss = F.cross_entropy(domain_logits, domain_targets)

        class_logits = self.classifier(feats)
        classifier_loss = F.cross_entropy(class_logits, labels)
        loss = classifier_loss + disc_loss

        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()
        return {'total': loss.item(), 'class': classifier_loss.item(),
                'dis': disc_loss.item()}

    def predict(self, x):
        return self.classifier(self.featurizer(x))
# ---- methods/TSD/code/alg/algs/ERM.py ----
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F

from alg.algs.base import Algorithm
from modeling import model_manager as models

import os


class ERM(Algorithm):
    """
    Empirical Risk Minimization (ERM): plain cross-entropy over the
    concatenation of all environment minibatches.
    """

    def __init__(self, args):
        super(ERM, self).__init__(args)
        # Backbone is hard-wired to ResNet-50 in this fork (the generic
        # get_fea(args) path is bypassed).
        self.featurizer = models.get_backbone_resnet50()
        self.classifier = models.get_classifier(self.featurizer.out_features())

        self.network = nn.Sequential(
            self.featurizer, self.classifier)

    def update(self, minibatches, opt, sch):
        """One optimization step; returns the scalar loss for logging."""
        all_x = torch.cat([data[0].cuda().float() for data in minibatches])
        all_y = torch.cat([data[1].cuda().long() for data in minibatches])
        loss = F.cross_entropy(self.predict(all_x), all_y)

        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()
        return {'class': loss.item()}

    def predict(self, x):
        return self.network(x)

    def renew_model(self, log_path):
        """Load pretrained featurizer/classifier weights found in ``log_path``."""
        net_path = os.path.join(log_path, 'best_model.pth')
        classifier_path = os.path.join(log_path, 'best_classifier.pth')
        print(net_path)
        print(classifier_path)
        self.featurizer.load_state_dict(torch.load(net_path))
        self.classifier.load_state_dict(torch.load(classifier_path))

# Removed: a fully commented-out duplicate of the ERM class (dead code).


# ---- methods/TSD/code/alg/algs/GDRNet.py ----
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F

from alg.algs.base import Algorithm
from modeling import model_manager as models


class ERM(Algorithm):
    """
    Empirical Risk Minimization (ERM).

    NOTE(review): this is a local copy of alg.algs.ERM.ERM (minus
    renew_model); it could import that class instead — verify no circular
    import before consolidating.
    """

    def __init__(self, args):
        super(ERM, self).__init__(args)
        self.featurizer = models.get_backbone_resnet50()
        self.classifier = models.get_classifier(self.featurizer.out_features())

        self.network = nn.Sequential(
            self.featurizer, self.classifier)

    def update(self, minibatches, opt, sch):
        all_x = torch.cat([data[0].cuda().float() for data in minibatches])
        all_y = torch.cat([data[1].cuda().long() for data in minibatches])
        loss = F.cross_entropy(self.predict(all_x), all_y)

        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()
        return {'class': loss.item()}

    def predict(self, x):
        return self.network(x)


class GDRNet(ERM):
    """GDRNet: currently identical to ERM (placeholder subclass)."""

    def __init__(self, args):
        super(GDRNet, self).__init__(args)
# ---- methods/TSD/code/alg/algs/GroupDRO.py ----
# coding=utf-8
import torch
import torch.nn.functional as F
from alg.algs.ERM import ERM


class GroupDRO(ERM):
    """
    Robust ERM minimizes the error at the worst minibatch
    Algorithm 1 from [https://arxiv.org/pdf/1911.08731.pdf]
    """

    def __init__(self, args):
        super(GroupDRO, self).__init__(args)
        # Per-environment weights; lazily initialized on the first update.
        self.register_buffer("q", torch.Tensor())
        self.args = args

    def update(self, minibatches, opt, sch):
        """One step of the exponentiated-gradient / worst-group update."""
        if not len(self.q):
            self.q = torch.ones(len(minibatches)).cuda()

        losses = torch.zeros(len(minibatches)).cuda()

        for m in range(len(minibatches)):
            x, y = minibatches[m][0].cuda().float(), minibatches[m][1].cuda().long()
            losses[m] = F.cross_entropy(self.predict(x), y)
            # Fix: .detach() instead of the deprecated .data — same value,
            # explicit about cutting the autograd graph.
            self.q[m] *= (self.args.groupdro_eta * losses[m].detach()).exp()

        self.q /= self.q.sum()

        # Weighted loss emphasizing the currently-worst environments.
        loss = torch.dot(losses, self.q)

        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()

        return {'group': loss.item()}


# ---- methods/TSD/code/alg/algs/MLDG.py ----
# coding=utf-8
import torch
import copy
import torch.nn.functional as F

from alg.opt import *
import torch.autograd as autograd
from datautil.util import random_pairs_of_minibatches_by_domainperm
from alg.algs.ERM import ERM


class MLDG(ERM):
    """Meta-Learning Domain Generalization (first-order approximation)."""

    def __init__(self, args):
        super(MLDG, self).__init__(args)
        self.args = args

    def update(self, minibatches, opt, sch):
        """
        For computational efficiency, we do not compute second derivatives.
        """
        num_mb = len(minibatches)
        objective = 0

        opt.zero_grad()
        # Ensure every parameter has a grad tensor we can accumulate into.
        for p in self.network.parameters():
            if p.grad is None:
                p.grad = torch.zeros_like(p)

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches_by_domainperm(minibatches):

            xi, yi, xj, yj = xi.cuda().float(), yi.cuda(
            ).long(), xj.cuda().float(), yj.cuda().long()
            # Inner (meta-train) step on a deep copy of the network.
            inner_net = copy.deepcopy(self.network)

            inner_opt = get_optimizer(inner_net, self.args, True)
            inner_sch = get_scheduler(inner_opt, self.args)

            inner_obj = F.cross_entropy(inner_net(xi), yi)

            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()
            if inner_sch:
                inner_sch.step()

            # Accumulate the inner gradients into the outer network.
            for p_tgt, p_src in zip(self.network.parameters(),
                                    inner_net.parameters()):
                if p_src.grad is not None:
                    p_tgt.grad.data.add_(p_src.grad.data / num_mb)

            objective += inner_obj.item()

            # Meta-test loss on the held-out pair member, first-order grads.
            loss_inner_j = F.cross_entropy(inner_net(xj), yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_net.parameters(),
                                         allow_unused=True)

            objective += (self.args.mldg_beta * loss_inner_j).item()

            for p, g_j in zip(self.network.parameters(), grad_inner_j):
                if g_j is not None:
                    p.grad.data.add_(
                        self.args.mldg_beta * g_j.data / num_mb)

        objective /= len(minibatches)

        opt.step()
        if sch:
            sch.step()
        return {'total': objective}


# ---- methods/TSD/code/alg/algs/MMD.py ----
# coding=utf-8
import torch
import torch.nn.functional as F

from alg.algs.ERM import ERM


class MMD(ERM):
    """ERM plus a pairwise multi-kernel MMD penalty between domains."""

    def __init__(self, args):
        super(MMD, self).__init__(args)
        self.args = args
        self.kernel_type = "gaussian"

    def my_cdist(self, x1, x2):
        """Pairwise squared Euclidean distances, clamped away from zero."""
        x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
        x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
        res = torch.addmm(x2_norm.transpose(-2, -1),
                          x1,
                          x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
        return res.clamp_min_(1e-30)

    # Fix: default changed from a mutable list to an equivalent tuple
    # (never mutated here, but a list default is a classic Python hazard).
    def gaussian_kernel(self, x, y, gamma=(0.001, 0.01, 0.1, 1, 10, 100,
                                           1000)):
        """Sum of Gaussian kernels over the bandwidths in ``gamma``."""
        D = self.my_cdist(x, y)
        K = torch.zeros_like(D)

        for g in gamma:
            K.add_(torch.exp(D.mul(-g)))

        return K

    def mmd(self, x, y):
        """Biased MMD^2 estimate between two feature batches."""
        Kxx = self.gaussian_kernel(x, x).mean()
        Kyy = self.gaussian_kernel(y, y).mean()
        Kxy = self.gaussian_kernel(x, y).mean()
        return Kxx + Kyy - 2 * Kxy

    def update(self, minibatches, opt, sch):
        """Cross-entropy plus the averaged pairwise MMD penalty."""
        objective = 0
        penalty = 0
        nmb = len(minibatches)

        features = [self.featurizer(
            data[0].cuda().float()) for data in minibatches]
        classifs = [self.classifier(fi) for fi in features]
        targets = [data[1].cuda().long() for data in minibatches]

        for i in range(nmb):
            objective += F.cross_entropy(classifs[i], targets[i])
            for j in range(i + 1, nmb):
                penalty += self.mmd(features[i], features[j])

        objective /= nmb
        if nmb > 1:
            # Average over the number of domain pairs.
            penalty /= (nmb * (nmb - 1) / 2)

        opt.zero_grad()
        (objective + (self.args.mmd_gamma * penalty)).backward()
        opt.step()
        if sch:
            sch.step()
        if torch.is_tensor(penalty):
            penalty = penalty.item()

        return {'class': objective.item(), 'mmd': penalty,
                'total': (objective.item() + (self.args.mmd_gamma * penalty))}
# ---- methods/TSD/code/alg/algs/Mixup.py ----
# coding=utf-8
import numpy as np
import torch.nn.functional as F

from datautil.util import random_pairs_of_minibatches
from alg.algs.ERM import ERM


class Mixup(ERM):
    """Inter-domain Mixup: train on convex combinations of samples drawn
    from pairs of different environments."""

    def __init__(self, args):
        super(Mixup, self).__init__(args)
        self.args = args

    def update(self, minibatches, opt, sch):
        total = 0

        for (xi, yi, di), (xj, yj, dj) in random_pairs_of_minibatches(self.args, minibatches):
            # Mixing coefficient drawn from Beta(alpha, alpha).
            lam = np.random.beta(self.args.mixupalpha, self.args.mixupalpha)

            mixed = (lam * xi + (1 - lam) * xj).cuda().float()
            preds = self.predict(mixed)

            # The loss is mixed with the same coefficient as the inputs.
            total += lam * F.cross_entropy(preds, yi.cuda().long())
            total += (1 - lam) * \
                F.cross_entropy(preds, yj.cuda().long())

        total /= len(minibatches)

        opt.zero_grad()
        total.backward()
        opt.step()
        if sch:
            sch.step()
        return {'class': total.item()}
# ---- methods/TSD/code/alg/algs/RSC.py ----
# coding=utf-8
import numpy as np
import torch
import torch.nn.functional as F
import torch.autograd as autograd

from alg.algs.ERM import ERM


class RSC(ERM):
    """Representation Self-Challenging: mute the most predictive feature
    activations and train on what remains."""

    def __init__(self, args):
        super(RSC, self).__init__(args)
        # Percentile thresholds derived from the configured drop factors.
        self.drop_f = (1 - args.rsc_f_drop_factor) * 100
        self.drop_b = (1 - args.rsc_b_drop_factor) * 100
        self.num_classes = args.num_classes

    def update(self, minibatches, opt, sch):
        all_x = torch.cat([data[0].cuda().float() for data in minibatches])
        all_y = torch.cat([data[1].cuda().long() for data in minibatches])
        all_o = torch.nn.functional.one_hot(all_y, self.num_classes)
        all_f = self.featurizer(all_x)
        all_p = self.classifier(all_f)

        # Equation (1): gradients of the true-class scores w.r.t. features.
        all_g = autograd.grad((all_p * all_o).sum(), all_f)[0]

        # Equation (2): per-sample top-gradient-percentile mask.
        percentiles = np.percentile(all_g.cpu(), self.drop_f, axis=1)
        percentiles = torch.Tensor(percentiles)
        percentiles = percentiles.unsqueeze(1).repeat(1, all_g.size(1))
        mask_f = all_g.lt(percentiles.cuda()).float()

        # Equation (3): mute the top-gradient-percentile activations.
        all_f_muted = all_f * mask_f

        # Equation (4): predictions from the muted features.
        all_p_muted = self.classifier(all_f_muted)

        # Section 3.3: keep the batch fraction whose confidence dropped most.
        all_s = F.softmax(all_p, dim=1)
        all_s_muted = F.softmax(all_p_muted, dim=1)
        changes = (all_s * all_o).sum(1) - (all_s_muted * all_o).sum(1)
        percentile = np.percentile(changes.detach().cpu(), self.drop_b)
        mask_b = changes.lt(percentile).float().view(-1, 1)
        mask = torch.logical_or(mask_f, mask_b).float()

        # Equations (3) and (4) again, this time muting over examples.
        all_p_muted_again = self.classifier(all_f * mask)

        # Equation (5): optimize on the challenged predictions.
        loss = F.cross_entropy(all_p_muted_again, all_y)
        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()

        return {'class': loss.item()}
# ---- methods/TSD/code/alg/algs/VREx.py ----
# coding=utf-8
import torch
import torch.nn.functional as F
from alg.algs.ERM import ERM


class VREx(ERM):
    """V-REx algorithm from http://arxiv.org/abs/2003.00688"""

    def __init__(self, args):
        super(VREx, self).__init__(args)
        # Counts updates so the penalty weight can be annealed in.
        self.register_buffer('update_count', torch.tensor([0]))
        self.args = args

    def update(self, minibatches, opt, sch):
        # Full penalty weight only after the annealing phase.
        if self.update_count >= self.args.anneal_iters:
            penalty_weight = self.args.lam
        else:
            penalty_weight = 1.0

        nll = 0.

        all_x = torch.cat([data[0].cuda().float() for data in minibatches])
        all_logits = self.network(all_x)
        offset = 0
        losses = torch.zeros(len(minibatches))

        for i, data in enumerate(minibatches):
            batch_logits = all_logits[offset:offset + data[0].shape[0]]
            offset += data[0].shape[0]
            nll = F.cross_entropy(batch_logits, data[1].cuda().long())
            losses[i] = nll

        mean = losses.mean()
        # Penalize the variance of the per-environment risks.
        penalty = ((losses - mean) ** 2).mean()
        loss = mean + penalty_weight * penalty

        opt.zero_grad()
        loss.backward()
        opt.step()
        if sch:
            sch.step()

        self.update_count += 1
        # NOTE(review): 'nll' is the last environment's loss at this point,
        # mirroring the reference implementation.
        return {'loss': loss.item(), 'nll': nll.item(),
                'penalty': penalty.item()}
# ---- methods/TSD/code/alg/algs/base.py ----
# coding=utf-8
import torch


class Algorithm(torch.nn.Module):
    """Abstract base class for all training algorithms.

    Subclasses implement ``update`` (one optimization step over a list of
    per-environment minibatches) and ``predict`` (a forward pass).
    """

    def __init__(self, args):
        super(Algorithm, self).__init__()

    def update(self, minibatches, opt, sch):
        raise NotImplementedError

    def predict(self, x):
        raise NotImplementedError


# ---- methods/TSD/code/alg/modelopera.py ----
# coding=utf-8
import torch
from network import img_network


def get_fea(args):
    """Build the backbone featurizer selected by ``args.dataset``/``args.net``.

    Raises:
        ValueError: when ``args.net`` matches no known backbone prefix.
            (The original fell through and raised an accidental NameError
            from returning an unbound local.)
    """
    if args.dataset == 'dg5':
        net = img_network.DTNBase()
    elif args.net.startswith('res'):
        net = img_network.ResBase(args.net)
    elif args.net.startswith('vgg'):
        net = img_network.VGGBase(args.net)
    elif args.net.startswith('ViT'):
        net = img_network.ViTBase(args.net)
    elif args.net.startswith('Eff'):
        net = img_network.EfficientBase(args.net)
    elif args.net.startswith('Mix'):
        net = img_network.MLPMixer(args.net)
    else:
        raise ValueError('Unsupported backbone: {}'.format(args.net))
    return net
# ---- methods/TSD/code/alg/modelopera.py (continued) ----
def accuracy(network, loader):
    """Fraction of correctly classified samples in ``loader`` (eval mode)."""
    correct = 0
    total = 0

    network.eval()
    with torch.no_grad():
        for data in loader:
            x = data[0].cuda().float()
            y = data[1].cuda().long()
            p = network.predict(x)

            if p.size(1) == 1:
                # Single-logit binary head: threshold at 0.
                correct += (p.gt(0).eq(y).float()).sum().item()
            else:
                correct += (p.argmax(1).eq(y).float()).sum().item()
            total += len(x)
    network.train()
    return correct / total


# ---- methods/TSD/code/alg/opt.py ----
# coding=utf-8
import torch


def get_params(alg, args, inner=False, alias=True):
    """Build per-module parameter groups with decayed learning rates.

    ``inner``/``alias`` select how ``alg`` is indexed: as a pair (inner loop
    of MLDG), by ``featurizer``/``classifier`` attributes, or positionally.
    """
    if args.schuse:
        if args.schusech == 'cos':
            initlr = args.lr
        else:
            # Scheduler supplies the absolute LR; groups start from 1.0.
            initlr = 1.0
    else:
        if inner:
            initlr = args.inner_lr
        else:
            initlr = args.lr
    if inner:
        params = [
            {'params': alg[0].parameters(), 'lr': args.lr_decay1 *
             initlr},
            {'params': alg[1].parameters(), 'lr': args.lr_decay2 *
             initlr}
        ]
    elif alias:
        params = [
            {'params': alg.featurizer.parameters(), 'lr': args.lr_decay1 * initlr},
            {'params': alg.classifier.parameters(), 'lr': args.lr_decay2 * initlr}
        ]
    else:
        params = [
            {'params': alg[0].parameters(), 'lr': args.lr_decay1 * initlr},
            {'params': alg[1].parameters(), 'lr': args.lr_decay2 * initlr}
        ]
    if ('DANN' in args.algorithm) or ('CDANN' in args.algorithm):
        params.append({'params': alg.discriminator.parameters(),
                       'lr': args.lr_decay2 * initlr})
    if ('CDANN' in args.algorithm):
        params.append({'params': alg.class_embeddings.parameters(),
                       'lr': args.lr_decay2 * initlr})
    return params


def get_optimizer(alg, args, inner=False, alias=True):
    """Create the optimizer configured by ``args.opt_type`` ('SGD' or 'Adam').

    Raises:
        ValueError: for an unknown ``opt_type`` (the original hit an
            UnboundLocalError instead).
    """
    params = get_params(alg, args, inner, alias)
    if args.opt_type == 'SGD':
        optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=True)
    elif args.opt_type == 'Adam':
        optimizer = torch.optim.Adam(params, lr=args.lr)
    else:
        raise ValueError('Unknown opt_type: {}'.format(args.opt_type))
    return optimizer
args.opt_type=='SGD': 45 | optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True) 46 | elif args.opt_type=='Adam': 47 | optimizer = torch.optim.Adam(params,lr=args.lr) 48 | return optimizer 49 | 50 | 51 | def get_scheduler(optimizer, args): 52 | if not args.schuse: 53 | return None 54 | if args.schusech == 'cos': 55 | scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( 56 | optimizer, args.max_epoch * args.steps_per_epoch) 57 | else: 58 | scheduler = torch.optim.lr_scheduler.LambdaLR( 59 | optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay)) 60 | return scheduler 61 | -------------------------------------------------------------------------------- /methods/TSD/code/datautil/getdataloader.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import sklearn.model_selection as ms 4 | from torch.utils.data import DataLoader 5 | 6 | import datautil.imgdata.util as imgutil 7 | from datautil.imgdata.imgdataload import ImageDataset 8 | from datautil.mydataloader import InfiniteDataLoader 9 | 10 | 11 | def get_img_dataloader(args): 12 | rate = 0.2 13 | trdatalist, tedatalist = [], [] 14 | 15 | names = args.img_dataset[args.dataset] 16 | args.domain_num = len(names) 17 | for i in range(len(names)): 18 | if i in args.test_envs: 19 | tedatalist.append(ImageDataset(args.dataset, args.task, args.data_dir, 20 | names[i], i, transform=imgutil.image_test(args.dataset), test_envs=args.test_envs)) 21 | else: 22 | tmpdatay = ImageDataset(args.dataset, args.task, args.data_dir, 23 | names[i], i, transform=imgutil.image_train(args.dataset), test_envs=args.test_envs).labels 24 | l = len(tmpdatay) 25 | if args.split_style == 'strat': 26 | lslist = np.arange(l) 27 | stsplit = ms.StratifiedShuffleSplit( 28 | 2, test_size=rate, train_size=1-rate, random_state=args.seed) 29 | stsplit.get_n_splits(lslist, tmpdatay) 30 | 
indextr, indexte = next(stsplit.split(lslist, tmpdatay)) 31 | else: 32 | indexall = np.arange(l) 33 | np.random.seed(args.seed) 34 | np.random.shuffle(indexall) 35 | ted = int(l*rate) 36 | indextr, indexte = indexall[:-ted], indexall[-ted:] 37 | 38 | trdatalist.append(ImageDataset(args.dataset, args.task, args.data_dir, 39 | names[i], i, transform=imgutil.image_train(args.dataset), indices=indextr, test_envs=args.test_envs)) 40 | tedatalist.append(ImageDataset(args.dataset, args.task, args.data_dir, 41 | names[i], i, transform=imgutil.image_test(args.dataset), indices=indexte, test_envs=args.test_envs)) 42 | 43 | train_loaders = [InfiniteDataLoader( 44 | dataset=env, 45 | weights=None, 46 | batch_size=args.batch_size, 47 | num_workers=args.N_WORKERS) 48 | for env in trdatalist] 49 | 50 | eval_loaders = [DataLoader( 51 | dataset=env, 52 | batch_size=64, 53 | num_workers=args.N_WORKERS, 54 | drop_last=False, 55 | shuffle=False) 56 | for env in trdatalist+tedatalist] 57 | 58 | return train_loaders, eval_loaders 59 | -------------------------------------------------------------------------------- /methods/TSD/code/datautil/imgdata/imgdataload.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import os 3 | import numpy as np 4 | from torch.utils.data import Dataset 5 | from torchvision.datasets import ImageFolder 6 | from torchvision.datasets.folder import default_loader 7 | 8 | from datautil.util import Nmax 9 | from datautil.imgdata.util import rgb_loader, l_loader 10 | 11 | class ImageDataset(object): 12 | def __init__(self, dataset, task, root_dir, domain_name, domain_label=-1, labels=None, transform=None, target_transform=None, indices=None, test_envs=[], mode='Default'): 13 | #self.imgs = ImageFolder(root_dir+domain_name).imgs 14 | self.imgs = ImageFolder(os.path.join(root_dir,domain_name)).imgs 15 | self.domain_num = 0 16 | self.task = task 17 | self.dataset = dataset 18 | imgs = [item[0] for item in 
self.imgs] 19 | labels = [item[1] for item in self.imgs] 20 | self.labels = np.array(labels) 21 | self.x = imgs 22 | self.transform = transform 23 | self.target_transform = target_transform 24 | if indices is None: 25 | self.indices = np.arange(len(imgs)) 26 | else: 27 | self.indices = indices 28 | if mode == 'Default': 29 | self.loader = default_loader 30 | elif mode == 'RGB': 31 | self.loader = rgb_loader 32 | elif mode == 'L': 33 | self.loader = l_loader 34 | self.dlabels = np.ones(self.labels.shape) * \ 35 | (domain_label-Nmax(test_envs, domain_label)) 36 | 37 | def set_labels(self, tlabels=None, label_type='domain_label'): 38 | assert len(tlabels) == len(self.x) 39 | if label_type == 'domain_label': 40 | self.dlabels = tlabels 41 | elif label_type == 'class_label': 42 | self.labels = tlabels 43 | 44 | def target_trans(self, y): 45 | if self.target_transform is not None: 46 | return self.target_transform(y) 47 | else: 48 | return y 49 | 50 | def input_trans(self, x): 51 | if self.transform is not None: 52 | return self.transform(x) 53 | else: 54 | return x 55 | 56 | def __getitem__(self, index): 57 | index = self.indices[index] 58 | img = self.input_trans(self.loader(self.x[index])) 59 | ctarget = self.target_trans(self.labels[index]) 60 | dtarget = self.target_trans(self.dlabels[index]) 61 | return img, ctarget, dtarget 62 | 63 | def __len__(self): 64 | return len(self.indices) 65 | -------------------------------------------------------------------------------- /methods/TSD/code/datautil/imgdata/util.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from torchvision import transforms 3 | from PIL import Image, ImageFile 4 | ImageFile.LOAD_TRUNCATED_IMAGES = True 5 | 6 | 7 | def image_train(dataset, resize_size=256, crop_size=224): 8 | if dataset == 'dg5': 9 | return transforms.Compose([ 10 | transforms.Resize((32, 32)), 11 | transforms.ToTensor(), 12 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 
0.5)) 13 | ]) 14 | 15 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 16 | std=[0.229, 0.224, 0.225]) 17 | 18 | return transforms.Compose([ 19 | transforms.RandomResizedCrop(224, scale=(0.7, 1.0)), 20 | transforms.RandomHorizontalFlip(), 21 | transforms.ColorJitter(0.3, 0.3, 0.3, 0.3), 22 | transforms.RandomGrayscale(), 23 | transforms.ToTensor(), 24 | normalize 25 | ]) 26 | 27 | 28 | def image_test(dataset, resize_size=256, crop_size=224): 29 | if dataset == 'dg5': 30 | return transforms.Compose([ 31 | transforms.Resize((32, 32)), 32 | transforms.ToTensor(), 33 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) 34 | ]) 35 | 36 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], 37 | std=[0.229, 0.224, 0.225]) 38 | return transforms.Compose([ 39 | transforms.Resize((224, 224)), 40 | transforms.ToTensor(), 41 | normalize 42 | ]) 43 | 44 | 45 | def rgb_loader(path): 46 | with open(path, 'rb') as f: 47 | with Image.open(f) as img: 48 | return img.convert('RGB') 49 | 50 | 51 | def l_loader(path): 52 | with open(path, 'rb') as f: 53 | with Image.open(f) as img: 54 | return img.convert('L') 55 | -------------------------------------------------------------------------------- /methods/TSD/code/datautil/mydataloader.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import torch 4 | 5 | 6 | class _InfiniteSampler(torch.utils.data.Sampler): 7 | """Wraps another Sampler to yield an infinite stream.""" 8 | 9 | def __init__(self, sampler): 10 | self.sampler = sampler 11 | 12 | def __iter__(self): 13 | while True: 14 | for batch in self.sampler: 15 | yield batch 16 | 17 | 18 | class InfiniteDataLoader: 19 | def __init__(self, dataset, weights, batch_size, num_workers): 20 | super().__init__() 21 | 22 | if weights: 23 | sampler = torch.utils.data.WeightedRandomSampler(weights, 24 | replacement=True, 25 | num_samples=batch_size) 26 | else: 27 | sampler = 
torch.utils.data.RandomSampler(dataset, 28 | replacement=True) 29 | 30 | if weights == None: 31 | weights = torch.ones(len(dataset)) 32 | 33 | batch_sampler = torch.utils.data.BatchSampler( 34 | sampler, 35 | batch_size=batch_size, 36 | drop_last=True) 37 | 38 | self._infinite_iterator = iter(torch.utils.data.DataLoader( 39 | dataset, 40 | num_workers=num_workers, 41 | batch_sampler=_InfiniteSampler(batch_sampler) 42 | )) 43 | 44 | def __iter__(self): 45 | while True: 46 | yield next(self._infinite_iterator) 47 | 48 | def __len__(self): 49 | raise ValueError 50 | -------------------------------------------------------------------------------- /methods/TSD/code/datautil/util.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def Nmax(test_envs, d): 7 | for i in range(len(test_envs)): 8 | if d < test_envs[i]: 9 | return i 10 | return len(test_envs) 11 | 12 | 13 | def random_pairs_of_minibatches_by_domainperm(minibatches): 14 | perm = torch.randperm(len(minibatches)).tolist() 15 | pairs = [] 16 | 17 | for i in range(len(minibatches)): 18 | j = i + 1 if i < (len(minibatches) - 1) else 0 19 | 20 | xi, yi = minibatches[perm[i]][0], minibatches[perm[i]][1] 21 | xj, yj = minibatches[perm[j]][0], minibatches[perm[j]][1] 22 | 23 | min_n = min(len(xi), len(xj)) 24 | 25 | pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n]))) 26 | 27 | return pairs 28 | 29 | 30 | def random_pairs_of_minibatches(args, minibatches): 31 | ld = len(minibatches) 32 | pairs = [] 33 | tdlist = np.arange(ld) 34 | txlist = np.arange(args.batch_size) 35 | for i in range(ld): 36 | for j in range(args.batch_size): 37 | (tdi, tdj), (txi, txj) = np.random.choice(tdlist, 2, 38 | replace=False), np.random.choice(txlist, 2, replace=True) 39 | if j == 0: 40 | xi, yi, di = torch.unsqueeze( 41 | minibatches[tdi][0][txi], dim=0), minibatches[tdi][1][txi], minibatches[tdi][2][txi] 42 | xj, yj, dj = 
torch.unsqueeze( 43 | minibatches[tdj][0][txj], dim=0), minibatches[tdj][1][txj], minibatches[tdj][2][txj] 44 | else: 45 | xi, yi, di = torch.vstack((xi, torch.unsqueeze(minibatches[tdi][0][txi], dim=0))), torch.hstack( 46 | (yi, minibatches[tdi][1][txi])), torch.hstack((di, minibatches[tdi][2][txi])) 47 | xj, yj, dj = torch.vstack((xj, torch.unsqueeze(minibatches[tdj][0][txj], dim=0))), torch.hstack( 48 | (yj, minibatches[tdj][1][txj])), torch.hstack((dj, minibatches[tdj][2][txj])) 49 | pairs.append(((xi, yi, di), (xj, yj, dj))) 50 | return pairs 51 | -------------------------------------------------------------------------------- /methods/TSD/code/modeling/mixstyle.py: -------------------------------------------------------------------------------- 1 | import random 2 | from contextlib import contextmanager 3 | import torch 4 | import torch.nn as nn 5 | 6 | 7 | def deactivate_mixstyle(m): 8 | if type(m) == MixStyle: 9 | m.set_activation_status(False) 10 | 11 | 12 | def activate_mixstyle(m): 13 | if type(m) == MixStyle: 14 | m.set_activation_status(True) 15 | 16 | 17 | def random_mixstyle(m): 18 | if type(m) == MixStyle: 19 | m.update_mix_method('random') 20 | 21 | 22 | def crossdomain_mixstyle(m): 23 | if type(m) == MixStyle: 24 | m.update_mix_method('crossdomain') 25 | 26 | 27 | @contextmanager 28 | def run_without_mixstyle(model): 29 | # Assume MixStyle was initially activated 30 | try: 31 | model.apply(deactivate_mixstyle) 32 | yield 33 | finally: 34 | model.apply(activate_mixstyle) 35 | 36 | 37 | @contextmanager 38 | def run_with_mixstyle(model, mix=None): 39 | # Assume MixStyle was initially deactivated 40 | if mix == 'random': 41 | model.apply(random_mixstyle) 42 | 43 | elif mix == 'crossdomain': 44 | model.apply(crossdomain_mixstyle) 45 | 46 | try: 47 | model.apply(activate_mixstyle) 48 | yield 49 | finally: 50 | model.apply(deactivate_mixstyle) 51 | 52 | 53 | class MixStyle(nn.Module): 54 | """MixStyle. 55 | 56 | Reference: 57 | Zhou et al. 
Domain Generalization with MixStyle. ICLR 2021. 58 | """ 59 | 60 | def __init__(self, p=0.5, alpha=0.1, eps=1e-6, mix='random'): 61 | """ 62 | Args: 63 | p (float): probability of using MixStyle. 64 | alpha (float): parameter of the Beta distribution. 65 | eps (float): scaling parameter to avoid numerical issues. 66 | mix (str): how to mix. 67 | """ 68 | super().__init__() 69 | self.p = p 70 | self.beta = torch.distributions.Beta(alpha, alpha) 71 | self.eps = eps 72 | self.alpha = alpha 73 | self.mix = mix 74 | self._activated = True 75 | 76 | def __repr__(self): 77 | return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})' 78 | 79 | def set_activation_status(self, status=True): 80 | self._activated = status 81 | 82 | def update_mix_method(self, mix='random'): 83 | self.mix = mix 84 | 85 | def forward(self, x): 86 | if not self.training or not self._activated: 87 | return x 88 | 89 | if random.random() > self.p: 90 | return x 91 | 92 | B = x.size(0) 93 | 94 | mu = x.mean(dim=[2, 3], keepdim=True) 95 | var = x.var(dim=[2, 3], keepdim=True) 96 | sig = (var + self.eps).sqrt() 97 | mu, sig = mu.detach(), sig.detach() 98 | x_normed = (x-mu) / sig 99 | 100 | lmda = self.beta.sample((B, 1, 1, 1)) 101 | lmda = lmda.to(x.device) 102 | 103 | if self.mix == 'random': 104 | # random shuffle 105 | perm = torch.randperm(B) 106 | 107 | elif self.mix == 'crossdomain': 108 | # split into two halves and swap the order 109 | perm = torch.arange(B - 1, -1, -1) # inverse index 110 | perm_b, perm_a = perm.chunk(2) 111 | perm_b = perm_b[torch.randperm(B // 2)] 112 | perm_a = perm_a[torch.randperm(B // 2)] 113 | perm = torch.cat([perm_b, perm_a], 0) 114 | 115 | else: 116 | raise NotImplementedError 117 | 118 | mu2, sig2 = mu[perm], sig[perm] 119 | mu_mix = mu*lmda + mu2 * (1-lmda) 120 | sig_mix = sig*lmda + sig2 * (1-lmda) 121 | 122 | return x_normed*sig_mix + mu_mix 123 | -------------------------------------------------------------------------------- 
/methods/TSD/code/network/Adver_network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Function 4 | 5 | 6 | class ReverseLayerF(Function): 7 | @staticmethod 8 | def forward(ctx, x, alpha): 9 | ctx.alpha = alpha 10 | return x.view_as(x) 11 | 12 | @staticmethod 13 | def backward(ctx, grad_output): 14 | output = grad_output.neg() * ctx.alpha 15 | return output, None 16 | 17 | 18 | class Discriminator(nn.Module): 19 | def __init__(self, input_dim=256, hidden_dim=256, num_domains=4): 20 | super(Discriminator, self).__init__() 21 | self.input_dim = input_dim 22 | self.hidden_dim = hidden_dim 23 | layers = [ 24 | nn.Linear(input_dim, hidden_dim), 25 | nn.BatchNorm1d(hidden_dim), 26 | nn.ReLU(), 27 | nn.Linear(hidden_dim, hidden_dim), 28 | nn.BatchNorm1d(hidden_dim), 29 | nn.ReLU(), 30 | nn.Linear(hidden_dim, num_domains), 31 | ] 32 | self.layers = torch.nn.Sequential(*layers) 33 | 34 | def forward(self, x): 35 | return self.layers(x) 36 | -------------------------------------------------------------------------------- /methods/TSD/code/network/common_network.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import torch.nn as nn 3 | from network.util import init_weights 4 | import torch.nn.utils.weight_norm as weightNorm 5 | 6 | 7 | class feat_bottleneck(nn.Module): 8 | def __init__(self, feature_dim, bottleneck_dim=256, type="ori"): 9 | super(feat_bottleneck, self).__init__() 10 | self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True) 11 | self.relu = nn.ReLU(inplace=True) 12 | self.dropout = nn.Dropout(p=0.5) 13 | self.bottleneck = nn.Linear(feature_dim, bottleneck_dim) 14 | # self.bottleneck.apply(init_weights) 15 | self.type = type 16 | 17 | def forward(self, x): 18 | x = self.bottleneck(x) 19 | if self.type == "bn": 20 | x = self.bn(x) 21 | return x 22 | 23 | 24 | class feat_classifier(nn.Module): 25 
| def __init__(self, class_num, bottleneck_dim=256, type="linear"): 26 | super(feat_classifier, self).__init__() 27 | self.type = type 28 | if type == 'wn': 29 | self.fc = weightNorm( 30 | nn.Linear(bottleneck_dim, class_num), name="weight") 31 | # self.fc.apply(init_weights) 32 | else: 33 | self.fc = nn.Linear(bottleneck_dim, class_num) 34 | # self.fc.apply(init_weights) 35 | 36 | def forward(self, x): 37 | x = self.fc(x) 38 | return x 39 | 40 | 41 | class feat_classifier_two(nn.Module): 42 | def __init__(self, class_num, input_dim, bottleneck_dim=256): 43 | super(feat_classifier_two, self).__init__() 44 | self.type = type 45 | self.fc0 = nn.Linear(input_dim, bottleneck_dim) 46 | # self.fc0.apply(init_weights) 47 | self.fc1 = nn.Linear(bottleneck_dim, class_num) 48 | # self.fc1.apply(init_weights) 49 | 50 | def forward(self, x): 51 | x = self.fc0(x) 52 | x = self.fc1(x) 53 | return x 54 | 55 | 56 | class Identity(nn.Module): 57 | """An identity layer""" 58 | def __init__(self): 59 | super(Identity, self).__init__() 60 | 61 | def forward(self, x): 62 | return x 63 | -------------------------------------------------------------------------------- /methods/TSD/code/network/util.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0): 7 | return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low) 8 | 9 | 10 | def init_weights(m): 11 | classname = m.__class__.__name__ 12 | if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1: 13 | nn.init.kaiming_uniform_(m.weight) 14 | nn.init.zeros_(m.bias) 15 | elif classname.find('BatchNorm') != -1: 16 | nn.init.normal_(m.weight, 1.0, 0.02) 17 | nn.init.zeros_(m.bias) 18 | elif classname.find('Linear') != -1: 19 | nn.init.xavier_normal_(m.weight) 20 | nn.init.zeros_(m.bias) 21 | 
-------------------------------------------------------------------------------- /run/table_1_target_DG_DRTiD.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python ../retigen/main_retigen.py seed=2024 port=20001 memo='target' project='RetiGen_20240324' data.data_root='../GDRBench_Data/' data.workers=8 optim.lr=2e-4 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, MFIDDR_1view]' data.target_domains='[DRTiD_2views]' model_src.algorithm=GDRNet dg_mode=DG output='../trained_models/Result_2view/DG_DRTiD_2views/GDRNet' -------------------------------------------------------------------------------- /run/table_1_target_DG_MFIDDR.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=GDRNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/GDRNet' name='GDRNet' 3 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=GREEN dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/GREEN' name='GREEN' 4 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=ERM dg_mode=DG 
output='../trained_models/Result_4view/DG_MFIDDR_4viewss/ERM' name='ERM' 5 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=MixStyleNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/MixStyleNet' name='MixStyleNet' 6 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=Fishr dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/Fishr' name='Fishr' 7 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=MixupNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/MixupNet' name='MixupNet' 8 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=CABNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/CABNet' name='CABNet' 9 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240407' data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' 
data.target_domains='[MFIDDR_4views]' model_src.algorithm=GDRNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/GDRNet' name='GDRNet' 10 | 11 | -------------------------------------------------------------------------------- /run/table_1_target_DG_MFIDDR_quarter.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=GDRNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/GDRNet' name='GDRNet' 3 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=GREEN dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/GREEN' name='GREEN' 4 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=ERM dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/ERM' name='ERM' 5 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=MixStyleNet dg_mode=DG 
output='../trained_models/Result_4view/DG_MFIDDR_4viewss/MixStyleNet' name='MixStyleNet' 6 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=Fishr dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/Fishr' name='Fishr' 7 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=MixupNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/MixupNet' name='MixupNet' 8 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_GDRNet' curve=True data.data_root='../datasets/' data.workers=8 learn.alpha=0.1 learn.eta=1 optim.lr=0.001 data.source_domains='[APTOS, IDRID, DEEPDR, RLDR, DRTiD_1view]' data.target_domains='[MFIDDR_4viewss]' model_src.algorithm=CABNet dg_mode=DG output='../trained_models/Result_4view/DG_MFIDDR_4viewss/CABNet' name='CABNet' 9 | 10 | -------------------------------------------------------------------------------- /run/table_1_target_DG_MFIDDR_quarter_patient_level.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/run/table_1_target_DG_MFIDDR_quarter_patient_level.sh -------------------------------------------------------------------------------- /run/table_2_tta_compare_TENT_Adacontrast_ours.sh: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/run/table_2_tta_compare_TENT_Adacontrast_ours.sh -------------------------------------------------------------------------------- /run/table_3_ablation_study.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/run/table_3_ablation_study.sh -------------------------------------------------------------------------------- /run/table_4.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RViMLab/RetiGen/d3a01b0464a8604eab90f5ac2db66f76f68090e7/run/table_4.sh -------------------------------------------------------------------------------- /run/table_s1_target_ESDG_DRTiD.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240324' data.data_root='../GDRBench_Data/' data.workers=8 optim.lr=2e-4 data.source_domains='[APTOS]' data.target_domains='[DRTiD_2views]' model_src.algorithm=GDRNet dg_mode=DG output='/media/raid/gongyu/projects/MVDRG/Result_4view/DG_MFIDDR_4viewss/GDRNet' -------------------------------------------------------------------------------- /run/table_s1_target_ESDG_MFIDDR.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python ../retigen/main_retigen.py seed=2024 port=20000 memo='target' project='RetiGen_20240324' name='GDRNet_APTOS' data.data_root='../GDRBench_Data/' data.workers=8 optim.lr=2e-4 data.source_domains='[APTOS]' data.target_domains='[MFIDDR_4views]' model_src.algorithm=GDRNet dg_mode=DG output='/media/raid/gongyu/projects/MVDRG/Result_202403/APTOS/GDRNet' 
--------------------------------------------------------------------------------