├── .cirrus.yml ├── .github └── workflows │ ├── automatic_rebase.yml │ └── pre-commit.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── AUTHORS.rst ├── LICENSE ├── Pipfile.lock ├── Poster_Slide ├── CVPR-2021 │ ├── ProSelfLC_Poster.pdf │ ├── ProSelfLC_Poster.pptx │ └── ProSelfLC_Slide.pdf └── Talks │ ├── 2022-05-17-XW-Loughborough.pdf │ ├── 2022-08-12-XW-SUSTECH-Poster.pdf │ ├── 2022-08-12-XW-SUSTECH.pdf │ └── 2022-12-23-ShanghaiDianJiUniversity.pdf ├── README.md ├── Reviews ├── CVPR2021 │ ├── Rebuttal.pdf │ ├── final_reviews-Meta.pdf │ ├── final_reviews.pdf │ └── initial_reviews.pdf └── NeurIPS2020 │ ├── CMT_Review.pdf │ └── ProSelfLC_personal_response.pdf ├── dataset ├── CASP12_HHblits.csv ├── CB513_HHblits.csv ├── TS115_HHblits.csv ├── Train_HHblits.csv ├── Validation_HHblits.csv ├── deeploc_eda_reports │ ├── MS-with-unknown_train.pdf │ ├── MS-with-unknown_valid or test: the same dataset.pdf │ ├── MS_train.pdf │ ├── MS_valid or test: the same dataset.pdf │ ├── deeploc_prottransMS-with-unknown_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf │ ├── deeploc_prottransMS-with-unknown_classnum_2_train.pdf │ ├── deeploc_prottransMS_classnum_2_maxseqlen_1022_ratio_960_1087_val.pdf │ ├── deeploc_prottransMS_classnum_2_maxseqlen_1534_ratio_1046_1087_val.pdf │ ├── deeploc_prottransMS_classnum_2_maxseqlen_2046_ratio_1073_1087_val.pdf │ ├── deeploc_prottransMS_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf │ ├── deeploc_prottransMS_classnum_2_maxseqlen_510_ratio_644_1087_val.pdf │ └── deeploc_prottransMS_classnum_2_train.pdf ├── deeploc_per_protein_test.csv ├── deeploc_per_protein_train.csv └── eda_reports │ ├── deeploc_prottransMS_classnum_2_train.pdf │ └── deeploc_prottransMS_classnum_2_val.pdf ├── demos_jupyter_notebooks ├── bert_deeploc │ ├── dataset │ │ └── deeploc_eda_reports │ │ │ └── MS-with-unknown_train.pdf │ ├── trainer_2MSwithunknown_bert_proselflc.ipynb │ └── trainer_2MSwithunknown_bert_proselflc.pdf └── convnets_cifar100 │ ├── trainer_cifar100_resnet18_proselflc.ipynb │ ├── trainer_cifar100_shufflenetv2_labelsmoothing.ipynb │ └── trainer_cifar100_shufflenetv2_proselflc.ipynb ├── docs ├── Makefile ├── _static │ └── .gitignore ├── authors.rst ├── changelog.rst ├── conf.py ├── index.rst └── license.rst ├── experiments_records ├── cifar100_symmetric_noise_rate_0.4 │ ├── resnet18 │ │ ├── 002_crossentropy_warm0_20220402-223020 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 008_confidencepenalty_warm0_20220328-130116 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 008_labelcorrection_warm0_20220328-125437 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 008_labelsmoothing_warm0_20220328-125731 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── 
normalised_entropy.pdf │ │ │ └── params.csv │ │ └── 138_proselflc_warm0_20220402-173610 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ └── shufflenetv2 │ │ ├── 003_crossentropy_warm0_20220403-011907 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 012_confidencepenalty_warm0_20220328-184933 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 012_labelcorrection_warm0_20220328-184721 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 012_labelsmoothing_warm0_20220328-184409 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ └── 502_proselflc_warm0_20220606-150113 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv ├── clothing1m_withbs_symmetric_noise_rate_0.0 │ └── resnet50_tv │ │ └── 790_proselflc_warm0.18_20220618-070235 │ │ ├── .~lock.accuracy_loss_normalised_entropy_max_p_metadata.xlsx# │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv ├── deeploc_prottrans_symmetric_noise_rate_0.0 │ └── Rostlab_prot_bert_bfd_seq │ │ ├── MS-with-unknown │ │ ├── 001_labelsmoothing_warm0_20220501-145915 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 002_labelsmoothing_warm0_20220501-145917 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 005_proselflc_warm0_20220430-142316 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ ├── 009_confidencepenalty_warm0_20220501-145913 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── 
max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ └── 057_labelcorrection_warm0_20220619-032243 │ │ │ ├── accuracy.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ │ ├── loss.pdf │ │ │ ├── max_p.pdf │ │ │ ├── metadata.pdf │ │ │ ├── normalised_entropy.pdf │ │ │ └── params.csv │ │ └── MS │ │ ├── 010_proselflc_warm0_20220430-215151 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 021_labelsmoothing_warm0_20220501-195700 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 022_labelsmoothing_warm0_20220501-222438 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ ├── 029_confidencepenalty_warm0_20220501-210635 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv │ │ └── 039_labelcorrection_warm0_20220501-233934 │ │ ├── accuracy.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ │ ├── loss.pdf │ │ ├── max_p.pdf │ │ ├── metadata.pdf │ │ ├── normalised_entropy.pdf │ │ └── params.csv └── food101n_symmetric_noise_rate_0.0 │ └── resnet50_tv │ └── 014_proselflc_warm2_20220515-005356 │ ├── accuracy.pdf │ ├── accuracy_loss_normalised_entropy_max_p_metadata.pdf │ ├── accuracy_loss_normalised_entropy_max_p_metadata.xlsx │ ├── loss.pdf │ ├── max_p.pdf │ ├── metadata.pdf │ ├── normalised_entropy.pdf │ └── params.csv ├── pyproject.toml ├── script_tools └── html2pdf.ipynb ├── setup.cfg ├── setup.py ├── src ├── __init__.py └── proselflc │ ├── __init__.py │ ├── exceptions.py │ ├── optim │ └── sgd_multistep.py │ ├── slicegetter │ ├── __init__.py │ ├── get_dataloader.py │ ├── get_lossfunction.py │ └── get_network.py │ ├── slices │ ├── __init__.py │ ├── datain │ │ ├── __init__.py │ │ ├── dataloaders │ │ │ ├── cifar100dataloader.py │ │ │ ├── clothing1mdataloader.py │ │ │ ├── deeplocdataloaders.py │ │ │ └── food101n.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── cifar100dataset.py │ │ │ ├── clothing1m.py │ │ │ ├── clothing1m_getlabels.py │ │ │ ├── deeplocdatasets.py │ │ │ ├── food101n.py │ │ │ └── food101n_utils.py │ │ ├── transforms │ │ │ ├── __init__.py │ │ │ ├── cifar100transforms.py │ │ │ └── clothing1mtransforms.py │ │ └── utils.py │ ├── losses │ │ ├── __init__.py │ │ ├── confidencepenalty.py │ │ ├── crossentropy.py │ │ ├── labelcorrection.py │ │ ├── lablesmoothing.py │ │ ├── mean_absolute_error.py │ │ └── proselflc.py │ └── networks │ │ ├── PreResNet.py │ │ ├── __init__.py │ │ ├── mobilenetv2.py │ │ ├── nasnet.py │ │ ├── resnet.py │ │ ├── resnet50_tv.py │ │ ├── resnet_plc.py │ │ ├── senet.py │ │ ├── shufflenet.py │ │ ├── shufflenetv2.py │ │ ├── stochasticdepth.py │ │ ├── transformers │ │ ├── prot_bert_bfd_seqlevel.py 
│ │ └── prot_bert_bfd_tokenlevel.py │ │ ├── vit_modelconfig │ │ ├── configs.py │ │ ├── modeling.py │ │ └── modeling_resnet.py │ │ └── vit_models.py │ └── trainer │ ├── __init__.py │ ├── trainer_cnn_vision_derivedgrad.py │ ├── trainer_cnn_vision_derivedgrad_adaptedfordeeploc.py │ └── utils.py └── tests ├── convnets_cifar100 ├── trainer_calibration_vision_cifar100_covnets_otherlosses.py └── trainer_calibration_vision_cifar100_covnets_proselflc.py ├── protbertbfd_deeploc ├── MS-with-unknown │ ├── test_trainer_2MSwithunknown_others.py │ └── test_trainer_2MSwithunknown_proselflc.py └── MS │ ├── test_trainer_2MS_others.py │ └── test_trainer_2MS_proselflc.py ├── resnet50_clothing1M └── c18976 │ └── test_trainer_cnn_vision_proselflc_covnets_bs18976_b32_.py └── resnet50_food101n └── test_trainer_resnet50_proselflc.py /.cirrus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Default values to be merged into tasks: 3 | auto_cancellation: false 4 | env: 5 | PATH: ${HOME}/.local/bin:${PATH} 6 | # ^ add user paths 7 | COVERALLS_REPO_TOKEN: ENCRYPTED[] 8 | # ^ ADD YOUR ENCRYPTED TOKEN HERE: https://cirrus-ci.org/guide/writing-tasks/#encrypted-variables 9 | PIP_CACHE: ${HOME}/.cache/pip 10 | LC_ALL: C.UTF-8 11 | LANG: C.UTF-8 12 | 13 | # This template is used in all tasks 14 | .regular_task_template: ®ULAR_TASK_TEMPLATE 15 | tox_install_script: 16 | - python -m pip install --upgrade pip setuptools tox 17 | clean_workspace_script: 18 | # Avoid information carried from one run to the other 19 | - rm -rf .coverage junit-*.xml .tox 20 | test_script: &test 21 | # This script is also used in Windows, so the shell is not POSIX 22 | - python -m tox -- -rfEx --durations 10 --color yes 23 | # ^ tox is better if invoked as a module on Windows/OSX 24 | 25 | 26 | # Task definitions: 27 | linux_mac_task: 28 | # Use custom cloning since otherwise git tags are missing 29 | clone_script: &clone | 30 | if [ -z "$CIRRUS_PR" ]; then 31 | git clone --recursive --branch=$CIRRUS_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR 32 | git reset --hard $CIRRUS_CHANGE_IN_REPO 33 | else 34 | git clone --recursive https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR 35 | git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR 36 | git reset --hard $CIRRUS_CHANGE_IN_REPO 37 | fi 38 | matrix: 39 | - name: test (Linux - 3.6) 40 | container: {image: "python:3.6-buster"} 41 | pip_cache: &pip-cache 42 | folder: $PIP_CACHE 43 | install_script: &debian-install 44 | - apt-get install -y git 45 | - name: test (Linux - 3.7) 46 | container: {image: "python:3.7-buster"} 47 | pip_cache: *pip-cache 48 | install_script: *debian-install 49 | - name: test (Linux - 3.8) 50 | container: {image: "python:3.8-buster"} 51 | pip_cache: *pip-cache 52 | install_script: *debian-install 53 | - name: test (Linux - Anaconda) 54 | container: {image: "continuumio/anaconda3:2019.03"} 55 | pip_cache: *pip-cache 56 | install_script: *debian-install 57 | - name: test (OS X) 58 | osx_instance: {image: "catalina-xcode"} 59 | env: 60 | PYTHON_VERSION: 3.7 61 | # ^ update when the default version of python in homebrew changes 62 | PATH: "${HOME}/.local/bin:${HOME}/Library/Python/${PYTHON_VERSION}/bin:/usr/local/opt/python/libexec/bin:${PATH}" 63 | # ^ add user and homebrew paths 64 | PIP_CACHE: "${HOME}/Library/Caches/pip" 65 | brew_cache: 66 | folder: "$HOME/Library/Caches/Homebrew" 67 | pip_cache: 
*pip-cache 68 | install_script: 69 | - brew install python gnu-tar 70 | - brew cleanup 71 | <<: *REGULAR_TASK_TEMPLATE 72 | 73 | 74 | # If you find some problems with long paths on Windows, 75 | # please check the .cirrus.yml file of PyScaffold itself for some workarounds. 76 | windows_task: 77 | name: test (Windows) 78 | tools_cache: 79 | folder: 'C:\tools' 80 | fingerprint_script: 81 | - ps: echo "$env:CIRRUS_OS - nuget v5.6.0 - git v2.27.0" 82 | populate_script: 83 | - ps: (mkdir 'C:\tools') 84 | # ^ use parentheses to suppress errors 85 | - ps: Invoke-WebRequest -OutFile 'C:\tools\nuget.exe' 'https://dist.nuget.org/win-x86-commandline/v5.6.0/nuget.exe' 86 | - ps: nuget install GitForWindows -Version 2.27.0 -NonInteractive -OutputDirectory 'C:\tools' 87 | clone_script: 88 | # Use custom cloning since otherwise git tags are missing 89 | CMD.exe /C ECHO ON & 90 | IF NOT DEFINED CIRRUS_PR ( 91 | git clone --recursive --branch=%CIRRUS_BRANCH% https://x-access-token:%CIRRUS_REPO_CLONE_TOKEN%@github.com/%CIRRUS_REPO_FULL_NAME%.git %CIRRUS_WORKING_DIR% & 92 | git reset --hard %CIRRUS_CHANGE_IN_REPO% 93 | ) ELSE ( 94 | git clone --recursive https://x-access-token:%CIRRUS_REPO_CLONE_TOKEN%@github.com/%CIRRUS_REPO_FULL_NAME%.git %CIRRUS_WORKING_DIR% & 95 | git fetch origin pull/%CIRRUS_PR%/head:pull/%CIRRUS_PR% & 96 | git reset --hard %CIRRUS_CHANGE_IN_REPO% 97 | ) 98 | windows_container: 99 | image: "python:3.8-windowsservercore" 100 | os_version: 2019 101 | env: 102 | # Single quotes are used bellow to escape Windows backslash and % (YAML restrictions). 103 | PYTHON_HOME: 'C:\Python' 104 | PYTHON_APPDATA: '%APPDATA%\Python\Python38' 105 | # ^ update when python version changes 106 | GIT_HOME: 'C:\tools\GitForWindows.2.27.0\tools' 107 | # ^ update when git version changes 108 | HOME: '%USERPROFILE%' 109 | USERNAME: ContainerAdministrator 110 | # ^ ensure USERNAME is set in Windows, so the getpass module doesn't raise exceptions 111 | PATH: '%HOME%\.local\bin\;%PYTHON_APPDATA%\Scripts\;%PYTHON_HOME%\;%PYTHON_HOME%\Scripts\;C:\tools\;%GIT_HOME%\cmd\;%PATH%' 112 | # ^ add user paths (if POSIX tools are needed you can try also adding %GIT_HOME\usr\bin\) 113 | PIP_CACHE: '%LocalAppData%\pip\Cache' 114 | PIP_TRUSTED_HOST: 'pypi.org pypi.python.org files.pythonhosted.org' 115 | PIP_CONFIG_FILE: '%AppData%\pip\pip.ini' 116 | pip_cache: 117 | folder: '%PIP_CACHE%' 118 | install_script: 119 | - python -m ensurepip 120 | - python -m pip install --upgrade --user pip setuptools certifi tox 121 | clean_workspace_script: 122 | # Avoid information carried from one run to the other 123 | # CMD is not capable of globbing, so we have to use PowerShell 124 | - ps: (rm -Recurse -Force -ErrorAction SilentlyContinue .tox,junit-*.xml) 125 | test_script: *test 126 | 127 | 128 | coverage_task: 129 | name: coverage (Linux) 130 | clone_script: *clone 131 | container: {image: "python:3.6-buster"} 132 | pip_cache: *pip-cache 133 | depends_on: 134 | - test (Linux - 3.6) 135 | - test (Linux - 3.7) 136 | - test (Linux - 3.8) 137 | - test (Linux - Anaconda) 138 | - test (OS X) 139 | - test (Windows) 140 | install_script: *debian-install 141 | pip_install_script: 142 | pip install --user --upgrade coverage coveralls pre-commit 143 | precommit_script: 144 | - pre-commit install 145 | - pre-commit run --all-files 146 | <<: *REGULAR_TASK_TEMPLATE 147 | coverage_script: 148 | - coveralls 149 | -------------------------------------------------------------------------------- /.github/workflows/automatic_rebase.yml: 
-------------------------------------------------------------------------------- 1 | name: Automatic Rebase 2 | on: 3 | issue_comment: 4 | types: [created] 5 | jobs: 6 | rebase: 7 | name: Rebase 8 | runs-on: ubuntu-latest 9 | if: >- 10 | github.event.issue.pull_request != '' && 11 | ( 12 | contains(github.event.comment.body, '/rebase') 13 | ) 14 | steps: 15 | - name: Checkout the latest code 16 | uses: actions/checkout@v3 17 | with: 18 | token: ${{ secrets.GITHUB_TOKEN }} 19 | fetch-depth: 0 # otherwise, you will fail to push refs to dest repo 20 | - name: Automatic Rebase 21 | uses: cirrus-actions/rebase@1.7 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: pre-commit 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [main] 7 | 8 | jobs: 9 | pre-commit: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | - uses: actions/setup-python@v3 14 | - uses: pre-commit/action@v3.0.0 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary and binary files 2 | *~ 3 | *.py[cod] 4 | *.so 5 | *.cfg 6 | !.isort.cfg 7 | !setup.cfg 8 | *.orig 9 | *.log 10 | *.pot 11 | __pycache__/* 12 | .cache/* 13 | .*.swp 14 | */.ipynb_checkpoints/* 15 | .DS_Store 16 | *_glog 17 | 18 | # Project files 19 | .ropeproject 20 | .project 21 | .pydevproject 22 | .settings 23 | .idea 24 | .vscode 25 | tags 26 | pyproject.toml 27 | 28 | # Package files 29 | *.egg 30 | *.eggs/ 31 | .installed.cfg 32 | *.egg-info 33 | 34 | # Unittest and coverage 35 | htmlcov/* 36 | .coverage 37 | .tox 38 | junit.xml 39 | coverage.xml 40 | .pytest_cache/ 41 | .coverage* 42 | 43 | # Build and docs folder/files 44 | build/* 45 | dist/* 46 | sdist/* 47 | docs/api/* 48 | docs/_rst/* 49 | docs/_build/* 50 | cover/* 51 | MANIFEST 52 | .*.pyc 53 | *.pyc 54 | 55 | # Per-project virtualenvs 56 | .venv*/ 57 | .python-version 58 | Pipfile 59 | Pipfile.lock 60 | 61 | # data files 62 | /datasets/* 63 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile = black 3 | extra_standard_library = setuptools,pkg_resources 4 | known_test = pytest 5 | known_first_party = proselflc 6 | sections = FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^docs/conf.py' 2 | 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.3.0 6 | hooks: 7 | - id: trailing-whitespace 8 | # - id: check-added-large-files 9 | - id: check-ast 10 | - id: check-json 11 | - id: check-merge-conflict 12 | - id: check-xml 13 | - id: check-yaml 14 | - id: debug-statements 15 | - id: end-of-file-fixer 16 | - id: requirements-txt-fixer 17 | - id: mixed-line-ending 18 | args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows 19 | 20 | ## If you want to avoid flake8 errors due to unused vars or imports: 21 | # - repo: https://github.com/myint/autoflake.git 22 | # rev: v1.4 23 | # hooks: 24 | # - id: 
autoflake 25 | # args: [ 26 | # --in-place, 27 | # --remove-all-unused-imports, 28 | # --remove-unused-variables, 29 | # ] 30 | 31 | - repo: https://github.com/timothycrosley/isort 32 | rev: 5.10.1 33 | hooks: 34 | - id: isort 35 | 36 | - repo: https://github.com/psf/black 37 | rev: 22.10.0 38 | hooks: 39 | - id: black 40 | language_version: python3 41 | 42 | ## If like to embrace black styles even in the docs: 43 | # - repo: https://github.com/asottile/blacken-docs 44 | # rev: v1.8.0 45 | # hooks: 46 | # - id: blacken-docs 47 | # additional_dependencies: [black] 48 | 49 | - repo: https://gitlab.com/pycqa/flake8 50 | rev: 3.9.2 51 | hooks: 52 | - id: flake8 53 | args: ['--max-line-length=88', '--ignore=E731'] 54 | ## You can add flake8 plugins via `additional_dependencies`: 55 | # additional_dependencies: [flake8-bugbear] 56 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributors 3 | ============ 4 | 5 | * Xinshao Wang 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2022 Xinshao Wang 2 | 3 | Permission is hereby granted to any person obtaining a copy of this software and associated 4 | documentation files (the "Software") for research and educational use, provided that the above 5 | copyright notice and this permission notice shall be included in all copies or substantial 6 | portions of the software. 7 | 8 | To use this software for commercial purposes, please contact Xinshao Wang (xinshaowang@gmail.com). 9 | 10 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING 11 | BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 12 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 13 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 14 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "949e6274ff1dbf02dad28333a096b55d95f0f60c7fc18cfd2e632de982947082" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.8" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "proselflc": { 20 | "editable": true, 21 | "path": "." 
22 | }, 23 | "pyscaffold": { 24 | "hashes": [ 25 | "sha256:1c3a2b76e60319b6ffc2a8b54e240382109c6241576bf0a47ea476c7194f6a69", 26 | "sha256:24c334bd3550ff4caefdc0e962213c3184a5c7ba5f3bb3bb1c994da4fdbaa529" 27 | ], 28 | "markers": "python_version >= '3.4'", 29 | "version": "==3.3.1" 30 | } 31 | }, 32 | "develop": {} 33 | } 34 | -------------------------------------------------------------------------------- /Poster_Slide/CVPR-2021/ProSelfLC_Poster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/CVPR-2021/ProSelfLC_Poster.pdf -------------------------------------------------------------------------------- /Poster_Slide/CVPR-2021/ProSelfLC_Poster.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/CVPR-2021/ProSelfLC_Poster.pptx -------------------------------------------------------------------------------- /Poster_Slide/CVPR-2021/ProSelfLC_Slide.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/CVPR-2021/ProSelfLC_Slide.pdf -------------------------------------------------------------------------------- /Poster_Slide/Talks/2022-05-17-XW-Loughborough.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/Talks/2022-05-17-XW-Loughborough.pdf -------------------------------------------------------------------------------- /Poster_Slide/Talks/2022-08-12-XW-SUSTECH-Poster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/Talks/2022-08-12-XW-SUSTECH-Poster.pdf -------------------------------------------------------------------------------- /Poster_Slide/Talks/2022-08-12-XW-SUSTECH.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/Talks/2022-08-12-XW-SUSTECH.pdf -------------------------------------------------------------------------------- /Poster_Slide/Talks/2022-12-23-ShanghaiDianJiUniversity.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Poster_Slide/Talks/2022-12-23-ShanghaiDianJiUniversity.pdf -------------------------------------------------------------------------------- /Reviews/CVPR2021/Rebuttal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/CVPR2021/Rebuttal.pdf -------------------------------------------------------------------------------- /Reviews/CVPR2021/final_reviews-Meta.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/CVPR2021/final_reviews-Meta.pdf -------------------------------------------------------------------------------- /Reviews/CVPR2021/final_reviews.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/CVPR2021/final_reviews.pdf -------------------------------------------------------------------------------- /Reviews/CVPR2021/initial_reviews.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/CVPR2021/initial_reviews.pdf -------------------------------------------------------------------------------- /Reviews/NeurIPS2020/CMT_Review.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/NeurIPS2020/CMT_Review.pdf -------------------------------------------------------------------------------- /Reviews/NeurIPS2020/ProSelfLC_personal_response.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/Reviews/NeurIPS2020/ProSelfLC_personal_response.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/MS-with-unknown_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/MS-with-unknown_train.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/MS-with-unknown_valid or test: the same dataset.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/MS-with-unknown_valid or test: the same dataset.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/MS_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/MS_train.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/MS_valid or test: the same dataset.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/MS_valid or test: the same dataset.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS-with-unknown_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS-with-unknown_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS-with-unknown_classnum_2_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS-with-unknown_classnum_2_train.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_1022_ratio_960_1087_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_1022_ratio_960_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_1534_ratio_1046_1087_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_1534_ratio_1046_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_2046_ratio_1073_1087_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_2046_ratio_1073_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_434_ratio_539_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_510_ratio_644_1087_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_maxseqlen_510_ratio_644_1087_val.pdf -------------------------------------------------------------------------------- /dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/deeploc_eda_reports/deeploc_prottransMS_classnum_2_train.pdf -------------------------------------------------------------------------------- 
/dataset/eda_reports/deeploc_prottransMS_classnum_2_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/eda_reports/deeploc_prottransMS_classnum_2_train.pdf -------------------------------------------------------------------------------- /dataset/eda_reports/deeploc_prottransMS_classnum_2_val.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/dataset/eda_reports/deeploc_prottransMS_classnum_2_val.pdf -------------------------------------------------------------------------------- /demos_jupyter_notebooks/bert_deeploc/dataset/deeploc_eda_reports/MS-with-unknown_train.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/demos_jupyter_notebooks/bert_deeploc/dataset/deeploc_eda_reports/MS-with-unknown_train.pdf -------------------------------------------------------------------------------- /demos_jupyter_notebooks/bert_deeploc/trainer_2MSwithunknown_bert_proselflc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/demos_jupyter_notebooks/bert_deeploc/trainer_2MSwithunknown_bert_proselflc.pdf -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | AUTODOCDIR = api 11 | 12 | # User-friendly check for sphinx-build 13 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $?), 1) 14 | $(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/") 15 | endif 16 | 17 | .PHONY: help clean Makefile 18 | 19 | # Put it first so that "make" without argument is like "make help". 20 | help: 21 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | 23 | clean: 24 | rm -rf $(BUILDDIR)/* $(AUTODOCDIR) 25 | 26 | # Catch-all target: route all unknown targets to Sphinx using the new 27 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 28 | %: Makefile 29 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 30 | -------------------------------------------------------------------------------- /docs/_static/.gitignore: -------------------------------------------------------------------------------- 1 | # Empty directory 2 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. _authors: 2 | .. 
include:: ../AUTHORS.rst 3 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changes: 2 | .. include:: ../CHANGELOG.rst 3 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | ProSelfLC 3 | ========= 4 | 5 | This is the documentation of **ProSelfLC**. 6 | 7 | .. note:: 8 | 9 | This is the main page of your project's `Sphinx`_ documentation. 10 | It is formatted in `reStructuredText`_. Add additional pages 11 | by creating rst-files in ``docs`` and adding them to the `toctree`_ below. 12 | Use then `references`_ in order to link them from this page, e.g. 13 | :ref:`authors` and :ref:`changes`. 14 | 15 | It is also possible to refer to the documentation of other Python packages 16 | with the `Python domain syntax`_. By default you can reference the 17 | documentation of `Sphinx`_, `Python`_, `NumPy`_, `SciPy`_, `matplotlib`_, 18 | `Pandas`_, `Scikit-Learn`_. You can add more by extending the 19 | ``intersphinx_mapping`` in your Sphinx's ``conf.py``. 20 | 21 | The pretty useful extension `autodoc`_ is activated by default and lets 22 | you include documentation from docstrings. Docstrings can be written in 23 | `Google style`_ (recommended!), `NumPy style`_ and `classical style`_. 24 | 25 | 26 | Contents 27 | ======== 28 | 29 | .. toctree:: 30 | :maxdepth: 2 31 | 32 | License 33 | Authors 34 | Changelog 35 | Module Reference 36 | 37 | 38 | Indices and tables 39 | ================== 40 | 41 | * :ref:`genindex` 42 | * :ref:`modindex` 43 | * :ref:`search` 44 | 45 | .. _toctree: http://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html 46 | .. _reStructuredText: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html 47 | .. _references: http://www.sphinx-doc.org/en/stable/markup/inline.html 48 | .. _Python domain syntax: http://sphinx-doc.org/domains.html#the-python-domain 49 | .. _Sphinx: http://www.sphinx-doc.org/ 50 | .. _Python: http://docs.python.org/ 51 | .. _Numpy: http://docs.scipy.org/doc/numpy 52 | .. _SciPy: http://docs.scipy.org/doc/scipy/reference/ 53 | .. _matplotlib: https://matplotlib.org/contents.html# 54 | .. _Pandas: http://pandas.pydata.org/pandas-docs/stable 55 | .. _Scikit-Learn: http://scikit-learn.org/stable 56 | .. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html 57 | .. _Google style: https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings 58 | .. _NumPy style: https://numpydoc.readthedocs.io/en/latest/format.html 59 | .. _classical style: http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists 60 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | .. _license: 2 | 3 | ======= 4 | License 5 | ======= 6 | 7 | .. 
include:: ../LICENSE.txt 8 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/normalised_entropy.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/normalised_entropy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020/params.csv:
--------------------------------------------------------------------------------
data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T summary_writer_dir train total_iterations warmup_iterations
cifar100 100 gpu 8 iteration 64 123 100 500 crossentropy WarmupMultiStepSchedule BalancedBatchSampler 0.4 resnet18 0.002 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/resnet18/002_crossentropy_warm0_20220402-223020 False 39000 0
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy_loss_normalised_entropy_max_p_metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy_loss_normalised_entropy_max_p_metadata.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy_loss_normalised_entropy_max_p_metadata.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/accuracy_loss_normalised_entropy_max_p_metadata.xlsx
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/loss.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/loss.pdf
--------------------------------------------------------------------------------
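The params.csv files in these experiment folders record the optimisation settings of each run; for the 002_crossentropy run above they include lr 0.2, warmup_iterations 0, milestones [20000, 30000], gamma 0.1, total_iterations 39000, scheduled by WarmupMultiStepSchedule. The snippet below is only an illustrative sketch of a warmup-then-multi-step decay of that shape; the function name lr_at_iteration is mine, and the repository's actual scheduler lives in src/proselflc/optim/sgd_multistep.py, which is not reproduced here.

```python
# Illustrative sketch (not the repository's implementation) of a warmup +
# multi-step learning-rate schedule matching the params.csv fields:
# lr=0.2, warmup_iterations=0, milestones=[20000, 30000], gamma=0.1.

def lr_at_iteration(t, base_lr=0.2, warmup_iterations=0,
                    milestones=(20000, 30000), gamma=0.1):
    """Return the learning rate at training iteration t."""
    if warmup_iterations > 0 and t < warmup_iterations:
        # Linear warmup from 0 to base_lr.
        return base_lr * (t + 1) / warmup_iterations
    # Multiply by gamma once for every milestone already passed.
    passed = sum(1 for m in milestones if t >= m)
    return base_lr * (gamma ** passed)

if __name__ == "__main__":
    for t in (0, 10000, 20000, 30000, 38999):
        print(t, lr_at_iteration(t))  # 0.2, 0.2, 0.02, 0.002, 0.002
```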
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/max_p.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/max_p.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/metadata.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/normalised_entropy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/normalised_entropy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116/params.csv:
--------------------------------------------------------------------------------
data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations
cifar100 100 gpu 8 iteration 64 123 100 500 confidencepenalty WarmupMultiStepSchedule BalancedBatchSampler 0.4 resnet18 0.002 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 1.0 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/resnet18/008_confidencepenalty_warm0_20220328-130116 False 39000 0
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy_loss_normalised_entropy_max_p_metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy_loss_normalised_entropy_max_p_metadata.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy_loss_normalised_entropy_max_p_metadata.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/accuracy_loss_normalised_entropy_max_p_metadata.xlsx
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/loss.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/loss.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/max_p.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/max_p.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/metadata.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/normalised_entropy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/normalised_entropy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437/params.csv:
--------------------------------------------------------------------------------
data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations
cifar100 100 gpu 8 iteration 64 123 100 500 labelcorrection WarmupMultiStepSchedule BalancedBatchSampler 0.4 resnet18 0.002 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 0.5 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/resnet18/008_labelcorrection_warm0_20220328-125437 False 39000 0
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy_loss_normalised_entropy_max_p_metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy_loss_normalised_entropy_max_p_metadata.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy_loss_normalised_entropy_max_p_metadata.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/accuracy_loss_normalised_entropy_max_p_metadata.xlsx
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/loss.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/loss.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/max_p.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/max_p.pdf
--------------------------------------------------------------------------------
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/metadata.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 labelsmoothing WarmupMultiStepSchedule BalancedBatchSampler 0.4 resnet18 0.002 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/resnet18/008_labelsmoothing_warm0_20220328-125731 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- 
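The 008_labelcorrection and 008_labelsmoothing params.csv entries above both record an `epsilon` column (0.5 for the 008_labelcorrection resnet18 run). As a reading aid, here is a minimal, hedged sketch of how such an `epsilon` is commonly used to blend the one-hot target with a second distribution: a uniform distribution for label smoothing, the model's own prediction for label correction. The helper name `soft_targets` is illustrative and not taken from the repository, whose loss classes may be organised differently.

```python
import torch
import torch.nn.functional as F

def soft_targets(labels, probs, num_classes, epsilon, mode):
    """Blend one-hot targets with a second distribution, weighted by epsilon.

    mode="labelsmoothing": mix with the uniform distribution u = 1/C.
    mode="labelcorrection": mix with the model's predicted distribution p.
    Illustrative sketch only; the repository's own implementation may differ.
    """
    one_hot = F.one_hot(labels, num_classes).float()
    if mode == "labelsmoothing":
        other = torch.full_like(one_hot, 1.0 / num_classes)
    elif mode == "labelcorrection":
        other = probs.detach()
    else:
        raise ValueError(mode)
    return (1.0 - epsilon) * one_hot + epsilon * other

# Example with the epsilon=0.5 recorded for the 008_labelcorrection run above.
logits = torch.randn(4, 100)                    # cifar100 has 100 classes
log_probs = F.log_softmax(logits, dim=-1)
probs = log_probs.exp()
labels = torch.randint(0, 100, (4,))
targets = soft_targets(labels, probs, 100, 0.5, "labelcorrection")
loss = -(targets * log_probs).sum(dim=-1).mean()  # cross entropy against soft target
```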
/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name batch_size momentum weight_decay transit_time_ratio warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode exp_base logit_soften_T summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 proselflc WarmupMultiStepSchedule BalancedBatchSampler 0.4 resnet18 128 0.9 0.002 0.5 0 0.2 10 [20000, 30000] 0.1 cross entropy global*max_p 12 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/resnet18/138_proselflc_warm0_20220402-173610 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy.pdf -------------------------------------------------------------------------------- 
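The 138_proselflc params.csv above records `exp_base=12`, `transit_time_ratio=0.5`, `trust_mode=global*max_p` and `total_iterations=39000`. The sketch below shows, under stated assumptions, how such a schedule can combine a time-dependent global trust with a per-sample local trust equal to the maximum predicted probability, as the `global*max_p` string suggests. The function `proselflc_epsilon` and its exact sigmoid form are illustrative, not the repository's code.

```python
import math
import torch

def proselflc_epsilon(probs, iteration, total_iterations,
                      exp_base=12.0, transit_time_ratio=0.5):
    """Sketch of a ProSelfLC-style trust schedule (not the repo's exact code).

    global_trust rises towards 1 around transit_time_ratio * total_iterations,
    with exp_base controlling how sharp the transition is; local_trust is the
    model's max predicted probability ("global*max_p" in params.csv).
    """
    time_ratio = iteration / total_iterations
    global_trust = 1.0 / (1.0 + math.exp(-exp_base * (time_ratio - transit_time_ratio)))
    local_trust = probs.max(dim=-1).values          # max_p, one value per sample
    return global_trust * local_trust               # epsilon in [0, 1) per sample

# e.g. halfway through the 39000 iterations recorded above
probs = torch.softmax(torch.randn(4, 100), dim=-1)
eps = proselflc_epsilon(probs, iteration=19500, total_iterations=39000)
```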
/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval 
loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 crossentropy WarmupMultiStepSchedule BalancedBatchSampler 0.4 shufflenetv2 0.001 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/shufflenetv2/003_crossentropy_warm0_20220403-011907 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/max_p.pdf -------------------------------------------------------------------------------- 
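Every CIFAR-100 params.csv in this block lists `lr_scheduler=WarmupMultiStepSchedule` with `lr=0.2`, `milestones=[20000, 30000]`, `gamma=0.1` and `warmup_iterations=0`. A small sketch of that schedule follows, assuming it is a standard multi-step decay with optional linear warmup; `lr_at` is an illustrative helper, not a function exported by the repository.

```python
def lr_at(iteration, base_lr=0.2, milestones=(20000, 30000),
          gamma=0.1, warmup_iterations=0):
    """Multi-step learning rate with optional linear warmup (illustrative only)."""
    if warmup_iterations and iteration < warmup_iterations:
        return base_lr * (iteration + 1) / warmup_iterations
    passed = sum(1 for m in milestones if iteration >= m)
    return base_lr * (gamma ** passed)

# Values implied by the params above: 0.2, then 0.02 after 20k, then 0.002 after 30k.
assert lr_at(10000) == 0.2
assert lr_at(25000) == 0.2 * 0.1
assert abs(lr_at(35000) - 0.2 * 0.01) < 1e-12
```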
/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 confidencepenalty WarmupMultiStepSchedule BalancedBatchSampler 0.4 shufflenetv2 0.001 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 1.0 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_confidencepenalty_warm0_20220328-184933 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 labelcorrection WarmupMultiStepSchedule BalancedBatchSampler 0.4 shufflenetv2 0.001 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 0.6 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelcorrection_warm0_20220328-184721 False 39000 0 3 | -------------------------------------------------------------------------------- 
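The 012_confidencepenalty params.csv above records `epsilon=0.5` (the deeploc run further down uses 0.25). A hedged sketch of a confidence-penalty objective, i.e. cross entropy minus an epsilon-weighted entropy bonus that discourages over-confident predictions; the repository's own loss class may parameterise or weight this differently.

```python
import torch
import torch.nn.functional as F

def confidence_penalty_loss(logits, labels, epsilon=0.5):
    """Cross entropy with an entropy regulariser: CE - epsilon * H(p).

    Illustrative sketch; epsilon matches the value recorded in params.csv above.
    """
    log_probs = F.log_softmax(logits, dim=-1)
    ce = F.nll_loss(log_probs, labels)
    entropy = -(log_probs.exp() * log_probs).sum(dim=-1).mean()
    return ce - epsilon * entropy

loss = confidence_penalty_loss(torch.randn(8, 100), torch.randint(0, 100, (8,)))
```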
/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/loss.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/normalised_entropy.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler symmetric_noise_rate network_name weight_decay batch_size momentum warmup_epochs lr batch_accumu_steps milestones gamma loss_mode trust_mode logit_soften_T epsilon summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 labelsmoothing WarmupMultiStepSchedule BalancedBatchSampler 0.4 shufflenetv2 0.001 128 0.9 0 0.2 10 [20000, 30000] 0.1 cross entropy global*(1-H(p)/H(u)) 0.5 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/shufflenetv2/012_labelsmoothing_warm0_20220328-184409 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/loss.pdf -------------------------------------------------------------------------------- 
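All of the cifar100 records in this block set `symmetric_noise_rate=0.4` and `seed=123`. The sketch below illustrates how symmetric label noise at that rate is commonly injected, by re-drawing a 40% subset of labels uniformly from the other classes; it describes the experimental setting, not necessarily the exact routine used in the repository.

```python
import numpy as np

def add_symmetric_noise(labels, noise_rate=0.4, num_classes=100, seed=123):
    """Flip a noise_rate fraction of labels to a different, uniformly chosen class."""
    rng = np.random.default_rng(seed)
    labels = np.asarray(labels).copy()
    n_noisy = int(round(noise_rate * len(labels)))
    idx = rng.choice(len(labels), size=n_noisy, replace=False)
    for i in idx:
        choices = [c for c in range(num_classes) if c != labels[i]]
        labels[i] = rng.choice(choices)
    return labels

clean = np.random.randint(0, 100, size=50000)   # CIFAR-100 train-set size
noisy = add_symmetric_noise(clean, noise_rate=0.4)
print((noisy != clean).mean())                  # ~0.4
```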
/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name lr_scheduler sampler network_name weight_decay trust_mode symmetric_noise_rate batch_size momentum transit_time_ratio warmup_epochs lr batch_accumu_steps milestones gamma loss_mode exp_base logit_soften_T summary_writer_dir train total_iterations warmup_iterations 2 | cifar100 100 gpu 8 iteration 64 123 100 500 proselflc WarmupMultiStepSchedule BalancedBatchSampler shufflenetv2 0.001 global*max_p 0.4 128 0.9 0.5 0 0.2 10 [20000, 30000] 0.1 cross entropy 8 0.4 /home/xinshao/tpami_proselflc_experiments_calibration//cifar100_symmetric_noise_rate_0.4/shufflenetv2/502_proselflc_warm0_20220606-150113 False 39000 0 3 | -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/.~lock.accuracy_loss_normalised_entropy_max_p_metadata.xlsx#: -------------------------------------------------------------------------------- 1 | ,amos,amos-personal-xps15,24.06.2022 14:04,file:///home/amos/.config/libreoffice/4; 2 | -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy.pdf -------------------------------------------------------------------------------- 
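Each run directory in experiments_records stores its full configuration as a `params.csv` with one header row and one value row, reproduced inline in this listing (for example the 502_proselflc entry above). A small sketch for loading such a file into a Python dict; the tab delimiter is an assumption, since the separator cannot be read off this listing, and `load_params` is an illustrative helper rather than repository code.

```python
import csv

def load_params(path):
    """Read a run's params.csv (one header row, one value row) into a dict.

    The delimiter is assumed to be a tab here; adjust if the files turn out
    to be comma-separated.
    """
    with open(path, newline="") as fh:
        reader = csv.reader(fh, delimiter="\t")
        header = next(reader)
        values = next(reader)
    return dict(zip(header, values))

params = load_params(
    "experiments_records/cifar100_symmetric_noise_rate_0.4/"
    "shufflenetv2/502_proselflc_warm0_20220606-150113/params.csv"
)
print(params.get("exp_base"), params.get("transit_time_ratio"))
```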
/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/loss.pdf -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235/params.csv: -------------------------------------------------------------------------------- 1 
| data_name data_root cls_size num_classes device num_workers counter classes_per_batch seed total_epochs eval_interval loss_name network_name pretrained symmetric_noise_rate train_transform lr_scheduler sampler layer_split_lr_scale batch_size momentum transit_time_ratio lr weight_decay batch_accumu_steps dropout milestones gamma warmup_epochs min_scale rotation freeze_bn loss_mode trust_mode logit_soften_T epsilon exp_base summary_writer_dir train split test_transform total_iterations warmup_iterations 2 | clothing1m_withbs /home/xinshao/tpami_proselflc_experiments_calibration/input_dir/clothing1m 18976 15 gpu 16 iteration 8 123 2 100 proselflc resnet50_tv True 0.0 train_rrcsr WarmupMultiStepSchedule BalancedBatchSampler [-2, 2] 32 0.9 0.5 0.01 0.02 8 0.15 [5000, 8500, 9200, 11700] 0.2 0.18 0.75 15 False cross entropy global*max_p 0.42 12 /home/xinshao/tpami_proselflc_experiments_calibration//clothing1m_withbs_symmetric_noise_rate_0.0/resnet50_tv/790_proselflc_warm0.18_20220618-070235 True test resizecrop 16604 1494 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/loss.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/001_labelsmoothing_warm0_20220501-145915/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS-with-unknown gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelsmoothing 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.01 [5000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/001_labelsmoothing_warm0_20220501-145915 True test 8240 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/metadata.pdf -------------------------------------------------------------------------------- 
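The deeploc params.csv entries above use `sampler=BalancedBatchSampler` with `classes_per_batch=2` and `batch_size=32` (the CIFAR-100 runs use 64 classes per batch of 128). The sketch below shows one plausible reading of such a sampler, drawing `classes_per_batch` classes per batch and an equal number of examples from each; `balanced_batches` is an illustrative stand-in and the repository's sampler may differ, e.g. by sampling without replacement.

```python
import random
from collections import defaultdict

def balanced_batches(labels, batch_size=32, classes_per_batch=2, seed=123):
    """Yield index batches with batch_size // classes_per_batch samples per class.

    Illustrative only; mirrors the classes_per_batch and batch_size values
    recorded in the deeploc params.csv entries above.
    """
    rng = random.Random(seed)
    per_class = defaultdict(list)
    for idx, y in enumerate(labels):
        per_class[y].append(idx)
    n_per_class = batch_size // classes_per_batch
    while True:
        chosen = rng.sample(sorted(per_class), classes_per_batch)
        batch = []
        for c in chosen:
            batch.extend(rng.choices(per_class[c], k=n_per_class))
        yield batch

gen = balanced_batches([0, 1] * 500)
first = next(gen)          # 32 indices, 16 from each of the 2 classes
```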
/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/002_labelsmoothing_warm0_20220501-145917/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS-with-unknown gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelsmoothing 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.01 [5000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.125 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/002_labelsmoothing_warm0_20220501-145917 True test 8240 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/005_proselflc_warm0_20220430-142316/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler momentum weight_decay transit_time_ratio warmup_epochs lr batch_accumu_steps milestones gamma total_epochs eval_interval loss_mode trust_mode logit_soften_T exp_base summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS-with-unknown gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 proselflc 0.0 WarmupMultiStepSchedule 
BalancedBatchSampler 0.9 0.0001 0.5 0 0.01 10 [5000] 0.1 40 100 cross entropy global*(1-H(p)/H(u)) 0.2 12 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/005_proselflc_warm0_20220430-142316 True test 8240 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/max_p.pdf -------------------------------------------------------------------------------- 
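The 005_proselflc deeploc run above records `trust_mode=global*(1-H(p)/H(u))` and `logit_soften_T=0.2`. A sketch of that local-trust term: one minus the normalised entropy of the temperature-softened prediction, so confident predictions earn higher trust. The helper `local_trust_entropy` is illustrative, and applying the temperature inside this term is an assumption about where `logit_soften_T` acts.

```python
import math
import torch

def local_trust_entropy(logits, temperature=0.2):
    """1 - H(p)/H(u) for temperature-softened predictions (illustrative).

    H(u) = log(C) is the entropy of the uniform distribution over C classes,
    so the result lies in [0, 1]: 1 for a one-hot prediction, 0 for uniform.
    """
    probs = torch.softmax(logits / temperature, dim=-1)
    entropy = -(probs * probs.clamp_min(1e-12).log()).sum(dim=-1)
    max_entropy = math.log(logits.shape[-1])
    return 1.0 - entropy / max_entropy

trust = local_trust_entropy(torch.randn(4, 2))   # the deeploc MS task has 2 classes
```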
/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/009_confidencepenalty_warm0_20220501-145913/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS-with-unknown gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 confidencepenalty 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.01 [5000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.4 0.25 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/009_confidencepenalty_warm0_20220501-145913 True test 8240 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/normalised_entropy.pdf -------------------------------------------------------------------------------- 
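The labelcorrection runs recorded here (e.g. 057_labelcorrection, whose params.csv follows below) belong to the output-regularisation family in which part of the one-hot target is replaced by the model's own prediction. A minimal sketch of that generic target construction, assuming epsilon is the correction weight and p the model's softmax output; this illustrates the idea only and is not claimed to be identical to proselflc.slices.losses.labelcorrection:

import torch
import torch.nn.functional as F

def corrected_target(onehot_target: torch.Tensor, logits: torch.Tensor, epsilon: float) -> torch.Tensor:
    # Generic label correction: keep (1 - epsilon) of the one-hot label and
    # move epsilon of the probability mass onto the model's current prediction.
    p = F.softmax(logits, dim=1)
    return (1.0 - epsilon) * onehot_target + epsilon * p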
/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS-with-unknown/057_labelcorrection_warm0_20220619-032243/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS-with-unknown gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelcorrection 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.01 [5000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.5 0.125 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/057_labelcorrection_warm0_20220619-032243 True test 8240 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/loss.pdf 
-------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/010_proselflc_warm0_20220430-215151/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler momentum weight_decay transit_time_ratio warmup_epochs lr batch_accumu_steps milestones gamma total_epochs eval_interval loss_mode exp_base logit_soften_T trust_mode summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 proselflc 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.9 0.0001 0.5 0 0.02 10 [3000] 0.1 40 100 cross entropy 12 0.2 global*(1-H(p)/H(u)) /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/010_proselflc_warm0_20220430-215151 True test 4680 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy.pdf -------------------------------------------------------------------------------- 
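The ProSelfLC runs above (e.g. 010_proselflc, with trust_mode global*(1-H(p)/H(u)) and exp_base 12) appear to combine a global, time-dependent factor with the per-example confidence 1 - H(p)/H(u), and every run directory ships a normalised_entropy.pdf plot of that same quantity. A minimal sketch of the per-example term, assuming p is a softmax over num_classes classes; how it is combined with the global factor is defined by the ProSelfLC loss in this repository, not here:

import torch
import torch.nn.functional as F

def one_minus_normalised_entropy(logits: torch.Tensor) -> torch.Tensor:
    # 1 - H(p)/H(u): 0 for a uniform prediction, close to 1 for a confident one.
    p = F.softmax(logits, dim=1)
    h_p = -(p * torch.log(p.clamp_min(1e-12))).sum(dim=1)   # entropy of the prediction
    h_u = torch.log(torch.tensor(float(logits.size(1))))    # entropy of the uniform distribution
    return 1.0 - h_p / h_u

logits = torch.randn(4, 2)   # e.g. the two-class MS task recorded above
print(one_minus_normalised_entropy(logits))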
/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/normalised_entropy.pdf 
-------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/021_labelsmoothing_warm0_20220501-195700/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelsmoothing 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.02 [3000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/021_labelsmoothing_warm0_20220501-195700 True test 4680 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/loss.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/022_labelsmoothing_warm0_20220501-222438/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelsmoothing 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.02 [3000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.125 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/022_labelsmoothing_warm0_20220501-222438 True test 4680 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/normalised_entropy.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/029_confidencepenalty_warm0_20220501-210635/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 confidencepenalty 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.02 [3000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.4 0.25 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/029_confidencepenalty_warm0_20220501-210635 True test 4680 0 3 | -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- 
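The confidencepenalty runs recorded above (e.g. 029_confidencepenalty with epsilon 0.25) presumably follow the confidence-penalty regulariser of Pereyra et al. (2017): cross entropy minus a weighted entropy bonus, which discourages overconfident predictions. A generic sketch under the assumption that epsilon plays the role of the penalty weight; it is not claimed to match proselflc.slices.losses.confidencepenalty line for line:

import torch
import torch.nn.functional as F

def confidence_penalty(logits: torch.Tensor, onehot_target: torch.Tensor, epsilon: float = 0.25) -> torch.Tensor:
    # Cross entropy minus an entropy bonus: low-entropy (overconfident) predictions
    # are penalised, with epsilon controlling the strength (an assumption here).
    p = F.softmax(logits, dim=1)
    log_p = torch.log(p.clamp_min(1e-12))
    ce = -(onehot_target * log_p).sum(dim=1)
    entropy = -(p * log_p).sum(dim=1)
    return (ce - epsilon * entropy).mean()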
/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/loss.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/normalised_entropy.pdf -------------------------------------------------------------------------------- /experiments_records/deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/MS/039_labelcorrection_warm0_20220501-233934/params.csv: -------------------------------------------------------------------------------- 1 | data_name num_classes task_name device num_workers network_name num_hidden_layers num_attention_heads counter batch_size classes_per_batch max_seq_length seed loss_name symmetric_noise_rate lr_scheduler sampler lr milestones gamma momentum weight_decay transit_time_ratio warmup_epochs batch_accumu_steps total_epochs eval_interval loss_mode logit_soften_T epsilon summary_writer_dir train split total_iterations warmup_iterations 2 | deeploc_prottrans 2 MS gpu 1 Rostlab_prot_bert_bfd_seq 6 16 iteration 32 2 434 123 labelcorrection 0.0 WarmupMultiStepSchedule BalancedBatchSampler 0.02 [3000] 0.1 0.9 0.0001 0.5 0 10 40 100 cross entropy 0.2 0.25 /home/xinshao/tpami_proselflc_experiments_calibration//deeploc_prottrans_symmetric_noise_rate_0.0/Rostlab_prot_bert_bfd_seq/039_labelcorrection_warm0_20220501-233934 True test 4680 0 3 | -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy.pdf -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy_loss_normalised_entropy_max_p_metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy_loss_normalised_entropy_max_p_metadata.pdf -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy_loss_normalised_entropy_max_p_metadata.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/accuracy_loss_normalised_entropy_max_p_metadata.xlsx -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/loss.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/loss.pdf -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/max_p.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/max_p.pdf -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/metadata.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/metadata.pdf -------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/normalised_entropy.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/normalised_entropy.pdf 
-------------------------------------------------------------------------------- /experiments_records/food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356/params.csv: -------------------------------------------------------------------------------- 1 | data_name data_root num_classes device num_workers counter seed network_name pretrained symmetric_noise_rate train_transform lr_scheduler sampler loss_name logit_soften_T epsilon exp_base milestones gamma total_epochs eval_interval layer_split_lr_scale batch_size classes_per_batch momentum transit_time_ratio warmup_epochs lr weight_decay batch_accumu_steps min_scale rotation freeze_bn dropout loss_mode trust_mode summary_writer_dir train split test_transform total_iterations warmup_iterations 2 | food101n /home/xinshao/tpami_proselflc_experiments_calibration/input_dir/Food101-N/image_list 101 gpu 16 iteration 123 resnet50_tv True 0.0 train_rrcsr WarmupMultiStepSchedule BalancedBatchSampler proselflc 0.4 8 [28000, 44000, 60000] 0.2 30 1000 [-2, 1] 128 32 0.9 0.5 2 0.2 0.0002 10 0.2 15 False 0.2 cross entropy global*max_p /home/xinshao/tpami_proselflc_experiments_calibration//food101n_symmetric_noise_rate_0.0/resnet50_tv/014_proselflc_warm2_20220515-005356 True test resizecrop 72630 4842 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel"] 3 | build-backend = "setuptools.build_meta:__legacy__" 4 | 5 | [tool.tox] 6 | legacy_tox_ini = """ 7 | [tox] 8 | envlist = py38 9 | 10 | [testenv:docs] 11 | changedir = doc 12 | deps = sphinx 13 | commands = sphinx-build -W -b html -d {envtmpdir}/doctrees . 
{envtmpdir}/html 14 | """ 15 | -------------------------------------------------------------------------------- /script_tools/html2pdf.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "## List all html files" 7 | ], 8 | "metadata": { 9 | "collapsed": false 10 | } 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "outputs": [], 16 | "source": [ 17 | "!pip install rootpath" 18 | ], 19 | "metadata": { 20 | "collapsed": false 21 | } 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "outputs": [], 27 | "source": [ 28 | "import rootpath\n", 29 | "import subprocess\n", 30 | "from os.path import dirname, join\n", 31 | "import glob\n", 32 | "\n", 33 | "project_dir = rootpath.detect()\n", 34 | "print(project_dir)\n", 35 | "\n", 36 | "html_list = glob.glob(join(project_dir, \"**\", \"*.html\"), recursive=True)\n", 37 | "print(html_list)\n", 38 | "print(len(html_list))" 39 | ], 40 | "metadata": { 41 | "collapsed": false 42 | } 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": { 48 | "collapsed": true 49 | }, 50 | "outputs": [], 51 | "source": [ 52 | "for html in html_list:\n", 53 | " subprocess.run(\n", 54 | " [\n", 55 | " 'google-chrome',\n", 56 | " \"--no-margins\",\n", 57 | " \"--print-to-pdf-no-header\",\n", 58 | " \"--headless\",\n", 59 | " \"--disable-gpu\",\n", 60 | " \"--print-to-pdf={}\".format(html.replace(\".html\", \".pdf\")),\n", 61 | " html,\n", 62 | " ]\n", 63 | " )\n", 64 | " subprocess.run(\n", 65 | " [\n", 66 | " 'rm',\n", 67 | " html,\n", 68 | " ]\n", 69 | " )\n" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "outputs": [], 76 | "source": [], 77 | "metadata": { 78 | "collapsed": false 79 | } 80 | } 81 | ], 82 | "metadata": { 83 | "kernelspec": { 84 | "display_name": "Python 3", 85 | "language": "python", 86 | "name": "python3" 87 | }, 88 | "language_info": { 89 | "codemirror_mode": { 90 | "name": "ipython", 91 | "version": 2 92 | }, 93 | "file_extension": ".py", 94 | "mimetype": "text/x-python", 95 | "name": "python", 96 | "nbconvert_exporter": "python", 97 | "pygments_lexer": "ipython2", 98 | "version": "2.7.6" 99 | } 100 | }, 101 | "nbformat": 4, 102 | "nbformat_minor": 0 103 | } 104 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # This file is used to configure your project. 2 | # Read more about the various options under: 3 | # http://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files 4 | 5 | [metadata] 6 | name = ProSelfLC 7 | description = Deep Critical Learning. Implementation of ProSelfLC, IMAE, DM, etc. 8 | Progressive Self Label Correction, CVPR 2021. It has been extended as a TPAMI submission using my personal time. 
9 | author = Dr (Amos) Xinshao Wang 10 | author-email = xinshaowang@gmail.com 11 | license = gpl3 12 | long-description = file: README.md 13 | long-description-content-type = text/markdown; charset=UTF-8 14 | url = https://github.com/XinshaoAmosWang/DeepCriticalLearning 15 | project-urls = 16 | Documentation = https://github.com/XinshaoAmosWang/DeepCriticalLearning 17 | # Change if running only on Windows, Mac or Linux (comma-separated) 18 | platforms = any 19 | # Add here all kinds of additional classifiers as defined under 20 | # https://pypi.python.org/pypi?%3Aaction=list_classifiers 21 | classifiers = 22 | Development Status :: 4 - Beta 23 | Programming Language :: Python 24 | 25 | [options] 26 | zip_safe = False 27 | packages = find: 28 | include_package_data = True 29 | package_dir = 30 | =src 31 | # DON'T CHANGE THE FOLLOWING LINE! IT WILL BE UPDATED BY PYSCAFFOLD! 32 | setup_requires = pyscaffold>=3.3a0,<4 33 | # Add here dependencies of your project (semicolon/line-separated), e.g. 34 | # install option 1: pipenv install -e . --skip-lock 35 | # install option 2: pipenv shell && pip install -e . 36 | install_requires = requests>=2.17.3 37 | pandas==1.3.2 38 | xlsxwriter==3.0.1 39 | matplotlib==3.4.3 40 | numpy==1.19.2 41 | tqdm==4.50.2 42 | scipy==1.7.1 43 | plotly==5.3.1 44 | ml_collections==0.1.0 45 | torch @ https://download.pytorch.org/whl/cu111/torch-1.9.0%%2Bcu111-cp38-cp38-linux_x86_64.whl 46 | torchvision @ https://download.pytorch.org/whl/cu111/torchvision-0.10.0%%2Bcu111-cp38-cp38-linux_x86_64.whl 47 | xlrd==1.2.0 # the version is important 48 | pyyaml==5.4.1 49 | transformers==4.12.3 50 | seqeval==1.2.2 51 | jinja2==2.11.3 52 | markupsafe==2.0.1 53 | dataprep==0.3.0 54 | nltk==3.5 55 | regex<2021.0.0,>=2020.10.15 56 | openpyxl==3.0.9 57 | pre_commit==2.20.0 58 | 59 | dependency_links = https://download.pytorch.org/whl/cu111/torch-1.9.0%%2Bcu111-cp38-cp38-linux_x86_64.whl#egg=torch 60 | https://download.pytorch.org/whl/cu111/torchvision-0.10.0%%2Bcu111-cp38-cp38-linux_x86_64.whl#egg=torchvision 61 | 62 | # The usage of test_requires is discouraged, see `Dependency Management` docs 63 | # tests_require = pytest; pytest-cov 64 | # Require a specific Python version, e.g. Python 2.7 or >= 3.4 65 | # python_requires = >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* 66 | 67 | [options.packages.find] 68 | where = src 69 | exclude = 70 | tests 71 | 72 | [options.extras_require] 73 | # Add here additional requirements for extra features, to install with: 74 | # `pip install ProSelfLC[PDF]` like: 75 | # PDF = ReportLab; RXP 76 | # Add here test requirements (semicolon/line-separated) 77 | testing = 78 | pytest 79 | pytest-cov 80 | 81 | [options.entry_points] 82 | # Add here console scripts like: 83 | # console_scripts = 84 | # script_name = proselflc.module:function 85 | # For example: 86 | # console_scripts = 87 | # fibonacci = proselflc.skeleton:run 88 | # And any other entry points, for example: 89 | # pyscaffold.cli = 90 | # awesome = pyscaffoldext.awesome.extension:AwesomeExtension 91 | 92 | [test] 93 | # py.test options when running `python setup.py test` 94 | # addopts = --verbose 95 | extras = True 96 | 97 | [tool:pytest] 98 | # Options for py.test: 99 | # Specify command line options as you would do when invoking py.test directly. 100 | # e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml 101 | # in order to write a coverage file that can be read by Jenkins. 
102 | addopts = 103 | --cov proselflc --cov-report term-missing 104 | --verbose 105 | norecursedirs = 106 | dist 107 | build 108 | .tox 109 | testpaths = tests 110 | # Use pytest markers to select/deselect specific tests 111 | # markers = 112 | # slow: mark tests as slow (deselect with '-m "not slow"') 113 | 114 | [aliases] 115 | dists = bdist_wheel 116 | 117 | [bdist_wheel] 118 | # Use this option if your package is pure-python 119 | universal = 1 120 | 121 | [build_sphinx] 122 | source_dir = docs 123 | build_dir = build/sphinx 124 | 125 | [devpi:upload] 126 | # Options for the devpi: PyPI server and packaging tool 127 | # VCS export must be deactivated since we are using setuptools-scm 128 | no-vcs = 1 129 | formats = bdist_wheel 130 | 131 | [flake8] 132 | # Some sane defaults for the code style checker flake8 133 | max-line-length = 88 134 | extend-ignore = E203, W503 135 | # ^ Black-compatible 136 | # E203 and W503 have edge cases handled by black 137 | exclude = 138 | .tox 139 | build 140 | dist 141 | .eggs 142 | docs/conf.py 143 | 144 | [pyscaffold] 145 | # PyScaffold's parameters when the project was created. 146 | # This will be used when updating. Do not change! 147 | version = 3.3 148 | package = proselflc 149 | extensions = 150 | pre_commit 151 | tox 152 | cirrus 153 | pre_commit 154 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Setup file for proselflc. 4 | Use setup.cfg to configure your project. 5 | 6 | This file was generated with PyScaffold 3.3. 7 | PyScaffold helps you to put up the scaffold of your new Python project. 8 | Learn more under: https://pyscaffold.org/ 9 | """ 10 | import sys 11 | from pkg_resources import VersionConflict, require 12 | from setuptools import setup 13 | 14 | try: 15 | require("setuptools>=38.3") 16 | except VersionConflict: 17 | print("Error: version of setuptools is too old (<38.3)!") 18 | sys.exit(1) 19 | 20 | 21 | if __name__ == "__main__": 22 | setup(use_pyscaffold=True) 23 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/__init__.py -------------------------------------------------------------------------------- /src/proselflc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/__init__.py -------------------------------------------------------------------------------- /src/proselflc/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | global exceptions 3 | """ 4 | 5 | 6 | class ParamException(Exception): 7 | """ 8 | Invalid parameter exception 9 | """ 10 | 11 | def __init__(self, msg, fields=None): 12 | self.fields = fields 13 | self.msg = msg 14 | 15 | def __str__(self): 16 | return self.msg 17 | -------------------------------------------------------------------------------- /src/proselflc/slicegetter/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slicegetter/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slicegetter/get_dataloader.py: -------------------------------------------------------------------------------- 1 | from proselflc.exceptions import ParamException 2 | from proselflc.slices.datain.dataloaders.cifar100dataloader import CIFAR100DataLoader 3 | from proselflc.slices.datain.dataloaders.clothing1mdataloader import Clothing1MLoader 4 | from proselflc.slices.datain.dataloaders.deeplocdataloaders import ( 5 | DeepLocTrainLoaderTestset, 6 | ) 7 | from proselflc.slices.datain.dataloaders.food101n import Food101NLoader 8 | 9 | 10 | class DataLoaderPool: 11 | """ 12 | Collection for validated data loaders 13 | 14 | A dictionary of data_name (key) and DataLoader (not initialised). 15 | 16 | """ 17 | 18 | validated_dataloaders = { 19 | "cifar100": CIFAR100DataLoader, 20 | "clothing1m_withbs": Clothing1MLoader, 21 | "deeploc_prottrans": DeepLocTrainLoaderTestset, 22 | "food101n": Food101NLoader, 23 | } 24 | 25 | @classmethod 26 | def get_dataloader(cls, params={}): 27 | """ 28 | Returns: 29 | DataLoader, preprocessed, iterable and directly feeded into network 30 | 31 | Inputs: A dictionary of params 32 | params["data_name"]: str = "cifar100", a predefined dataset name. 33 | params["train"]: bool, true or false 34 | params["num_workers"]: int 35 | params["batch_size"]: int 36 | 37 | TODO: 38 | More dataloaders added and tested. 39 | """ 40 | 41 | # sanity check for params["data_name"] 42 | if "data_name" not in params.keys(): 43 | error_msg = ( 44 | "The input params have no key of data_name. " 45 | + "params[" 46 | + "data_name" 47 | + "] " 48 | + " has to be provided." 49 | ) 50 | raise (ParamException(error_msg)) 51 | 52 | if not isinstance(params["data_name"], str): 53 | error_msg = "The given data_name is not a string." 54 | raise (ParamException(error_msg)) 55 | # sanity check for params["train"] 56 | if "train" not in params.keys(): 57 | error_msg = ( 58 | "The input params have no key of train. " 59 | + "params[" 60 | + "train" 61 | + "] " 62 | + " has to be provided." 63 | ) 64 | raise (ParamException(error_msg)) 65 | 66 | if not isinstance(params["train"], bool): 67 | error_msg = "The given train is not a bool type." 68 | raise (ParamException(error_msg)) 69 | 70 | if params["data_name"] in cls.validated_dataloaders.keys(): 71 | dataloader_class = cls.validated_dataloaders[params["data_name"]] 72 | # num_workers, batch size are not well sanity checked. 73 | return dataloader_class(params) 74 | else: 75 | error_msg = ( 76 | "The given data_name is " 77 | + params["data_name"] 78 | + ", which is not supported yet." 
79 | + "Please choose from " 80 | + str(cls.validated_dataloaders.keys()) 81 | ) 82 | raise (ParamException(error_msg)) 83 | -------------------------------------------------------------------------------- /src/proselflc/slicegetter/get_lossfunction.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from proselflc.exceptions import ParamException 4 | from proselflc.slices.losses.confidencepenalty import ConfidencePenalty 5 | from proselflc.slices.losses.crossentropy import CrossEntropy 6 | from proselflc.slices.losses.labelcorrection import LabelCorrection 7 | from proselflc.slices.losses.lablesmoothing import LabelSmoothing 8 | from proselflc.slices.losses.mean_absolute_error import MeanAbsoluteError 9 | from proselflc.slices.losses.proselflc import ProSelfLC 10 | 11 | 12 | class LossPool: 13 | """ 14 | Collection for validated losses 15 | 16 | A dictionary of loss_name (key) and nn.Module (not initialised). 17 | 18 | """ 19 | 20 | validated_losses = { 21 | "confidencepenalty": ConfidencePenalty, 22 | "crossentropy": CrossEntropy, 23 | "labelcorrection": LabelCorrection, 24 | "labelsmoothing": LabelSmoothing, 25 | "proselflc": ProSelfLC, 26 | "dm_exp_pi": MeanAbsoluteError, 27 | "dm_standard_mae": MeanAbsoluteError, 28 | "dm_standard_mse": MeanAbsoluteError, 29 | "dm_gce": MeanAbsoluteError, 30 | "dm_sce": MeanAbsoluteError, 31 | } 32 | 33 | @classmethod 34 | def get_lossfunction(cls, params={}) -> nn.Module: 35 | """ 36 | Returns: 37 | nn.Module, a predefined loss 38 | 39 | Inputs: A dictionary of params 40 | params["loss_name"]: str = "proselflc", a predefined network name. 41 | For proselflc: 42 | params["totoal_time"]: int, which is the total iterations or epochs 43 | params["exp_base"]: float 44 | params["counter"]: str, eithor "iteration or "epoch" 45 | 46 | TODO: 47 | More losses added and tested. 48 | """ 49 | 50 | # sanity check for network_name 51 | if "loss_name" not in params.keys(): 52 | error_msg = ( 53 | "The input params have no key of loss_name. " 54 | + "params[" 55 | + "loss_name" 56 | + "] " 57 | + " has to be provided." 58 | ) 59 | raise (ParamException(error_msg)) 60 | 61 | if not isinstance(params["loss_name"], str): 62 | error_msg = "The given loss_name is not a string." 63 | raise (ParamException(error_msg)) 64 | 65 | if params["loss_name"] in cls.validated_losses.keys(): 66 | loss_class = cls.validated_losses[params["loss_name"]] 67 | return loss_class(params=params) 68 | else: 69 | error_msg = ( 70 | "The given loss_name is " 71 | + params["loss_name"] 72 | + ", which is not supported yet." 
73 | + "Please choose from " 74 | + str(cls.validated_losses.keys()) 75 | ) 76 | raise (ParamException(error_msg)) 77 | -------------------------------------------------------------------------------- /src/proselflc/slicegetter/get_network.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from proselflc.exceptions import ParamException 4 | from proselflc.slices.networks.mobilenetv2 import mobilenetv2 5 | from proselflc.slices.networks.resnet import resnet18, resnet34, resnet50 6 | from proselflc.slices.networks.resnet50_tv import resnet50_tv 7 | from proselflc.slices.networks.resnet_plc import resnet50_plc 8 | from proselflc.slices.networks.senet import seresnet18, seresnet50 9 | from proselflc.slices.networks.shufflenetv2 import shufflenetv2 10 | from proselflc.slices.networks.transformers.prot_bert_bfd_seqlevel import ( 11 | prot_bert_bfd_seqclassifier, 12 | ) 13 | from proselflc.slices.networks.transformers.prot_bert_bfd_tokenlevel import ( 14 | prot_bert_bfd_tokenclassifier, 15 | ) 16 | from proselflc.slices.networks.vit_models import ViT_B_16 17 | 18 | 19 | class NetworkPool: 20 | """ 21 | Collection for validated networks 22 | 23 | A dictionary of network_name (key) and nn.Module (not initialised). 24 | 25 | TODO: 26 | Now the networks fix class_num = 100 by default. 27 | To change this and make it flexible for more use cases. 28 | """ 29 | 30 | validated_networks = { 31 | # "shufflenet": shufflenet, 32 | # "preact_resnet18": preact_resnet18, 33 | # 34 | "shufflenetv2": shufflenetv2, # faster than shufflenet 35 | "mobilenetv2": mobilenetv2, 36 | # "nasnet": nasnet, 37 | "resnet18": resnet18, 38 | "resnet34": resnet34, 39 | "resnet50": resnet50, 40 | "resnet50_plc": resnet50_plc, 41 | "resnet50_tv": resnet50_tv, 42 | "seresnet18": seresnet18, 43 | "seresnet50": seresnet50, 44 | # 45 | "ViT-B_16": ViT_B_16, 46 | "Rostlab_prot_bert_bfd_token": prot_bert_bfd_tokenclassifier, 47 | "Rostlab_prot_bert_bfd_seq": prot_bert_bfd_seqclassifier, 48 | } 49 | 50 | untested_networks = {} 51 | 52 | @classmethod 53 | def get_network(cls, params={}) -> nn.Module: 54 | """ 55 | Returns: 56 | nn.Module, a predefined network archiecture. 57 | 58 | Inputs: A dictionary of params 59 | params["network_name"]: str = "shufflenetv2", a predefined network name. 60 | 61 | TODO: 62 | Tested current networks. 63 | More networks added and tested. 64 | """ 65 | 66 | # sanity check for network_name 67 | if "network_name" not in params.keys(): 68 | error_msg = ( 69 | "The input params have no key of network_name. " 70 | + "params[" 71 | + "network_name" 72 | + "] " 73 | + " has to be provided." 74 | ) 75 | raise (ParamException(error_msg)) 76 | 77 | if not isinstance(params["network_name"], str): 78 | error_msg = "The given network_name is not a string." 79 | raise (ParamException(error_msg)) 80 | 81 | if params["network_name"] in cls.validated_networks.keys(): 82 | # TODO: more params to config the returned network 83 | return cls.validated_networks[params["network_name"]]( 84 | params=params, 85 | ) 86 | else: 87 | error_msg = ( 88 | "The given network_name is " 89 | + params["network_name"] 90 | + ", which is not supported yet." 
91 | + "Please choose from " 92 | + str(cls.validated_networks.keys()) 93 | ) 94 | raise (ParamException(error_msg)) 95 | -------------------------------------------------------------------------------- /src/proselflc/slices/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/datain/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/datain/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/datain/dataloaders/cifar100dataloader.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | import torch 4 | from torch.utils.data import DataLoader 5 | 6 | from proselflc.exceptions import ParamException 7 | from proselflc.trainer.utils import intlabel2onehot 8 | 9 | from ..datasets.cifar100dataset import CIFAR100Dataset 10 | from ..transforms.cifar100transforms import ( 11 | cifar100_transform_test_data, 12 | cifar100_transform_train_data, 13 | ) 14 | from ..utils import BalancedBatchSampler, set_torch_seed 15 | 16 | 17 | class CIFAR100DataLoader(DataLoader): 18 | """ 19 | CIFAR100 Dataloader with customed settings. 20 | 21 | What is special here versus DataLoader: 22 | 1. train is bool and required. 23 | 1.1. which dataset 24 | 1.2. shuffle=train accordingly. 25 | 1.3. set data_transform and target_tranform accordingly. 26 | 27 | Args: 28 | train (bool, required): 29 | If true, it is a training dataloader. 30 | Otherwise, it is a testing dataloader 31 | 1. shuffle(bool, not required): 32 | It is hidden in this class. 33 | being equeal to train (bool, required). 34 | 2. which dataset will be set accordingly. 35 | 3. transform will be set accordingly. 36 | num_workers: 37 | inherited from DataLoader. 
38 | batch_size: 39 | inherited from DataLoader 40 | """ 41 | 42 | # overwrite 43 | def __init__( 44 | self, 45 | params: dict = { 46 | "train": True, 47 | "num_workers": 4, 48 | "batch_size": 128, 49 | "symmetric_noise_rate": 0, 50 | }, 51 | ) -> None: 52 | cifar100_transform_intlabel2onehot = partial( 53 | intlabel2onehot, 54 | class_num=params["num_classes"], 55 | ) 56 | if params["sampler"] not in ["BalancedBatchSampler", None]: 57 | error_msg = "params['sampler']: {}, not in {}".format( 58 | params["sampler"], 59 | ["BalancedBatchSampler", None], 60 | ) 61 | raise ParamException(error_msg) 62 | if params["train"] not in [True, False]: 63 | error_msg = "params['train']: {}, not in {}".format( 64 | params["train"], 65 | [True, False], 66 | ) 67 | raise ParamException(error_msg) 68 | 69 | if params["train"]: 70 | self._dataset = CIFAR100Dataset( 71 | params, 72 | data_transform=cifar100_transform_train_data, 73 | target_transform=cifar100_transform_intlabel2onehot, 74 | ) 75 | else: 76 | self._dataset = CIFAR100Dataset( 77 | params, 78 | data_transform=cifar100_transform_test_data, 79 | target_transform=cifar100_transform_intlabel2onehot, 80 | ) 81 | 82 | if params["train"]: 83 | if "seed" in params.keys() and params["seed"] is not None: 84 | import random 85 | 86 | import numpy 87 | 88 | set_torch_seed( 89 | numpy=numpy, 90 | torch=torch, 91 | random=random, 92 | seed=params["seed"], 93 | ) 94 | g = torch.Generator() 95 | g.manual_seed(params["seed"]) 96 | 97 | def seed_worker(worker_id): 98 | worker_seed = torch.initial_seed() % 2**32 99 | numpy.random.seed(worker_seed) 100 | random.seed(worker_seed) 101 | 102 | # 103 | train_batch_sampler = BalancedBatchSampler( 104 | labels=self._dataset.targets, 105 | n_classes=params["classes_per_batch"], 106 | n_samples=params["batch_size"] // params["classes_per_batch"], 107 | seed=params["seed"], 108 | ) 109 | super().__init__( 110 | dataset=self._dataset, 111 | num_workers=params["num_workers"], 112 | batch_sampler=train_batch_sampler, 113 | # 114 | worker_init_fn=seed_worker, 115 | generator=g, 116 | ) 117 | else: 118 | super().__init__( 119 | dataset=self._dataset, 120 | shuffle=True, 121 | num_workers=params["num_workers"], 122 | batch_size=params["batch_size"], 123 | ) 124 | else: 125 | # val or test data loaders 126 | super().__init__( 127 | dataset=self._dataset, 128 | shuffle=False, 129 | num_workers=params["num_workers"], 130 | batch_size=params["batch_size"] * 2, 131 | ) 132 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/dataloaders/deeplocdataloaders.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | from torch.utils.data import DataLoader 4 | 5 | from proselflc.exceptions import ParamException 6 | from proselflc.slices.datain.datasets.deeplocdatasets import DeepLocDataset 7 | from proselflc.slices.datain.utils import BalancedBatchSampler, set_torch_seed 8 | from proselflc.trainer.utils import intlabel2onehot 9 | 10 | # from transformers import Trainer, TrainingArguments 11 | # from proselflc.slices.networks.transformers.prot_bert_bfd_seqlevel import ( 12 | # prot_bert_bfd_seqclassifier, 13 | # ) 14 | 15 | # def deeploc_traindataloader( 16 | # params: dict = { 17 | # "output_dir": "./results", 18 | # "batch_size": 8, 19 | # # 20 | # "compute_metrics": compute_metrics, 21 | # } 22 | # ): 23 | # if "compute_metrics" not in params.keys(): 24 | # params["compute_metrics"] = compute_metrics 25 | # if 
"seed" not in params.keys(): 26 | # params["seed"] = 123 27 | # 28 | # training_args = TrainingArguments( 29 | # output_dir=params["output_dir"], # output directory 30 | # per_device_train_batch_size=params[ 31 | # "batch_size" 32 | # ], # batch size per device during training 33 | # per_device_eval_batch_size=params["batch_size"], # batch size for evaluation 34 | # seed=params["seed"], # Seed for experiment reproducibility 35 | # ) 36 | # 37 | # tfmer_trainer = Trainer( 38 | # model=prot_bert_bfd_seqclassifier( 39 | # # Note here, the only reason to use tfmer_trainer is to 40 | # # use its get_train_dataloader(), 41 | # # there I will change params 42 | # # to reduce resources consumption 43 | # { 44 | # "network_name": params["network_name"], 45 | # "num_hidden_layers": 1, 46 | # "num_attention_heads": 1, 47 | # # 48 | # "num_classes": params["num_classes"], 49 | # }, 50 | # ), # the instantiated 🤗 Transformers model to be trained 51 | # compute_metrics=params["compute_metrics"], # evaluation metrics 52 | # # 53 | # # for generating train loader 54 | # train_dataset=DeepLocDataset( 55 | # split="train", 56 | # tokenizer_name=params["network_name"], 57 | # max_length=params["max_seq_length"], 58 | # task_name=params["task_name"], 59 | # ), 60 | # # 61 | # # args mainly for batch size 62 | # args=training_args, 63 | # ) 64 | # 65 | # return tfmer_trainer.get_train_dataloader() 66 | 67 | 68 | def DeepLocTrainLoaderTestset( 69 | params: dict = { 70 | "split": None, 71 | "batch_size": 128, 72 | "task_name": "MS", 73 | }, 74 | ): 75 | if (params["task_name"], params["num_classes"]) not in zip( 76 | ("MS-with-unknown", "MS", "SubcellularLoc"), (2, 2, 10) 77 | ): 78 | error_msg = "task_name={}, num_classes={}, not in {}".format( 79 | params["task_name"], 80 | params["num_classes"], 81 | # 82 | [pair for pair in zip(("MS", "SubcellularLoc"), (2, 10))], 83 | ) 84 | raise ParamException("non-matched task name and num_classes: " + error_msg) 85 | 86 | tokenizer_name = params["network_name"] 87 | transform_intlabel2onehot = partial( 88 | intlabel2onehot, 89 | class_num=params["num_classes"], 90 | ) 91 | 92 | if params["split"] == "train": 93 | # return deeploc_traindataloader( 94 | # params=params, 95 | # ) 96 | train_dataset = DeepLocDataset( 97 | split="train", 98 | tokenizer_name=tokenizer_name, 99 | max_length=params["max_seq_length"], 100 | task_name=params["task_name"], 101 | target_transform=transform_intlabel2onehot, 102 | seed=params["seed"], 103 | ) 104 | train_batch_sampler = BalancedBatchSampler( 105 | labels=train_dataset.labels, 106 | n_classes=params["classes_per_batch"], 107 | n_samples=params["batch_size"] // params["classes_per_batch"], 108 | seed=params["seed"], 109 | ) 110 | 111 | # operation to keep deterministic reproducibility 112 | import random 113 | 114 | import numpy 115 | import torch 116 | 117 | set_torch_seed( 118 | torch=torch, 119 | seed=params["seed"], 120 | ) 121 | g = torch.Generator() 122 | g.manual_seed(params["seed"]) 123 | 124 | def seed_worker(worker_id): 125 | worker_seed = torch.initial_seed() % 2**32 126 | numpy.random.seed(worker_seed) 127 | random.seed(worker_seed) 128 | 129 | return DataLoader( 130 | dataset=train_dataset, 131 | num_workers=params["num_workers"], 132 | # 133 | batch_sampler=train_batch_sampler, 134 | # 135 | worker_init_fn=seed_worker, 136 | generator=g, 137 | ) 138 | elif params["split"] in ["valid", "val", "test"]: 139 | return DeepLocDataset( 140 | split="valid or test: the same dataset", 141 | tokenizer_name=tokenizer_name, 142 | 
max_length=params["max_seq_length"], 143 | task_name=params["task_name"], 144 | target_transform=transform_intlabel2onehot, 145 | seed=params["seed"], 146 | ) 147 | else: 148 | error_msg = ( 149 | "incorrect dataset config for DeepLocTrainLoaderTestset: " 150 | "split={}, not in {}".format( 151 | params["split"], ["train", "val", "valid", "test"] 152 | ) 153 | ) 154 | raise ParamException(error_msg) 155 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/datain/datasets/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/datain/datasets/clothing1m.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Callable, Optional 3 | 4 | import numpy as np 5 | import torch 6 | import torch.utils.data as data 7 | from PIL import Image 8 | 9 | from proselflc.slices.datain.utils import set_torch_seed 10 | 11 | # adapted according to 12 | # https://github.com/pxiangwu/PLC/blob/master/clothing1m/data_clothing1m.py 13 | 14 | 15 | class Clothing1M(data.Dataset): 16 | def __init__( 17 | self, 18 | params={ 19 | "data_root": None, 20 | "split": "train", # "val", or "test", 21 | "cls_size": 18976, # balanced samples per class 22 | }, 23 | data_transform: Optional[Callable] = None, 24 | target_transform: Optional[Callable] = None, 25 | ): 26 | # It is fine to always force reproducibility 27 | # for dataset, without input dependence. 28 | if "seed" in params.keys() and params["seed"] is not None: 29 | set_torch_seed( 30 | numpy=np, 31 | torch=torch, 32 | os=os, 33 | seed=params["seed"], 34 | ) 35 | # TODO: to add sanity checks 36 | self.data_root = params["data_root"] 37 | self.split = params["split"] 38 | self.cls_size = params["cls_size"] 39 | 40 | self.train = False 41 | self.data_transform = data_transform 42 | self.target_transform = target_transform 43 | 44 | if self.split == "train": 45 | self.train = True 46 | file_path = os.path.join( 47 | self.data_root, "annotations/noisy_train_key_list.txt" 48 | ) 49 | label_path = os.path.join(self.data_root, "annotations/my_train_label.txt") 50 | elif self.split == "val": 51 | file_path = os.path.join( 52 | self.data_root, "annotations/clean_val_key_list.txt" 53 | ) 54 | label_path = os.path.join(self.data_root, "annotations/my_val_label.txt") 55 | else: 56 | file_path = os.path.join( 57 | self.data_root, "annotations/clean_test_key_list.txt" 58 | ) 59 | label_path = os.path.join(self.data_root, "annotations/my_test_label.txt") 60 | 61 | with open(file_path) as fid: 62 | image_list = [line.strip() for line in fid.readlines()] 63 | 64 | with open(label_path) as fid: 65 | label_list = [int(line.strip()) for line in fid.readlines()] 66 | 67 | if self.split == "train" and self.cls_size is not None: 68 | # balanced sampling !!!! 
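# Worked example of the class-balanced subsampling below (illustrative numbers, not from the original file):
# each class keeps at most cls_size indices after a random permutation, so with the default
# cls_size=18976 and the 14 Clothing1M classes used elsewhere in this repo (see resnet50_tv's
# default num_classes), the noisy training split is capped at roughly 14 * 18976 = 265,664 images;
# classes with fewer images simply keep all of them.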
69 | # TODO: be cautious and provide discussion about this 70 | 71 | self.image_list = np.array(image_list) 72 | self.label_list = np.array(label_list) 73 | 74 | np_labels = np.array(self.label_list) 75 | x = np.unique(np_labels) 76 | 77 | res_img_list = [] 78 | res_label_list = [] 79 | 80 | for i in x: 81 | idx = np.where(np_labels == i)[0] 82 | idx = np.random.permutation(idx) 83 | idx = idx[: self.cls_size] 84 | 85 | res_img_list.append(self.image_list[idx]) 86 | res_label_list.append(self.label_list[idx]) 87 | 88 | self.image_list = np.concatenate(res_img_list).tolist() 89 | self.label_list = np.concatenate(res_label_list).tolist() 90 | else: 91 | self.image_list = image_list 92 | self.label_list = label_list 93 | 94 | self.targets = self.label_list # this is for backward code compatibility 95 | 96 | def __getitem__(self, index): 97 | image_file_name = self.image_list[index] 98 | image_path = os.path.join(self.data_root, image_file_name) 99 | image = Image.open(image_path) 100 | image = image.convert("RGB") 101 | 102 | target = self.label_list[index] 103 | target = np.array(target).astype(np.int64) 104 | 105 | if self.data_transform is not None: 106 | image = self.data_transform(image) 107 | if self.target_transform is not None: 108 | target = self.target_transform(target) 109 | 110 | return image, target 111 | 112 | def __len__(self): 113 | return len(self.label_list) 114 | 115 | def make_weights_for_balanced_classes(self): 116 | nclasses = np.max(np.array(self.label_list)) + 1 117 | count = [0] * nclasses 118 | weight_per_class = [0.0] * nclasses 119 | 120 | for target in self.label_list: 121 | count[np.array(target).astype(np.int64)] += 1 122 | 123 | for i in range(nclasses): 124 | weight_per_class[i] = float(self.__len__()) / float(count[i]) 125 | 126 | self.weight_per_sample_list = [0] * self.__len__() 127 | for idx in range(self.__len__()): 128 | target = self.label_list[idx] 129 | self.weight_per_sample_list[idx] = weight_per_class[ 130 | np.array(target).astype(np.int64) 131 | ] 132 | return torch.DoubleTensor(self.weight_per_sample_list) 133 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/datasets/clothing1m_getlabels.py: -------------------------------------------------------------------------------- 1 | # Copy From https://github.com/pxiangwu/PLC/ 2 | # blob/8c2bcfd25e538fdcdb637a820ac340e9ee084e59/ 3 | # clothing1m/data_clothing1m.py#L9 4 | 5 | 6 | def get_train_labels( 7 | data_root="", 8 | ): 9 | train_file_list = data_root + "/annotations/noisy_train_key_list.txt" 10 | noise_label_file = data_root + "/annotations/noisy_label_kv.txt" 11 | 12 | # read train images 13 | fid = open(train_file_list) 14 | train_list = [line.strip() for line in fid.readlines()] 15 | fid.close() 16 | 17 | fid = open(noise_label_file) 18 | label_list = [line.strip().split(" ") for line in fid.readlines()] 19 | 20 | label_map = dict() 21 | for m in label_list: 22 | label_map[m[0]] = m[1] 23 | 24 | train_labels = [] 25 | for t in train_list: 26 | label = label_map[t] 27 | train_labels.append(label) 28 | 29 | with open(data_root + "/annotations/my_train_label.txt", "w") as fid: 30 | for p in train_labels: 31 | fid.write("{}\n".format(p)) 32 | 33 | return train_labels 34 | 35 | 36 | def get_val_test_labels( 37 | data_root="", 38 | ): 39 | val_file_list = data_root + "/annotations/clean_val_key_list.txt" 40 | test_file_list = data_root + "/annotations/clean_test_key_list.txt" 41 | clean_label_file = data_root + 
"/annotations/clean_label_kv.txt" 42 | 43 | # read val images 44 | fid = open(val_file_list) 45 | val_list = [line.strip() for line in fid.readlines()] 46 | fid.close() 47 | 48 | # read test images 49 | fid = open(test_file_list) 50 | test_list = [line.strip() for line in fid.readlines()] 51 | fid.close() 52 | 53 | fid = open(clean_label_file) 54 | label_list = [line.strip().split(" ") for line in fid.readlines()] 55 | fid.close() 56 | 57 | label_map = dict() 58 | for m in label_list: 59 | label_map[m[0]] = m[1] 60 | 61 | val_labels = [] 62 | for t in val_list: 63 | label = label_map[t] 64 | val_labels.append(label) 65 | 66 | test_labels = [] 67 | for t in test_list: 68 | label = label_map[t] 69 | test_labels.append(label) 70 | 71 | with open(data_root + "/annotations/my_val_label.txt", "w") as fid: 72 | for p in val_labels: 73 | fid.write("{}\n".format(p)) 74 | 75 | with open(data_root + "/annotations/my_test_label.txt", "w") as fid: 76 | for p in test_labels: 77 | fid.write("{}\n".format(p)) 78 | 79 | 80 | if __name__ == "__main__": 81 | data_root = "/home/xinshao/tpami_proselflc_experiments/input_dir/clothing1m" 82 | 83 | # may be noisy 84 | get_train_labels( 85 | data_root=data_root, 86 | ) 87 | 88 | # clean labels 89 | get_val_test_labels( 90 | data_root=data_root, 91 | ) 92 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/datasets/food101n.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Callable, Optional 3 | 4 | import numpy as np 5 | import torch 6 | import torch.utils.data as data 7 | from PIL import Image 8 | 9 | from proselflc.slices.datain.utils import set_torch_seed 10 | 11 | # https://github.com/kuangliu/pytorch-retinanet/blob/master/transform.py 12 | # TODO: transform: figure out resize keeping the ratio. 13 | 14 | 15 | class Food101N(data.Dataset): 16 | def __init__( 17 | self, 18 | params={ 19 | "data_root": None, 20 | "split": "train", # "val", or "test", 21 | }, 22 | data_transform: Optional[Callable] = None, 23 | target_transform: Optional[Callable] = None, 24 | ): 25 | # It is fine to always force reproducibility 26 | # for dataset, without input dependence. 
27 | if "seed" in params.keys() and params["seed"] is not None: 28 | set_torch_seed( 29 | numpy=np, 30 | torch=torch, 31 | os=os, 32 | seed=params["seed"], 33 | ) 34 | # TODO: to add sanity checks 35 | self.data_root = params["data_root"] 36 | self.split = params["split"] 37 | 38 | self.train = False 39 | self.data_transform = data_transform 40 | self.target_transform = target_transform 41 | 42 | if self.split == "train": 43 | self.train = True 44 | self.image_list = np.load(os.path.join(self.data_root, "train_images.npy")) 45 | self.targets = np.load(os.path.join(self.data_root, "train_targets.npy")) 46 | else: 47 | self.train = False 48 | self.image_list = np.load(os.path.join(self.data_root, "test_images.npy")) 49 | self.targets = np.load(os.path.join(self.data_root, "test_targets.npy")) 50 | self.label_list = list(self.targets) 51 | 52 | def __getitem__(self, index): 53 | image_path = self.image_list[index] 54 | image = Image.open(image_path) 55 | image = image.convert("RGB") 56 | 57 | target = self.label_list[index] 58 | target = np.array(target).astype(np.int64) 59 | 60 | if self.data_transform is not None: 61 | image = self.data_transform(image) 62 | if self.target_transform is not None: 63 | target = self.target_transform(target) 64 | 65 | return image, target 66 | 67 | def __len__(self): 68 | return len(self.targets) 69 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/datasets/food101n_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | 5 | import numpy as np 6 | 7 | DATA_ROOT = ( 8 | "/home/xinshao/tpami_proselflc_experiments_calibration/" "input_dir/Food101-N" 9 | ) 10 | TRAIN_PATH = DATA_ROOT + "/Food-101N_release" 11 | TEST_PATH = DATA_ROOT + "/food-101" 12 | 13 | 14 | def check_folder(save_dir): 15 | if not os.path.exists(save_dir): 16 | os.makedirs(save_dir) 17 | return save_dir 18 | 19 | 20 | def gen_train_list(): 21 | root_data_path = TRAIN_PATH + "/meta/imagelist.tsv" 22 | class_list_path = TRAIN_PATH + "/meta/classes.txt" 23 | file_path_prefix = TRAIN_PATH + "/images" 24 | 25 | map_name2cat = dict() 26 | with open(class_list_path) as fp: 27 | fp.readline() # skip first line 28 | 29 | for i, line in enumerate(fp): 30 | row = line.strip() 31 | map_name2cat[row] = i 32 | num_class = len(map_name2cat) 33 | print(map_name2cat) 34 | print("Num Classes: ", num_class) 35 | 36 | targets = [] 37 | img_list = [] 38 | with open(root_data_path) as fp: 39 | fp.readline() # skip first line 40 | 41 | for line in fp: 42 | row = line.strip().split("/") 43 | class_name = row[0] 44 | targets.append(map_name2cat[class_name]) 45 | img_list.append(os.path.join(file_path_prefix, line.strip())) 46 | 47 | targets = np.array(targets) 48 | img_list = np.array(img_list) 49 | assert len(img_list) == len(targets) 50 | print(targets[-1]) 51 | print(img_list[-1]) 52 | print("Num Train Images: ", len(img_list)) 53 | print() 54 | 55 | save_dir = check_folder(DATA_ROOT + "/image_list") 56 | np.save(os.path.join(save_dir, "train_images"), img_list) 57 | np.save(os.path.join(save_dir, "train_targets"), targets) 58 | 59 | return map_name2cat 60 | 61 | 62 | def gen_test_list(arg_map_name2cat): 63 | map_name2cat = arg_map_name2cat 64 | root_data_path = TEST_PATH + "/meta/test.txt" 65 | 66 | file_path_prefix = TEST_PATH + "/images" 67 | 68 | targets = [] 69 | img_list = [] 70 | with open(root_data_path) as fp: 71 | for line in fp: 72 | row = 
line.strip().split("/") 73 | class_name = row[0] 74 | targets.append(map_name2cat[class_name]) 75 | img_list.append(os.path.join(file_path_prefix, line.strip() + ".jpg")) 76 | 77 | targets = np.array(targets) 78 | img_list = np.array(img_list) 79 | assert len(img_list) == len(targets) 80 | print(targets[-1]) 81 | print(img_list[-1]) 82 | print("Num Test Images: ", len(img_list)) 83 | print() 84 | 85 | save_dir = check_folder(DATA_ROOT + "/image_list") 86 | np.save(os.path.join(save_dir, "test_images"), img_list) 87 | np.save(os.path.join(save_dir, "test_targets"), targets) 88 | 89 | 90 | if __name__ == "__main__": 91 | map_name2cat = gen_train_list() 92 | gen_test_list(map_name2cat) 93 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/transforms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/datain/transforms/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/datain/transforms/cifar100transforms.py: -------------------------------------------------------------------------------- 1 | import torchvision.transforms as transforms 2 | 3 | # mean and std of cifar100 dataset 4 | CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343) 5 | CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404) 6 | 7 | 8 | cifar100_transform_train_data = transforms.Compose( 9 | [ 10 | transforms.RandomCrop(32, padding=4), # standard 11 | # transforms.Pad(padding=[4, 4, 4, 4]), 12 | # transforms.RandomResizedCrop( 13 | # 32, scale=(0.8, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) 14 | # ), 15 | transforms.RandomHorizontalFlip(), # flip 16 | transforms.RandomRotation(15), # rotation 17 | transforms.ToTensor(), 18 | transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD), 19 | ] 20 | ) 21 | 22 | cifar100_transform_test_data = transforms.Compose( 23 | [ 24 | # no pad 25 | # no crop 26 | # no flip 27 | # no rotation 28 | transforms.ToTensor(), 29 | transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD), 30 | ] 31 | ) 32 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/transforms/clothing1mtransforms.py: -------------------------------------------------------------------------------- 1 | import torchvision.transforms as transforms 2 | from torchvision.transforms import InterpolationMode 3 | 4 | # mean and std 5 | TRAIN_MEAN = (0.6959, 0.6537, 0.6371) 6 | TRAIN_STD = (0.3113, 0.3192, 0.3214) 7 | 8 | 9 | def clothing1m_transform_train_rrcsr(params=None): 10 | if params is None: 11 | return transforms.Compose( 12 | [ 13 | transforms.RandomResizedCrop( 14 | 224, 15 | scale=(0.7777777, 1.0), 16 | ratio=(3.0 / 4.0, 4.0 / 3.0), 17 | interpolation=InterpolationMode.BICUBIC, 18 | ), 19 | transforms.RandomHorizontalFlip(), # flip 20 | transforms.RandomRotation(15), # rotation 21 | transforms.ToTensor(), 22 | transforms.Normalize(TRAIN_MEAN, TRAIN_STD), 23 | ] 24 | ) 25 | else: 26 | return transforms.Compose( 27 | [ 28 | transforms.RandomResizedCrop( 29 | 224, 30 | scale=(params["min_scale"], 1.0), 31 | ratio=(3.0 / 4.0, 4.0 / 3.0), 32 | interpolation=InterpolationMode.BICUBIC, 33 | ), 34 | transforms.RandomHorizontalFlip(), # flip 35 | transforms.RandomRotation(params["rotation"]), # rotation 36 | 
transforms.ToTensor(), 37 | transforms.Normalize(TRAIN_MEAN, TRAIN_STD), 38 | ] 39 | ) 40 | 41 | 42 | clothing1m_transform_train_rc = transforms.Compose( 43 | [ 44 | transforms.Resize(size=(256, 256), interpolation=InterpolationMode.BICUBIC), 45 | transforms.RandomCrop(size=(224, 224)), 46 | transforms.RandomHorizontalFlip(), # flip 47 | transforms.RandomRotation(15), # rotation 48 | transforms.ToTensor(), 49 | transforms.Normalize(TRAIN_MEAN, TRAIN_STD), 50 | ] 51 | ) 52 | 53 | clothing1m_transform_test_resizeonly = transforms.Compose( 54 | [ 55 | transforms.Resize(size=(224, 224), interpolation=InterpolationMode.BICUBIC), 56 | transforms.ToTensor(), 57 | transforms.Normalize(TRAIN_MEAN, TRAIN_STD), 58 | ] 59 | ) 60 | clothing1m_transform_test_resizecrop = transforms.Compose( 61 | [ 62 | transforms.Resize(size=(256, 256), interpolation=InterpolationMode.BICUBIC), 63 | transforms.CenterCrop(size=(224, 224)), 64 | transforms.ToTensor(), 65 | transforms.Normalize(TRAIN_MEAN, TRAIN_STD), 66 | ] 67 | ) 68 | -------------------------------------------------------------------------------- /src/proselflc/slices/datain/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data.sampler import BatchSampler 3 | 4 | 5 | def set_torch_seed(random=None, numpy=None, torch=None, os=None, seed=0): 6 | if random is not None: 7 | random.seed(seed) 8 | if numpy is not None: 9 | numpy.random.seed(seed) 10 | if torch is not None: 11 | torch.manual_seed(seed) 12 | torch.backends.cudnn.deterministic = True 13 | torch.backends.cudnn.benchmark = False 14 | torch.cuda.manual_seed_all(seed) 15 | torch.use_deterministic_algorithms(True) 16 | if os is not None: 17 | os.environ["PYTHONHASHSEED"] = str(seed) 18 | 19 | 20 | # From https://github.com/adambielski/ 21 | # siamese-triplet/blob/master/datasets.py 22 | class BalancedBatchSampler(BatchSampler): 23 | """ 24 | BatchSampler - 25 | from a MNIST-like dataset, 26 | samples n_classes and within these classes samples n_samples. 27 | Returns batches of size n_classes * n_samples 28 | """ 29 | 30 | def __init__(self, labels, n_classes, n_samples, seed=0): 31 | # It is fine to always force reproducibility 32 | # for numpy and batch sampler, without input dependence. 
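# Illustrative batch shape (assumed settings, mirroring the CIFAR-100 test script later in this dump):
# with labels from the 50,000-image CIFAR-100 train split, n_classes=64 and n_samples=2
# (batch_size 128 // classes_per_batch 64), every yielded batch holds 128 indices that cover
# 64 distinct classes with 2 samples per class, and __len__ gives 50000 // 128 = 390 batches.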
33 | np.random.seed(seed) 34 | 35 | self.labels = np.array(labels).astype(np.int64) 36 | self.labels_set = list(set(self.labels)) 37 | self.label_to_indices = { 38 | label: np.where(self.labels == label)[0] for label in self.labels_set 39 | } 40 | for _label in self.labels_set: 41 | np.random.shuffle(self.label_to_indices[_label]) 42 | self.used_label_indices_count = {label: 0 for label in self.labels_set} 43 | self.count = 0 44 | self.n_classes = n_classes 45 | self.n_samples = n_samples 46 | self.n_dataset = len(self.labels) 47 | self.batch_size = self.n_samples * self.n_classes 48 | 49 | def __iter__(self): 50 | self.count = 0 51 | while self.count + self.batch_size <= self.n_dataset: 52 | classes = np.random.choice(self.labels_set, self.n_classes, replace=False) 53 | indices = [] 54 | for class_ in classes: 55 | indices.extend( 56 | self.label_to_indices[class_][ 57 | range( 58 | self.used_label_indices_count[class_], 59 | self.used_label_indices_count[class_] + self.n_samples, 60 | ) 61 | ] 62 | ) 63 | self.used_label_indices_count[class_] += self.n_samples 64 | if self.used_label_indices_count[class_] + self.n_samples > len( 65 | self.label_to_indices[class_] 66 | ): 67 | np.random.shuffle(self.label_to_indices[class_]) 68 | self.used_label_indices_count[class_] = 0 69 | yield indices 70 | self.count += self.batch_size 71 | 72 | def __len__(self): 73 | return self.n_dataset // self.batch_size 74 | -------------------------------------------------------------------------------- /src/proselflc/slices/losses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/losses/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/losses/confidencepenalty.py: -------------------------------------------------------------------------------- 1 | from torch import Tensor 2 | 3 | from .crossentropy import CrossEntropy 4 | 5 | 6 | class ConfidencePenalty(CrossEntropy): 7 | """ 8 | The implementation for confidence penalty. 9 | The target probability will be subtracted by 10 | a predicted distributions, i.e., self knowledge. 11 | 12 | Inputs: two tensors for predictions and target. 13 | 1. predicted probability distributions of shape (N, C) 14 | 2. target probability distributions of shape (N, C) 15 | 3. epsilon, which controls the degree of confidence penalty. 16 | 17 | Outputs: scalar tensor, normalised by the number of examples. 18 | """ 19 | 20 | def __init__(self, params: dict = None) -> None: 21 | super().__init__() 22 | self.epsilon = params["epsilon"] 23 | 24 | def forward( 25 | self, 26 | pred_probs: Tensor, 27 | target_probs: Tensor, 28 | calibrated_pred_probs: Tensor = None, 29 | epsilon: float = None, 30 | ) -> Tensor: 31 | """ 32 | Inputs: 33 | pred_probs: predictions of shape (N, C). 34 | target_probs: targets of shape (N, C). 35 | epsilon: which controls the degree of confidence penalty. 36 | 37 | Outputs: 38 | Loss: a scalar tensor, normalised by N. 
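Note: the adjusted target computed below is new_target_probs = (1 - epsilon) * target_probs - epsilon * pred_probs (or minus epsilon * calibrated_pred_probs when one is supplied); the model's own distribution is subtracted rather than added, which is the confidence penalty the class name refers to, applied before the cross-entropy term is evaluated.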
39 | """ 40 | if epsilon is None: 41 | epsilon = self.epsilon 42 | if calibrated_pred_probs is None: 43 | new_target_probs = (1 - epsilon) * target_probs - epsilon * pred_probs 44 | else: 45 | new_target_probs = ( 46 | 1 - epsilon 47 | ) * target_probs - epsilon * calibrated_pred_probs 48 | # reuse CrossEntropy's forward computation 49 | return super().forward(pred_probs, new_target_probs.detach()) 50 | -------------------------------------------------------------------------------- /src/proselflc/slices/losses/crossentropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch import Tensor 4 | 5 | from proselflc.exceptions import ParamException 6 | 7 | 8 | class CrossEntropy(nn.Module): 9 | """ 10 | The new implementation of cross entropy using two distributions. 11 | This can be a base class for other losses: 12 | 1. label smoothing; 13 | 2. bootsoft (self label correction), joint-soft, etc. 14 | 3. proselflc 15 | ... 16 | 17 | Inputs: two tensors for predictions and target. 18 | 1. predicted probability distributions of shape (N, C) 19 | 2. target probability distributions of shape (N, C) 20 | 21 | Outputs: scalar tensor, normalised by the number of examples. 22 | """ 23 | 24 | def __init__(self, params: dict = None) -> None: 25 | super().__init__() 26 | 27 | def forward( 28 | self, 29 | pred_probs: Tensor, 30 | target_probs: Tensor, 31 | calibrated_pred_probs: Tensor = None, 32 | ) -> Tensor: 33 | """ 34 | Inputs: 35 | pred_probs: predictions of shape (N, C). 36 | target_probs: targets of shape (N, C). 37 | 38 | Outputs: 39 | Loss: a scalar tensor, normalised by N; computed as sum(target * (log(target) - log(pred))), i.e. KL(target || pred), which equals plain cross entropy for one-hot targets. 40 | """ 41 | if not (pred_probs.shape == target_probs.shape): 42 | error_msg = ( 43 | "pred_probs.shape = " + str(pred_probs.shape) + ". " 44 | "target_probs.shape = " 45 | + str(target_probs.shape) 46 | + ". " 47 | + "Their shape has to be identical. " 48 | ) 49 | raise (ParamException(error_msg)) 50 | # TODO: to assert values in the range of [0, 1] 51 | 52 | num_examples = pred_probs.shape[0] 53 | # loss = torch.sum(target_probs * (-torch.log(pred_probs + 1e-8)), 1) 54 | loss = torch.sum( 55 | target_probs 56 | * (torch.log(target_probs + 1e-8) - torch.log(pred_probs + 1e-8)), 57 | 1, 58 | ) 59 | loss = torch.sum(loss) / num_examples 60 | return loss 61 | -------------------------------------------------------------------------------- /src/proselflc/slices/losses/labelcorrection.py: -------------------------------------------------------------------------------- 1 | from torch import Tensor 2 | 3 | from .crossentropy import CrossEntropy 4 | 5 | 6 | class LabelCorrection(CrossEntropy): 7 | """ 8 | The implementation for label correction. 9 | The target probability will be smoothed by 10 | a predicted distribution, i.e., self knowledge. 11 | 1. a.k.a. Bootsoft 12 | 2. a.k.a. Joint-soft 13 | 3. shares its principle with pseudo-labelling 14 | 15 | Inputs: two tensors for predictions and target. 16 | 1. predicted probability distributions of shape (N, C) 17 | 2. target probability distributions of shape (N, C) 18 | 3. epsilon, which controls the degree of label correction. 19 | 20 | Outputs: scalar tensor, normalised by the number of examples.
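Note: the corrected target computed in forward() below is new_target_probs = (1 - epsilon) * target_probs + epsilon * pred_probs (or plus epsilon * calibrated_pred_probs when supplied); epsilon = 0 recovers plain cross entropy on the given labels, while epsilon = 1 trains purely on the model's own predictions, as in pseudo-labelling.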
21 | """ 22 | 23 | def __init__(self, params: dict = None) -> None: 24 | super().__init__() 25 | self.epsilon = params["epsilon"] 26 | 27 | def forward( 28 | self, 29 | pred_probs: Tensor, 30 | target_probs: Tensor, 31 | calibrated_pred_probs: Tensor = None, 32 | epsilon: float = None, 33 | ) -> Tensor: 34 | """ 35 | Inputs: 36 | pred_probs: predictions of shape (N, C). 37 | target_probs: targets of shape (N, C). 38 | epsilon: which controls the degree of label correction. 39 | 40 | Outputs: 41 | Loss: a scalar tensor, normalised by N. 42 | """ 43 | if epsilon is None: 44 | epsilon = self.epsilon 45 | if calibrated_pred_probs is None: 46 | new_target_probs = (1 - epsilon) * target_probs + epsilon * pred_probs 47 | else: 48 | new_target_probs = ( 49 | 1 - epsilon 50 | ) * target_probs + epsilon * calibrated_pred_probs 51 | # reuse CrossEntropy's forward computation 52 | return super().forward(pred_probs, new_target_probs.detach()) 53 | -------------------------------------------------------------------------------- /src/proselflc/slices/losses/lablesmoothing.py: -------------------------------------------------------------------------------- 1 | from torch import Tensor 2 | 3 | from .crossentropy import CrossEntropy 4 | 5 | 6 | class LabelSmoothing(CrossEntropy): 7 | """ 8 | The implementation for label smoothing. 9 | The target probability will be smoothed by 10 | a uniform distribution, defined by the total class number. 11 | 12 | Inputs: two tensors for predictions and target. 13 | 1. predicted probability distributions of shape (N, C) 14 | 2. target probability distributions of shape (N, C) 15 | 3. epsilon, which controls the degree of smoothing 16 | 17 | Outputs: scalar tensor, normalised by the number of examples. 18 | """ 19 | 20 | def __init__(self, params: dict = None) -> None: 21 | super().__init__() 22 | self.epsilon = params["epsilon"] 23 | 24 | def forward( 25 | self, 26 | pred_probs: Tensor, 27 | target_probs: Tensor, 28 | epsilon: float = None, 29 | calibrated_pred_probs: Tensor = None, 30 | ) -> Tensor: 31 | """ 32 | Inputs: 33 | pred_probs: predictions of shape (N, C). 34 | target_probs: targets of shape (N, C). 35 | epsilon: which controls the degree of smoothing 36 | 37 | Outputs: 38 | Loss: a scalar tensor, normalised by N. 39 | """ 40 | if epsilon is None: 41 | epsilon = self.epsilon 42 | class_num = pred_probs.shape[1] 43 | new_target_probs = (1 - epsilon) * target_probs + epsilon * 1.0 / class_num 44 | # reuse CrossEntropy's forward computation 45 | return super().forward(pred_probs, new_target_probs.detach()) 46 | -------------------------------------------------------------------------------- /src/proselflc/slices/losses/mean_absolute_error.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch import Tensor 4 | 5 | from proselflc.exceptions import ParamException 6 | 7 | 8 | class MeanAbsoluteError(nn.Module): 9 | """ 10 | The new implementation of MAE using two distributions. 11 | 12 | Inputs: two tensors for predictions and target. 13 | 1. predicted probability distributions of shape (N, C) 14 | 2. target probability distributions of shape (N, C) 15 | 16 | Outputs: scalar tensor, normalised by the number of examples. 17 | """ 18 | 19 | def __init__(self, params: dict = None) -> None: 20 | super().__init__() 21 | 22 | @torch.no_grad() 23 | def forward(self, pred_probs: Tensor, target_probs: Tensor) -> Tensor: 24 | """ 25 | Inputs: 26 | pred_probs: predictions of shape (N, C). 
27 | target_probs: targets of shape (N, C). 28 | 29 | Outputs: 30 | Loss: a scalar tensor, normalised by N. 31 | """ 32 | if not (pred_probs.shape == target_probs.shape): 33 | error_msg = ( 34 | "pred_probs.shape = " + str(pred_probs.shape) + ". " 35 | "target_probs.shape = " 36 | + str(target_probs.shape) 37 | + ". " 38 | + "Their shape has to be identical. " 39 | ) 40 | raise (ParamException(error_msg)) 41 | # TODO: to assert values in the range of [0, 1] 42 | 43 | num_examples = pred_probs.shape[0] 44 | loss = torch.sum(torch.abs(pred_probs - target_probs), 1) 45 | loss = torch.sum(loss) / num_examples 46 | return loss 47 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/slices/networks/__init__.py -------------------------------------------------------------------------------- /src/proselflc/slices/networks/mobilenetv2.py: -------------------------------------------------------------------------------- 1 | """mobilenetv2 in pytorch 2 | 3 | 4 | 5 | [1] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen 6 | 7 | MobileNetV2: Inverted Residuals and Linear Bottlenecks 8 | https://arxiv.org/abs/1801.04381 9 | """ 10 | 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | 14 | 15 | class LinearBottleNeck(nn.Module): 16 | def __init__(self, in_channels, out_channels, stride, t=6, class_num=100): 17 | super().__init__() 18 | 19 | self.residual = nn.Sequential( 20 | nn.Conv2d(in_channels, in_channels * t, 1), 21 | nn.BatchNorm2d(in_channels * t), 22 | nn.ReLU6(inplace=True), 23 | nn.Conv2d( 24 | in_channels * t, 25 | in_channels * t, 26 | 3, 27 | stride=stride, 28 | padding=1, 29 | groups=in_channels * t, 30 | ), 31 | nn.BatchNorm2d(in_channels * t), 32 | nn.ReLU6(inplace=True), 33 | nn.Conv2d(in_channels * t, out_channels, 1), 34 | nn.BatchNorm2d(out_channels), 35 | ) 36 | 37 | self.stride = stride 38 | self.in_channels = in_channels 39 | self.out_channels = out_channels 40 | 41 | def forward(self, x): 42 | 43 | residual = self.residual(x) 44 | 45 | if self.stride == 1 and self.in_channels == self.out_channels: 46 | residual += x 47 | 48 | return residual 49 | 50 | 51 | class MobileNetV2(nn.Module): 52 | def __init__(self, class_num=100): 53 | super().__init__() 54 | 55 | self.pre = nn.Sequential( 56 | nn.Conv2d(3, 32, 1, padding=1), nn.BatchNorm2d(32), nn.ReLU6(inplace=True) 57 | ) 58 | 59 | self.stage1 = LinearBottleNeck(32, 16, 1, 1) 60 | self.stage2 = self._make_stage(2, 16, 24, 2, 6) 61 | self.stage3 = self._make_stage(3, 24, 32, 2, 6) 62 | self.stage4 = self._make_stage(4, 32, 64, 2, 6) 63 | self.stage5 = self._make_stage(3, 64, 96, 1, 6) 64 | self.stage6 = self._make_stage(3, 96, 160, 1, 6) 65 | self.stage7 = LinearBottleNeck(160, 320, 1, 6) 66 | 67 | self.conv1 = nn.Sequential( 68 | nn.Conv2d(320, 1280, 1), nn.BatchNorm2d(1280), nn.ReLU6(inplace=True) 69 | ) 70 | 71 | self.conv2 = nn.Conv2d(1280, class_num, 1) 72 | 73 | def forward(self, x): 74 | x = self.pre(x) 75 | x = self.stage1(x) 76 | x = self.stage2(x) 77 | x = self.stage3(x) 78 | x = self.stage4(x) 79 | x = self.stage5(x) 80 | x = self.stage6(x) 81 | x = self.stage7(x) 82 | x = self.conv1(x) 83 | x = F.adaptive_avg_pool2d(x, 1) 84 | x = self.conv2(x) 85 | x = x.view(x.size(0), -1) 86 | 87 | return x 88 | 89 | 
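# Minimal usage sketch (assumed input shape, matching the CIFAR-100 factory at the bottom of
# this file): mobilenetv2({"num_classes": 100}) builds the model, and a batch of shape
# (N, 3, 32, 32) passed through forward() returns logits of shape (N, 100), since adaptive
# average pooling reduces the spatial dimensions before conv2 and the final view.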
def _make_stage(self, repeat, in_channels, out_channels, stride, t): 90 | 91 | layers = [] 92 | layers.append(LinearBottleNeck(in_channels, out_channels, stride, t)) 93 | 94 | while repeat - 1: 95 | layers.append(LinearBottleNeck(out_channels, out_channels, 1, t)) 96 | repeat -= 1 97 | 98 | return nn.Sequential(*layers) 99 | 100 | 101 | def mobilenetv2(params: dict = {"num_classes": 100}): 102 | return MobileNetV2(class_num=params["num_classes"]) 103 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/resnet50_tv.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torchvision.models as models 3 | 4 | 5 | def resnet50_tv( 6 | params: dict = { 7 | "num_classes": 14, 8 | "pretrained": True, 9 | "freeze_bn": True, 10 | "dropout": 0.2, 11 | } 12 | ): 13 | model = models.resnet50(pretrained=params["pretrained"]) 14 | # overwrite fc layer 15 | if "dropout" in params.keys() and params["dropout"] > 0.0: 16 | # dropout probability of an element to be zeroed. Default: 0.5 17 | model.fc = nn.Sequential( 18 | nn.Dropout(p=params["dropout"]), 19 | nn.Linear(2048, params["num_classes"]), 20 | ) 21 | else: 22 | model.fc = nn.Linear(2048, params["num_classes"]) 23 | # freeze_bn? 24 | if "freeze_bn" in params.keys() and params["freeze_bn"]: 25 | for _layer in model.modules(): 26 | if isinstance(_layer, nn.BatchNorm2d): 27 | # eval mode 28 | _layer.eval() 29 | _layer.weight.requires_grad = False 30 | _layer.bias.requires_grad = False 31 | return model 32 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/shufflenetv2.py: -------------------------------------------------------------------------------- 1 | """shufflenetv2 in pytorch 2 | 3 | 4 | 5 | [1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun 6 | 7 | ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design 8 | https://arxiv.org/abs/1807.11164 9 | """ 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | 15 | 16 | def channel_split(x, split): 17 | """split a tensor into two pieces along channel dimension 18 | Args: 19 | x: input tensor 20 | split:(int) channel size for each pieces 21 | """ 22 | assert x.size(1) == split * 2 23 | return torch.split(x, split, dim=1) 24 | 25 | 26 | def channel_shuffle(x, groups): 27 | """channel shuffle operation 28 | Args: 29 | x: input tensor 30 | groups: input branch number 31 | """ 32 | 33 | batch_size, channels, height, width = x.size() 34 | channels_per_group = int(channels // groups) 35 | 36 | x = x.view(batch_size, groups, channels_per_group, height, width) 37 | x = x.transpose(1, 2).contiguous() 38 | x = x.view(batch_size, -1, height, width) 39 | 40 | return x 41 | 42 | 43 | class ShuffleUnit(nn.Module): 44 | def __init__(self, in_channels, out_channels, stride): 45 | super().__init__() 46 | 47 | self.stride = stride 48 | self.in_channels = in_channels 49 | self.out_channels = out_channels 50 | 51 | if stride != 1 or in_channels != out_channels: 52 | self.residual = nn.Sequential( 53 | nn.Conv2d(in_channels, in_channels, 1), 54 | nn.BatchNorm2d(in_channels), 55 | nn.ReLU(inplace=True), 56 | nn.Conv2d( 57 | in_channels, 58 | in_channels, 59 | 3, 60 | stride=stride, 61 | padding=1, 62 | groups=in_channels, 63 | ), 64 | nn.BatchNorm2d(in_channels), 65 | nn.Conv2d(in_channels, int(out_channels / 2), 1), 66 | nn.BatchNorm2d(int(out_channels / 2)), 67 | 
nn.ReLU(inplace=True), 68 | ) 69 | 70 | self.shortcut = nn.Sequential( 71 | nn.Conv2d( 72 | in_channels, 73 | in_channels, 74 | 3, 75 | stride=stride, 76 | padding=1, 77 | groups=in_channels, 78 | ), 79 | nn.BatchNorm2d(in_channels), 80 | nn.Conv2d(in_channels, int(out_channels / 2), 1), 81 | nn.BatchNorm2d(int(out_channels / 2)), 82 | nn.ReLU(inplace=True), 83 | ) 84 | else: 85 | self.shortcut = nn.Sequential() 86 | 87 | in_channels = int(in_channels / 2) 88 | self.residual = nn.Sequential( 89 | nn.Conv2d(in_channels, in_channels, 1), 90 | nn.BatchNorm2d(in_channels), 91 | nn.ReLU(inplace=True), 92 | nn.Conv2d( 93 | in_channels, 94 | in_channels, 95 | 3, 96 | stride=stride, 97 | padding=1, 98 | groups=in_channels, 99 | ), 100 | nn.BatchNorm2d(in_channels), 101 | nn.Conv2d(in_channels, in_channels, 1), 102 | nn.BatchNorm2d(in_channels), 103 | nn.ReLU(inplace=True), 104 | ) 105 | 106 | def forward(self, x): 107 | 108 | if self.stride == 1 and self.out_channels == self.in_channels: 109 | shortcut, residual = channel_split(x, int(self.in_channels / 2)) 110 | else: 111 | shortcut = x 112 | residual = x 113 | 114 | shortcut = self.shortcut(shortcut) 115 | residual = self.residual(residual) 116 | x = torch.cat([shortcut, residual], dim=1) 117 | x = channel_shuffle(x, 2) 118 | 119 | return x 120 | 121 | 122 | class ShuffleNetV2(nn.Module): 123 | def __init__(self, ratio=1, class_num=100): 124 | super().__init__() 125 | if ratio == 0.5: 126 | out_channels = [48, 96, 192, 1024] 127 | elif ratio == 1: 128 | out_channels = [116, 232, 464, 1024] 129 | elif ratio == 1.5: 130 | out_channels = [176, 352, 704, 1024] 131 | elif ratio == 2: 132 | out_channels = [244, 488, 976, 2048] 133 | else: 134 | ValueError("unsupported ratio number") 135 | 136 | self.pre = nn.Sequential(nn.Conv2d(3, 24, 3, padding=1), nn.BatchNorm2d(24)) 137 | 138 | self.stage2 = self._make_stage(24, out_channels[0], 3) 139 | self.stage3 = self._make_stage(out_channels[0], out_channels[1], 7) 140 | self.stage4 = self._make_stage(out_channels[1], out_channels[2], 3) 141 | self.conv5 = nn.Sequential( 142 | nn.Conv2d(out_channels[2], out_channels[3], 1), 143 | nn.BatchNorm2d(out_channels[3]), 144 | nn.ReLU(inplace=True), 145 | ) 146 | 147 | self.fc = nn.Linear(out_channels[3], class_num) 148 | 149 | def forward(self, x): 150 | x = self.pre(x) 151 | x = self.stage2(x) 152 | x = self.stage3(x) 153 | x = self.stage4(x) 154 | x = self.conv5(x) 155 | x = F.adaptive_avg_pool2d(x, 1) 156 | x = x.view(x.size(0), -1) 157 | x = self.fc(x) 158 | 159 | return x 160 | 161 | def _make_stage(self, in_channels, out_channels, repeat): 162 | layers = [] 163 | layers.append(ShuffleUnit(in_channels, out_channels, 2)) 164 | 165 | while repeat: 166 | layers.append(ShuffleUnit(out_channels, out_channels, 1)) 167 | repeat -= 1 168 | 169 | return nn.Sequential(*layers) 170 | 171 | 172 | def shufflenetv2(params: dict = {"num_classes": 100}): 173 | return ShuffleNetV2(class_num=params["num_classes"]) 174 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/transformers/prot_bert_bfd_seqlevel.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoConfig, AutoModelForSequenceClassification 2 | 3 | from proselflc.exceptions import ParamException 4 | 5 | 6 | def prot_bert_bfd_seqclassifier( 7 | params: dict = { 8 | "network_name": "Rostlab/prot_bert_bfd", 9 | "num_hidden_layers": 5, 10 | "num_attention_heads": 8, 11 | # 12 | "num_classes": 2, 13 | 
} 14 | ): 15 | if params["network_name"] == "Rostlab_prot_bert_bfd_seq": 16 | network_name = "Rostlab/prot_bert_bfd" 17 | config = AutoConfig.from_pretrained(network_name) 18 | config.num_hidden_layers = params["num_hidden_layers"] 19 | config.num_attention_heads = params["num_attention_heads"] 20 | 21 | config.num_labels = params["num_classes"] 22 | 23 | return AutoModelForSequenceClassification.from_pretrained( 24 | network_name, 25 | config=config, 26 | ) 27 | else: 28 | error_msg = "network_name != Rostlab_prot_bert_bfd_seq" 29 | raise ParamException(error_msg) 30 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/transformers/prot_bert_bfd_tokenlevel.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoConfig, AutoModelForTokenClassification 2 | 3 | from proselflc.exceptions import ParamException 4 | 5 | 6 | # hard to decoupled due to datasets 7 | # leave it now 8 | def prot_bert_bfd_tokenclassifier( 9 | params: dict = { 10 | "network_name": "Rostlab/prot_bert_bfd", 11 | "num_hidden_layers": 5, 12 | "num_attention_heads": 8, 13 | # 14 | "num_classes": 3, 15 | } 16 | ): 17 | if params["network_name"] == "Rostlab_prot_bert_bfd_token": 18 | network_name = "Rostlab/prot_bert_bfd" 19 | config = AutoConfig.from_pretrained(network_name) 20 | config.num_hidden_layers = params["num_hidden_layers"] 21 | config.num_attention_heads = params["num_attention_heads"] 22 | 23 | config.num_labels = params["num_classes"] 24 | 25 | return AutoModelForTokenClassification.from_pretrained( 26 | network_name, 27 | config=config, 28 | ) 29 | else: 30 | error_msg = "network_name != Rostlab_prot_bert_bfd_token" 31 | raise ParamException(error_msg) 32 | 33 | # if "num_classes" not in params.keys(): 34 | # params["num_classes"] = len(unique_tags) 35 | # if "id2tag" not in params.keys(): 36 | # params["id2tag"] = id2tag 37 | # if "tag2id" not in params.keys(): 38 | # params["tag2id"] = tag2id 39 | 40 | # config = AutoConfig.from_pretrained(params["network_name"]) 41 | # config.num_hidden_layers = params["num_hidden_layers"] 42 | # config.num_attention_heads = params["num_attention_heads"] 43 | 44 | # config.num_labels = params["num_classes"] 45 | # config.id2label = params["id2tag"] 46 | # config.label2id = params["tag2id"] 47 | 48 | # return AutoModelForTokenClassification.from_pretrained( 49 | # params["network_name"], 50 | # config=config, 51 | # ) 52 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/vit_modelconfig/configs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
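# The getters below return ml_collections.ConfigDict objects; as the commented-out map in
# vit_models.py suggests, modeling.CONFIGS ties names such as "ViT-B_16" to these functions,
# e.g. get_b16_config() describes a 12-layer, 12-head transformer with hidden_size 768 and
# 16x16 patches.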
14 | 15 | import ml_collections 16 | 17 | 18 | def get_testing(): 19 | """Returns a minimal configuration for testing.""" 20 | config = ml_collections.ConfigDict() 21 | config.patches = ml_collections.ConfigDict({"size": (16, 16)}) 22 | config.hidden_size = 1 23 | config.transformer = ml_collections.ConfigDict() 24 | config.transformer.mlp_dim = 1 25 | config.transformer.num_heads = 1 26 | config.transformer.num_layers = 1 27 | config.transformer.attention_dropout_rate = 0.0 28 | config.transformer.dropout_rate = 0.1 29 | config.classifier = "token" 30 | config.representation_size = None 31 | return config 32 | 33 | 34 | def get_b16_config(): 35 | """Returns the ViT-B/16 configuration.""" 36 | config = ml_collections.ConfigDict() 37 | config.patches = ml_collections.ConfigDict({"size": (16, 16)}) 38 | config.hidden_size = 768 39 | config.transformer = ml_collections.ConfigDict() 40 | config.transformer.mlp_dim = 3072 41 | config.transformer.num_heads = 12 42 | config.transformer.num_layers = 12 43 | config.transformer.attention_dropout_rate = 0.0 44 | config.transformer.dropout_rate = 0.1 45 | config.classifier = "token" 46 | config.representation_size = None 47 | return config 48 | 49 | 50 | def get_r50_b16_config(): 51 | """Returns the Resnet50 + ViT-B/16 configuration.""" 52 | config = get_b16_config() 53 | del config.patches.size 54 | config.patches.grid = (14, 14) 55 | config.resnet = ml_collections.ConfigDict() 56 | config.resnet.num_layers = (3, 4, 9) 57 | config.resnet.width_factor = 1 58 | return config 59 | 60 | 61 | def get_b32_config(): 62 | """Returns the ViT-B/32 configuration.""" 63 | config = get_b16_config() 64 | config.patches.size = (32, 32) 65 | return config 66 | 67 | 68 | def get_l16_config(): 69 | """Returns the ViT-L/16 configuration.""" 70 | config = ml_collections.ConfigDict() 71 | config.patches = ml_collections.ConfigDict({"size": (16, 16)}) 72 | config.hidden_size = 1024 73 | config.transformer = ml_collections.ConfigDict() 74 | config.transformer.mlp_dim = 4096 75 | config.transformer.num_heads = 16 76 | config.transformer.num_layers = 24 77 | config.transformer.attention_dropout_rate = 0.0 78 | config.transformer.dropout_rate = 0.1 79 | config.classifier = "token" 80 | config.representation_size = None 81 | return config 82 | 83 | 84 | def get_l32_config(): 85 | """Returns the ViT-L/32 configuration.""" 86 | config = get_l16_config() 87 | config.patches.size = (32, 32) 88 | return config 89 | 90 | 91 | def get_h14_config(): 92 | """Returns the ViT-L/16 configuration.""" 93 | config = ml_collections.ConfigDict() 94 | config.patches = ml_collections.ConfigDict({"size": (14, 14)}) 95 | config.hidden_size = 1280 96 | config.transformer = ml_collections.ConfigDict() 97 | config.transformer.mlp_dim = 5120 98 | config.transformer.num_heads = 16 99 | config.transformer.num_layers = 32 100 | config.transformer.attention_dropout_rate = 0.0 101 | config.transformer.dropout_rate = 0.1 102 | config.classifier = "token" 103 | config.representation_size = None 104 | return config 105 | -------------------------------------------------------------------------------- /src/proselflc/slices/networks/vit_models.py: -------------------------------------------------------------------------------- 1 | # CONFIGS = { 2 | # 'ViT-B_16': configs.get_b16_config(), 3 | # 'ViT-B_32': configs.get_b32_config(), 4 | # 'ViT-L_16': configs.get_l16_config(), 5 | # 'ViT-L_32': configs.get_l32_config(), 6 | # 'ViT-H_14': configs.get_h14_config(), 7 | # 'R50-ViT-B_16': configs.get_r50_b16_config(), 
8 | # 'testing': configs.get_testing(), 9 | # } 10 | import numpy as np 11 | 12 | from proselflc.slices.networks.vit_modelconfig.modeling import ( 13 | CONFIGS, 14 | VisionTransformer, 15 | ) 16 | 17 | 18 | def ViT_B_16(params: dict = {"num_classes": 100}): 19 | config = CONFIGS["ViT-B_16"] 20 | model = VisionTransformer( 21 | config, 22 | params["img_size"], 23 | zero_head=True, 24 | num_classes=params["num_classes"], 25 | ) 26 | model.load_from(np.load(params["pretrained_dir"])) 27 | 28 | return model 29 | -------------------------------------------------------------------------------- /src/proselflc/trainer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/XinshaoAmosWang/DeepCriticalLearning/17fb603ed9a464ed386b9462b9e3a74aedfb84ae/src/proselflc/trainer/__init__.py -------------------------------------------------------------------------------- /src/proselflc/trainer/utils.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import torch 4 | from torch import Tensor 5 | 6 | from proselflc.exceptions import ParamException 7 | 8 | 9 | def logits2probs_softmax(logits): 10 | """ 11 | Transform logits to probabilities using exp function and normalisation 12 | 13 | Input: 14 | logits with shape: (N, C) 15 | N means the batch size or the number of instances. 16 | C means the number of training classes. 17 | 18 | Output: 19 | probability vectors of shape (N, C) 20 | """ 21 | # reimplementation of F.softmax(logits) 22 | # or: torch.nn.Softmax(dim=1)(logits) 23 | # per instance: 24 | # subtract max logit for numerical issues 25 | # subtractmax_logits = logits - torch.max(logits, dim=1, keepdim=True).values 26 | # exp_logits = torch.exp(subtractmax_logits) 27 | # sum_logits = torch.sum(exp_logits, dim=1, keepdim=True) 28 | # return exp_logits / sum_logits 29 | return torch.nn.Softmax(dim=1)(logits) 30 | 31 | 32 | @torch.no_grad() 33 | def intlabels2onehotmatrix(device: str, class_num, intlabels) -> Tensor: 34 | target_probs = np.zeros((len(intlabels), class_num), dtype=np.float32) 35 | for i in range(len(intlabels)): 36 | # default ignore index 37 | if intlabels[i] == -100: 38 | pass 39 | else: 40 | target_probs[i][intlabels[i]] = 1 41 | target_probs = torch.tensor(target_probs) 42 | if device == "gpu": 43 | target_probs = target_probs.cuda() 44 | return target_probs 45 | 46 | 47 | def intlabel2onehot(intlabel, class_num) -> np.ndarray: 48 | """ 49 | intlabel in the class index: [0, class_num-1] 50 | """ 51 | if intlabel not in list(range(class_num)): 52 | error_msg = "intlabe: {}".format( 53 | intlabel 54 | ) + " not in the range of [0, {}]".format(class_num - 1) 55 | raise ParamException(error_msg) 56 | 57 | target_probs = np.zeros(class_num, dtype=np.float32) 58 | target_probs[intlabel] = 1 59 | return target_probs 60 | 61 | 62 | def save_figures(fig_save_path="", y_inputs=[], fig_legends=[], xlabel="", ylabel=""): 63 | colors = ["r", "b"] 64 | linestyles = ["solid", "dashdot"] 65 | x = torch.arange(len(y_inputs[0])) 66 | # 67 | fig, ax = plt.subplots() 68 | for y_input, color, linestyle, fig_legend in zip( 69 | y_inputs, colors, linestyles, fig_legends 70 | ): 71 | ax.plot(x, y_input, color=color, linestyle=linestyle, label=fig_legend) 72 | # legend = ax.legend(loc="upper right") 73 | ax.legend(loc="upper right") 74 | plt.xlabel(xlabel) 75 | plt.ylabel(ylabel) 76 | # 77 | plt.savefig(fig_save_path, dpi=100) 78 | 
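A minimal sketch of how the helpers above feed the two-distribution losses in src/proselflc/slices/losses; the tensor values are illustrative assumptions, not part of the repository:

import torch

from proselflc.slices.losses.crossentropy import CrossEntropy
from proselflc.slices.losses.lablesmoothing import LabelSmoothing
from proselflc.trainer.utils import intlabels2onehotmatrix, logits2probs_softmax

logits = torch.tensor([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])  # (N=2, C=3), made-up values
pred_probs = logits2probs_softmax(logits)  # softmax over dim=1, rows sum to 1
target_probs = intlabels2onehotmatrix(device="cpu", class_num=3, intlabels=[1, 2])
ce_loss = CrossEntropy()(pred_probs, target_probs)  # scalar, normalised by N
ls_loss = LabelSmoothing(params={"epsilon": 0.1})(pred_probs, target_probs)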
-------------------------------------------------------------------------------- /tests/convnets_cifar100/trainer_calibration_vision_cifar100_covnets_proselflc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import unittest 4 | from itertools import product 5 | 6 | import pandas 7 | import torch 8 | 9 | from proselflc.optim.sgd_multistep import SGDMultiStep 10 | from proselflc.trainer.trainer_cnn_vision_derivedgrad import Trainer 11 | 12 | 13 | class TestTrainer(unittest.TestCase): 14 | WORK_DIR = None 15 | 16 | def set_params_vision(self): 17 | self.params.update( 18 | { 19 | "data_name": "cifar100", 20 | "num_classes": 100, # 1000 21 | "device": "gpu", 22 | # 23 | "num_workers": 8, 24 | # 25 | "counter": "iteration", 26 | "classes_per_batch": 8, 27 | # 28 | "seed": 123, 29 | } 30 | ) 31 | 32 | def setUp(self): 33 | """ 34 | This function is an init for all tests 35 | """ 36 | self.params = {} 37 | self.set_params_vision() 38 | 39 | self.params["total_epochs"] = 80 40 | self.params["eval_interval"] = 400 41 | 42 | # 43 | # 44 | self.params["lr_scheduler"] = "WarmupMultiStepSchedule" 45 | self.params["sampler"] = "BalancedBatchSampler" 46 | 47 | def test_trainer_cifar100(self): 48 | k = 0 49 | 50 | for ( 51 | self.params["symmetric_noise_rate"], 52 | (self.params["network_name"], self.params["weight_decay"]), 53 | # 54 | # 55 | (self.params["batch_size"], self.params["classes_per_batch"]), 56 | # 57 | self.params["momentum"], 58 | # 59 | self.params["warmup_epochs"], 60 | self.params["lr"], 61 | self.params["batch_accumu_steps"], 62 | # 63 | (self.params["milestones"], self.params["gamma"]), 64 | # 65 | ( 66 | self.params["total_epochs"], 67 | self.params["eval_interval"], 68 | ), 69 | # 70 | self.params["loss_mode"], 71 | self.params["trust_mode"], 72 | # 73 | self.params["loss_name"], 74 | self.params["transit_time_ratio"], 75 | self.params["exp_base"], 76 | self.params["logit_soften_T"], 77 | ) in product( 78 | [ 79 | 0.4, 80 | 0.2, 81 | 0.0, 82 | 0.6, 83 | ], 84 | [ 85 | ("resnet18", 2e-3), 86 | ("shufflenetv2", 1e-3), 87 | ], 88 | # 89 | # 90 | # 2022/03/04: (128, 8) converges much slower!!! 91 | [(128, 64)], # fix this 92 | # 93 | [0.9], # fix this 94 | # 95 | # last time, it was 8 96 | # will this be better? 97 | [0], # warmup epochs 98 | [0.2], # lr 99 | # sensitive or not? 100 | [10], # batch accumu 101 | # 102 | # fix lr scheduler? 
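# For reference, this product() enumerates 4 noise rates x 2 networks x 3 trust modes
# x 3 exp bases x 5 softening temperatures (every other list is a singleton),
# i.e. 360 training runs in total.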
103 | [([20000, 30000], 0.1)], 104 | # 105 | # fix epoch 106 | [(100, 500)], 107 | [ 108 | "cross entropy", 109 | ], 110 | [ 111 | "global*(1-H(p)/H(u))", 112 | "global only", 113 | "global*max_p", 114 | ], 115 | # 116 | ["proselflc"], 117 | [0.50], # transit ratio 118 | [16, 12, 8], # exp base 119 | # 120 | [1.0, 0.5, 0.8, 0.6, 0.4], # < 1, entropy decreases 121 | ): 122 | k = k + 1 123 | print(k) 124 | 125 | dt_string = time.strftime("%Y%m%d-%H%M%S") 126 | summary_writer_dir = ( 127 | "{:0>3}_".format(k) 128 | + self.params["loss_name"] 129 | + "_warm" 130 | + str(self.params["warmup_epochs"]) 131 | # + "_gamma" 132 | # + str(self.params["gamma"]) 133 | # + "_" 134 | # + str(self.params["counter"]) 135 | # + "_epo" 136 | # + str(self.params["total_epochs"]) 137 | # + "_lr" 138 | # + str(self.params["lr"]) 139 | # + "_" 140 | # + str(self.params["milestones"]) 141 | + "_" 142 | + dt_string 143 | ) 144 | self.params["summary_writer_dir"] = ( 145 | self.WORK_DIR 146 | + "/" 147 | + self.params["data_name"] 148 | + "_symmetric_noise_rate_" 149 | + str(self.params["symmetric_noise_rate"]) 150 | + "/" 151 | + self.params["network_name"] 152 | + "/" 153 | + summary_writer_dir 154 | ) 155 | if not os.path.exists(self.params["summary_writer_dir"]): 156 | os.makedirs(self.params["summary_writer_dir"]) 157 | 158 | trainer = Trainer(params=self.params) 159 | self.assertTrue(isinstance(trainer, Trainer)) 160 | self.assertTrue(isinstance(trainer.optim, SGDMultiStep)) 161 | 162 | self.params["milestones"] = str(self.params["milestones"]) 163 | self.dataframe = pandas.DataFrame(self.params, index=[0]) 164 | self.dataframe.to_csv( 165 | self.params["summary_writer_dir"] + "/params.csv", 166 | encoding="utf-8", 167 | index=False, 168 | sep="\t", 169 | mode="w", # 170 | ) 171 | 172 | # some more test 173 | trainer.train() 174 | torch.save( 175 | trainer.network, 176 | self.params["summary_writer_dir"] + "/model.pt", 177 | ) 178 | 179 | 180 | if __name__ == "__main__": 181 | 182 | work_dir = os.getenv( 183 | "SM_CHANNEL_WORK_DIR", 184 | "/home/xinshao/tpami_proselflc_experiments_calibration/", 185 | ) 186 | TestTrainer.WORK_DIR = work_dir 187 | 188 | print(TestTrainer.WORK_DIR) 189 | 190 | unittest.main() 191 | --------------------------------------------------------------------------------
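For reference, a minimal sketch of building a model through the slice getter shown earlier; the parameter values are illustrative and mirror the resnet50_tv defaults:

from proselflc.slicegetter.get_network import NetworkPool

params = {
    "network_name": "resnet50_tv",  # must be a key of NetworkPool.validated_networks
    "num_classes": 14,
    "pretrained": True,
    "freeze_bn": True,
    "dropout": 0.2,
}
model = NetworkPool.get_network(params=params)  # raises ParamException for unknown names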