├── .gitignore
├── README.md
├── data
└── icbhi_dataset
│ ├── filename_differences.txt
│ ├── filename_format.txt
│ ├── metadata.txt
│ ├── official_split.txt
│ ├── patient_diagnosis.txt
│ └── patient_list_foldwise.txt
├── main.py
├── models
└── clap.py
├── requirements.txt
├── scripts
├── eval_bts.sh
├── icbhi_audio-clap_ce.sh
└── icbhi_bts_meta_all.sh
└── util
├── __init__.py
├── icbhi_dataset.py
├── icbhi_util.py
├── meta_description.py
└── misc.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # poetry
99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100 | # This is especially recommended for binary packages to ensure reproducibility, and is more
101 | # commonly ignored for libraries.
102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103 | #poetry.lock
104 |
105 | # pdm
106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107 | #pdm.lock
108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109 | # in version control.
110 | # https://pdm.fming.dev/#use-with-ide
111 | .pdm.toml
112 |
113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
114 | __pypackages__/
115 |
116 | # Celery stuff
117 | celerybeat-schedule
118 | celerybeat.pid
119 |
120 | # SageMath parsed files
121 | *.sage.py
122 |
123 | # Environments
124 | .env
125 | .venv
126 | env/
127 | venv/
128 | ENV/
129 | env.bak/
130 | venv.bak/
131 |
132 | # Spyder project settings
133 | .spyderproject
134 | .spyproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 | # mkdocs documentation
140 | /site
141 |
142 | # mypy
143 | .mypy_cache/
144 | .dmypy.json
145 | dmypy.json
146 |
147 | # Pyre type checker
148 | .pyre/
149 |
150 | # pytype static type analyzer
151 | .pytype/
152 |
153 | # Cython debug symbols
154 | cython_debug/
155 |
156 | # PyCharm
157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
159 | # and can be added to the global gitignore or merged into this file. For a more nuclear
160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161 | #.idea/
162 |
163 | # macbook junk
164 | .DS_Store
165 |
166 | # debug outputs
167 | .outputs/
168 | .temp/
169 | .local/
170 |
171 | # Local folders
172 | .vscode/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BTS: Bridging Text and Sound Modalities for Metadata-Aided Respiratory Sound Classification (INTERSPEECH 2024)
2 | [arXiv](https://arxiv.org/abs/2406.06786) | [Conference](https://www.isca-archive.org/interspeech_2024/kim24f_interspeech.html) | [BibTeX](#bibtex)
3 |
4 |
5 |
6 |
7 |
8 |
9 | Official Implementation of **BTS: Bridging Text and Sound Modalities for Metadata-Aided Respiratory Sound Classification.**
10 |
11 | **See you in INTERSPEECH 2024!**
12 |
13 | ## Updates
14 | The pretrained BTS checkpoint is available at [Google Drive](https://drive.google.com/file/d/1RziWDkqRTUictS_9zYWbJWj4i6C79RIy/view?usp=sharing).
15 |
16 | The model's performance on Specificity, Sensitivity, and ICBHI Score is **[82.58, 45.11, 63.85]**.
17 |
18 | ## Prerequisites
19 | Please check the environments and requirements before you start. If needed, we recommend either upgrading to the listed versions or installing the missing packages for smooth running.
20 |
21 | 
22 | 
23 | 
24 |
25 | ### Environments
26 | `Ubuntu xx.xx`
27 | `Python 3.8.xx`
28 |
29 | ## Environmental set-up
30 |
31 | Install the necessary packages with:
32 |
33 | run `requirements.txt`
34 |
35 | ```
36 | pip install torch torchvision torchaudio
37 | pip install -r requirements.txt
38 | ```
39 |
40 | For reproducibility, we used torch==2.0.1+cu117 and torchaudio==2.0.2+cu117, so we highly recommend installing them as follows:
41 |
42 | ```
43 | pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2
44 | ```
45 |
46 | ## Datasets
47 | Download the ICBHI dataset files and unzip them.
48 | All details are described in the [paper w/ code](https://paperswithcode.com/dataset/icbhi-respiratory-sound-database)
49 |
50 | ```
51 | wget https://bhichallenge.med.auth.gr/sites/default/files/ICBHI_final_database/ICBHI_final_database.zip
52 | or
53 | wget --no-check-certificate https://bhichallenge.med.auth.gr/sites/default/files/ICBHI_final_database/ICBHI_final_database.zip
54 | ```
55 |
56 | All `*.wav` and `*.txt` should be saved in data/icbhi_dataset/audio_test_data. (i.e., mkdir `audio_test_data` into `data/icbhi_dataset/` and move `*.wav` and `*.txt` into `data/icbhi_dataset/audio_test_data/`)
57 |
58 | Note that ICBHI dataset consists of a total of 6,898 respiratory cycles, of which 1,864 contain crackles, 886 contain wheezes, and 506 contain both crackles and wheezes, in 920 annotated audio samples from 126 subjects.
59 |
60 | ## Run
61 |
62 | ### Audio-CLAP for Respiratory Sound Classification
63 | ```
64 | $ ./scripts/icbhi_audio-clap_ce.sh
65 | ```
66 |
67 | ### BTS for Respiratory Sound Classification
68 | ```
69 | $ ./scripts/icbhi_bts_meta_all.sh
70 | ```
71 |
72 | ### Evaluation with BTS for Respiratory Sound Classification
73 | ```
74 | $ ./scripts/eval_bts.sh
75 | ```
76 | Note that change `--pretrained_ckpt` with your directory. (e.g. `--pretrained_ckpt /home2/jw/workspace/crisp/save/icbhi_laion/clap-htsat-unfused_ce_bs8_lr5e-5_ep50_seed1_check2/best.pth`)
77 |
78 |
79 | We will provide the pretrained checkpoint in the camera-ready version.
80 |
81 | ## ICBHI Data
82 |
83 | The database consists of a total of 5.5 hours of recordings containing 6898 respiratory cycles, of which 1864 contain crackles, 886 contain wheezes, and 506 contain both crackles and wheezes, in 920 annotated audio samples from 126 subjects.
84 |
85 | The downloaded data looks like [[kaggle](https://www.kaggle.com/datasets/vbookshelf/respiratory-sound-database), [paper w/ code](https://paperswithcode.com/dataset/icbhi-respiratory-sound-database)]:
86 |
87 |
88 | data/icbhi_dataset
89 | ├── metadata.txt
90 | │ ├── Patient number
91 | │ ├── Age
92 | │ ├── Sex
93 | │ ├── Adult BMI (kg/m2)
94 | │ ├── Adult Weight (kg)
95 | │ └── Child Height (cm)
96 | │
97 | ├── official_split.txt
98 | │ ├── Patient number_Recording index_Chest location_Acquisition mode_Recording equipment
99 | │ | ├── Chest location
100 | │ | | ├── Trachea (Tc),Anterior left (Al),Anterior right (Ar),Posterior left (Pl)
101 | │ | | └── Posterior right (Pr),Lateral left (Ll),Lateral right (Lr)
102 | │ | |
103 | │ | ├── Acquisition mode
104 | │ | | └── sequential/single channel (sc), simultaneous/multichannel (mc)
105 | │ | |
106 | │ | └── Recording equipment
107 | │ | ├── AKG C417L Microphone (AKGC417L),
108 | │ | ├── 3M Littmann Classic II SE Stethoscope (LittC2SE),
109 | │ | ├── 3M Littmann 3200 Electronic Stethoscope (Litt3200),
110 | │ | └── WelchAllyn Meditron Master Elite Electronic Stethoscope (Meditron)
111 | │ |
112 | │ └── Train/Test
113 | │
114 | ├── patient_diagnosis.txt
115 | │ ├── Patient number
116 | │ └── Diagnosis
117 | │ ├── COPD: Chronic Obstructive Pulmonary Disease
118 | │ ├── LRTI: Lower Respiratory Tract Infection
119 | │ └── URTI: Upper Respiratory Tract Infection
120 | │
121 | └── patient_list_foldwise.txt
122 |
123 |
124 | ## Result
125 | The proposed BTS achieves a 63.54% Score, which is the new state-of-the-art performance in ICBHI score.
126 |
127 |
128 |
129 |
130 |
131 |
132 | ## BibTeX
133 | If you find this repo useful for your research, please consider citing our paper:
134 |
135 | ```
136 | @inproceedings{kim24f_interspeech,
137 | title = {BTS: Bridging Text and Sound Modalities for Metadata-Aided Respiratory Sound Classification},
138 | author = {June-Woo Kim and Miika Toikkanen and Yera Choi and Seoung-Eun Moon and Ho-Young Jung},
139 | year = {2024},
140 | booktitle = {Interspeech 2024},
141 | pages = {1690--1694},
142 | doi = {10.21437/Interspeech.2024-492},
143 | issn = {2958-1796},
144 | }
145 | ```
146 |
--------------------------------------------------------------------------------
/data/icbhi_dataset/filename_differences.txt:
--------------------------------------------------------------------------------
1 | '101_1b1_Al_sc_AKGC417L'
2 | '101_1b1_Pr_sc_AKGC417L'
3 | '102_1b1_Ar_sc_AKGC417L'
4 | '105_1b1_Tc_sc_LittC2SE'
5 | '108_1b1_Al_sc_LittC2SE'
6 | '111_1b2_Tc_sc_LittC2SE'
7 | '111_1b3_Tc_sc_LittC2SE'
8 | '115_1b1_Ar_sc_LittC2SE'
9 | '116_1b2_Pl_sc_LittC2SE'
10 | '116_1b2_Tc_sc_LittC2SE'
11 | '119_1b1_Ar_sc_AKGC417L'
12 | '121_1b1_Tc_sc_LittC2SE'
13 | '121_1p1_Tc_sc_LittC2SE'
14 | '123_1b1_Al_sc_AKGC417L'
15 | '125_1b1_Tc_sc_LittC2SE'
16 | '126_1b1_Al_sc_AKGC417L'
17 | '127_1b1_Ar_sc_LittC2SE'
18 | '129_1b1_Ar_sc_LittC2SE'
19 | '131_1b1_Al_sc_LittC2SE'
20 | '136_1b1_Ar_sc_AKGC417L'
21 | '137_1b1_Ar_sc_LittC2SE'
22 | '137_1b1_Ll_sc_LittC2SE'
23 | '143_1b1_Al_sc_AKGC417L'
24 | '144_1b1_Al_sc_AKGC417L'
25 | '144_1b1_Tc_sc_AKGC417L'
26 | '148_1b1_Al_sc_LittC2SE'
27 | '149_1b1_Al_sc_LittC2SE'
28 | '149_1b1_Lr_sc_LittC2SE'
29 | '149_1b1_Pl_sc_LittC2SE'
30 | '150_1b2_Al_sc_AKGC417L'
31 | '152_1b1_Al_sc_LittC2SE'
32 | '153_1b1_Al_sc_LittC2SE'
33 | '159_1b1_Al_sc_AKGC417L'
34 | '159_1b1_Ar_sc_AKGC417L'
35 | '159_1b1_Ll_sc_AKGC417L'
36 | '159_1b1_Pr_sc_AKGC417L'
37 | '161_1b1_Al_sc_LittC2SE'
38 | '161_1b1_Pl_sc_LittC2SE'
39 | '164_1b1_Ll_sc_LittC2SE'
40 | '165_1b1_Ar_sc_AKGC417L'
41 | '165_1b1_Pl_sc_AKGC417L'
42 | '165_1b1_Pr_sc_AKGC417L'
43 | '167_1b1_Al_sc_LittC2SE'
44 | '167_1b1_Pr_sc_LittC2SE'
45 | '168_1b1_Al_sc_LittC2SE'
46 | '169_1b1_Lr_sc_AKGC417L'
47 | '169_1b2_Ll_sc_AKGC417L'
48 | '171_1b1_Al_sc_AKGC417L'
49 | '173_1b1_Al_sc_AKGC417L'
50 | '179_1b1_Al_sc_LittC2SE'
51 | '179_1b1_Tc_sc_LittC2SE'
52 | '182_1b1_Tc_sc_LittC2SE'
53 | '183_1b1_Pl_sc_AKGC417L'
54 | '183_1b1_Tc_sc_AKGC417L'
55 | '184_1b1_Ar_sc_LittC2SE'
56 | '187_1b1_Ll_sc_AKGC417L'
57 | '188_1b1_Al_sc_LittC2SE'
58 | '188_1b1_Ar_sc_LittC2SE'
59 | '188_1b1_Pl_sc_LittC2SE'
60 | '188_1b1_Tc_sc_LittC2SE'
61 | '190_1b1_Tc_sc_AKGC417L'
62 | '194_1b1_Lr_sc_AKGC417L'
63 | '194_1b1_Pr_sc_AKGC417L'
64 | '196_1b1_Pr_sc_LittC2SE'
65 | '197_1b1_Al_sc_AKGC417L'
66 | '197_1b1_Tc_sc_AKGC417L'
67 | '201_1b1_Al_sc_LittC2SE'
68 | '201_1b1_Ar_sc_LittC2SE'
69 | '201_1b2_Al_sc_LittC2SE'
70 | '201_1b2_Ar_sc_LittC2SE'
71 | '201_1b3_Al_sc_LittC2SE'
72 | '201_1b3_Ar_sc_LittC2SE'
73 | '202_1b1_Ar_sc_AKGC417L'
74 | '206_1b1_Ar_sc_LittC2SE'
75 | '206_1b1_Lr_sc_LittC2SE'
76 | '206_1b1_Pl_sc_LittC2SE'
77 | '208_1b1_Ll_sc_LittC2SE'
78 | '209_1b1_Tc_sc_LittC2SE'
79 | '210_1b1_Al_sc_LittC2SE'
80 | '210_1b1_Ar_sc_LittC2SE'
81 | '214_1b1_Ar_sc_AKGC417L'
82 | '215_1b2_Ar_sc_LittC2SE'
83 | '215_1b3_Tc_sc_LittC2SE'
84 | '216_1b1_Al_sc_AKGC417L'
85 | '216_1b1_Pl_sc_AKGC417L'
86 | '217_1b1_Tc_sc_LittC2SE'
87 | '224_1b1_Tc_sc_AKGC417L'
88 | '224_1b2_Al_sc_AKGC417L'
89 | '225_1b1_Pl_sc_AKGC417L'
90 | '226_1b1_Al_sc_LittC2SE'
91 | '226_1b1_Ll_sc_LittC2SE'
92 |
--------------------------------------------------------------------------------
/data/icbhi_dataset/filename_format.txt:
--------------------------------------------------------------------------------
1 | Elements contained in the filenames:
2 |
3 | Patient number (101,102,...,226)
4 | Recording index
5 | Chest location (Trachea (Tc), {Anterior (A), Posterior (P), Lateral (L)}{left (l), right (r)})
6 | Acquisition mode (sequential/single channel (sc), simultaneous/multichannel (mc))
7 | Recording equipment (AKG C417L Microphone, 3M Littmann Classic II SE Stethoscope, 3M Littmann 3200 Electronic Stethoscope, WelchAllyn Meditron Master Elite Electronic Stethoscope)
--------------------------------------------------------------------------------
/data/icbhi_dataset/metadata.txt:
--------------------------------------------------------------------------------
1 | 101 3.0 F NA 19.0 99.0 Pr
2 | 102 0.75 F NA 9.8 73.0 Ar
3 | 103 70.0 F 33.0 NA NA Ar
4 | 104 70.0 F 28.47 NA NA Al
5 | 105 7.0 F NA 32.0 135.0 Tc
6 | 106 73.0 F 21.0 NA NA Pl
7 | 107 75.0 F 33.7 NA NA Ll
8 | 108 3.0 M NA NA NA Al
9 | 109 84.0 F 33.53 NA NA Lr
10 | 110 75.0 M 25.21 NA NA Pr
11 | 111 63.0 M 28.4 NA NA Tc
12 | 112 60.0 M 22.86 NA NA Pr
13 | 113 58.0 M 28.41 NA NA Lr
14 | 114 77.0 M 23.12 NA NA Pr
15 | 115 0.58 M NA 7.14 64.0 Ar
16 | 116 56.0 M 28.58 NA NA Pl
17 | 117 68.0 M 24.4 NA NA Tc
18 | 118 81.0 M 36.76 NA NA Pr
19 | 119 2.0 F NA 15.2 94.0 Ar
20 | 120 78.0 M 35.14 NA NA Ar
21 | 121 13.0 F NA 65.0 170.0 Tc
22 | 122 66.0 M 33.0 NA NA Ar
23 | 123 5.0 M NA 25.0 125.0 Al
24 | 124 65.0 M 29.07 NA NA Al
25 | 125 14.0 M NA 62.0 170.0 Tc
26 | 126 1.0 F NA 10.18 80.0 Al
27 | 127 2.0 M NA 12.6 98.0 Ar
28 | 128 65.0 F 24.3 NA NA Tc
29 | 129 6.0 M NA 23.0 119.0 Ar
30 | 130 85.0 F 17.1 NA NA Pl
31 | 131 3.0 M NA 14.0 97.0 Al
32 | 132 71.0 M 34.0 NA NA Lr
33 | 133 68.0 M 27.4 NA NA Pl
34 | 134 61.0 M 32.0 NA NA Al
35 | 135 70.0 M 21.0 NA NA Ar
36 | 136 5.0 M NA 16.2 110.0 Ar
37 | 137 4.0 M NA 18.0 104.0 Ar
38 | 138 56.0 F 21.6 NA NA Ll
39 | 139 61.0 M 28.68 NA NA Lr
40 | 140 79.0 F 23.0 NA NA Tc
41 | 141 66.0 M 22.4 NA NA Pr
42 | 142 78.0 M 26.1 NA NA Pl
43 | 143 0.25 F NA 8.24 68.0 Al
44 | 144 3.0 M NA 16.7 100.0 Al
45 | 145 69.0 M 23.4 NA NA Lr
46 | 146 67.0 M 28.0 NA NA Pl
47 | 147 77.0 M 25.7 NA NA Ar
48 | 148 4.0 M NA 33.0 110.0 Al
49 | 149 0.67 M NA 9.5 70.0 Lr
50 | 150 0.67 F NA 8.12 74.0 Al
51 | 151 75.0 M 28.4 NA NA Ll
52 | 152 16.0 M NA 70.0 183.0 Al
53 | 153 3.0 M NA 16.7 103.0 Al
54 | 154 65.0 M 28.1 NA NA Ll
55 | 155 69.0 M 26.0 NA NA Al
56 | 156 80.0 M 22.9 NA NA Al
57 | 157 62.0 M 53.5 NA NA Pr
58 | 158 63.0 M 16.5 NA NA Lr
59 | 159 0.83 F NA 11.0 80.0 Al
60 | 160 74.0 M 27.4 NA NA Tc
61 | 161 2.0 F NA 12.0 85.0 Pl
62 | 162 67.0 F 24.9 NA NA Ll
63 | 163 62.0 M 28.3 NA NA Pl
64 | 164 1.0 M NA 13.0 NA Ll
65 | 165 2.0 F NA 12.7 97.0 Pr
66 | 166 71.0 M 25.06 NA NA Ar
67 | 167 1.0 F NA 11.5 86.4 Pr
68 | 168 19.0 F 17.35 NA NA Al
69 | 169 50.0 F 28.81 NA NA Ll
70 | 170 79.0 M 22.6 NA NA Pr
71 | 171 9.0 M NA 32.0 133.0 Al
72 | 172 73.0 M 29.3 NA NA Lr
73 | 173 3.0 M NA 17.3 NA Al
74 | 174 68.0 M 26.4 NA NA Al
75 | 175 63.0 M 28.34 NA NA Pl
76 | 176 65.0 M 30.1 NA NA Pl
77 | 177 56.0 M 22.1 NA NA Pr
78 | 178 58.0 M 30.1 NA NA Tc
79 | 179 10.0 F NA 15.0 104.0 Al
80 | 180 93.0 M 29.03 NA NA Lr
81 | 181 65.0 M 26.4 NA NA Ar
82 | 182 11.0 M NA 33.0 136.0 Tc
83 | 183 14.0 F NA NA NA Tc
84 | 184 2.0 F NA 15.0 100.0 Ar
85 | 185 75.0 M 27.7 NA NA Pl
86 | 186 71.0 M 30.0 NA NA Al
87 | 187 0.5 F NA 8.26 71.0 Ll
88 | 188 3.0 M NA 16.0 100.0 Al
89 | 189 75.0 F 26.2 NA NA Lr
90 | 190 3.0 F NA NA NA Tc
91 | 191 74.0 F 36.0 NA NA Pr
92 | 192 69.0 M 28.0 NA NA Ar
93 | 193 77.0 M 26.3 NA NA Al
94 | 194 2.0 M NA 12.8 86.0 Pr
95 | 195 67.0 M 29.41 NA NA Al
96 | 196 21.0 F 25.5 NA NA Pr
97 | 197 16.0 F NA NA NA Al
98 | 198 71.0 M 18.6 NA NA Pr
99 | 199 71.0 M 20.0 NA NA Ll
100 | 200 72.0 F 27.8 NA NA Ar
101 | 201 73.0 F 28.52 NA NA Al
102 | 202 2.0 M NA 11.84 87.0 Ar
103 | 203 57.0 F 24.0 NA NA Pr
104 | 204 66.0 M 29.76 NA NA Ll
105 | 205 45.0 M 20.1 NA NA Al
106 | 206 3.0 M NA 13.0 92.0 Pl
107 | 207 63.0 F 29.6 NA NA Pl
108 | 208 5.0 F NA 24.1 117.0 Ll
109 | 209 14.0 F NA 80.0 183.0 Tc
110 | 210 1.0 F NA 12.96 76.0 Al
111 | 211 70.0 F 31.1 NA NA Ar
112 | 212 83.0 M 23.0 NA NA Tc
113 | 213 58.0 F 24.7 NA NA Al
114 | 214 5.0 M NA 30.0 118.0 Ar
115 | 215 56.0 F 25.35 NA NA Ar
116 | 216 1.0 M NA 10.25 78.0 Pl
117 | 217 12.0 F NA NA NA Tc
118 | 218 75.0 M 26.29 NA NA Al
119 | 219 81.0 M 26.0 NA NA Ar
120 | 220 66.0 M 35.4 NA NA Al
121 | 221 74.0 F 29.0 NA NA Pl
122 | 222 60.0 M NA NA NA Ar
123 | 223 NA NA NA NA NA Ll
124 | 224 10.0 F NA 32.3 143.0 Tc
125 | 225 0.83 M NA 7.8 74.0 Pl
126 | 226 4.0 M NA 16.7 103.0 Ll
127 |
--------------------------------------------------------------------------------
/data/icbhi_dataset/official_split.txt:
--------------------------------------------------------------------------------
1 | 101_1b1_Al_sc_Meditron test
2 | 101_1b1_Pr_sc_Meditron test
3 | 102_1b1_Ar_sc_Meditron test
4 | 103_2b2_Ar_mc_LittC2SE train
5 | 104_1b1_Al_sc_Litt3200 test
6 | 104_1b1_Ar_sc_Litt3200 test
7 | 104_1b1_Ll_sc_Litt3200 test
8 | 104_1b1_Lr_sc_Litt3200 test
9 | 104_1b1_Pl_sc_Litt3200 test
10 | 104_1b1_Pr_sc_Litt3200 test
11 | 105_1b1_Tc_sc_Meditron train
12 | 106_2b1_Pl_mc_LittC2SE train
13 | 106_2b1_Pr_mc_LittC2SE train
14 | 107_2b3_Al_mc_AKGC417L train
15 | 107_2b3_Ar_mc_AKGC417L train
16 | 107_2b3_Ll_mc_AKGC417L train
17 | 107_2b3_Lr_mc_AKGC417L train
18 | 107_2b3_Pl_mc_AKGC417L train
19 | 107_2b3_Pr_mc_AKGC417L train
20 | 107_2b3_Tc_mc_AKGC417L train
21 | 107_2b4_Al_mc_AKGC417L train
22 | 107_2b4_Ar_mc_AKGC417L train
23 | 107_2b4_Ll_mc_AKGC417L train
24 | 107_2b4_Lr_mc_AKGC417L train
25 | 107_2b4_Pl_mc_AKGC417L train
26 | 107_2b4_Pr_mc_AKGC417L train
27 | 107_2b4_Tc_mc_AKGC417L train
28 | 107_2b5_Al_mc_AKGC417L train
29 | 107_2b5_Ar_mc_AKGC417L train
30 | 107_2b5_Ll_mc_AKGC417L train
31 | 107_2b5_Lr_mc_AKGC417L train
32 | 107_2b5_Pl_mc_AKGC417L train
33 | 107_2b5_Pr_mc_AKGC417L train
34 | 107_2b5_Tc_mc_AKGC417L train
35 | 107_3p2_Al_mc_AKGC417L train
36 | 107_3p2_Ar_mc_AKGC417L train
37 | 107_3p2_Ll_mc_AKGC417L train
38 | 107_3p2_Lr_mc_AKGC417L train
39 | 107_3p2_Pl_mc_AKGC417L train
40 | 107_3p2_Pr_mc_AKGC417L train
41 | 107_3p2_Tc_mc_AKGC417L train
42 | 108_1b1_Al_sc_Meditron train
43 | 109_1b1_Al_sc_Litt3200 test
44 | 109_1b1_Ar_sc_Litt3200 test
45 | 109_1b1_Ll_sc_Litt3200 test
46 | 109_1b1_Lr_sc_Litt3200 test
47 | 109_1b1_Pl_sc_Litt3200 test
48 | 109_1b1_Pr_sc_Litt3200 test
49 | 110_1b1_Pr_sc_Meditron train
50 | 110_1p1_Al_sc_Meditron train
51 | 110_1p1_Ll_sc_Meditron train
52 | 110_1p1_Lr_sc_Meditron train
53 | 110_1p1_Pr_sc_Meditron train
54 | 111_1b2_Tc_sc_Meditron train
55 | 111_1b3_Tc_sc_Meditron train
56 | 112_1b1_Ar_sc_Meditron train
57 | 112_1b1_Lr_sc_Meditron train
58 | 112_1p1_Ll_sc_Litt3200 train
59 | 112_1p1_Pl_sc_Litt3200 train
60 | 112_1p1_Pr_sc_Litt3200 train
61 | 113_1b1_Al_sc_Litt3200 test
62 | 113_1b1_Ar_sc_Litt3200 test
63 | 113_1b1_Ll_sc_Litt3200 test
64 | 113_1b1_Lr_sc_Litt3200 test
65 | 113_1b1_Pl_sc_Litt3200 test
66 | 113_1b1_Pr_sc_Litt3200 test
67 | 114_1b4_Al_mc_AKGC417L train
68 | 114_1b4_Ar_mc_AKGC417L train
69 | 114_1b4_Lr_mc_AKGC417L train
70 | 114_1b4_Pl_mc_AKGC417L train
71 | 114_1b4_Pr_mc_AKGC417L train
72 | 115_1b1_Ar_sc_Meditron train
73 | 116_1b2_Pl_sc_Meditron train
74 | 116_1b2_Tc_sc_Meditron train
75 | 117_1b2_Tc_mc_LittC2SE train
76 | 117_1b3_Tc_mc_LittC2SE train
77 | 118_1b1_Al_sc_Litt3200 test
78 | 118_1b1_Ar_sc_Litt3200 test
79 | 118_1b1_Ll_sc_Litt3200 test
80 | 118_1b1_Lr_sc_Litt3200 test
81 | 118_1b1_Pl_sc_Litt3200 test
82 | 118_1b1_Pr_sc_Litt3200 test
83 | 119_1b1_Ar_sc_Meditron test
84 | 120_1b1_Al_sc_Meditron train
85 | 120_1b1_Ar_sc_Meditron train
86 | 120_1b1_Lr_sc_Meditron train
87 | 120_1b1_Pl_sc_Meditron train
88 | 120_1b1_Pr_sc_Meditron train
89 | 121_1b1_Tc_sc_Meditron train
90 | 121_1p1_Tc_sc_Meditron train
91 | 122_2b1_Al_mc_LittC2SE train
92 | 122_2b1_Ar_mc_LittC2SE train
93 | 122_2b1_Tc_mc_LittC2SE train
94 | 122_2b2_Al_mc_LittC2SE train
95 | 122_2b2_Ar_mc_LittC2SE train
96 | 122_2b2_Tc_mc_LittC2SE train
97 | 122_2b3_Al_mc_LittC2SE train
98 | 122_2b3_Ar_mc_LittC2SE train
99 | 122_2b3_Tc_mc_LittC2SE train
100 | 123_1b1_Al_sc_Meditron test
101 | 124_1b1_Al_sc_Litt3200 test
102 | 124_1b1_Ar_sc_Litt3200 test
103 | 124_1b1_Ll_sc_Litt3200 test
104 | 124_1b1_Lr_sc_Litt3200 test
105 | 124_1b1_Pl_sc_Litt3200 test
106 | 124_1b1_Pr_sc_Litt3200 test
107 | 125_1b1_Tc_sc_Meditron train
108 | 126_1b1_Al_sc_Meditron test
109 | 127_1b1_Ar_sc_Meditron train
110 | 128_1b3_Tc_mc_LittC2SE train
111 | 129_1b1_Ar_sc_Meditron train
112 | 130_1p2_Al_mc_AKGC417L train
113 | 130_1p2_Ar_mc_AKGC417L train
114 | 130_1p2_Ll_mc_AKGC417L train
115 | 130_1p2_Lr_mc_AKGC417L train
116 | 130_1p2_Pl_mc_AKGC417L train
117 | 130_1p2_Pr_mc_AKGC417L train
118 | 130_1p2_Tc_mc_AKGC417L train
119 | 130_1p3_Al_mc_AKGC417L train
120 | 130_1p3_Ar_mc_AKGC417L train
121 | 130_1p3_Ll_mc_AKGC417L train
122 | 130_1p3_Lr_mc_AKGC417L train
123 | 130_1p3_Pl_mc_AKGC417L train
124 | 130_1p3_Pr_mc_AKGC417L train
125 | 130_1p3_Tc_mc_AKGC417L train
126 | 130_1p4_Al_mc_AKGC417L train
127 | 130_1p4_Ar_mc_AKGC417L train
128 | 130_1p4_Ll_mc_AKGC417L train
129 | 130_1p4_Lr_mc_AKGC417L train
130 | 130_1p4_Pl_mc_AKGC417L train
131 | 130_1p4_Pr_mc_AKGC417L train
132 | 130_1p4_Tc_mc_AKGC417L train
133 | 130_2b2_Al_mc_AKGC417L train
134 | 130_2b2_Ar_mc_AKGC417L train
135 | 130_2b2_Ll_mc_AKGC417L train
136 | 130_2b2_Lr_mc_AKGC417L train
137 | 130_2b2_Pl_mc_AKGC417L train
138 | 130_2b2_Pr_mc_AKGC417L train
139 | 130_2b2_Tc_mc_AKGC417L train
140 | 130_2b3_Al_mc_AKGC417L train
141 | 130_2b3_Ar_mc_AKGC417L train
142 | 130_2b3_Ll_mc_AKGC417L train
143 | 130_2b3_Lr_mc_AKGC417L train
144 | 130_2b3_Pl_mc_AKGC417L train
145 | 130_2b3_Pr_mc_AKGC417L train
146 | 130_2b3_Tc_mc_AKGC417L train
147 | 130_2b4_Al_mc_AKGC417L train
148 | 130_2b4_Ar_mc_AKGC417L train
149 | 130_2b4_Ll_mc_AKGC417L train
150 | 130_2b4_Lr_mc_AKGC417L train
151 | 130_2b4_Pl_mc_AKGC417L train
152 | 130_2p3_Pl_mc_AKGC417L train
153 | 130_2p5_Al_mc_AKGC417L train
154 | 130_2p5_Ar_mc_AKGC417L train
155 | 130_2p5_Lr_mc_AKGC417L train
156 | 130_2p5_Pl_mc_AKGC417L train
157 | 130_2p5_Pr_mc_AKGC417L train
158 | 130_2p5_Tc_mc_AKGC417L train
159 | 130_3b3_Ll_mc_AKGC417L train
160 | 130_3b4_Al_mc_AKGC417L train
161 | 130_3b4_Ar_mc_AKGC417L train
162 | 130_3b4_Lr_mc_AKGC417L train
163 | 130_3b4_Pl_mc_AKGC417L train
164 | 130_3b4_Pr_mc_AKGC417L train
165 | 130_3p2_Al_mc_AKGC417L train
166 | 130_3p2_Ar_mc_AKGC417L train
167 | 130_3p2_Pl_mc_AKGC417L train
168 | 130_3p2_Pr_mc_AKGC417L train
169 | 130_3p2_Tc_mc_AKGC417L train
170 | 130_3p3_Al_mc_AKGC417L train
171 | 130_3p3_Pl_mc_AKGC417L train
172 | 130_3p3_Pr_mc_AKGC417L train
173 | 130_3p3_Tc_mc_AKGC417L train
174 | 130_3p4_Al_mc_AKGC417L train
175 | 130_3p4_Pl_mc_AKGC417L train
176 | 130_3p4_Pr_mc_AKGC417L train
177 | 130_3p4_Tc_mc_AKGC417L train
178 | 131_1b1_Al_sc_Meditron train
179 | 132_2b1_Lr_mc_LittC2SE train
180 | 132_2b2_Lr_mc_LittC2SE train
181 | 133_2p2_Al_mc_AKGC417L test
182 | 133_2p2_Ar_mc_AKGC417L test
183 | 133_2p2_Pl_mc_AKGC417L test
184 | 133_2p2_Tc_mc_AKGC417L test
185 | 133_2p3_Al_mc_AKGC417L test
186 | 133_2p3_Ar_mc_AKGC417L test
187 | 133_2p3_Pl_mc_AKGC417L test
188 | 133_2p3_Pr_mc_AKGC417L test
189 | 133_2p3_Tc_mc_AKGC417L test
190 | 133_2p4_Al_mc_AKGC417L test
191 | 133_2p4_Ar_mc_AKGC417L test
192 | 133_2p4_Pl_mc_AKGC417L test
193 | 133_2p4_Pr_mc_AKGC417L test
194 | 133_2p4_Tc_mc_AKGC417L test
195 | 133_3p2_Al_mc_AKGC417L test
196 | 133_3p2_Ar_mc_AKGC417L test
197 | 133_3p2_Pl_mc_AKGC417L test
198 | 133_3p2_Pr_mc_AKGC417L test
199 | 133_3p4_Tc_mc_AKGC417L test
200 | 134_2b1_Al_mc_LittC2SE train
201 | 134_2b1_Ar_mc_LittC2SE train
202 | 134_2b2_Al_mc_LittC2SE train
203 | 134_2b2_Ar_mc_LittC2SE train
204 | 134_2b3_Ar_mc_LittC2SE train
205 | 135_2b1_Al_mc_LittC2SE train
206 | 135_2b1_Ar_mc_LittC2SE train
207 | 135_2b1_Pl_mc_LittC2SE train
208 | 135_2b1_Tc_mc_LittC2SE train
209 | 135_2b2_Al_mc_LittC2SE train
210 | 135_2b2_Ar_mc_LittC2SE train
211 | 135_2b2_Pl_mc_LittC2SE train
212 | 135_2b2_Tc_mc_LittC2SE train
213 | 135_2b3_Al_mc_LittC2SE train
214 | 135_2b3_Ar_mc_LittC2SE train
215 | 135_2b3_Pl_mc_LittC2SE train
216 | 135_2b3_Pr_mc_LittC2SE train
217 | 135_2b3_Tc_mc_LittC2SE train
218 | 136_1b1_Ar_sc_Meditron test
219 | 137_1b1_Ar_sc_Meditron train
220 | 137_1b1_Ll_sc_Meditron train
221 | 138_1p2_Al_mc_AKGC417L train
222 | 138_1p2_Ar_mc_AKGC417L train
223 | 138_1p2_Ll_mc_AKGC417L train
224 | 138_1p2_Lr_mc_AKGC417L train
225 | 138_1p2_Pl_mc_AKGC417L train
226 | 138_1p2_Pr_mc_AKGC417L train
227 | 138_1p2_Tc_mc_AKGC417L train
228 | 138_1p3_Al_mc_AKGC417L train
229 | 138_1p3_Ar_mc_AKGC417L train
230 | 138_1p3_Ll_mc_AKGC417L train
231 | 138_1p3_Lr_mc_AKGC417L train
232 | 138_1p3_Pl_mc_AKGC417L train
233 | 138_1p3_Pr_mc_AKGC417L train
234 | 138_1p3_Tc_mc_AKGC417L train
235 | 138_1p4_Ar_mc_AKGC417L train
236 | 138_1p4_Ll_mc_AKGC417L train
237 | 138_1p4_Lr_mc_AKGC417L train
238 | 138_1p4_Pl_mc_AKGC417L train
239 | 138_1p4_Pr_mc_AKGC417L train
240 | 138_1p4_Tc_mc_AKGC417L train
241 | 138_2p2_Al_mc_AKGC417L train
242 | 138_2p2_Ar_mc_AKGC417L train
243 | 138_2p2_Ll_mc_AKGC417L train
244 | 138_2p2_Lr_mc_AKGC417L train
245 | 138_2p2_Pl_mc_AKGC417L train
246 | 138_2p2_Pr_mc_AKGC417L train
247 | 138_2p2_Tc_mc_AKGC417L train
248 | 139_1b1_Al_sc_Litt3200 test
249 | 139_1b1_Ar_sc_Litt3200 test
250 | 139_1b1_Ll_sc_Litt3200 test
251 | 139_1b1_Lr_sc_Litt3200 test
252 | 139_1b1_Pl_sc_Litt3200 test
253 | 139_1b1_Pr_sc_Litt3200 test
254 | 140_2b2_Ll_mc_LittC2SE train
255 | 140_2b2_Tc_mc_LittC2SE train
256 | 140_2b3_Ll_mc_LittC2SE train
257 | 140_2b3_Tc_mc_LittC2SE train
258 | 141_1b1_Pr_mc_LittC2SE train
259 | 141_1b2_Ar_mc_LittC2SE train
260 | 141_1b2_Lr_mc_LittC2SE train
261 | 141_1b2_Pr_mc_LittC2SE train
262 | 141_1b2_Tc_mc_LittC2SE train
263 | 141_1b3_Al_mc_LittC2SE train
264 | 141_1b3_Ar_mc_LittC2SE train
265 | 141_1b3_Pr_mc_LittC2SE train
266 | 142_1b1_Pl_mc_LittC2SE train
267 | 143_1b1_Al_sc_Meditron test
268 | 144_1b1_Al_sc_Meditron test
269 | 144_1b1_Tc_sc_Meditron test
270 | 145_2b2_Al_mc_AKGC417L test
271 | 145_2b2_Ar_mc_AKGC417L test
272 | 145_2b2_Lr_mc_AKGC417L test
273 | 145_2b2_Pr_mc_AKGC417L test
274 | 145_3b2_Ar_mc_AKGC417L test
275 | 145_3b2_Lr_mc_AKGC417L test
276 | 145_3b4_Pl_mc_AKGC417L test
277 | 146_2b2_Pl_mc_AKGC417L test
278 | 146_2b4_Al_mc_AKGC417L test
279 | 146_2b4_Ar_mc_AKGC417L test
280 | 146_2b4_Ll_mc_AKGC417L test
281 | 146_2b4_Lr_mc_AKGC417L test
282 | 146_2b4_Pr_mc_AKGC417L test
283 | 146_8p3_Al_mc_AKGC417L test
284 | 146_8p3_Ar_mc_AKGC417L test
285 | 146_8p3_Lr_mc_AKGC417L test
286 | 146_8p3_Pl_mc_AKGC417L test
287 | 146_8p3_Pr_mc_AKGC417L test
288 | 147_1b2_Tc_mc_AKGC417L test
289 | 147_1b3_Tc_mc_AKGC417L test
290 | 147_1b4_Tc_mc_AKGC417L test
291 | 147_2b2_Al_mc_AKGC417L test
292 | 147_2b2_Ar_mc_AKGC417L test
293 | 147_2b2_Pl_mc_AKGC417L test
294 | 147_2b3_Al_mc_AKGC417L test
295 | 147_2b3_Ar_mc_AKGC417L test
296 | 147_2b3_Ll_mc_AKGC417L test
297 | 147_2b3_Lr_mc_AKGC417L test
298 | 147_2b3_Pl_mc_AKGC417L test
299 | 147_2b4_Al_mc_AKGC417L test
300 | 147_2b4_Ar_mc_AKGC417L test
301 | 147_2b4_Ll_mc_AKGC417L test
302 | 147_2b4_Lr_mc_AKGC417L test
303 | 147_2b4_Pl_mc_AKGC417L test
304 | 148_1b1_Al_sc_Meditron train
305 | 149_1b1_Al_sc_Meditron test
306 | 149_1b1_Lr_sc_Meditron test
307 | 149_1b1_Pl_sc_Meditron test
308 | 150_1b2_Al_sc_Meditron test
309 | 151_2p2_Al_mc_AKGC417L test
310 | 151_2p2_Ar_mc_AKGC417L test
311 | 151_2p2_Ll_mc_AKGC417L test
312 | 151_2p2_Lr_mc_AKGC417L test
313 | 151_2p2_Pl_mc_AKGC417L test
314 | 151_2p2_Pr_mc_AKGC417L test
315 | 151_2p2_Tc_mc_AKGC417L test
316 | 151_2p3_Al_mc_AKGC417L test
317 | 151_2p3_Ar_mc_AKGC417L test
318 | 151_2p3_Ll_mc_AKGC417L test
319 | 151_2p3_Lr_mc_AKGC417L test
320 | 151_2p3_Pl_mc_AKGC417L test
321 | 151_2p3_Pr_mc_AKGC417L test
322 | 151_2p3_Tc_mc_AKGC417L test
323 | 151_2p4_Al_mc_AKGC417L test
324 | 151_2p4_Ar_mc_AKGC417L test
325 | 151_2p4_Ll_mc_AKGC417L test
326 | 151_2p4_Lr_mc_AKGC417L test
327 | 151_2p4_Pl_mc_AKGC417L test
328 | 151_2p4_Pr_mc_AKGC417L test
329 | 151_2p4_Tc_mc_AKGC417L test
330 | 151_3p2_Al_mc_AKGC417L test
331 | 151_3p2_Ar_mc_AKGC417L test
332 | 151_3p2_Lr_mc_AKGC417L test
333 | 151_3p2_Pl_mc_AKGC417L test
334 | 151_3p2_Pr_mc_AKGC417L test
335 | 151_3p2_Tc_mc_AKGC417L test
336 | 151_3p3_Ll_mc_AKGC417L test
337 | 152_1b1_Al_sc_Meditron train
338 | 153_1b1_Al_sc_Meditron train
339 | 154_1b3_Al_mc_AKGC417L train
340 | 154_1b3_Ar_mc_AKGC417L train
341 | 154_1b3_Ll_mc_AKGC417L train
342 | 154_1b3_Lr_mc_AKGC417L train
343 | 154_1b3_Pl_mc_AKGC417L train
344 | 154_1b3_Pr_mc_AKGC417L train
345 | 154_1b3_Tc_mc_AKGC417L train
346 | 154_2b4_Al_mc_AKGC417L train
347 | 154_2b4_Ar_mc_AKGC417L train
348 | 154_2b4_Ll_mc_AKGC417L train
349 | 154_2b4_Lr_mc_AKGC417L train
350 | 154_2b4_Pl_mc_AKGC417L train
351 | 154_2b4_Pr_mc_AKGC417L train
352 | 154_2b4_Tc_mc_AKGC417L train
353 | 154_3b3_Al_mc_AKGC417L train
354 | 154_3b3_Ar_mc_AKGC417L train
355 | 154_3b3_Ll_mc_AKGC417L train
356 | 154_4b4_Al_mc_AKGC417L train
357 | 154_4b4_Ar_mc_AKGC417L train
358 | 154_4b4_Ll_mc_AKGC417L train
359 | 154_4b4_Lr_mc_AKGC417L train
360 | 154_4b4_Pl_mc_AKGC417L train
361 | 154_4b4_Pr_mc_AKGC417L train
362 | 155_2b1_Al_mc_LittC2SE train
363 | 156_2b3_Al_mc_AKGC417L test
364 | 156_2b3_Ar_mc_AKGC417L train
365 | 156_2b3_Ll_mc_AKGC417L train
366 | 156_2b3_Lr_mc_AKGC417L test
367 | 156_2b3_Pl_mc_AKGC417L test
368 | 156_2b3_Pr_mc_AKGC417L train
369 | 156_5b3_Al_mc_AKGC417L train
370 | 156_5b3_Ar_mc_AKGC417L test
371 | 156_5b3_Ll_mc_AKGC417L test
372 | 156_5b3_Lr_mc_AKGC417L train
373 | 156_5b3_Pl_mc_AKGC417L test
374 | 156_5b3_Pr_mc_AKGC417L train
375 | 156_8b3_Al_mc_AKGC417L test
376 | 156_8b3_Ar_mc_AKGC417L train
377 | 156_8b3_Ll_mc_AKGC417L train
378 | 156_8b3_Lr_mc_AKGC417L test
379 | 156_8b3_Pl_mc_AKGC417L train
380 | 157_1b1_Al_sc_Meditron train
381 | 157_1b1_Ar_sc_Meditron train
382 | 157_1b1_Lr_sc_Meditron train
383 | 157_1b1_Pl_sc_Meditron train
384 | 157_1b1_Pr_sc_Meditron train
385 | 158_1b3_Ar_mc_LittC2SE train
386 | 158_1p2_Al_mc_AKGC417L train
387 | 158_1p2_Ar_mc_AKGC417L train
388 | 158_1p2_Ll_mc_AKGC417L train
389 | 158_1p2_Lr_mc_AKGC417L train
390 | 158_1p2_Pl_mc_AKGC417L train
391 | 158_1p2_Pr_mc_AKGC417L train
392 | 158_1p2_Tc_mc_AKGC417L train
393 | 158_1p3_Al_mc_AKGC417L train
394 | 158_1p3_Ar_mc_AKGC417L train
395 | 158_1p3_Ll_mc_AKGC417L train
396 | 158_1p3_Lr_mc_AKGC417L train
397 | 158_1p3_Pl_mc_AKGC417L train
398 | 158_1p3_Pr_mc_AKGC417L train
399 | 158_1p3_Tc_mc_AKGC417L train
400 | 158_1p4_Al_mc_AKGC417L train
401 | 158_1p4_Ar_mc_AKGC417L train
402 | 158_1p4_Lr_mc_AKGC417L train
403 | 158_1p4_Pl_mc_AKGC417L train
404 | 158_1p4_Pr_mc_AKGC417L train
405 | 158_1p4_Tc_mc_AKGC417L train
406 | 158_2p2_Ar_mc_AKGC417L train
407 | 158_2p3_Lr_mc_AKGC417L train
408 | 158_2p3_Tc_mc_AKGC417L train
409 | 159_1b1_Al_sc_Meditron train
410 | 159_1b1_Ar_sc_Meditron train
411 | 159_1b1_Ll_sc_Meditron train
412 | 159_1b1_Pr_sc_Meditron train
413 | 160_1b2_Al_mc_AKGC417L test
414 | 160_1b2_Ar_mc_AKGC417L test
415 | 160_1b2_Lr_mc_AKGC417L test
416 | 160_1b2_Pl_mc_AKGC417L test
417 | 160_1b2_Pr_mc_AKGC417L test
418 | 160_1b2_Tc_mc_AKGC417L test
419 | 160_1b3_Al_mc_AKGC417L test
420 | 160_1b3_Ar_mc_AKGC417L test
421 | 160_1b3_Lr_mc_AKGC417L test
422 | 160_1b3_Pl_mc_AKGC417L test
423 | 160_1b3_Pr_mc_AKGC417L test
424 | 160_1b3_Tc_mc_AKGC417L test
425 | 160_1b4_Al_mc_AKGC417L test
426 | 160_1b4_Ar_mc_AKGC417L test
427 | 160_1b4_Lr_mc_AKGC417L test
428 | 160_1b4_Pl_mc_AKGC417L test
429 | 160_1b4_Pr_mc_AKGC417L test
430 | 160_1b4_Tc_mc_AKGC417L test
431 | 160_2b3_Lr_mc_AKGC417L test
432 | 160_2b4_Ar_mc_AKGC417L test
433 | 160_2b4_Pl_mc_AKGC417L test
434 | 160_2b4_Pr_mc_AKGC417L test
435 | 160_2b4_Tc_mc_AKGC417L test
436 | 161_1b1_Al_sc_Meditron train
437 | 161_1b1_Pl_sc_Meditron train
438 | 162_1b2_Al_mc_AKGC417L train
439 | 162_1b2_Ar_mc_AKGC417L train
440 | 162_1b2_Ll_mc_AKGC417L train
441 | 162_1b2_Lr_mc_AKGC417L train
442 | 162_1b2_Pl_mc_AKGC417L train
443 | 162_1b2_Pr_mc_AKGC417L train
444 | 162_1b2_Tc_mc_AKGC417L train
445 | 162_2b2_Al_mc_AKGC417L train
446 | 162_2b2_Ar_mc_AKGC417L train
447 | 162_2b2_Pl_mc_AKGC417L train
448 | 162_2b2_Pr_mc_AKGC417L train
449 | 162_2b2_Tc_mc_AKGC417L train
450 | 162_2b3_Al_mc_AKGC417L train
451 | 162_2b3_Ar_mc_AKGC417L train
452 | 162_2b3_Lr_mc_AKGC417L train
453 | 162_2b3_Pl_mc_AKGC417L train
454 | 162_2b3_Pr_mc_AKGC417L train
455 | 162_2b3_Tc_mc_AKGC417L train
456 | 162_2b4_Al_mc_AKGC417L train
457 | 162_2b4_Ar_mc_AKGC417L train
458 | 162_2b4_Lr_mc_AKGC417L train
459 | 162_2b4_Pl_mc_AKGC417L train
460 | 162_2b4_Pr_mc_AKGC417L train
461 | 162_2b4_Tc_mc_AKGC417L train
462 | 163_2b2_Al_mc_AKGC417L train
463 | 163_2b2_Ar_mc_AKGC417L train
464 | 163_2b2_Ll_mc_AKGC417L train
465 | 163_2b2_Lr_mc_AKGC417L train
466 | 163_2b2_Pl_mc_AKGC417L train
467 | 163_2b2_Pr_mc_AKGC417L train
468 | 163_2b2_Tc_mc_AKGC417L train
469 | 163_8b3_Al_mc_AKGC417L train
470 | 163_8b3_Ar_mc_AKGC417L train
471 | 163_8b3_Ll_mc_AKGC417L train
472 | 163_8b3_Lr_mc_AKGC417L train
473 | 163_8b3_Pl_mc_AKGC417L train
474 | 163_8b3_Pr_mc_AKGC417L train
475 | 164_1b1_Ll_sc_Meditron train
476 | 165_1b1_Ar_sc_Meditron test
477 | 165_1b1_Pl_sc_Meditron test
478 | 165_1b1_Pr_sc_Meditron test
479 | 166_1p1_Al_sc_Meditron train
480 | 166_1p1_Ar_sc_Meditron train
481 | 166_1p1_Ll_sc_Meditron train
482 | 166_1p1_Pl_sc_Meditron train
483 | 166_1p1_Pr_sc_Meditron train
484 | 167_1b1_Al_sc_Meditron train
485 | 167_1b1_Pr_sc_Meditron train
486 | 168_1b1_Al_sc_Meditron train
487 | 169_1b1_Lr_sc_Meditron test
488 | 169_1b2_Ll_sc_Meditron test
489 | 170_1b2_Al_mc_AKGC417L test
490 | 170_1b2_Ar_mc_AKGC417L test
491 | 170_1b2_Lr_mc_AKGC417L test
492 | 170_1b2_Pl_mc_AKGC417L test
493 | 170_1b2_Pr_mc_AKGC417L test
494 | 170_1b2_Tc_mc_AKGC417L test
495 | 170_1b3_Al_mc_AKGC417L test
496 | 170_1b3_Ar_mc_AKGC417L test
497 | 170_1b3_Ll_mc_AKGC417L test
498 | 170_1b3_Lr_mc_AKGC417L test
499 | 170_1b3_Pl_mc_AKGC417L test
500 | 170_1b3_Pr_mc_AKGC417L test
501 | 170_1b3_Tc_mc_AKGC417L test
502 | 170_1b4_Al_mc_AKGC417L test
503 | 170_1b4_Ar_mc_AKGC417L test
504 | 170_1b4_Lr_mc_AKGC417L test
505 | 170_1b4_Pl_mc_AKGC417L test
506 | 170_1b4_Pr_mc_AKGC417L test
507 | 170_1b4_Tc_mc_AKGC417L test
508 | 170_2b2_Al_mc_AKGC417L test
509 | 170_2b2_Ar_mc_AKGC417L test
510 | 170_2b2_Lr_mc_AKGC417L test
511 | 170_2b2_Pl_mc_AKGC417L test
512 | 170_2b2_Pr_mc_AKGC417L test
513 | 170_2b2_Tc_mc_AKGC417L test
514 | 171_1b1_Al_sc_Meditron test
515 | 172_1b3_Al_mc_AKGC417L train
516 | 172_1b3_Ar_mc_AKGC417L train
517 | 172_1b3_Ll_mc_AKGC417L train
518 | 172_1b3_Lr_mc_AKGC417L train
519 | 172_1b3_Pl_mc_AKGC417L train
520 | 172_1b3_Pr_mc_AKGC417L train
521 | 172_1b3_Tc_mc_AKGC417L train
522 | 172_1b4_Al_mc_AKGC417L train
523 | 172_1b4_Ar_mc_AKGC417L train
524 | 172_1b4_Ll_mc_AKGC417L train
525 | 172_1b4_Lr_mc_AKGC417L train
526 | 172_1b4_Pl_mc_AKGC417L train
527 | 172_1b4_Pr_mc_AKGC417L train
528 | 172_1b4_Tc_mc_AKGC417L train
529 | 172_1b5_Al_mc_AKGC417L train
530 | 172_1b5_Ar_mc_AKGC417L train
531 | 172_1b5_Ll_mc_AKGC417L train
532 | 172_1b5_Lr_mc_AKGC417L train
533 | 172_1b5_Pl_mc_AKGC417L train
534 | 172_1b5_Pr_mc_AKGC417L train
535 | 172_1b5_Tc_mc_AKGC417L train
536 | 172_2b5_Al_mc_AKGC417L train
537 | 172_2b5_Ar_mc_AKGC417L train
538 | 172_2b5_Lr_mc_AKGC417L train
539 | 172_2b5_Pl_mc_AKGC417L train
540 | 172_2b5_Pr_mc_AKGC417L train
541 | 172_2b5_Tc_mc_AKGC417L train
542 | 173_1b1_Al_sc_Meditron test
543 | 174_1p2_Ar_mc_AKGC417L test
544 | 174_1p2_Ll_mc_AKGC417L test
545 | 174_1p2_Lr_mc_AKGC417L test
546 | 174_1p2_Pl_mc_AKGC417L test
547 | 174_1p2_Pr_mc_AKGC417L test
548 | 174_1p2_Tc_mc_AKGC417L test
549 | 174_1p3_Ar_mc_AKGC417L test
550 | 174_1p3_Ll_mc_AKGC417L test
551 | 174_1p3_Lr_mc_AKGC417L test
552 | 174_1p3_Pl_mc_AKGC417L test
553 | 174_1p3_Pr_mc_AKGC417L test
554 | 174_1p3_Tc_mc_AKGC417L test
555 | 174_1p4_Ar_mc_AKGC417L test
556 | 174_1p4_Ll_mc_AKGC417L test
557 | 174_1p4_Lr_mc_AKGC417L test
558 | 174_1p4_Pl_mc_AKGC417L test
559 | 174_1p4_Pr_mc_AKGC417L test
560 | 174_1p4_Tc_mc_AKGC417L test
561 | 174_2p3_Al_mc_AKGC417L test
562 | 174_2p3_Ar_mc_AKGC417L test
563 | 174_2p3_Pl_mc_AKGC417L test
564 | 174_2p3_Pr_mc_AKGC417L test
565 | 174_2p3_Tc_mc_AKGC417L test
566 | 175_1b1_Al_sc_Litt3200 test
567 | 175_1b1_Ar_sc_Litt3200 test
568 | 175_1b1_Ll_sc_Litt3200 test
569 | 175_1b1_Lr_sc_Litt3200 test
570 | 175_1b1_Pl_sc_Litt3200 test
571 | 175_1b1_Pr_sc_Litt3200 test
572 | 176_1b3_Al_mc_AKGC417L test
573 | 176_1b3_Ar_mc_AKGC417L test
574 | 176_1b3_Ll_mc_AKGC417L test
575 | 176_1b3_Lr_mc_AKGC417L test
576 | 176_1b3_Pl_mc_AKGC417L test
577 | 176_1b3_Pr_mc_AKGC417L test
578 | 176_1b3_Tc_mc_AKGC417L test
579 | 176_1b4_Al_mc_AKGC417L test
580 | 176_1b4_Ar_mc_AKGC417L test
581 | 176_1b4_Ll_mc_AKGC417L test
582 | 176_1b4_Lr_mc_AKGC417L test
583 | 176_1b4_Pl_mc_AKGC417L test
584 | 176_1b4_Pr_mc_AKGC417L test
585 | 176_1b4_Tc_mc_AKGC417L test
586 | 176_2b3_Al_mc_AKGC417L test
587 | 176_2b3_Ar_mc_AKGC417L test
588 | 176_2b3_Ll_mc_AKGC417L test
589 | 176_2b3_Lr_mc_AKGC417L test
590 | 176_2b3_Pl_mc_AKGC417L test
591 | 176_2b3_Pr_mc_AKGC417L test
592 | 176_2b3_Tc_mc_AKGC417L test
593 | 177_1b2_Al_mc_AKGC417L test
594 | 177_1b2_Ar_mc_AKGC417L test
595 | 177_1b2_Lr_mc_AKGC417L test
596 | 177_1b2_Pl_mc_AKGC417L test
597 | 177_1b2_Pr_mc_AKGC417L test
598 | 177_1b2_Tc_mc_AKGC417L test
599 | 177_1b4_Al_mc_AKGC417L test
600 | 177_1b4_Ar_mc_AKGC417L test
601 | 177_1b4_Lr_mc_AKGC417L test
602 | 177_1b4_Pl_mc_AKGC417L test
603 | 177_1b4_Pr_mc_AKGC417L test
604 | 177_1b4_Tc_mc_AKGC417L test
605 | 177_2b4_Al_mc_AKGC417L test
606 | 177_2b4_Lr_mc_AKGC417L test
607 | 177_2b4_Pl_mc_AKGC417L test
608 | 177_2b4_Pr_mc_AKGC417L test
609 | 177_2b4_Tc_mc_AKGC417L test
610 | 178_1b2_Al_mc_AKGC417L test
611 | 178_1b2_Ar_mc_AKGC417L test
612 | 178_1b2_Lr_mc_AKGC417L test
613 | 178_1b2_Pl_mc_AKGC417L test
614 | 178_1b2_Pr_mc_AKGC417L test
615 | 178_1b2_Tc_mc_AKGC417L test
616 | 178_1b3_Al_mc_AKGC417L test
617 | 178_1b3_Ar_mc_AKGC417L test
618 | 178_1b3_Lr_mc_AKGC417L test
619 | 178_1b3_Pl_mc_AKGC417L test
620 | 178_1b3_Pr_mc_AKGC417L test
621 | 178_1b3_Tc_mc_AKGC417L test
622 | 178_1b6_Al_mc_AKGC417L test
623 | 178_1b6_Ar_mc_AKGC417L test
624 | 178_1b6_Ll_mc_AKGC417L test
625 | 178_1b6_Lr_mc_AKGC417L test
626 | 178_1b6_Pl_mc_AKGC417L test
627 | 178_1b6_Pr_mc_AKGC417L test
628 | 178_1b6_Tc_mc_AKGC417L test
629 | 178_2b2_Al_mc_AKGC417L test
630 | 178_2b2_Ar_mc_AKGC417L test
631 | 178_2b2_Lr_mc_AKGC417L test
632 | 178_2b2_Pr_mc_AKGC417L test
633 | 178_2b2_Tc_mc_AKGC417L test
634 | 179_1b1_Al_sc_Meditron train
635 | 179_1b1_Tc_sc_Meditron train
636 | 180_1b4_Al_mc_AKGC417L train
637 | 180_1b4_Ar_mc_AKGC417L train
638 | 180_1b4_Lr_mc_AKGC417L train
639 | 180_1b4_Pl_mc_AKGC417L train
640 | 180_1b4_Pr_mc_AKGC417L train
641 | 181_1b1_Ar_mc_LittC2SE train
642 | 181_1b1_Tc_mc_LittC2SE train
643 | 181_1b2_Ar_mc_LittC2SE train
644 | 181_1b3_Tc_mc_LittC2SE train
645 | 182_1b1_Tc_sc_Meditron test
646 | 183_1b1_Pl_sc_Meditron train
647 | 183_1b1_Tc_sc_Meditron train
648 | 184_1b1_Ar_sc_Meditron train
649 | 185_1b1_Al_sc_Litt3200 test
650 | 185_1b1_Ar_sc_Litt3200 test
651 | 185_1b1_Ll_sc_Litt3200 test
652 | 185_1b1_Lr_sc_Litt3200 test
653 | 185_1b1_Pl_sc_Litt3200 test
654 | 185_1b1_Pr_sc_Litt3200 test
655 | 186_2b2_Al_mc_AKGC417L train
656 | 186_2b2_Ar_mc_AKGC417L train
657 | 186_2b2_Lr_mc_AKGC417L train
658 | 186_2b2_Pl_mc_AKGC417L train
659 | 186_2b2_Pr_mc_AKGC417L train
660 | 186_2b2_Tc_mc_AKGC417L train
661 | 186_2b3_Al_mc_AKGC417L train
662 | 186_2b3_Ar_mc_AKGC417L train
663 | 186_2b3_Lr_mc_AKGC417L train
664 | 186_2b3_Pl_mc_AKGC417L train
665 | 186_2b3_Pr_mc_AKGC417L train
666 | 186_2b3_Tc_mc_AKGC417L train
667 | 186_2b4_Al_mc_AKGC417L train
668 | 186_2b4_Ar_mc_AKGC417L train
669 | 186_2b4_Lr_mc_AKGC417L train
670 | 186_2b4_Pl_mc_AKGC417L train
671 | 186_2b4_Pr_mc_AKGC417L train
672 | 186_2b4_Tc_mc_AKGC417L train
673 | 186_3b3_Al_mc_AKGC417L train
674 | 186_3b3_Ar_mc_AKGC417L train
675 | 186_3b3_Lr_mc_AKGC417L train
676 | 186_3b3_Pl_mc_AKGC417L train
677 | 186_3b3_Pr_mc_AKGC417L train
678 | 186_3b3_Tc_mc_AKGC417L train
679 | 187_1b1_Ll_sc_Meditron test
680 | 188_1b1_Al_sc_Meditron train
681 | 188_1b1_Ar_sc_Meditron train
682 | 188_1b1_Pl_sc_Meditron train
683 | 188_1b1_Tc_sc_Meditron train
684 | 189_1b2_Lr_mc_LittC2SE train
685 | 190_1b1_Tc_sc_Meditron train
686 | 191_2b1_Pl_mc_LittC2SE train
687 | 191_2b1_Pr_mc_LittC2SE train
688 | 191_2b2_Tc_mc_LittC2SE train
689 | 192_2b1_Al_mc_LittC2SE train
690 | 192_2b1_Ar_mc_LittC2SE train
691 | 192_2b2_Al_mc_LittC2SE train
692 | 192_2b2_Ar_mc_LittC2SE train
693 | 192_2b3_Al_mc_LittC2SE train
694 | 192_2b3_Ar_mc_LittC2SE train
695 | 193_1b2_Al_mc_AKGC417L train
696 | 193_1b2_Ar_mc_AKGC417L train
697 | 193_1b2_Ll_mc_AKGC417L train
698 | 193_1b2_Pl_mc_AKGC417L train
699 | 193_1b2_Pr_mc_AKGC417L train
700 | 193_1b2_Tc_mc_AKGC417L train
701 | 193_1b4_Lr_mc_AKGC417L train
702 | 193_7b3_Al_mc_AKGC417L train
703 | 193_7b3_Ar_mc_AKGC417L train
704 | 193_7b3_Ll_mc_AKGC417L train
705 | 193_7b3_Lr_mc_AKGC417L train
706 | 193_7b3_Pl_mc_AKGC417L train
707 | 193_7b3_Pr_mc_AKGC417L train
708 | 193_7b3_Tc_mc_AKGC417L train
709 | 194_1b1_Lr_sc_Meditron test
710 | 194_1b1_Pr_sc_Meditron test
711 | 195_1b1_Al_sc_Litt3200 test
712 | 195_1b1_Ar_sc_Litt3200 test
713 | 195_1b1_Ll_sc_Litt3200 test
714 | 195_1b1_Lr_sc_Litt3200 test
715 | 195_1b1_Pl_sc_Litt3200 test
716 | 195_1b1_Pr_sc_Litt3200 test
717 | 196_1b1_Pr_sc_Meditron train
718 | 197_1b1_Al_sc_Meditron train
719 | 197_1b1_Tc_sc_Meditron train
720 | 198_1b5_Al_mc_AKGC417L test
721 | 198_1b5_Ar_mc_AKGC417L test
722 | 198_1b5_Ll_mc_AKGC417L test
723 | 198_1b5_Lr_mc_AKGC417L test
724 | 198_1b5_Pl_mc_AKGC417L test
725 | 198_1b5_Pr_mc_AKGC417L test
726 | 198_1b5_Tc_mc_AKGC417L test
727 | 198_6p1_Al_mc_AKGC417L test
728 | 198_6p1_Ar_mc_AKGC417L test
729 | 198_6p1_Ll_mc_AKGC417L test
730 | 198_6p1_Lr_mc_AKGC417L test
731 | 198_6p1_Pl_mc_AKGC417L test
732 | 198_6p1_Pr_mc_AKGC417L test
733 | 198_6p1_Tc_mc_AKGC417L test
734 | 199_2b1_Ll_mc_LittC2SE train
735 | 199_2b3_Ll_mc_LittC2SE train
736 | 200_2p2_Al_mc_AKGC417L train
737 | 200_2p2_Ar_mc_AKGC417L train
738 | 200_2p2_Lr_mc_AKGC417L train
739 | 200_2p2_Pl_mc_AKGC417L train
740 | 200_2p2_Pr_mc_AKGC417L train
741 | 200_2p2_Tc_mc_AKGC417L train
742 | 200_2p3_Al_mc_AKGC417L train
743 | 200_2p3_Ar_mc_AKGC417L train
744 | 200_2p3_Lr_mc_AKGC417L train
745 | 200_2p3_Pl_mc_AKGC417L train
746 | 200_2p3_Pr_mc_AKGC417L train
747 | 200_2p3_Tc_mc_AKGC417L train
748 | 200_2p4_Al_mc_AKGC417L train
749 | 200_2p4_Ar_mc_AKGC417L train
750 | 200_2p4_Lr_mc_AKGC417L train
751 | 200_2p4_Pl_mc_AKGC417L train
752 | 200_2p4_Pr_mc_AKGC417L train
753 | 200_2p4_Tc_mc_AKGC417L train
754 | 200_3p4_Al_mc_AKGC417L train
755 | 200_3p4_Ar_mc_AKGC417L train
756 | 200_3p4_Pl_mc_AKGC417L train
757 | 200_3p4_Pr_mc_AKGC417L train
758 | 200_3p4_Tc_mc_AKGC417L train
759 | 201_1b1_Al_sc_Meditron train
760 | 201_1b1_Ar_sc_Meditron train
761 | 201_1b2_Al_sc_Meditron train
762 | 201_1b2_Ar_sc_Meditron train
763 | 201_1b3_Al_sc_Meditron train
764 | 201_1b3_Ar_sc_Meditron train
765 | 202_1b1_Ar_sc_Meditron test
766 | 203_1p2_Al_mc_AKGC417L train
767 | 203_1p2_Ar_mc_AKGC417L train
768 | 203_1p2_Lr_mc_AKGC417L train
769 | 203_1p2_Pl_mc_AKGC417L train
770 | 203_1p2_Pr_mc_AKGC417L train
771 | 203_1p2_Tc_mc_AKGC417L train
772 | 203_1p3_Al_mc_AKGC417L train
773 | 203_1p3_Ar_mc_AKGC417L train
774 | 203_1p3_Pl_mc_AKGC417L train
775 | 203_1p3_Pr_mc_AKGC417L train
776 | 203_1p3_Tc_mc_AKGC417L train
777 | 203_1p4_Al_mc_AKGC417L train
778 | 203_1p4_Ar_mc_AKGC417L train
779 | 203_1p4_Pl_mc_AKGC417L train
780 | 203_1p4_Pr_mc_AKGC417L train
781 | 203_1p4_Tc_mc_AKGC417L train
782 | 203_2p3_Al_mc_AKGC417L train
783 | 203_2p3_Ar_mc_AKGC417L train
784 | 203_2p3_Pl_mc_AKGC417L train
785 | 203_2p3_Pr_mc_AKGC417L train
786 | 203_2p3_Tc_mc_AKGC417L train
787 | 204_2b5_Al_mc_AKGC417L train
788 | 204_2b5_Ar_mc_AKGC417L train
789 | 204_2b5_Ll_mc_AKGC417L train
790 | 204_7p5_Al_mc_AKGC417L train
791 | 204_7p5_Ar_mc_AKGC417L train
792 | 204_7p5_Ll_mc_AKGC417L train
793 | 204_7p5_Lr_mc_AKGC417L train
794 | 204_7p5_Pr_mc_AKGC417L train
795 | 204_7p5_Tc_mc_AKGC417L train
796 | 205_1b3_Al_mc_AKGC417L test
797 | 205_1b3_Ar_mc_AKGC417L test
798 | 205_1b3_Ll_mc_AKGC417L test
799 | 205_1b3_Lr_mc_AKGC417L test
800 | 205_1b3_Pl_mc_AKGC417L test
801 | 205_1b3_Pr_mc_AKGC417L test
802 | 205_2b2_Pr_mc_AKGC417L test
803 | 205_2b3_Al_mc_AKGC417L test
804 | 205_2b3_Ar_mc_AKGC417L test
805 | 205_2b3_Ll_mc_AKGC417L test
806 | 205_2b4_Pl_mc_AKGC417L test
807 | 205_3b4_Al_mc_AKGC417L test
808 | 205_3b4_Ar_mc_AKGC417L test
809 | 205_3b4_Pl_mc_AKGC417L test
810 | 205_3b4_Pr_mc_AKGC417L test
811 | 205_4b2_Al_mc_AKGC417L test
812 | 205_4b2_Ar_mc_AKGC417L test
813 | 205_4b2_Lr_mc_AKGC417L test
814 | 205_4b2_Pl_mc_AKGC417L test
815 | 205_4b2_Pr_mc_AKGC417L test
816 | 206_1b1_Ar_sc_Meditron train
817 | 206_1b1_Lr_sc_Meditron train
818 | 206_1b1_Pl_sc_Meditron train
819 | 207_2b2_Al_mc_AKGC417L test
820 | 207_2b2_Ar_mc_AKGC417L test
821 | 207_2b2_Pl_mc_AKGC417L test
822 | 207_2b2_Pr_mc_AKGC417L test
823 | 207_2b2_Tc_mc_AKGC417L test
824 | 207_2b3_Al_mc_AKGC417L test
825 | 207_2b3_Ar_mc_AKGC417L test
826 | 207_2b3_Pl_mc_AKGC417L test
827 | 207_2b3_Pr_mc_AKGC417L test
828 | 207_2b3_Tc_mc_AKGC417L test
829 | 207_2b4_Al_mc_AKGC417L test
830 | 207_2b4_Ar_mc_AKGC417L test
831 | 207_2b4_Pl_mc_AKGC417L test
832 | 207_2b4_Pr_mc_AKGC417L test
833 | 207_2b4_Tc_mc_AKGC417L test
834 | 207_3b2_Al_mc_AKGC417L test
835 | 207_3b2_Ar_mc_AKGC417L test
836 | 207_3b2_Lr_mc_AKGC417L test
837 | 207_3b2_Pl_mc_AKGC417L test
838 | 207_3b2_Pr_mc_AKGC417L test
839 | 207_3b2_Tc_mc_AKGC417L test
840 | 208_1b1_Ll_sc_Meditron train
841 | 209_1b1_Tc_sc_Meditron train
842 | 210_1b1_Al_sc_Meditron train
843 | 210_1b1_Ar_sc_Meditron train
844 | 211_1p2_Ar_mc_AKGC417L test
845 | 211_1p2_Pl_mc_AKGC417L test
846 | 211_1p2_Pr_mc_AKGC417L test
847 | 211_1p3_Ar_mc_AKGC417L test
848 | 211_1p5_Ar_mc_AKGC417L test
849 | 211_2p2_Tc_mc_AKGC417L test
850 | 211_2p3_Tc_mc_AKGC417L test
851 | 211_2p4_Tc_mc_AKGC417L test
852 | 212_2b2_Tc_mc_LittC2SE train
853 | 213_1p2_Al_mc_AKGC417L train
854 | 213_1p2_Ar_mc_AKGC417L train
855 | 213_1p2_Lr_mc_AKGC417L train
856 | 213_1p2_Pl_mc_AKGC417L train
857 | 213_1p2_Pr_mc_AKGC417L train
858 | 213_1p2_Tc_mc_AKGC417L train
859 | 213_1p3_Al_mc_AKGC417L train
860 | 213_1p3_Ar_mc_AKGC417L train
861 | 213_1p3_Pl_mc_AKGC417L train
862 | 213_1p3_Pr_mc_AKGC417L train
863 | 213_1p5_Al_mc_AKGC417L train
864 | 213_1p5_Ar_mc_AKGC417L train
865 | 213_1p5_Pl_mc_AKGC417L train
866 | 213_1p5_Pr_mc_AKGC417L train
867 | 213_1p5_Tc_mc_AKGC417L train
868 | 213_2p2_Al_mc_AKGC417L train
869 | 213_2p2_Ar_mc_AKGC417L train
870 | 213_2p2_Pl_mc_AKGC417L train
871 | 213_2p2_Pr_mc_AKGC417L train
872 | 213_2p2_Tc_mc_AKGC417L train
873 | 214_1b1_Ar_sc_Meditron test
874 | 215_1b2_Ar_sc_Meditron train
875 | 215_1b3_Tc_sc_Meditron train
876 | 216_1b1_Al_sc_Meditron test
877 | 216_1b1_Pl_sc_Meditron test
878 | 217_1b1_Tc_sc_Meditron train
879 | 218_1b1_Al_sc_Meditron test
880 | 218_1b1_Ar_sc_Meditron train
881 | 218_1b1_Lr_sc_Meditron train
882 | 218_1b1_Pl_sc_Meditron test
883 | 218_1b1_Pr_sc_Meditron test
884 | 218_1p1_Ar_sc_Litt3200 train
885 | 218_1p1_Pl_sc_Litt3200 test
886 | 218_1p1_Pr_sc_Litt3200 train
887 | 219_2b1_Ar_mc_LittC2SE train
888 | 219_2b1_Tc_mc_LittC2SE train
889 | 219_2b2_Ar_mc_LittC2SE train
890 | 219_2b2_Tc_mc_LittC2SE train
891 | 219_2b3_Tc_mc_LittC2SE train
892 | 220_1b1_Tc_mc_LittC2SE train
893 | 220_1b2_Al_mc_LittC2SE train
894 | 221_2b1_Al_mc_LittC2SE train
895 | 221_2b1_Ar_mc_LittC2SE train
896 | 221_2b1_Lr_mc_LittC2SE train
897 | 221_2b1_Pl_mc_LittC2SE train
898 | 221_2b2_Al_mc_LittC2SE train
899 | 221_2b2_Ar_mc_LittC2SE train
900 | 221_2b2_Lr_mc_LittC2SE train
901 | 221_2b2_Pl_mc_LittC2SE train
902 | 221_2b3_Al_mc_LittC2SE train
903 | 221_2b3_Ar_mc_LittC2SE train
904 | 221_2b3_Lr_mc_LittC2SE train
905 | 221_2b3_Pr_mc_LittC2SE train
906 | 222_1b1_Ar_sc_Meditron train
907 | 222_1b1_Lr_sc_Meditron train
908 | 222_1b1_Pr_sc_Meditron train
909 | 223_1b1_Pr_sc_Meditron test
910 | 223_1b1_Pl_sc_Meditron test
911 | 223_1b1_Ar_sc_Meditron test
912 | 223_1b1_Al_sc_Meditron test
913 | 223_1b1_Lr_sc_Meditron test
914 | 223_1b1_Ll_sc_Meditron test
915 | 224_1b1_Tc_sc_Meditron test
916 | 224_1b2_Al_sc_Meditron test
917 | 225_1b1_Pl_sc_Meditron test
918 | 226_1b1_Al_sc_Meditron train
919 | 226_1b1_Ll_sc_Meditron train
920 | 226_1b1_Pl_sc_LittC2SE train
--------------------------------------------------------------------------------
/data/icbhi_dataset/patient_diagnosis.txt:
--------------------------------------------------------------------------------
1 | 101 URTI
2 | 102 Healthy
3 | 103 Asthma
4 | 104 COPD
5 | 105 URTI
6 | 106 COPD
7 | 107 COPD
8 | 108 LRTI
9 | 109 COPD
10 | 110 COPD
11 | 111 Bronchiectasis
12 | 112 COPD
13 | 113 COPD
14 | 114 COPD
15 | 115 LRTI
16 | 116 Bronchiectasis
17 | 117 COPD
18 | 118 COPD
19 | 119 URTI
20 | 120 COPD
21 | 121 Healthy
22 | 122 Pneumonia
23 | 123 Healthy
24 | 124 COPD
25 | 125 Healthy
26 | 126 Healthy
27 | 127 Healthy
28 | 128 COPD
29 | 129 URTI
30 | 130 COPD
31 | 131 URTI
32 | 132 COPD
33 | 133 COPD
34 | 134 COPD
35 | 135 Pneumonia
36 | 136 Healthy
37 | 137 URTI
38 | 138 COPD
39 | 139 COPD
40 | 140 Pneumonia
41 | 141 COPD
42 | 142 COPD
43 | 143 Healthy
44 | 144 Healthy
45 | 145 COPD
46 | 146 COPD
47 | 147 COPD
48 | 148 URTI
49 | 149 Bronchiolitis
50 | 150 URTI
51 | 151 COPD
52 | 152 Healthy
53 | 153 Healthy
54 | 154 COPD
55 | 155 COPD
56 | 156 COPD
57 | 157 COPD
58 | 158 COPD
59 | 159 Healthy
60 | 160 COPD
61 | 161 Bronchiolitis
62 | 162 COPD
63 | 163 COPD
64 | 164 URTI
65 | 165 URTI
66 | 166 COPD
67 | 167 Bronchiolitis
68 | 168 Bronchiectasis
69 | 169 Bronchiectasis
70 | 170 COPD
71 | 171 Healthy
72 | 172 COPD
73 | 173 Bronchiolitis
74 | 174 COPD
75 | 175 COPD
76 | 176 COPD
77 | 177 COPD
78 | 178 COPD
79 | 179 Healthy
80 | 180 COPD
81 | 181 COPD
82 | 182 Healthy
83 | 183 Healthy
84 | 184 Healthy
85 | 185 COPD
86 | 186 COPD
87 | 187 Healthy
88 | 188 URTI
89 | 189 COPD
90 | 190 URTI
91 | 191 Pneumonia
92 | 192 COPD
93 | 193 COPD
94 | 194 Healthy
95 | 195 COPD
96 | 196 Bronchiectasis
97 | 197 URTI
98 | 198 COPD
99 | 199 COPD
100 | 200 COPD
101 | 201 Bronchiectasis
102 | 202 Healthy
103 | 203 COPD
104 | 204 COPD
105 | 205 COPD
106 | 206 Bronchiolitis
107 | 207 COPD
108 | 208 Healthy
109 | 209 Healthy
110 | 210 URTI
111 | 211 COPD
112 | 212 COPD
113 | 213 COPD
114 | 214 Healthy
115 | 215 Bronchiectasis
116 | 216 Bronchiolitis
117 | 217 Healthy
118 | 218 COPD
119 | 219 Pneumonia
120 | 220 COPD
121 | 221 COPD
122 | 222 COPD
123 | 223 COPD
124 | 224 Healthy
125 | 225 Healthy
126 | 226 Pneumonia
--------------------------------------------------------------------------------
/data/icbhi_dataset/patient_list_foldwise.txt:
--------------------------------------------------------------------------------
1 | 158 0
2 | 193 0
3 | 177 0
4 | 170 0
5 | 180 0
6 | 211 0
7 | 147 0
8 | 107 0
9 | 162 0
10 | 156 0
11 | 146 0
12 | 200 0
13 | 138 0
14 | 160 0
15 | 203 0
16 | 204 0
17 | 172 0
18 | 207 0
19 | 163 0
20 | 205 0
21 | 213 0
22 | 114 0
23 | 130 0
24 | 154 0
25 | 186 0
26 | 184 0
27 | 153 1
28 | 115 1
29 | 224 1
30 | 223 1
31 | 201 1
32 | 218 1
33 | 127 1
34 | 137 1
35 | 215 1
36 | 161 1
37 | 206 1
38 | 101 1
39 | 168 1
40 | 131 1
41 | 216 1
42 | 120 1
43 | 188 1
44 | 167 1
45 | 210 1
46 | 197 1
47 | 183 1
48 | 152 1
49 | 173 1
50 | 108 1
51 | 208 1
52 | 105 2
53 | 110 2
54 | 116 2
55 | 196 2
56 | 182 2
57 | 222 2
58 | 166 2
59 | 209 2
60 | 144 2
61 | 111 2
62 | 165 2
63 | 148 2
64 | 164 2
65 | 159 2
66 | 121 2
67 | 157 2
68 | 217 2
69 | 123 2
70 | 169 2
71 | 179 2
72 | 190 2
73 | 125 2
74 | 129 2
75 | 225 2
76 | 136 2
77 | 118 3
78 | 185 3
79 | 112 3
80 | 124 3
81 | 104 3
82 | 195 3
83 | 175 3
84 | 212 3
85 | 140 3
86 | 219 3
87 | 132 3
88 | 142 3
89 | 220 3
90 | 122 3
91 | 191 3
92 | 128 3
93 | 226 3
94 | 141 3
95 | 103 3
96 | 134 3
97 | 117 3
98 | 192 3
99 | 106 3
100 | 155 3
101 | 199 3
102 | 174 4
103 | 145 4
104 | 151 4
105 | 176 4
106 | 178 4
107 | 133 4
108 | 198 4
109 | 214 4
110 | 149 4
111 | 143 4
112 | 187 4
113 | 202 4
114 | 119 4
115 | 194 4
116 | 126 4
117 | 150 4
118 | 171 4
119 | 102 4
120 | 109 4
121 | 113 4
122 | 139 4
123 | 189 4
124 | 181 4
125 | 221 4
126 | 135 4
127 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import os
3 | import sys
4 | import json
5 | import warnings
6 | warnings.filterwarnings("ignore")
7 |
8 | import math
9 | import time
10 | import random
11 | import pickle
12 | import argparse
13 | import numpy as np
14 |
import logging
# Silence noisy third-party logging (transformers/torch emit many INFO lines).
# NOTE(review): both calls below execute, and the second supersedes the first,
# so the effective setting disables WARNING, INFO and DEBUG everywhere.
logging.disable(logging.INFO) # disable INFO and DEBUG logging everywhere
logging.disable(logging.WARNING) # disable WARNING, INFO and DEBUG logging everywhere
19 |
20 | import torch
21 | import torch.nn as nn
22 | import torch.optim as optim
23 | import torch.backends.cudnn as cudnn
24 | from torchvision import transforms
25 | from transformers import set_seed
26 |
27 | from util.icbhi_dataset import ICBHIDataset
28 | from util.icbhi_util import get_score
29 | from util.misc import adjust_learning_rate, warmup_learning_rate, set_optimizer, update_moving_average
30 | from util.misc import AverageMeter, accuracy, save_model, update_json
31 |
from transformers import ClapProcessor
# Module-level CLAP processor (tokenizer + audio feature extractor), fetched at
# import time from the HuggingFace hub. Presumably consumed by the dataset or
# model code — TODO confirm: `Processor` is not referenced in this chunk.
Processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused", sampling_rate=48000)

import os  # NOTE(review): redundant — `os` is already imported at the top of the file
# Avoid HF tokenizer parallelism warnings/deadlocks when DataLoader workers fork.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# CUBLAS_WORKSPACE_CONFIG=:16:8  (commented-out env setting for deterministic cuBLAS)
38 |
def parse_args():
    """Parse command-line options and derive the training configuration.

    Beyond the raw CLI flags, this also computes derived fields on the
    returned namespace:

    * ``lr_decay_epochs`` — parsed from a comma-separated string to ``list[int]``
    * ``model_name`` / ``save_folder`` — experiment identifier and output dir
      (the directory is created here as a side effect)
    * ``warmup_from`` / ``warmup_to`` — warmup LR bounds when ``--warm`` is set
    * ``cls_list`` — class label names for the requested task

    Returns:
        argparse.Namespace: all options plus the derived fields above.

    Raises:
        NotImplementedError: for an unsupported dataset / class_split / n_cls
            combination.
    """
    parser = argparse.ArgumentParser('argument for supervised training')

    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--print_freq', type=int, default=10)
    parser.add_argument('--save_freq', type=int, default=100)
    parser.add_argument('--save_dir', type=str, default='./save') ## have to change
    parser.add_argument('--tag', type=str, default='',
                        help='tag for experiment name')
    parser.add_argument('--resume', type=str, default=None,
                        help='path of model checkpoint to resume')
    parser.add_argument('--eval', action='store_true',
                        help='only evaluation with pretrained encoder and classifier')
    parser.add_argument('--two_cls_eval', action='store_true',
                        help='evaluate with two classes')

    # optimization
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--learning_rate', type=float, default=5e-5)
    parser.add_argument('--lr_decay_epochs', type=str, default='120,160')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--cosine', action='store_true',
                        help='using cosine annealing')
    parser.add_argument('--warm', action='store_true',
                        help='warm-up for large batch training')
    parser.add_argument('--warm_epochs', type=int, default=0,
                        help='warmup epochs')
    parser.add_argument('--weighted_loss', action='store_true',
                        help='weighted cross entropy loss (higher weights on abnormal class)')
    # dataset
    parser.add_argument('--dataset', type=str, default='icbhi')
    parser.add_argument('--data_folder', type=str, default='./data/')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_workers', type=int, default=8)
    # icbhi dataset
    parser.add_argument('--class_split', type=str, default='lungsound',
                        help='lungsound: (normal, crackles, wheezes, both), diagnosis: (healthy, chronic diseases, non-chronic diseases)')
    parser.add_argument('--n_cls', type=int, default=0,
                        help='set k-way classification problem for class')
    parser.add_argument('--test_fold', type=str, default='official', choices=['official', '0', '1', '2', '3', '4'],
                        help='test fold to use official 60-40 split or 80-20 split from RespireNet')
    parser.add_argument('--sample_rate', type=int, default=16000,
                        help='sampling rate when load audio data, and it denotes the number of samples per one second')
    parser.add_argument('--desired_length', type=int, default=8,
                        help='fixed length size of individual cycle')
    parser.add_argument('--pad_types', type=str, default='repeat',
                        help='zero: zero-padding, repeat: padding with duplicated samples, aug: padding with augmented samples')

    # model
    parser.add_argument('--model', type=str, default='ast')
    parser.add_argument('--model_type', type=str, default='ClapAudioModelWithProjection', choices=['ClapAudioModelWithProjection', 'ClapModel'])
    parser.add_argument('--test_drop_key', action='store_true')
    parser.add_argument('--test_drop_key_prob', type=float, default=0.1)
    parser.add_argument('--test_unknown_all', action='store_true')
    parser.add_argument('--test_bmi', action='store_true')
    parser.add_argument('--test_wrong_label', action='store_true')
    parser.add_argument('--analysis', action='store_true')
    parser.add_argument('--clap_final', type=str, default='concat', choices=['concat', 'add'])

    # Meta for generate descriptions
    parser.add_argument('--meta_mode', type=str, default='none',
                        help='the meta information for selecting', choices=['age', 'sex', 'loc', 'dev', 'age_sex', 'age_loc', 'age_dev',
                        'sex_loc', 'sex_dev', 'loc_dev', 'age_sex_loc', 'age_sex_dev', 'age_loc_dev', 'sex_loc_dev', 'all'])

    parser.add_argument('--pretrained', action='store_true')
    parser.add_argument('--pretrained_ckpt', type=str, default=None,
                        help='path to pre-trained encoder model')
    parser.add_argument('--ma_update', action='store_true',
                        help='whether to use moving average update for model')
    parser.add_argument('--ma_beta', type=float, default=0,
                        help='moving average value')
    parser.add_argument('--method', type=str, default='ce')

    args = parser.parse_args()

    # '120,160' -> [120, 160]
    args.lr_decay_epochs = [int(it) for it in args.lr_decay_epochs.split(',')]

    args.model_name = '{}_{}'.format(args.dataset, args.model) if args.meta_mode == 'none' else '{}_{}_{}'.format(args.dataset, args.model, args.meta_mode)
    if args.tag:
        args.model_name += '_{}'.format(args.tag)

    args.save_folder = os.path.join(args.save_dir, args.model_name)
    os.makedirs(args.save_folder, exist_ok=True)

    if args.warm:
        # Linear warmup starting at 10% of the base LR; with cosine scheduling
        # the warmup target matches the cosine curve value at `warm_epochs`.
        args.warmup_from = args.learning_rate * 0.1
        args.warm_epochs = 10  # NOTE: overrides any --warm_epochs CLI value
        if args.cosine:
            eta_min = args.learning_rate * (args.lr_decay_rate ** 3)
            args.warmup_to = eta_min + (args.learning_rate - eta_min) * (
                1 + math.cos(math.pi * args.warm_epochs / args.epochs)) / 2
        else:
            args.warmup_to = args.learning_rate

    if args.dataset == 'icbhi':
        if args.class_split == 'lungsound':
            if args.n_cls == 4:
                args.cls_list = ['normal', 'crackle', 'wheeze', 'both']
            elif args.n_cls == 2:
                args.cls_list = ['normal', 'abnormal']
            else:
                # FIX: previously this branch fell through silently, leaving
                # args.cls_list undefined and crashing later; now consistent
                # with the 'diagnosis' branch below.
                raise NotImplementedError
        elif args.class_split == 'diagnosis':
            if args.n_cls == 3:
                args.cls_list = ['healthy', 'chronic_diseases', 'non-chronic_diseases']
            elif args.n_cls == 2:
                args.cls_list = ['healthy', 'unhealthy']
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError

    return args
161 |
162 |
def set_loader(args):
    """Build the train/validation DataLoaders for the configured dataset.

    Args:
        args: parsed namespace providing at least ``dataset``,
            ``batch_size`` and ``num_workers``.

    Returns:
        (train_loader, val_loader, args) tuple. The train loader shuffles
        and drops the last incomplete batch; the val loader does neither.

    Raises:
        NotImplementedError: if ``args.dataset`` is not supported.
    """
    if args.dataset == 'icbhi':

        train_dataset = ICBHIDataset(train_flag=True, transform=None, args=args, print_flag=True)
        val_dataset = ICBHIDataset(train_flag=False, transform=None, args=args, print_flag=True)

    else:
        # BUG FIX: was `raise NotImplemented`, which raises the
        # NotImplemented *constant* and produces a confusing TypeError at
        # runtime; raise the exception class instead.
        raise NotImplementedError

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.num_workers, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                             num_workers=args.num_workers, pin_memory=True)

    return train_loader, val_loader, args
178 |
179 |
def set_model(args):
    """Instantiate the CLAP backbone, classifier head, projector, loss and
    optimizer, optionally restoring an SSL-pretrained checkpoint.

    Args:
        args: parsed namespace providing ``model_type``, ``model``, ``n_cls``,
            ``pretrained`` / ``pretrained_ckpt`` and the optimizer settings
            consumed by ``set_optimizer``.

    Returns:
        (model, classifier, projector, criterion, optimizer) — all moved to
        CUDA. ``criterion`` is a one-element list (kept as a list so callers
        that index ``criterion[0]`` keep working).

    Raises:
        NotImplementedError: if ``args.model_type`` is unknown.
    """
    if args.model_type == 'ClapAudioModelWithProjection':
        from models.clap import PretrainedCLAPWithProjection
        model = PretrainedCLAPWithProjection(args.model, 512)
    elif args.model_type == 'ClapModel':
        from models.clap import PretrainedCLAP
        model = PretrainedCLAP(args.model, 512)
    else:
        # FIX: an unrecognized model_type previously left `model` unbound and
        # crashed later with UnboundLocalError; fail fast and explicitly.
        raise NotImplementedError('unknown model_type: {}'.format(args.model_type))

    # ClapModel concatenates audio and text features, doubling the input dim
    # of the linear head (see the 'concat' handling in the training loop).
    if args.model_type == 'ClapModel':
        classifier = nn.Linear(model.final_feat_dim * 2, args.n_cls)
    else:
        classifier = nn.Linear(model.final_feat_dim, args.n_cls)

    projector = nn.Identity()

    criterion = nn.CrossEntropyLoss()

    # load SSL pretrained checkpoint for linear evaluation
    if args.pretrained and args.pretrained_ckpt is not None:
        ckpt = torch.load(args.pretrained_ckpt, map_location='cpu')
        state_dict = ckpt['model']

        # HOTFIX: checkpoints were always saved under DataParallel during SSL
        # pretraining, so strip the "module."/"backbone." prefixes and drop
        # the SSL projection head ("mlp_head") before loading.
        new_state_dict = {}
        for k, v in state_dict.items():
            if "module." in k:
                k = k.replace("module.", "")
            if "backbone." in k:
                k = k.replace("backbone.", "")
            if not 'mlp_head' in k: #del mlp_head
                new_state_dict[k] = v
        state_dict = new_state_dict
        # strict=False: the checkpoint intentionally omits the mlp_head keys.
        model.load_state_dict(state_dict, strict=False)

        if ckpt.get('classifier', None) is not None:
            classifier.load_state_dict(ckpt['classifier'], strict=True)

        print('pretrained model loaded from: {}'.format(args.pretrained_ckpt))

    criterion = [criterion.cuda()]

    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    model.cuda()
    classifier.cuda()
    projector.cuda()

    optim_params = list(model.parameters()) + list(classifier.parameters()) + list(projector.parameters())
    optimizer = set_optimizer(args, optim_params)

    return model, classifier, projector, criterion, optimizer
234 |
def train(train_loader, model, classifier, projector, criterion, optimizer, epoch, args, scaler=None):
    """Run one training epoch and return (average loss, average top-1 accuracy).

    Args:
        train_loader: yields (images, labels); for ClapModel the labels entry
            is a (class_labels, meta_texts, meta_masks) tuple.
        model, classifier, projector: backbone, linear head, and projector;
            all are switched to train mode here.
        criterion: list whose first element is the classification loss.
        optimizer: optimizer over all trainable parameters.
        epoch: current epoch number (used for LR warmup and logging).
        args: parsed CLI arguments.
        scaler: torch.cuda.amp.GradScaler driving the mixed-precision step.
    """
    model.train()
    classifier.train()
    projector.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    end = time.time()
    for idx, (images, labels) in enumerate(train_loader):
        # data load
        data_time.update(time.time() - end)
        images = images.cuda(non_blocking=True)
        if args.model_type == 'ClapModel':
            # labels is (class label, tokenized meta text, attention mask)
            class_labels = labels[0].cuda(non_blocking=True)
            meta_texts = labels[1].cuda(non_blocking=True)
            meta_masks = labels[2].cuda(non_blocking=True)
            labels = class_labels
        else:
            labels = labels.cuda(non_blocking=True)
        bsz = labels.shape[0]

        # BUGFIX: `alpha` was previously assigned only inside the
        # `args.ma_update` branch, so the forward call below raised
        # NameError whenever --ma_update was not set. Define it always.
        alpha = None
        if args.ma_update:
            # store the previous iter checkpoint for the moving-average update
            with torch.no_grad():
                ma_ckpt = [deepcopy(model.state_dict()), deepcopy(classifier.state_dict()), deepcopy(projector.state_dict())]

        warmup_learning_rate(args, epoch, idx, len(train_loader), optimizer)

        with torch.cuda.amp.autocast():
            if args.model_type == 'ClapAudioModelWithProjection':
                features = model(images, args=args, alpha=alpha, training=True)
            elif args.model_type == 'ClapModel':
                features = model((meta_texts, meta_masks, images), args=args, alpha=alpha, training=True)
            output = classifier(features)
            loss = criterion[0](output, labels)

        losses.update(loss.item(), bsz)
        [acc1], _ = accuracy(output[:bsz], labels, topk=(1,))
        top1.update(acc1[0], bsz)

        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.ma_update:
            with torch.no_grad():
                # exponential moving average update against the pre-step weights
                model = update_moving_average(args.ma_beta, model, ma_ckpt[0])
                classifier = update_moving_average(args.ma_beta, classifier, ma_ckpt[1])
                projector = update_moving_average(args.ma_beta, projector, ma_ckpt[2])

        # print info
        if (idx + 1) % args.print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                   epoch, idx + 1, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1))
            sys.stdout.flush()

    return losses.avg, top1.avg
307 |
def validate(val_loader, model, classifier, criterion, args, best_acc, best_model=None):
    """Evaluate on val_loader and track the best (specificity, sensitivity, score).

    Returns:
        best_acc: possibly-updated [S_p, S_e, Score] list.
        best_model: [model state_dict, classifier state_dict] at the best score.
        save_bool: True when this evaluation improved the best score.
    """
    save_bool = False
    model.eval()

    classifier.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    hits, counts = [0.0] * args.n_cls, [0.0] * args.n_cls

    with torch.no_grad():
        end = time.time()
        for idx, (images, labels) in enumerate(val_loader):

            images = images.cuda(non_blocking=True)
            if args.model_type == 'ClapModel':
                class_labels = labels[0].cuda(non_blocking=True)
                meta_texts = labels[1].cuda(non_blocking=True)
                meta_masks = labels[2].cuda(non_blocking=True)
                labels = class_labels
            else:
                labels = labels.cuda(non_blocking=True)

            bsz = labels.shape[0]

            with torch.cuda.amp.autocast():
                if args.model in ['laion/clap-htsat-unfused']:
                    if args.model_type == 'ClapAudioModelWithProjection':
                        features = model(images, args=args, training=False)
                    elif args.model_type == 'ClapModel':
                        features = model((meta_texts, meta_masks, images), args=args, training=False)
                else:
                    features = model(images, args=args, training=False)

                output = classifier(features)
                loss = criterion[0](output, labels)

            losses.update(loss.item(), bsz)
            [acc1], _ = accuracy(output, labels, topk=(1,))
            top1.update(acc1[0], bsz)

            _, preds = torch.max(output, 1)
            # BUGFIX: the per-sample loop used `idx` as its variable, clobbering
            # the enumerate() batch index and breaking the print-frequency
            # check / progress counter below. Use a separate name.
            for i in range(preds.shape[0]):
                counts[labels[i].item()] += 1.0
                if not args.two_cls_eval:
                    if preds[i].item() == labels[i].item():
                        hits[labels[i].item()] += 1.0
                else:  # only when args.n_cls == 4
                    if labels[i].item() == 0 and preds[i].item() == labels[i].item():
                        hits[labels[i].item()] += 1.0
                    elif labels[i].item() != 0 and preds[i].item() > 0:  # abnormal
                        hits[labels[i].item()] += 1.0

            sp, se, sc = get_score(hits, counts)

            batch_time.update(time.time() - end)
            end = time.time()

            if (idx + 1) % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                       idx + 1, len(val_loader), batch_time=batch_time,
                       loss=losses, top1=top1))

    # accept a new best only when the score improves and sensitivity is non-trivial
    if sc > best_acc[-1] and se > 5:
        save_bool = True
        best_acc = [sp, se, sc]
        best_model = [deepcopy(model.state_dict()), deepcopy(classifier.state_dict())]

    print(' * S_p: {:.2f}, S_e: {:.2f}, Score: {:.2f} (Best S_p: {:.2f}, S_e: {:.2f}, Score: {:.2f})'.format(sp, se, sc, best_acc[0], best_acc[1], best_acc[-1]))
    print(' * Acc@1 {top1.avg:.2f}'.format(top1=top1))
    print(' * Loss is {loss.avg:.2f} '.format(loss=losses))

    return best_acc, best_model, save_bool
385 |
386 |
def main():
    """Entry point: seed everything, build loaders/model, then train or (with --eval) only evaluate."""
    args = parse_args()
    # persist the full CLI configuration next to the checkpoints
    with open(os.path.join(args.save_folder, 'train_args.json'), 'w') as f:
        json.dump(vars(args), f, indent=4)

    #torch.autograd.set_detect_anomaly(True)

    # fix seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # NOTE(review): cudnn.benchmark=True lets cuDNN auto-tune kernels, which can
    # undercut cudnn.deterministic=True — confirm this combination is intended.
    cudnn.deterministic = True
    cudnn.benchmark = True

    #cudnn.benchmark = False
    #torch.use_deterministic_algorithms(True, warn_only=True)
    set_seed(args.seed)
    #os.environ['PYTHONHASHSEED'] = str(args.seed)

    best_model = None
    if args.dataset == 'icbhi':
        best_acc = [0, 0, 0] # Specificity, Sensitivity, Score

    train_loader, val_loader, args = set_loader(args)
    model, classifier, projector, criterion, optimizer = set_model(args)

    print('# of params', sum(p.numel() for p in model.parameters() if p.requires_grad))

    # optionally resume from a full checkpoint (weights + optimizer + epoch)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            args.start_epoch += 1
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        args.start_epoch = 1

    # use mix_precision:
    scaler = torch.cuda.amp.GradScaler()

    print('*' * 20)

    if not args.eval:
        print('Experiments {} start'.format(args.model_name))
        print('Training for {} epochs on {} dataset'.format(args.epochs, args.dataset))

        for epoch in range(args.start_epoch, args.epochs+1):
            adjust_learning_rate(args, optimizer, epoch)

            # train for one epoch
            time1 = time.time()
            loss, acc = train(train_loader, model, classifier, projector, criterion, optimizer, epoch, args, scaler)
            time2 = time.time()
            print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(epoch, time2-time1, acc))

            # eval for one epoch
            best_acc, best_model, save_bool = validate(val_loader, model, classifier, criterion, args, best_acc, best_model)

            # save a checkpoint of model and classifier when the best score is updated
            if save_bool:
                save_file = os.path.join(args.save_folder, 'best_epoch_{}.pth'.format(epoch))
                print('Best ckpt is modified with Score = {:.2f} when Epoch = {}'.format(best_acc[2], epoch))

        # save a checkpoint of classifier with the best accuracy or score
        save_file = os.path.join(args.save_folder, 'best.pth')
        model.load_state_dict(best_model[0])
        classifier.load_state_dict(best_model[1])
        save_model(model, optimizer, args, epoch, save_file, classifier)
    else:
        print('Testing the pretrained checkpoint on {} dataset'.format(args.dataset))
        best_acc, _, _ = validate(val_loader, model, classifier, criterion, args, best_acc)

    print('{} finished'.format(args.model_name))
    update_json('%s' % args.model_name, best_acc, path=os.path.join(args.save_dir, 'results.json'))
467 |
# script entry point
if __name__ == '__main__':
    main()
470 |
--------------------------------------------------------------------------------
/models/clap.py:
--------------------------------------------------------------------------------
1 | from transformers import ClapModel, ClapAudioModelWithProjection
2 | import torch
3 | import torch.nn as nn
4 | #from transformers import set_seed
5 | #set_seed(1)
class PretrainedCLAPWithProjection(nn.Module):
    """Audio-only CLAP encoder.

    Wraps a pretrained ClapAudioModelWithProjection and exposes its projected
    audio embeddings as the feature vector.
    """

    def __init__(self, pretrained_name, final_feat_dim):
        super().__init__()
        self.pretrained = pretrained_name
        self.final_feat_dim = final_feat_dim
        self.audio_features = ClapAudioModelWithProjection.from_pretrained(pretrained_name)

    def forward(self, x, args=None, alpha=None, training=False):
        """Return projected audio embeddings for input features `x`.

        `args`, `alpha`, and `training` are accepted only for interface
        compatibility with the training loop; they are not used here.
        """
        outputs = self.audio_features(x)
        return outputs.audio_embeds
17 |
18 |
19 |
class PretrainedCLAP(nn.Module):
    """Full CLAP (text + audio) encoder.

    forward() consumes a (input_ids, attention_mask, input_features) tuple and
    fuses the text and audio embeddings according to args.clap_final.
    """

    def __init__(self, pretrained_name, final_feat_dim):
        super().__init__()

        self.pretrained = pretrained_name
        self.audio_features = ClapModel.from_pretrained(pretrained_name)
        # NOTE: with args.clap_final == 'concat' the effective output width is
        # 2 * final_feat_dim; the caller sizes its classifier accordingly.
        self.final_feat_dim = final_feat_dim

    def forward(self, x, args=None, alpha=None, training=False):
        """Encode text + audio and fuse the two embeddings.

        Args:
            x: tuple of (input_ids, attention_mask, input_features).
            args: must provide clap_final ('concat' or 'add') and, for 'add',
                te_alpha (text-embedding weight).
            alpha, training: unused; kept for interface compatibility.

        Raises:
            ValueError: if args.clap_final is not a supported fusion mode.
        """
        text_inputs, attention_mask, audio_inputs = x
        x = self.audio_features(input_ids=text_inputs, attention_mask=attention_mask, input_features=audio_inputs)

        text_embeds = x.text_embeds
        audio_embeds = x.audio_embeds
        if args.clap_final == 'concat':
            return torch.cat((text_embeds, audio_embeds), dim=-1)
        elif args.clap_final == 'add':
            return (text_embeds * args.te_alpha) + (audio_embeds * (1 - args.te_alpha))
        # BUGFIX: previously fell through and silently returned None for an
        # unrecognized fusion mode; fail loudly instead.
        raise ValueError('Unsupported clap_final mode: {}'.format(args.clap_final))
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
tqdm
pandas
cmapy
transformers
torch
torchaudio
numpy
--------------------------------------------------------------------------------
/scripts/eval_bts.sh:
--------------------------------------------------------------------------------
# Evaluate a pretrained BTS (ClapModel + metadata) checkpoint on the official
# ICBHI split. Runs main.py in --eval mode only; no training is performed.
# NOTE(review): --pretrained_ckpt below is a machine-specific absolute path —
# adjust it to your own save directory before running.

MODEL="laion/clap-htsat-unfused"
SEED="1"

for s in $SEED
do
for m in $MODEL
do
TAG="BTS_eval"
CUDA_VISIBLE_DEVICES=0 python main.py --tag $TAG \
--dataset icbhi \
--seed $s \
--class_split lungsound \
--n_cls 4 \
--epochs 50 \
--batch_size 8 \
--optimizer adam \
--learning_rate 5e-5 \
--weight_decay 1e-6 \
--cosine \
--sample_rate 48000 \
--model $m \
--model_type ClapModel \
--meta_mode all \
--test_fold official \
--pad_types repeat \
--ma_update \
--ma_beta 0.5 \
--method ce \
--print_freq 100 \
--eval \
--pretrained \
--pretrained_ckpt /home2/jw/workspace/mcl/BTS/save/icbhi_laion/clap-htsat-unfused_ce_all_BTS_bs8_lr5e-5_ep50_seed1_meta_all/best.pth

done
done
--------------------------------------------------------------------------------
/scripts/icbhi_audio-clap_ce.sh:
--------------------------------------------------------------------------------
# Train the audio-only CLAP baseline (ClapAudioModelWithProjection) with
# cross-entropy on the official ICBHI split, sweeping five seeds.

MODEL="laion/clap-htsat-unfused"
SEED="1 2 3 4 5"

for s in $SEED
do
for m in $MODEL
do
TAG="audio-clap_bs8_lr5e-5_ep50_seed${s}"
CUDA_VISIBLE_DEVICES=0 python main.py --tag $TAG \
--dataset icbhi \
--seed $s \
--class_split lungsound \
--n_cls 4 \
--epochs 50 \
--batch_size 8 \
--optimizer adam \
--learning_rate 5e-5 \
--weight_decay 1e-6 \
--cosine \
--sample_rate 48000 \
--model $m \
--model_type ClapAudioModelWithProjection \
--test_fold official \
--pad_types repeat \
--ma_update \
--ma_beta 0.5 \
--method ce \
--print_freq 100

# only for evaluation, add the following arguments
# --eval \
# --pretrained \
# --pretrained_ckpt ./save/icbhi_ast_ce_bs8_lr5e-5_ep50_seed1/best.pth

done
done
--------------------------------------------------------------------------------
/scripts/icbhi_bts_meta_all.sh:
--------------------------------------------------------------------------------
# Train the full BTS model (ClapModel: audio + metadata text, meta_mode=all)
# with cross-entropy on the official ICBHI split, sweeping five seeds.

MODEL="laion/clap-htsat-unfused"
SEED="1 2 3 4 5"

for s in $SEED
do
for m in $MODEL
do
TAG="BTS_bs8_lr5e-5_ep50_seed${s}_meta_all"
CUDA_VISIBLE_DEVICES=0 python main.py --tag $TAG \
--dataset icbhi \
--seed $s \
--class_split lungsound \
--n_cls 4 \
--epochs 50 \
--batch_size 8 \
--optimizer adam \
--learning_rate 5e-5 \
--weight_decay 1e-6 \
--cosine \
--sample_rate 48000 \
--model $m \
--model_type ClapModel \
--meta_mode all \
--test_fold official \
--pad_types repeat \
--ma_update \
--ma_beta 0.5 \
--method ce \
--print_freq 100

# only for evaluation, add the following arguments
# --eval \
# --pretrained \
# --pretrained_ckpt ./save/icbhi_ast_ce_bs8_lr5e-5_ep50_seed1/best.pth

done
done
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kaen2891/bts/9a3d6a3bb3057588da1739e5734fef2c13222e42/util/__init__.py
--------------------------------------------------------------------------------
/util/icbhi_dataset.py:
--------------------------------------------------------------------------------
1 | from curses import meta
2 | import os
3 | import pickle
4 | import random
5 | import numpy as np
6 | import pandas as pd
7 | from tqdm import tqdm
8 | from glob import glob
9 | import random
10 |
11 | import torch
12 | from torch.utils.data import Dataset
13 | from copy import deepcopy
14 |
15 | from .icbhi_util import get_annotations, get_individual_cycles_torchaudio, get_meta_text_descriptions
16 | from transformers import ClapProcessor
17 |
18 | Processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused", sampling_rate=48000)
19 |
class ICBHIDataset(Dataset):
    """ICBHI 2017 respiratory-sound dataset of individual breathing cycles.

    Each item is a CLAP-processed audio input plus a label; when
    args.model_type == 'ClapModel' the label is extended with tokenized
    metadata text (via get_meta_text_descriptions) and its attention mask.
    Preprocessed samples are cached to ./data/{training,test}.pt.
    """

    def __init__(self, train_flag, transform, args, print_flag=True, mean_std=False):
        """Build (or load from cache) the list of preprocessed breathing cycles.

        Args:
            train_flag: True for the training split, False for the test split.
            transform: optional transform applied to each audio input.
            args: parsed CLI arguments (data_folder, test_fold, sample_rate, n_cls, ...).
            print_flag: print dataset statistics while building.
            mean_std: stored on the instance; not used in this code path.
        """
        data_folder = os.path.join(args.data_folder, 'icbhi_dataset/audio_test_data')
        folds_file = os.path.join(args.data_folder, 'icbhi_dataset/patient_list_foldwise.txt')
        official_folds_file = os.path.join(args.data_folder, 'icbhi_dataset/official_split.txt')
        test_fold = args.test_fold

        self.data_folder = data_folder
        self.train_flag = train_flag
        self.split = 'train' if train_flag else 'test'
        self.transform = transform
        self.args = args
        self.mean_std = mean_std

        # parameters for spectrograms
        self.sample_rate = args.sample_rate

        # NOTE(review): the cache path encodes only the split, not args
        # (test_fold, sample_rate, n_cls, ...) — a stale cache is reused
        # silently if those options change; delete ./data/*.pt when they do.
        cache_path = './data/training.pt' if self.train_flag else './data/test.pt'
        if not os.path.isfile(cache_path):


            # ==========================================================================
            """ get ICBHI dataset meta information """
            # store stethoscope device information for each file or patient
            device_id, self.device_to_id = 0, {}
            self.device_id_to_patient, self.file_to_device = {}, {}
            self.device_to_id = {'Meditron': 0, 'LittC2SE': 1, 'Litt3200': 2, 'AKGC417L': 3}
            self.device_id_to_patient = {0: [], 1: [], 2: [], 3: []}

            filenames = sorted(glob(data_folder+'/*')) #--> 1840: 920 wav + 920 txt
            filenames = set(f.strip().split('/')[-1].split('.')[0] for f in filenames if '.wav' in f or '.txt' in f)
            filenames = sorted(list(set(filenames))) #--> 920

            for f in filenames:
                f += '.wav'
                # get the total number of devices from original dataset (icbhi dataset has 4 stethoscope devices)
                device = f.strip().split('_')[-1].split('.')[0] #-->Meditron / LittC2SE / Litt3200 / AKGC417L
                # get the device information for each wav file
                self.file_to_device[f.strip().split('.')[0]] = self.device_to_id[device]

                pat_id = f.strip().split('_')[0] # 101
                if pat_id not in self.device_id_to_patient[self.device_to_id[device]]:
                    self.device_id_to_patient[self.device_to_id[device]].append(pat_id) #0: ['101']

            # store all metadata (age, sex, adult_BMI, child_weight, child_height, device_index)
            self.file_to_metadata = {}
            meta_file = pd.read_csv(os.path.join(args.data_folder, 'icbhi_dataset/metadata.txt'), names=['age', 'sex', 'adult_BMI', 'child_weight', 'child_height', 'chest_location'], delimiter= '\t')

            meta_file['chest_location'].replace({'Tc':0, 'Al':1, 'Ar':2, 'Pl':3, 'Pr':4, 'Ll':5, 'Lr':6}, inplace=True) # Tc --> 0, Al -> 1, ...
            for f in filenames:
                pat_idx = int(f.strip().split('_')[0])
                info = list(meta_file.loc[pat_idx])
                info[1] = 0 if info[1] == 'M' else 1 # --> Man:0, Woman:1

                # replace missing (NaN) metadata fields with -1
                info = np.array(info)
                for idx in np.argwhere(np.isnan(info)):
                    info[idx] = -1

                self.file_to_metadata[f] = torch.tensor(np.append(info, self.file_to_device[f.strip()])) #age, sex, adult_BMI, child_weight, child_height, chest_location, device


            """ train-test split based on train_flag and test_fold """
            if test_fold in ['0', '1', '2', '3', '4']: # from RespireNet, 80-20% split

                # fold-wise split keyed by patient id
                patient_dict = {}
                all_patients = open(folds_file).read().splitlines()
                for line in all_patients:
                    idx, fold = line.strip().split(' ')
                    if train_flag and int(fold) != int(test_fold):
                        patient_dict[idx] = fold
                    elif train_flag == False and int(fold) == int(test_fold):
                        patient_dict[idx] = fold

                if print_flag:
                    print('*' * 20)
                    print('Train and test 80-20% split with test_fold {}'.format(test_fold))
                    print('Patience number in {} dataset: {}'.format(self.split, len(patient_dict)))
            else:
                # official 60-40% ICBHI split keyed by full file stem
                patient_dict = {}
                all_fpath = open(official_folds_file).read().splitlines()

                for line in all_fpath:
                    fpath, fold = line.strip().split('\t') #--> fpath: '101_1b1_Al_sc_Meditron' / fold --> test
                    if train_flag and fold == 'train': # using for training set
                        patient_dict[fpath] = fold
                    elif not train_flag and fold == 'test': # using for test set
                        patient_dict[fpath] = fold

                if print_flag:
                    print('*' * 20)
                    print('Train and test 60-40% split with test_fold {}'.format(test_fold))
                    print('File number in {} dataset: {}'.format(self.split, len(patient_dict)))

            annotation_dict = get_annotations(args, data_folder)

            self.filenames = []
            for f in filenames:
                # for 'official' test_fold, two patient dataset contain both train and test samples
                idx = f.split('_')[0] if test_fold in ['0', '1', '2', '3', '4'] else f

                if idx in patient_dict:
                    self.filenames.append(f)

            self.audio_data = []  # each sample is a tuple with (audio_data, label, metadata)

            if print_flag:
                print('*' * 20)
                print("Extracting individual breathing cycles..")

            self.cycle_list = []

            # parallel arrays over all extracted cycles of this split
            audio_image_array = []
            label_array = []
            meta_str_array = []

            for idx, filename in enumerate(self.filenames):
                # one entry per annotated breathing cycle of this recording
                sample_data = get_individual_cycles_torchaudio(args, annotation_dict[filename], data_folder, filename, self.sample_rate, args.n_cls)
                for samples in sample_data:
                    data1, data2 = samples[0], samples[1] # audio_image, label
                    audio_image_array.append(data1.squeeze(0).numpy())
                    label_array.append(data2)
                    meta_str_array.append(self.file_to_metadata[filename])


                cycles_with_labels = [(data[0], data[1]) for data in sample_data]
                self.cycle_list.extend(cycles_with_labels)

            audio_image_array = np.array(audio_image_array)
            for sample in self.cycle_list:
                self.audio_data.append(sample)

            # per-class sample counts and ratios (for the stats printout below)
            self.class_nums = np.zeros(args.n_cls)

            for sample in self.audio_data:
                self.class_nums[sample[1]] += 1

            self.class_ratio = self.class_nums / sum(self.class_nums) * 100

            if print_flag:
                print('[Preprocessed {} dataset information]'.format(self.split))
                print('total number of audio data: {}'.format(len(self.audio_data)))
                print('*' * 25)
                print('For the Label Distribution')
                for i, (n, p) in enumerate(zip(self.class_nums, self.class_ratio)):
                    print('Class {} {:<9}: {:<4} ({:.1f}%)'.format(i, '('+args.cls_list[i]+')', int(n), p))

            """ convert fbank """
            # run the CLAP feature extractor once over all cycles of the split
            self.audio_images = []
            inputs = Processor(audios=audio_image_array, return_tensors="pt")
            audio_inputs = inputs["input_features"]
            for audio, label, meta in zip(audio_inputs, label_array, meta_str_array):
                self.audio_images.append((audio, label, meta))

            if self.train_flag:
                torch.save(self.audio_images, './data/training.pt')
            else:
                torch.save(self.audio_images, './data/test.pt')

        else:
            # cache hit: load the preprocessed (audio, label, metadata) tuples
            if self.train_flag:
                self.audio_images = torch.load('./data/training.pt')
            else:
                self.audio_images = torch.load('./data/test.pt')


    def __getitem__(self, index):
        """Return (audio_input, label); for ClapModel the label is a
        (class_label, token_ids, attention_mask) tuple with the metadata
        sentence tokenized on the fly (so train-time augmentation of the
        description stays fresh each epoch)."""
        audio_image, label, meta_str = self.audio_images[index][0], self.audio_images[index][1], self.audio_images[index][2]
        if self.args.model_type in ['ClapModel']:
            meta_sentence = get_meta_text_descriptions(meta_str, self.args, training=self.train_flag)
            inputs = Processor.tokenizer(text=meta_sentence, return_tensors="pt", padding="max_length", max_length=64)
            text_inputs = inputs['input_ids']
            attn_masks = inputs['attention_mask']

        if self.transform is not None:
            audio_image = self.transform(audio_image)

        # text_inputs/attn_masks exist exactly when model_type == 'ClapModel'
        if self.args.model_type == 'ClapModel':
            return audio_image.squeeze(1), (label, text_inputs.squeeze(0), attn_masks.squeeze(0))
        else:
            return audio_image.squeeze(1), label

    def __len__(self):
        """Number of preprocessed breathing cycles in this split."""
        return len(self.audio_images)
--------------------------------------------------------------------------------
/util/icbhi_util.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | import os
3 | import math
4 | import random
5 | import pandas as pd
6 | import numpy as np
7 | from tqdm import tqdm
8 | from glob import glob
9 |
10 | import torch
11 | import torchaudio
12 | from torchaudio import transforms as T
13 | from .meta_description import generate_meta_description
14 |
15 | __all__ = ['get_annotations', 'get_individual_cycles_torchaudio', 'generate_mel_spectrogram', 'generate_fbank', 'get_score']
16 |
17 |
18 | # ==========================================================================
19 | """ ICBHI dataset information """
def _extract_lungsound_annotation(file_name, data_folder):
    """Split an ICBHI file stem into its recording fields and load the
    per-cycle annotation table (Start/End times plus crackle/wheeze flags)."""
    fields = file_name.strip().split('_')
    info_columns = ['Patient Number', 'Recording index', 'Chest location', 'Acquisition mode', 'Recording equipment']
    recording_info = pd.DataFrame(data=[fields], columns=info_columns)

    annotation_path = os.path.join(data_folder, file_name + '.txt')
    recording_annotations = pd.read_csv(annotation_path, names=['Start', 'End', 'Crackles', 'Wheezes'], delimiter='\t')

    return recording_info, recording_annotations
26 |
27 |
def get_annotations(args, data_folder):
    """Build {file_stem: annotation DataFrame} for every ICBHI recording.

    For the 'lungsound' class splits each table keeps the per-cycle
    Crackles/Wheezes columns; for 'diagnosis' those columns are replaced by
    the patient-level Disease label from patient_diagnosis.txt.

    Returns an empty dict for an unrecognized class_split (previously this
    raised NameError at the return statement).
    """
    # Shared by both branches (previously duplicated verbatim): unique file
    # stems that have a .txt annotation. The second set() conversion in the
    # original was redundant and has been dropped.
    filenames = sorted(glob(data_folder+'/*')) #--> 1840: 920 wav + 920 txt
    filenames = sorted(set(f.strip().split('/')[-1].split('.')[0] for f in filenames if '.txt' in f)) #--> 920

    annotation_dict = {}
    if args.class_split == 'lungsound' or args.class_split in ['lungsound_meta', 'meta']: # 4-class
        for f in filenames:
            _, ann = _extract_lungsound_annotation(f, data_folder)
            annotation_dict[f] = ann

    elif args.class_split == 'diagnosis':
        tmp = pd.read_csv(os.path.join(args.data_folder, 'icbhi_dataset/patient_diagnosis.txt'), names=['Disease'], delimiter='\t')

        for f in filenames:
            _, ann = _extract_lungsound_annotation(f, data_folder)
            # replace per-cycle flags with the patient-level diagnosis
            ann.drop(['Crackles', 'Wheezes'], axis=1, inplace=True)

            disease = tmp.loc[int(f.strip().split('_')[0]), 'Disease']
            ann['Disease'] = disease

            annotation_dict[f] = ann

    return annotation_dict
56 |
57 | """ data preprocessing """
58 |
def _get_lungsound_label(crackle, wheeze, n_cls, args):
    """Map (crackle, wheeze) flags to a class index.

    4-class (or the 'mscl' method): normal=0, crackle=1, wheeze=2, both=3.
    2-class: normal=0, any adventitious sound=1.
    """
    if n_cls == 4 or args.method in ['mscl']:
        # returns None (like the original fall-through) for non-binary flags
        four_class = {(0, 0): 0, (1, 0): 1, (0, 1): 2, (1, 1): 3}
        return four_class.get((crackle, wheeze))

    elif n_cls == 2:
        return 0 if crackle == 0 and wheeze == 0 else 1
75 |
76 |
def _get_diagnosis_label(disease, n_cls):
    """Map an ICBHI diagnosis string to a class index.

    3-class: healthy/other=0, chronic (COPD/Bronchiectasis/Asthma)=1,
    non-chronic infection (URTI/LRTI/Pneumonia/Bronchiolitis)=2.
    2-class: 'Healthy'=0, anything else=1.
    """
    if n_cls == 3:
        chronic = {'COPD', 'Bronchiectasis', 'Asthma'}
        non_chronic = {'URTI', 'LRTI', 'Pneumonia', 'Bronchiolitis'}
        if disease in chronic:
            return 1
        if disease in non_chronic:
            return 2
        return 0

    elif n_cls == 2:
        return 0 if disease == 'Healthy' else 1
91 |
92 |
def _slice_data_torchaudio(start, end, data, sample_rate):
    """Cut one segment out of a (channels, samples) waveform tensor.

    `start`/`end` are in seconds (from the SCL paper); `sample_rate` converts
    them to sample indices, each clamped to the waveform length.
    """
    total_samples = data.shape[1]
    lo = min(int(start * sample_rate), total_samples)
    hi = min(int(end * sample_rate), total_samples)
    return data[:, lo:hi]
103 |
104 |
def cut_pad_sample_torchaudio(data, args):
    """Force a waveform to exactly args.desired_length seconds.

    Longer clips are truncated to the leading samples; shorter ones are either
    zero-padded (centered, pad_types='zero') or tiled (pad_types='repeat') up
    to the target length. Tiled clips get a short linear fade-out to soften
    the discontinuity at the end.
    """
    fade_len = int(args.sample_rate / 16)  # 1/16-second fade window
    fade_out = T.Fade(fade_in_len=0, fade_out_len=fade_len, fade_shape='linear')
    target_len = args.desired_length * args.sample_rate

    if data.shape[-1] > target_len:
        # too long: keep only the leading target_len samples
        data = data[..., :target_len]
        if data.dim() == 1:
            data = data.unsqueeze(0)
        return data

    if args.pad_types == 'zero':
        # too short: center the clip inside a zero buffer
        padded = torch.zeros(1, target_len, dtype=torch.float32)
        margin = target_len - data.shape[-1]
        padded[..., margin // 2:data.shape[-1] + margin // 2] = data
        data = padded
    elif args.pad_types == 'repeat':
        # too short: tile the clip, trim to length, then fade the tail
        reps = math.ceil(target_len / data.shape[-1])
        data = data.repeat(1, reps)[..., :target_len]
        data = fade_out(data)

    return data
128 |
129 | def get_meta_text_descriptions(metadata, args, training=False):
130 | age = int(metadata[0])
131 | sex = int(metadata[1])
132 | loc = int(metadata[5])
133 | dev = int(metadata[6])
134 |
135 |
136 | if age >= 19:
137 | age_str = 'adult'
138 | age_tmp = 0
139 | else:
140 | age_str = 'pediatric'
141 | age_tmp = 1
142 |
143 | if sex == 0:
144 | sex_str = 'male'
145 | else:
146 | sex_str = 'female'
147 | #'Tc':0, 'Al':1, 'Ar':2, 'Pl':3, 'Pr':4, 'Ll':5, 'Lr':6
148 | if loc == 0:
149 | loc_str = 'trachea'
150 | elif loc == 1:
151 | loc_str = 'left anterior chest'
152 | elif loc == 2:
153 | loc_str = 'right anterior chest'
154 | elif loc == 3:
155 | loc_str = 'left posterior chest'
156 | elif loc == 4:
157 | loc_str = 'right posterior chest'
158 | elif loc == 5:
159 | loc_str = 'left lateral chest'
160 | elif loc == 6:
161 | loc_str = 'right lateral chest'
162 |
163 | #Meditron': 0, 'LittC2SE': 1, 'Litt3200': 2, 'AKGC417L': 3
164 | if dev == 0:
165 | dev_str = 'Meditron'
166 | elif dev == 1:
167 | dev_str = 'LittC2SE'
168 | elif dev == 2:
169 | dev_str = 'Litt3200'
170 | elif dev == 3:
171 | dev_str = 'AKGC417L'
172 |
173 | age_str_list = []
174 | sex_str_list = []
175 | loc_str_list = []
176 | dev_str_list = []
177 |
178 | if args.test_wrong_label:
179 | age_str_list.append('adult')
180 | age_str_list.append('pediatric')
181 |
182 | sex_str_list.append('male')
183 | sex_str_list.append('female')
184 |
185 | loc_str_list.append('trachea')
186 | loc_str_list.append('left anterior chest')
187 | loc_str_list.append('right anterior chest')
188 | loc_str_list.append('left posterior chest')
189 | loc_str_list.append('right posterior chest')
190 | loc_str_list.append('left lateral chest')
191 | loc_str_list.append('right lateral chest')
192 |
193 | dev_str_list.append('Meditron')
194 | dev_str_list.append('LittC2SE')
195 | dev_str_list.append('Litt3200')
196 | dev_str_list.append('AKGC417L')
197 |
198 | ##
199 | age_str_list.pop(age_tmp)
200 | sex_str_list.pop(sex)
201 | loc_str_list.pop(loc)
202 | dev_str_list.pop(dev)
203 |
204 | age_str = random.choice(age_str_list)
205 | sex_str = random.choice(sex_str_list)
206 | loc_str = random.choice(loc_str_list)
207 | dev_str = random.choice(dev_str_list)
208 |
209 |
210 | if training:
211 | if args.meta_mode == 'age':
212 | meta_dict = {"age": age_str, "sex": None, "loc": None, "dev": None}
213 | elif args.meta_mode == 'sex':
214 | meta_dict = {"age": None, "sex": sex_str, "loc": None, "dev": None}
215 | elif args.meta_mode == 'loc':
216 | meta_dict = {"age": None, "sex": None, "loc": loc_str, "dev": None}
217 | elif args.meta_mode == 'dev':
218 | meta_dict = {"age": None, "sex": None, "loc": None, "dev": dev_str}
219 | elif args.meta_mode == 'age_sex':
220 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": None}
221 | elif args.meta_mode == 'age_loc':
222 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": None}
223 | elif args.meta_mode == 'age_dev':
224 | meta_dict = {"age": age_str, "sex": None, "loc": None, "dev": dev_str}
225 | elif args.meta_mode == 'sex_loc':
226 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": None}
227 | elif args.meta_mode == 'sex_dev':
228 | meta_dict = {"age": None, "sex": sex_str, "loc": None, "dev": dev_str}
229 | elif args.meta_mode == 'loc_dev':
230 | meta_dict = {"age": None, "sex": None, "loc": loc_str, "dev": dev_str}
231 | elif args.meta_mode == 'age_sex_loc':
232 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": None}
233 | elif args.meta_mode == 'age_sex_dev':
234 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": dev_str}
235 | elif args.meta_mode == 'age_loc_dev':
236 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": dev_str}
237 | elif args.meta_mode == 'sex_loc_dev':
238 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": dev_str}
239 | elif args.meta_mode == 'all':
240 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": dev_str}
241 |
242 | output_str = generate_meta_description(**meta_dict)
243 | return output_str
244 |
245 | else:
246 | if args.test_drop_key:
247 | drop_key = False
248 | if random.random() < args.test_drop_key_prob:
249 | drop_key = True
250 |
251 | if args.meta_mode == 'age':
252 | meta_dict = {"age": 'unknown' if drop_key else age_str, "sex": None, "loc": None, "dev": None}
253 | elif args.meta_mode == 'sex':
254 | meta_dict = {"age": None, "sex": 'unknown' if drop_key else sex_str, "loc": None, "dev": None}
255 | elif args.meta_mode == 'loc':
256 | meta_dict = {"age": None, "sex": None, "loc": 'unknown' if drop_key else loc_str, "dev": None}
257 | elif args.meta_mode == 'dev':
258 | meta_dict = {"age": None, "sex": None, "loc": None, "dev": 'unknown' if drop_key else dev_str}
259 |
260 | elif args.meta_mode == 'age_sex':
261 | if drop_key:
262 | dice = random.randint(0, 1)
263 | if dice == 0:
264 | age_str = 'unknown'
265 | else:
266 | sex_str = 'unknown'
267 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": None}
268 |
269 | elif args.meta_mode == 'age_loc':
270 | if drop_key:
271 | dice = random.randint(0, 1)
272 | if dice == 0:
273 | age_str = 'unknown'
274 | else:
275 | loc_str = 'unknown'
276 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": None}
277 |
278 | elif args.meta_mode == 'age_dev':
279 | if drop_key:
280 | dice = random.randint(0, 1)
281 | if dice == 0:
282 | age_str = 'unknown'
283 | else:
284 | dev_str = 'unknown'
285 | meta_dict = {"age": age_str, "sex": None, "loc": None, "dev": dev_str}
286 |
287 | elif args.meta_mode == 'sex_loc':
288 | if drop_key:
289 | dice = random.randint(0, 1)
290 | if dice == 0:
291 | sex_str = 'unknown'
292 | else:
293 | loc_str = 'unknown'
294 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": None}
295 |
296 | elif args.meta_mode == 'sex_dev':
297 | if drop_key:
298 | dice = random.randint(0, 1)
299 | if dice == 0:
300 | sex_str = 'unknown'
301 | else:
302 | dev_str = 'unknown'
303 | meta_dict = {"age": None, "sex": sex_str, "loc": None, "dev": dev_str}
304 |
305 | elif args.meta_mode == 'loc_dev':
306 | if drop_key:
307 | dice = random.randint(0, 1)
308 | if dice == 0:
309 | loc_str = 'unknown'
310 | else:
311 | dev_str = 'unknown'
312 | meta_dict = {"age": None, "sex": None, "loc": loc_str, "dev": dev_str}
313 |
314 | elif args.meta_mode == 'age_sex_loc':
315 | if drop_key:
316 | dice = random.randint(0, 2)
317 | if dice == 0:
318 | age_str = 'unknown'
319 | elif dice == 1:
320 | sex_str = 'unknown'
321 | else:
322 | loc_str = 'unknown'
323 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": None}
324 |
325 | elif args.meta_mode == 'age_sex_dev':
326 | if drop_key:
327 | dice = random.randint(0, 2)
328 | if dice == 0:
329 | age_str = 'unknown'
330 | elif dice == 1:
331 | sex_str = 'unknown'
332 | else:
333 | dev_str = 'unknown'
334 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": dev_str}
335 |
336 | elif args.meta_mode == 'age_loc_dev':
337 | if drop_key:
338 | dice = random.randint(0, 2)
339 | if dice == 0:
340 | age_str = 'unknown'
341 | elif dice == 1:
342 | loc_str = 'unknown'
343 | else:
344 | dev_str = 'unknown'
345 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": dev_str}
346 |
347 | elif args.meta_mode == 'sex_loc_dev':
348 | if drop_key:
349 | dice = random.randint(0, 2)
350 | if dice == 0:
351 | sex_str = 'unknown'
352 | elif dice == 1:
353 | loc_str = 'unknown'
354 | else:
355 | dev_str = 'unknown'
356 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": dev_str}
357 |
358 | elif args.meta_mode == 'all':
359 | if drop_key:
360 | dice = random.randint(0, 3)
361 | if dice == 0:
362 | age_str = 'unknown'
363 | elif dice == 1:
364 | sex_str = 'unknown'
365 | elif dice == 2:
366 | loc_str = 'unknown'
367 | else:
368 | dev_str = 'unknown'
369 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": dev_str}
370 |
371 | elif args.test_unknown_all:
372 | return 'No description.'
373 |
374 | else:
375 | if args.meta_mode == 'age':
376 | meta_dict = {"age": age_str, "sex": None, "loc": None, "dev": None}
377 | elif args.meta_mode == 'sex':
378 | meta_dict = {"age": None, "sex": sex_str, "loc": None, "dev": None}
379 | elif args.meta_mode == 'loc':
380 | meta_dict = {"age": None, "sex": None, "loc": loc_str, "dev": None}
381 | elif args.meta_mode == 'dev':
382 | meta_dict = {"age": None, "sex": None, "loc": None, "dev": dev_str}
383 | elif args.meta_mode == 'age_sex':
384 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": None}
385 | elif args.meta_mode == 'age_loc':
386 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": None}
387 | elif args.meta_mode == 'age_dev':
388 | meta_dict = {"age": age_str, "sex": None, "loc": None, "dev": dev_str}
389 | elif args.meta_mode == 'sex_loc':
390 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": None}
391 | elif args.meta_mode == 'sex_dev':
392 | meta_dict = {"age": None, "sex": sex_str, "loc": None, "dev": dev_str}
393 | elif args.meta_mode == 'loc_dev':
394 | meta_dict = {"age": None, "sex": None, "loc": loc_str, "dev": dev_str}
395 | elif args.meta_mode == 'age_sex_loc':
396 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": None}
397 | elif args.meta_mode == 'age_sex_dev':
398 | meta_dict = {"age": age_str, "sex": sex_str, "loc": None, "dev": dev_str}
399 | elif args.meta_mode == 'age_loc_dev':
400 | meta_dict = {"age": age_str, "sex": None, "loc": loc_str, "dev": dev_str}
401 | elif args.meta_mode == 'sex_loc_dev':
402 | meta_dict = {"age": None, "sex": sex_str, "loc": loc_str, "dev": dev_str}
403 | elif args.meta_mode == 'all':
404 | meta_dict = {"age": age_str, "sex": sex_str, "loc": loc_str, "dev": dev_str}
405 |
406 | output_str = generate_meta_description(**meta_dict)
407 |
408 | if args.test_bmi:
409 | adult_bmi = float(metadata[2])
410 | child_weight = float(metadata[3])
411 | child_height = float(metadata[4])
412 |
413 | if adult_bmi == -1.0:
414 | bmi = round(child_weight / ((child_height * 0.01) ** 2), 1)
415 | else:
416 | bmi = adult_bmi
417 | bmi_sentence = ' The BMI of the patient was {}.'.format(bmi)
418 | output_str += bmi_sentence
419 |
420 | return output_str
421 |
422 |
def get_individual_cycles_torchaudio(args, recording_annotations, data_folder, filename, sample_rate, n_cls):
    """Load one ICBHI recording, slice it into annotated cycles, and pad each cycle.

    Args:
        args: run configuration; uses ``args.class_split`` ('lungsound' or 'diagnosis')
            and whatever ``cut_pad_sample_torchaudio`` reads from it.
        recording_annotations: DataFrame with one row per cycle; columns 'Start'/'End'
            (seconds) plus 'Crackles'/'Wheezes' or 'Disease' depending on the split.
        data_folder: directory containing the .wav recordings.
        filename: recording name without the '.wav' extension.
        sample_rate: target sampling rate; audio is resampled to it when needed.
        n_cls: number of target classes for the label mapping.

    Returns:
        list of (audio_tensor, label) tuples, each tensor padded/cut to a fixed length.
    """
    sample_data = []
    fpath = os.path.join(data_folder, filename+'.wav')
    data, sr = torchaudio.load(fpath)

    # resample so every recording shares a common time base
    if sr != sample_rate:
        resample = T.Resample(sr, sample_rate)
        data = resample(data)

    # short linear fade-in/out (1/16 s) to suppress clicks at the recording edges
    fade_samples_ratio = 16
    fade_samples = int(sample_rate / fade_samples_ratio)
    fade = T.Fade(fade_in_len=fade_samples, fade_out_len=fade_samples, fade_shape='linear')
    data = fade(data)

    for idx in recording_annotations.index:
        row = recording_annotations.loc[idx]

        start = row['Start']  # start time (second)
        end = row['End']      # end time (second)
        audio_chunk = _slice_data_torchaudio(start, end, data, sample_rate)

        if args.class_split == 'lungsound':
            crackles = row['Crackles']
            wheezes = row['Wheezes']
            # reuse the computed label instead of invoking the mapper a second time
            label = _get_lungsound_label(crackles, wheezes, n_cls, args)
            sample_data.append((audio_chunk, label))

        elif args.class_split == 'diagnosis':
            disease = row['Disease']
            sample_data.append((audio_chunk, _get_diagnosis_label(disease, n_cls)))

    # pad/cut every cycle to the fixed model input length (e.g. 8 seconds)
    padded_sample_data = []
    for chunk, label in sample_data:
        chunk = cut_pad_sample_torchaudio(chunk, args)
        padded_sample_data.append((chunk, label))

    return padded_sample_data
463 |
464 |
def generate_fbank(args, audio, sample_rate, n_mels=128):
    """
    use torchaudio library to convert mel fbank for AST model
    """
    assert sample_rate == 16000, 'input audio sampling rate must be 16kHz'
    feats = torchaudio.compliance.kaldi.fbank(audio, htk_compat=True, sample_frequency=sample_rate, use_energy=False, window_type='hanning', num_mel_bins=n_mels, dither=0.0, frame_shift=10)

    # AST normalizes with fixed dataset statistics; other models per-sample
    if args.model in ['ast']:
        mu, sigma = -4.2677393, 4.5689974
    else:
        mu, sigma = feats.mean(), feats.std()
    normalized = (feats - mu) / (sigma * 2)

    # add a trailing channel axis and hand back a numpy array
    return normalized.unsqueeze(-1).numpy()
479 |
480 |
481 | # ==========================================================================
482 |
483 |
484 | # ==========================================================================
485 | """ evaluation metric """
def get_score(hits, counts, pflag=False):
    """Return specificity (normal acc), sensitivity (abnormal acc), and their mean.

    ``hits``/``counts`` are per-class correct counts and totals; index 0 is the
    normal class, all remaining indices are abnormal classes.
    """
    eps = 1e-10  # guard against empty classes
    # specificity: accuracy on the normal class
    sp = hits[0] / (counts[0] + eps) * 100
    # sensitivity: pooled accuracy over every abnormal class
    se = sum(hits[1:]) / (sum(counts[1:]) + eps) * 100
    sc = (sp + se) / 2.0

    if pflag:
        print("S_p: {}, S_e: {}, Score: {}".format(sp, se, sc))

    return sp, se, sc
498 | # ==========================================================================
499 |
--------------------------------------------------------------------------------
/util/meta_description.py:
--------------------------------------------------------------------------------
1 | from itertools import product
2 |
3 | import pandas as pd
4 |
5 |
def generate_meta_description(age=None, sex=None, loc=None, dev=None):
    """Render a natural-language sentence from patient/recording metadata.

    Args:
        age: 'adult', 'pediatric', 'unknown', or None (field omitted).
        sex: 'male', 'female', 'unknown', or None (field omitted).
        loc: chest-location phrase, 'unknown', or None (field omitted).
        dev: recording-device name, 'unknown', or None (field omitted).

    Returns:
        One sentence combining whichever fields were provided.
    """
    # set up the prefix
    if loc or dev:
        output_sent = "This sound was recorded"
    else:
        output_sent = "This patient is"

    # prepare the main descriptions; None means "field omitted"
    # (explicit sentinels replace the previous fragile `"name" in locals()` checks)
    s_loc = None
    if loc:
        region = "an unknown region" if loc == "unknown" else f"the {loc}"
        s_loc = f"from {region}"

    s_age_sex = None
    if age and sex:
        if age == "unknown":
            s_age_sex = f"a {sex} patient of unknown age"
        elif sex == "unknown":
            s_age_sex = "an " if age == "adult" else "a "
            s_age_sex += f"{age} patient of unknown sex"
        elif age == "pediatric":
            s_age_sex = f"a {sex} {age} patient"
        else:
            s_age_sex = f"an {age} {sex} patient"
    elif age:
        if age == "unknown":
            s_age_sex = "a patient of unknown age"
        else:
            s_age_sex = "an " if age == "adult" else "a "
            s_age_sex += f"{age} patient"
    elif sex:
        if sex == "unknown":
            s_age_sex = "a patient of unknown sex"
        else:
            s_age_sex = f"a {sex} patient"

    s_dev = None
    if dev:
        if dev == "unknown":
            s_dev = "an unknown device"
        else:
            s_dev_prefix = "an" if dev == "AKGC417L" else "a"
            s_dev_suffix = "microphone" if dev == "AKGC417L" else "stethoscope"
            s_dev = f"{s_dev_prefix} {dev} {s_dev_suffix}"

    # combine the descriptions
    if s_loc is not None:
        output_sent += f" {s_loc}"
        if s_age_sex is not None:
            # avoid "... region of a patient of unknown age"
            s_age_sex = s_age_sex.replace("of", "with")

    if s_age_sex is not None:
        if s_loc is None and s_dev is None:
            output_sent += f" {s_age_sex}"
        elif s_loc is not None:
            output_sent += f" of {s_age_sex}"
        else:
            output_sent += f" from {s_age_sex}"

    if s_age_sex is not None and s_dev is not None:
        output_sent += f", using {s_dev}"
    elif s_dev is not None:
        output_sent += f" with {s_dev}"

    output_sent += "."

    return output_sent
70 |
71 |
def generate_clap_meta_description(
    age=None, sex=None, loc=None, dev=None, sex_debiasing=True
):
    """Render a CLAP-style metadata sentence ("<patient> had <their> body sounds ...").

    Args:
        age: 'adult', 'pediatric', 'unknown', or None (field omitted).
        sex: 'male', 'female', 'unknown', or None (field omitted).
        loc: chest-location phrase, 'unknown', or None (field omitted).
        dev: recording-device name, 'unknown', or None (field omitted).
        sex_debiasing: when True, always use the pronoun "their" instead of
            "his"/"her".

    Returns:
        One sentence combining whichever fields were provided.
    """
    # prep the descriptions about the patient (age & sex)
    if age and sex:
        if age == "unknown":
            s_patient = f"A {sex} patient of unknown age"
        elif sex == "unknown":
            s_patient = "An " if age == "adult" else "A "
            s_patient += f"{age} patient of unknown sex"
        elif age == "pediatric":
            s_patient = f"A {sex} {age} patient"
        else:
            s_patient = f"An {age} {sex} patient"
    elif age:
        if age == "unknown":
            s_patient = "A patient of unknown age"
        else:
            s_patient = "An " if age == "adult" else "A "
            s_patient += f"{age} patient"
    elif sex:
        if sex == "unknown":
            s_patient = "A patient of unknown sex"
        else:
            s_patient = f"A {sex} patient"
    else:
        s_patient = "A patient"

    # prep descriptions about the location
    if loc:
        s_loc = "an unknown region" if loc == "unknown" else f"the {loc}"
        s_loc = f"body sounds recorded from {s_loc}"
    else:
        s_loc = "body sounds recorded"

    # choose the possessive pronoun
    if sex_debiasing:
        s_loc = f"their {s_loc}"
    else:
        if sex == "male":
            s_loc = f"his {s_loc}"
        elif sex == "female":
            s_loc = f"her {s_loc}"
        else:
            s_loc = f"their {s_loc}"

    # prep descriptions about the device; None means "field omitted"
    # (explicit sentinel replaces the previous fragile `"s_dev" in locals()` check)
    s_dev = None
    if dev:
        if dev == "unknown":
            s_dev = "an unknown device"
        else:
            s_dev_prefix = "an" if dev == "AKGC417L" else "a"
            s_dev_suffix = "microphone" if dev == "AKGC417L" else "stethoscope"
            s_dev = f"{s_dev_prefix} {dev} {s_dev_suffix}"

    # combine the descriptions
    output_sent = f"{s_patient} had {s_loc}"

    if s_dev is not None:
        if loc:
            output_sent += f", with {s_dev}"
        else:
            output_sent += f" with {s_dev}"

    output_sent += "."

    return output_sent
140 |
141 | '''
142 | # set values for each variable
143 | age_values = ["adult", "pediatric", "unknown", None]
144 | sex_values = ["male", "female", "unknown", None]
145 | loc_values = [
146 | "trachea",
147 | "left anterior chest",
148 | "right anterior chest",
149 | "left posterior chest",
150 | "right posterior chest",
151 | "left lateral chest",
152 | "right lateral chest",
153 | "unknown",
154 | None,
155 | ]
156 | dev_values = ["Meditron", "LittC2SE", "Litt3200", "AKGC417L", "unknown", None]
157 |
158 | # generate all possible combinations
159 | meta_combinations = list(product(age_values, sex_values, loc_values, dev_values))
160 | # remove combinations with more than 1 "unknown"
161 | meta_combinations = [comb for comb in meta_combinations if comb.count("unknown") <= 1]
162 | meta_combinations = [
163 | comb for comb in meta_combinations if comb != (None, None, None, None)
164 | ]
165 |
166 | # # generate all meta descriptions
167 | # meta_descriptions = []
168 | # for comb in meta_combinations:
169 | # meta_dict = {"age": comb[0], "sex": comb[1], "loc": comb[2], "dev": comb[3]}
170 | # meta_description = generate_meta_description(**meta_dict)
171 | # meta_dict["meta_description"] = meta_description
172 | # meta_descriptions.append(meta_dict)
173 |
174 | # meta_descriptions_df = pd.DataFrame(meta_descriptions)
175 | # meta_descriptions_df.to_excel("meta_descriptions.xlsx", index=False)
176 |
177 | # generate all meta descriptions
178 | output_descriptions = []
179 | for comb in meta_combinations:
180 | meta_dict = {"age": comb[0], "sex": comb[1], "loc": comb[2], "dev": comb[3]}
181 | output_dict = dict(meta_dict)
182 |
183 | # w/o sex debiasing (w/ "his", "her", "their")
184 | meta_description_wo_debiasing = generate_clap_meta_description(
185 | **meta_dict, sex_debiasing=False
186 | )
187 | output_dict["meta_description_wo_debiasing"] = meta_description_wo_debiasing
188 |
189 | # w/ sex debiasing (only w/ "their")
190 | meta_description = generate_clap_meta_description(**meta_dict)
191 | output_dict["meta_description"] = meta_description
192 |
193 | output_descriptions.append(output_dict)
194 |
195 | output_descriptions_df = pd.DataFrame(output_descriptions)
196 | output_descriptions_df.to_excel("meta_descriptions_clap_style.xlsx", index=False)
197 | '''
198 |
--------------------------------------------------------------------------------
/util/misc.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import math
4 | import numpy as np
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 |
10 |
11 | """ train util """
def adjust_learning_rate(args, optimizer, epoch):
    """Set the learning rate for this epoch using a cosine or step schedule."""
    base_lr = args.learning_rate
    if args.cosine:
        # cosine annealing from base_lr down to base_lr * decay_rate^3
        floor = base_lr * (args.lr_decay_rate ** 3)
        cos_factor = (1 + math.cos(math.pi * epoch / args.epochs)) / 2
        lr = floor + (base_lr - floor) * cos_factor
    else:
        # step decay: multiply once per milestone already passed
        passed = np.sum(epoch > np.asarray(args.lr_decay_epochs))
        lr = base_lr * (args.lr_decay_rate ** passed) if passed > 0 else base_lr

    for group in optimizer.param_groups:
        group['lr'] = lr
25 |
26 |
def adjust_lr_wd(args, optimizer, epoch):
    """Update the learning rate (cosine or step) and, optionally, weight decay."""
    base_lr = args.learning_rate
    if args.cosine:
        # cosine annealing from base_lr down to base_lr * decay_rate^3
        floor = base_lr * (args.lr_decay_rate ** 3)
        lr = floor + (base_lr - floor) * (1 + math.cos(math.pi * epoch / args.epochs)) / 2
    else:
        # step decay: multiply once per milestone already passed
        passed = np.sum(epoch > np.asarray(args.lr_decay_epochs))
        lr = base_lr * (args.lr_decay_rate ** passed) if passed > 0 else base_lr

    wd = args.weight_decay
    if args.wd_scheduler:
        # cosine schedule from weight_decay down to weight_decay_end
        wd_floor = args.weight_decay_end
        wd = wd_floor + (wd - wd_floor) * (1 + math.cos(math.pi * epoch / args.epochs)) / 2

    for idx, group in enumerate(optimizer.param_groups):
        group['lr'] = lr
        if idx == 0:  # in case of DINO and ViT, only wd for regularized params
            group['weight_decay'] = wd
48 |
49 |
def warmup_learning_rate(args, epoch, batch_id, total_batches, optimizer):
    """Linearly ramp the learning rate during the first ``args.warm_epochs`` epochs."""
    if not (args.warm and epoch <= args.warm_epochs):
        return
    # fraction of the whole warmup period completed so far
    progress = (batch_id + (epoch - 1) * total_batches) / \
        (args.warm_epochs * total_batches)
    lr = args.warmup_from + progress * (args.warmup_to - args.warmup_from)

    for group in optimizer.param_groups:
        group['lr'] = lr
58 |
59 |
def set_optimizer(args, optim_params=None, model=None):
    """Build a torch optimizer from the run configuration.

    Args:
        args: needs ``optimizer`` ('sgd' or 'adam'), ``learning_rate``,
            ``weight_decay``, and ``momentum`` (SGD only).
        optim_params: iterable of parameters / param groups; ignored when
            ``model`` is given.
        model: if provided, its ``parameters()`` are optimized instead.

    Returns:
        A configured ``torch.optim.Optimizer``.

    Raises:
        NotImplementedError: if ``args.optimizer`` names an unsupported optimizer.
    """
    if model is not None:
        optim_params = model.parameters()

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(optim_params,
                              lr=args.learning_rate,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(optim_params,
                               lr=args.learning_rate,
                               weight_decay=args.weight_decay)
    else:
        # `raise NotImplemented` raised a TypeError (NotImplemented is a constant,
        # not an exception class); raise the proper exception type instead.
        raise NotImplementedError('unsupported optimizer: {}'.format(args.optimizer))

    return optimizer
77 |
78 |
class MA:
    """Exponential moving average with smoothing factor ``beta``."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        """Blend ``new`` into ``old``; seed with ``new`` when there is no history."""
        if old is None:
            return new
        return self.beta * old + new * (1 - self.beta)
88 |
89 |
def update_moving_average(ma_beta, current_model, ma_ckpt):
    """EMA-blend ``current_model``'s weights with the history stored in ``ma_ckpt``.

    Loads the blended state dict back into ``current_model`` and returns it.
    """
    averager = MA(ma_beta)
    blended = {}
    current_items = current_model.state_dict().items()
    for (key, cur_params), (ma_key, ma_params) in zip(current_items, ma_ckpt.items()):
        # both state dicts must enumerate parameters in the same order
        assert key == ma_key
        blended[key] = averager.update_average(ma_params.data, cur_params.data)

    current_model.load_state_dict(blended)
    return current_model
100 |
101 |
102 | """ eval util """
class AverageMeter(object):
    """ Computes and stores the average and current value """

    def __init__(self):
        self.reset()

    def reset(self):
        # val holds the latest sample; sum/count accumulate the running average
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
119 |
120 |
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        num_classes = output.shape[1]
        usable_ks = [k for k in topk if k <= num_classes]

        bsz = target.size(0)
        _, pred = output.topk(max(usable_ks), 1, True, True)
        pred = pred.t()
        # hits[i, j] is True when target[j] appears at rank i for sample j
        hits = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            if k not in usable_ks:
                # k exceeds the number of classes: report zero accuracy
                res.append(torch.tensor([0.]))
                continue
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(n_correct.mul_(100.0 / bsz))

        return res, bsz
142 |
def save_model(model, optimizer, args, epoch, save_file, classifier):
    """Checkpoint model/classifier weights, optimizer state, args and epoch to disk."""
    print('==> Saving...')
    checkpoint = {
        'args': args,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'classifier': classifier.state_dict()
    }
    torch.save(checkpoint, save_file)
    # drop references to the (potentially large) state dicts right away
    del checkpoint
155 |
156 |
def update_json(exp_name, acc, path='./save/results.json'):
    """Record the (sp, se, score) metrics for ``exp_name`` in a JSON results file.

    Args:
        exp_name: experiment identifier used as the JSON key.
        acc: iterable of metrics (sp, se, score); values are rounded to 2 decimals.
        path: results file; existing entries are preserved, the key is overwritten.
    """
    acc = [round(a, 2) for a in acc]

    # load prior results when the file exists; otherwise start a fresh dict
    # (the previous code wrote an empty file first and immediately re-read it)
    if os.path.exists(path):
        with open(path, 'r', encoding="UTF-8") as f:
            result_dict = json.load(f)
    else:
        result_dict = {}
    result_dict[exp_name] = acc

    # write with an explicit UTF-8 encoding, matching the read side
    with open(path, 'w', encoding="UTF-8") as f:
        json.dump(result_dict, f)

    print('best Score: {} (sp, se, sc)'.format(acc))
    print('results updated to %s' % path)
--------------------------------------------------------------------------------